mirror of https://github.com/logos-storage/logos-storage-nim.git
synced 2026-02-04 13:43:12 +00:00
refactor!: remove unused modules (#1362)
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: gmega <giuliano.mega@gmail.com>
This commit is contained in:
parent e894fb03fa
commit 2a1a548341
34
.github/workflows/ci-reusable.yml
vendored
@ -48,36 +48,7 @@ jobs:
        if: matrix.tests == 'unittest' || matrix.tests == 'all'
        run: make -j${ncpu} test

      - name: Setup Node.js
        if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
        uses: actions/setup-node@v4
        with:
          node-version: 22

      - name: Install Ethereum node dependencies
        if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
        working-directory: vendor/logos-storage-contracts-eth
        env:
          MSYS2_PATH_TYPE: inherit
        run: |
          npm ci

      - name: Run Ethereum node with Logos Storage contracts
        if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
        working-directory: vendor/logos-storage-contracts-eth
        env:
          MSYS2_PATH_TYPE: inherit
        run: |
          npm start &
          # Wait for the contracts to be deployed
          sleep 5

      ## Part 2 Tests ##
      - name: Contract tests
        if: matrix.tests == 'contract' || matrix.tests == 'all'
        run: make -j${ncpu} testContracts

      ## Part 3 Tests ##
      - name: Integration tests
        if: matrix.tests == 'integration' || matrix.tests == 'all'
        env:
@ -92,11 +63,6 @@ jobs:
          path: tests/integration/logs/
          retention-days: 1

      ## Part 4 Tools ##
      - name: Tools tests
        if: matrix.tests == 'tools' || matrix.tests == 'all'
        run: make -j${ncpu} testTools

  status:
    if: always()
    needs: [build]
16
.github/workflows/docker.yml
vendored
@ -19,26 +19,10 @@ on:
  workflow_dispatch:

jobs:
  get-contracts-hash:
    runs-on: ubuntu-latest
    outputs:
      hash: ${{ steps.get-hash.outputs.hash }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true

      - name: Get submodule short hash
        id: get-hash
        run: |
          hash=$(git rev-parse --short HEAD:vendor/logos-storage-contracts-eth)
          echo "hash=$hash" >> $GITHUB_OUTPUT
  build-and-push:
    name: Build and Push
    uses: ./.github/workflows/docker-reusable.yml
    needs: get-contracts-hash
    with:
      tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
      tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
      contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}"
    secrets: inherit
23
.gitmodules
vendored
@ -156,9 +156,6 @@
	url = https://github.com/status-im/nim-taskpools.git
	ignore = untracked
	branch = stable
[submodule "vendor/nim-leopard"]
	path = vendor/nim-leopard
	url = https://github.com/status-im/nim-leopard.git
[submodule "vendor/logos-storage-nim-dht"]
	path = vendor/logos-storage-nim-dht
	url = https://github.com/logos-storage/logos-storage-nim-dht.git
@ -173,11 +170,6 @@
[submodule "vendor/nim-eth"]
	path = vendor/nim-eth
	url = https://github.com/status-im/nim-eth
[submodule "vendor/logos-storage-contracts-eth"]
	path = vendor/logos-storage-contracts-eth
	url = https://github.com/logos-storage/logos-storage-contracts-eth.git
	ignore = untracked
	branch = master
[submodule "vendor/nim-protobuf-serialization"]
	path = vendor/nim-protobuf-serialization
	url = https://github.com/status-im/nim-protobuf-serialization
@ -190,24 +182,9 @@
[submodule "vendor/npeg"]
	path = vendor/npeg
	url = https://github.com/zevv/npeg
[submodule "vendor/nim-poseidon2"]
	path = vendor/nim-poseidon2
	url = https://github.com/logos-storage/nim-poseidon2.git
	ignore = untracked
	branch = master
[submodule "vendor/constantine"]
	path = vendor/constantine
	url = https://github.com/mratsim/constantine.git
[submodule "vendor/nim-circom-compat"]
	path = vendor/nim-circom-compat
	url = https://github.com/logos-storage/nim-circom-compat.git
	ignore = untracked
	branch = master
[submodule "vendor/logos-storage-proofs-circuits"]
	path = vendor/logos-storage-proofs-circuits
	url = https://github.com/logos-storage/logos-storage-proofs-circuits.git
	ignore = untracked
	branch = master
[submodule "vendor/nim-serde"]
	path = vendor/nim-serde
	url = https://github.com/logos-storage/nim-serde.git
42
Makefile
@ -98,11 +98,6 @@ all: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims

# Build tools/cirdl
cirdl: | deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim toolsCirdl $(NIM_PARAMS) build.nims

# must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk

@ -135,25 +130,6 @@ test: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims

# Builds and runs the smart contract tests
testContracts: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims

TEST_PARAMS :=
ifdef DEBUG
TEST_PARAMS := $(TEST_PARAMS) -d:DebugTestHarness=$(DEBUG)
TEST_PARAMS := $(TEST_PARAMS) -d:NoCodexLogFilters=$(DEBUG)
TEST_PARAMS := $(TEST_PARAMS) -d:ShowContinuousStatusUpdates=$(DEBUG)
TEST_PARAMS := $(TEST_PARAMS) -d:DebugHardhat=$(DEBUG)
endif
ifdef TEST_TIMEOUT
TEST_PARAMS := $(TEST_PARAMS) -d:TestTimeout=$(TEST_TIMEOUT)
endif
ifdef PARALLEL
TEST_PARAMS := $(TEST_PARAMS) -d:EnableParallelTests=$(PARALLEL)
endif

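For reference, a hypothetical invocation that exercises these switches (the `DEBUG`, `TEST_TIMEOUT`, and `PARALLEL` variables are the ones defined above; the target choice and values are illustrative):

```sh
# Assumed usage: enable harness debugging, set a 60s per-test timeout,
# and turn on parallel test execution for the integration suite.
make -j4 testIntegration DEBUG=1 TEST_TIMEOUT=60 PARALLEL=1
```
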
# Builds and runs the integration tests
testIntegration: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
@ -164,16 +140,6 @@ testAll: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims

# Builds and runs Taiko L2 tests
testTaiko: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim testTaiko $(NIM_PARAMS) build.nims

# Builds and runs tool tests
testTools: | cirdl
	echo -e $(BUILD_MSG) "build/$@" && \
		$(ENV_SCRIPT) nim testTools $(NIM_PARAMS) build.nims

# nim-libbacktrace
LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0
libbacktrace:
@ -274,15 +240,15 @@ libstorage:

ifeq ($(STATIC), 1)
	echo -e $(BUILD_MSG) "build/$@.a" && \
		$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
		$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) codex.nims
else ifeq ($(detected_OS),Windows)
	echo -e $(BUILD_MSG) "build/$@.dll" && \
		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-G \\\"MSYS Makefiles\\\" -DCMAKE_BUILD_TYPE=Release\"" codex.nims
		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) codex.nims
else ifeq ($(detected_OS),macOS)
	echo -e $(BUILD_MSG) "build/$@.dylib" && \
		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) codex.nims
else
	echo -e $(BUILD_MSG) "build/$@.so" && \
		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
		$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) codex.nims
endif
endif # "variables.mk" was not included

2
benchmarks/.gitignore
vendored
@ -1,2 +0,0 @@
ceremony
circuit_bench_*
@ -1,33 +0,0 @@

## Benchmark Runner

Modify the `runAllBenchmarks` proc in `run_benchmarks.nim` to set the desired parameters and variations.

Then run it:

```sh
nim c -r run_benchmarks
```

By default, all circuit files for each combination of circuit args will be generated in a unique folder named like:
logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3

Generating the circuit files often takes longer than running the benchmarks, so caching the results allows re-running a benchmark as needed.

You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchmarks` to suit your needs; a sketch of such a modification follows below. See `create_circuits.nim` for their definitions.

The runner executes all commands relative to the `logos-storage-nim` repo. This simplifies finding the correct circuit include paths, etc. `CircuitEnv` sets all of this.

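As a concrete illustration of such an edit, here is a minimal sketch based on the `runAllBenchmarks` body removed later in this diff; the widened sample sweep is the hypothetical change, while the remaining field values are the ones shown there:

```nim
# Hypothetical variation inside runAllBenchmarks (run_benchmarks.nim):
# sweep nsamples over 1..10 instead of 1..3, other circuit args fixed.
var env = CircuitEnv.default()
env.check()

var args = CircuitArgs(
  depth: 32, maxslots: 256, cellsize: 2048, blocksize: 65536,
  nsamples: 1, # overwritten by the loop below
  entropy: 1234567, seed: 12345, nslots: 11, index: 3, ncells: 512,
)
for i in 1 .. 10:
  args.nsamples = i
  runBenchmark(args, env, benchmarkLoops = 5)

printBenchMarkSummaries()
```
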
## Logos Storage Ark Circom CLI

Runs Logos Storage's prover setup with Ark / Circom.

Compile:
```sh
nim c codex_ark_prover_cli.nim
```

Run to see usage:
```sh
./codex_ark_prover_cli.nim -h
```
@ -1,15 +0,0 @@
|
||||
--path:
|
||||
".."
|
||||
--path:
|
||||
"../tests"
|
||||
--threads:
|
||||
on
|
||||
--tlsEmulation:
|
||||
off
|
||||
--d:
|
||||
release
|
||||
|
||||
# when not defined(chronicles_log_level):
|
||||
# --define:"chronicles_log_level:NONE" # compile all log statements
|
||||
# --define:"chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime
|
||||
# --"import":"logging" # ensure that logging is ignored at runtime
|
||||
@ -1,187 +0,0 @@
import std/[hashes, json, strutils, strformat, os, osproc, uri]

import ./utils

type
  CircuitEnv* = object
    nimCircuitCli*: string
    circuitDirIncludes*: string
    ptauPath*: string
    ptauUrl*: Uri
    codexProjDir*: string

  CircuitArgs* = object
    depth*: int
    maxslots*: int
    cellsize*: int
    blocksize*: int
    nsamples*: int
    entropy*: int
    seed*: int
    nslots*: int
    ncells*: int
    index*: int

proc findCodexProjectDir(): string =
  ## find codex proj dir -- assumes this script is in codex/benchmarks
  result = currentSourcePath().parentDir.parentDir

func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
  let codexDir = findCodexProjectDir()
  result.nimCircuitCli =
    codexDir / "vendor" / "logos-storage-proofs-circuits" / "reference" / "nim" /
    "proof_input" / "cli"
  result.circuitDirIncludes =
    codexDir / "vendor" / "logos-storage-proofs-circuits" / "circuit"
  result.ptauPath =
    codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
  result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
  result.codexProjDir = codexDir

proc check*(env: var CircuitEnv) =
  ## check that the CWD of script is in the codex parent
  let codexProjDir = findCodexProjectDir()
  echo "\n\nFound project dir: ", codexProjDir

  let snarkjs = findExe("snarkjs")
  if snarkjs == "":
    echo dedent"""
    ERROR: must install snarkjs first

      npm install -g snarkjs@latest
    """

  let circom = findExe("circom")
  if circom == "":
    echo dedent"""
    ERROR: must install circom first

      git clone https://github.com/iden3/circom.git
      cargo install --path circom
    """

  if snarkjs == "" or circom == "":
    quit 2

  echo "Found SnarkJS: ", snarkjs
  echo "Found Circom: ", circom

  if not env.nimCircuitCli.fileExists:
    echo "Nim Circuit reference cli not found: ", env.nimCircuitCli
    echo "Building Circuit reference cli...\n"
    withDir env.nimCircuitCli.parentDir:
      runit "nimble build -d:release --styleCheck:off cli"
    echo "CWD: ", getCurrentDir()
    assert env.nimCircuitCli.fileExists()

  echo "Found NimCircuitCli: ", env.nimCircuitCli
  echo "Found Circuit Path: ", env.circuitDirIncludes
  echo "Found PTAU file: ", env.ptauPath

proc downloadPtau*(ptauPath: string, ptauUrl: Uri) =
  ## download ptau file using curl if needed
  if not ptauPath.fileExists:
    echo "Ceremony file not found, downloading..."
    createDir ptauPath.parentDir
    withDir ptauPath.parentDir:
      runit fmt"curl --output '{ptauPath}' '{$ptauUrl}/{ptauPath.splitPath().tail}'"
  else:
    echo "Found PTAU file at: ", ptauPath

proc getCircuitBenchStr*(args: CircuitArgs): string =
  for f, v in fieldPairs(args):
    result &= "_" & f & $v

proc getCircuitBenchPath*(args: CircuitArgs, env: CircuitEnv): string =
  ## generate folder name for unique circuit args
  result = env.codexProjDir / "benchmarks/circuit_bench" & getCircuitBenchStr(args)

proc generateCircomAndSamples*(args: CircuitArgs, env: CircuitEnv, name: string) =
  ## run nim circuit and sample generator
  var cliCmd = env.nimCircuitCli
  for f, v in fieldPairs(args):
    cliCmd &= " --" & f & "=" & $v

  if not "input.json".fileExists:
    echo "Generating Circom Files..."
    runit fmt"{cliCmd} -v --circom={name}.circom --output=input.json"

proc createCircuit*(
    args: CircuitArgs,
    env: CircuitEnv,
    name = "proof_main",
    circBenchDir = getCircuitBenchPath(args, env),
    someEntropy = "some_entropy_75289v3b7rcawcsyiur",
    doGenerateWitness = false,
): tuple[dir: string, name: string] =
  ## Generates all the files needed to run a proof circuit. Downloads the PTAU file if needed.
  ##
  ## Circuit files are generated as needed and placed in `circBenchDir`,
  ## which defaults to a folder named after the given CircuitArgs, e.g.:
  ## `logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
  ##
  let circdir = circBenchDir

  downloadPtau env.ptauPath, env.ptauUrl

  echo "Creating circuit dir: ", circdir
  createDir circdir
  withDir circdir:
    writeFile("circuit_params.json", pretty(%*args))
    let
      inputs = circdir / "input.json"
      zkey = circdir / fmt"{name}.zkey"
      wasm = circdir / fmt"{name}.wasm"
      r1cs = circdir / fmt"{name}.r1cs"
      wtns = circdir / fmt"{name}.wtns"

    generateCircomAndSamples(args, env, name)

    if not wasm.fileExists or not r1cs.fileExists:
      runit fmt"circom --r1cs --wasm --O2 -l{env.circuitDirIncludes} {name}.circom"
      moveFile fmt"{name}_js" / fmt"{name}.wasm", fmt"{name}.wasm"
    echo "Found wasm: ", wasm
    echo "Found r1cs: ", r1cs

    if not zkey.fileExists:
      echo "ZKey not found, generating..."
      putEnv "NODE_OPTIONS", "--max-old-space-size=8192"
      if not fmt"{name}_0000.zkey".fileExists:
        runit fmt"snarkjs groth16 setup {r1cs} {env.ptauPath} {name}_0000.zkey"
        echo fmt"Generated {name}_0000.zkey"

      let cmd =
        fmt"snarkjs zkey contribute {name}_0000.zkey {name}_0001.zkey --name='1st Contributor Name'"
      echo "CMD: ", cmd
      let cmdRes = execCmdEx(cmd, options = {}, input = someEntropy & "\n")
      assert cmdRes.exitCode == 0

      moveFile fmt"{name}_0001.zkey", fmt"{name}.zkey"
      removeFile fmt"{name}_0000.zkey"

    if not wtns.fileExists and doGenerateWitness:
      runit fmt"node generate_witness.js {wtns} ../input.json ../witness.wtns"

  return (circdir, name)

when isMainModule:
  echo "findCodexProjectDir: ", findCodexProjectDir()
  ## test run creating a circuit
  var env = CircuitEnv.default()
  env.check()

  let args = CircuitArgs(
    depth: 32, # maximum depth of the slot tree
    maxslots: 256, # maximum number of slots
    cellsize: 2048, # cell size in bytes
    blocksize: 65536, # block size in bytes
    nsamples: 5, # number of samples to prove
    entropy: 1234567, # external randomness
    seed: 12345, # seed for creating fake data
    nslots: 11, # number of slots in the dataset
    index: 3, # which slot we prove (0..NSLOTS-1)
    ncells: 512, # number of cells in this slot
  )
  let benchenv = createCircuit(args, env)
  echo "\nBench dir:\n", benchenv
@ -1,105 +0,0 @@
import std/[sequtils, strformat, os, options, importutils]
import std/[times, os, strutils, terminal]

import pkg/questionable
import pkg/questionable/results
import pkg/datastore

import pkg/codex/[rng, stores, merkletree, codextypes, slots]
import pkg/codex/utils/[json, poseidon2digest]
import pkg/codex/slots/[builder, sampler/utils, backends/helpers]
import pkg/constantine/math/[arithmetic, io/io_bigints, io/io_fields]

import ./utils
import ./create_circuits

type CircuitFiles* = object
  r1cs*: string
  wasm*: string
  zkey*: string
  inputs*: string

proc runArkCircom(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) =
  echo "Loading sample proof..."
  var
    inputData = files.inputs.readFile()
    inputJson = !JsonNode.parse(inputData)
    proofInputs = Poseidon2Hash.jsonToProofInput(inputJson)
    circom = CircomCompat.init(
      files.r1cs,
      files.wasm,
      files.zkey,
      slotDepth = args.depth,
      numSamples = args.nsamples,
    )
  defer:
    circom.release() # this comes from the rust FFI

  echo "Sample proof loaded..."
  echo "Proving..."

  let nameArgs = getCircuitBenchStr(args)
  var proof: CircomProof
  benchmark fmt"prover-{nameArgs}", benchmarkLoops:
    proof = circom.prove(proofInputs).tryGet

  var verRes: bool
  benchmark fmt"verify-{nameArgs}", benchmarkLoops:
    verRes = circom.verify(proof, proofInputs).tryGet
  echo "verify result: ", verRes

proc runRapidSnark(args: CircuitArgs, files: CircuitFiles, benchmarkLoops: int) =
  # time rapidsnark ${CIRCUIT_MAIN}.zkey witness.wtns proof.json public.json

  echo "generating the witness..."
  ## TODO

proc runBenchmark(args: CircuitArgs, env: CircuitEnv, benchmarkLoops: int) =
  ## execute benchmarks given a set of args
  ## will create a folder in `benchmarks/circuit_bench_$(args)`
  ##

  let env = createCircuit(args, env)

  ## TODO: copy over testcircomcompat proving
  let files = CircuitFiles(
    r1cs: env.dir / fmt"{env.name}.r1cs",
    wasm: env.dir / fmt"{env.name}.wasm",
    zkey: env.dir / fmt"{env.name}.zkey",
    inputs: env.dir / fmt"input.json",
  )

  runArkCircom(args, files, benchmarkLoops)

proc runAllBenchmarks*() =
  echo "Running benchmark"
  # setup()
  var env = CircuitEnv.default()
  env.check()

  var args = CircuitArgs(
    depth: 32, # maximum depth of the slot tree
    maxslots: 256, # maximum number of slots
    cellsize: 2048, # cell size in bytes
    blocksize: 65536, # block size in bytes
    nsamples: 1, # number of samples to prove
    entropy: 1234567, # external randomness
    seed: 12345, # seed for creating fake data
    nslots: 11, # number of slots in the dataset
    index: 3, # which slot we prove (0..NSLOTS-1)
    ncells: 512, # number of cells in this slot
  )

  let
    numberSamples = 3
    benchmarkLoops = 5

  for i in 1 .. numberSamples:
    args.nsamples = i
    stdout.styledWriteLine(fgYellow, "\nbenchmarking args: ", $args)
    runBenchmark(args, env, benchmarkLoops)

  printBenchMarkSummaries()

when isMainModule:
  runAllBenchmarks()
@ -1,75 +0,0 @@
import std/tables

template withDir*(dir: string, blk: untyped) =
  ## set working dir for duration of blk
  let prev = getCurrentDir()
  try:
    setCurrentDir(dir)
    `blk`
  finally:
    setCurrentDir(prev)

template runit*(cmd: string) =
  ## run shell commands and verify it runs without an error code
  echo "RUNNING: ", cmd
  let cmdRes = execShellCmd(cmd)
  echo "STATUS: ", cmdRes
  assert cmdRes == 0

var benchRuns* = newTable[string, tuple[avgTimeSec: float, count: int]]()

func avg(vals: openArray[float]): float =
  for v in vals:
    result += v / vals.len().toFloat()

template benchmark*(name: untyped, count: int, blk: untyped) =
  let benchmarkName: string = name
  ## simple benchmarking of a block of code
  var runs = newSeqOfCap[float](count)
  for i in 1 .. count:
    block:
      let t0 = epochTime()
      `blk`
      let elapsed = epochTime() - t0
      runs.add elapsed

  var elapsedStr = ""
  for v in runs:
    elapsedStr &= ", " & v.formatFloat(format = ffDecimal, precision = 3)
  stdout.styledWriteLine(
    fgGreen, "CPU Time [", benchmarkName, "] ", "avg(", $count, "): ", elapsedStr, " s"
  )
  benchRuns[benchmarkName] = (runs.avg(), count)

template printBenchMarkSummaries*(printRegular = true, printTsv = true) =
  if printRegular:
    echo ""
    for k, v in benchRuns:
      echo "Benchmark average run ", v.avgTimeSec, " for ", v.count, " runs ", "for ", k

  if printTsv:
    echo ""
    echo "name", "\t", "avgTimeSec", "\t", "count"
    for k, v in benchRuns:
      echo k, "\t", v.avgTimeSec, "\t", v.count

import std/math

func floorLog2*(x: int): int =
  var k = -1
  var y = x
  while (y > 0):
    k += 1
    y = y shr 1
  return k

func ceilingLog2*(x: int): int =
  if (x == 0):
    return -1
  else:
    return (floorLog2(x - 1) + 1)

func checkPowerOfTwo*(x: int, what: string): int =
  let k = ceilingLog2(x)
  assert(x == 2 ^ k, ("`" & what & "` is expected to be a power of 2"))
  return x
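A minimal usage sketch of the `benchmark` and `printBenchMarkSummaries` templates above, assuming this utils module plus `std/[os, times, strutils, terminal]` are imported as in `run_benchmarks.nim` (the measured block is a stand-in):

```nim
import std/[os, times, strutils, terminal]
import ./utils

# Record three timed runs of the block under the name "sleep-10ms",
# then print the per-name averages collected in `benchRuns`.
benchmark "sleep-10ms", 3:
  sleep(10)

printBenchMarkSummaries()
```
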
40
build.nims
@ -48,14 +48,14 @@ proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic")
    exec "nim c" & " --out:build/" & lib_name &
      " --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
      "--nimMainPrefix:libstorage -d:noSignalHandler " &
      "-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
      "-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
      "-d:chronicles_runtime_filtering " & "-d:chronicles_log_level=TRACE " & params &
      " " & srcDir & name & ".nim"
  else:
    exec "nim c" & " --out:build/" & name &
      ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
      "--nimMainPrefix:libstorage -d:noSignalHandler " &
      "-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
      "-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
      "-d:chronicles_runtime_filtering " & "-d:chronicles_log_level=TRACE " & params &
      " " & srcDir & name & ".nim"

proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
  buildBinary name, outName, srcDir, params
@ -70,25 +70,13 @@ task toolsCirdl, "build tools/cirdl binary":
  buildBinary "tools/cirdl/cirdl"

task testStorage, "Build & run Logos Storage tests":
  test "testCodex",
    outName = "testStorage", params = "-d:storage_enable_proof_failures=true"

task testContracts, "Build & run Logos Storage Contract tests":
  test "testContracts"
  test "testCodex", outName = "testStorage"

task testIntegration, "Run integration tests":
  buildBinary "codex",
    outName = "storage",
    params =
      "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:chronicles_disabled_topics=JSONRPC-HTTP-CLIENT,websock,libp2p,discv5 -d:codex_enable_proof_failures=true"
  var sinks = @["textlines[nocolors,file]"]
  for i in 2 ..< paramCount():
    if "DebugTestHarness" in paramStr(i) and truthy paramStr(i).split('=')[1]:
      sinks.add "textlines[stdout]"
      break
  var testParams =
    "-d:chronicles_log_level=TRACE -d:chronicles_sinks=\"" & sinks.join(",") & "\""
  test "testIntegration", params = testParams
    params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
  test "testIntegration"
  # use params to enable logging from the integration test executable
  # test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
  #   "-d:chronicles_enabled_topics:integration:TRACE"
@ -99,19 +87,9 @@ task build, "build Logos Storage binary":
task test, "Run tests":
  testStorageTask()

task testTools, "Run Tools tests":
  toolsCirdlTask()
  test "testTools"

task testAll, "Run all tests (except for Taiko L2 tests)":
  testStorageTask()
  testContractsTask()
  testIntegrationTask()
  testToolsTask()

task testTaiko, "Run Taiko L2 tests":
  storageTask()
  test "testTaiko"

import strutils
import os
@ -141,9 +119,7 @@ task coverage, "generates code coverage report":

  echo "======== Running Tests ======== "
  test "coverage",
    srcDir = "tests/",
    params =
      " --nimcache:nimcache/coverage -d:release -d:storage_enable_proof_failures=true"
    srcDir = "tests/", params = " --nimcache:nimcache/coverage -d:release"
  exec("rm nimcache/coverage/*.c")
  rmDir("coverage")
  mkDir("coverage")

@ -71,9 +71,6 @@ when isMainModule:
    # permissions are insecure.
    quit QuitFailure

  if config.prover() and not (checkAndCreateDataDir((config.circuitDir).string)):
    quit QuitFailure

  trace "Data dir initialized", dir = $config.dataDir

  if not (checkAndCreateDataDir((config.dataDir / "repo"))):
@ -1,6 +1,5 @@
import ./engine/discovery
import ./engine/advertiser
import ./engine/engine
import ./engine/payments

export discovery, advertiser, engine, payments
export discovery, advertiser, engine

@ -37,12 +37,11 @@ import ../protobuf/presence
import ../network
import ../peers

import ./payments
import ./discovery
import ./advertiser
import ./pendingblocks

export peers, pendingblocks, payments, discovery
export peers, pendingblocks, discovery

logScope:
  topics = "codex blockexcengine"
@ -113,16 +112,10 @@ type
    maxBlocksPerMessage: int
      # Maximum number of blocks we can squeeze in a single message
    pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
    wallet*: WalletRef # Nitro wallet for micropayments
    pricing*: ?Pricing # Optional bandwidth pricing
    discovery*: DiscoveryEngine
    advertiser*: Advertiser
    lastDiscRequest: Moment # time of last discovery request

  Pricing* = object
    address*: EthAddress
    price*: UInt256

# attach task scheduler to engine
proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, raises: [].} =
  if self.taskQueue.pushOrUpdateNoWait(task).isOk():
@ -644,17 +637,6 @@ proc resolveBlocks*(
      )
    )

proc payForBlocks(
    self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
  let
    sendPayment = self.network.request.sendPayment
    price = peer.price(blocksDelivery.mapIt(it.address))

  if payment =? self.wallet.pay(peer, price):
    trace "Sending payment for blocks", price, len = blocksDelivery.len
    await sendPayment(peer.id, payment)

proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =
  if bd.address notin self.pendingBlocks:
    return failure("Received block is not currently a pending block")
@ -749,11 +731,6 @@ proc blocksDeliveryHandler*(

  codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)

  if peerCtx != nil:
    if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
      warn "Error paying for blocks", err = err.msg
      return

  if err =? catch(await self.resolveBlocks(validatedBlocksDelivery)).errorOption:
    warn "Error resolving blocks", err = err.msg
    return
@ -783,14 +760,12 @@ proc wantListHandler*(
      wantType = $e.wantType

    if e.address notin peerCtx.wantedBlocks: # Adding new entry to peer wants
      let
        have =
          try:
            await e.address in self.localStore
          except CatchableError as exc:
            # TODO: should not be necessary once we have proper exception tracking on the BlockStore interface
            false
        price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
      let have =
        try:
          await e.address in self.localStore
        except CatchableError as exc:
          # TODO: should not be necessary once we have proper exception tracking on the BlockStore interface
          false

      if e.cancel:
        # This is sort of expected if we sent the block to the peer, as we have removed
@ -805,17 +780,13 @@ proc wantListHandler*(
      if have:
        trace "We HAVE the block", address = e.address
        presence.add(
          BlockPresence(
            address: e.address, `type`: BlockPresenceType.Have, price: price
          )
          BlockPresence(address: e.address, `type`: BlockPresenceType.Have)
        )
      else:
        trace "We DON'T HAVE the block", address = e.address
        if e.sendDontHave:
          presence.add(
            BlockPresence(
              address: e.address, `type`: BlockPresenceType.DontHave, price: price
            )
            BlockPresence(address: e.address, `type`: BlockPresenceType.DontHave)
          )

  codex_block_exchange_want_have_lists_received.inc()
@ -856,30 +827,6 @@ proc wantListHandler*(
  except CancelledError as exc: #TODO: replace with CancelledError
    warn "Error processing want list", error = exc.msg

proc accountHandler*(
    self: BlockExcEngine, peer: PeerId, account: Account
) {.async: (raises: []).} =
  let context = self.peers.get(peer)
  if context.isNil:
    return

  context.account = account.some

proc paymentHandler*(
    self: BlockExcEngine, peer: PeerId, payment: SignedState
) {.async: (raises: []).} =
  trace "Handling payments", peer

  without context =? self.peers.get(peer).option and account =? context.account:
    trace "No context or account for peer", peer
    return

  if channel =? context.paymentChannel:
    let sender = account.address
    discard self.wallet.acceptPayment(channel, Asset, sender, payment)
  else:
    context.paymentChannel = self.wallet.acceptChannel(payment).option

proc peerAddedHandler*(
    self: BlockExcEngine, peer: PeerId
) {.async: (raises: [CancelledError]).} =
@ -896,10 +843,6 @@ proc peerAddedHandler*(
  trace "Added peer", peers = self.peers.len
  await self.refreshBlockKnowledge(peerCtx)

  if address =? self.pricing .? address:
    trace "Sending account to peer", peer
    await self.network.request.sendAccount(peer, Account(address: address))

proc localLookup(
    self: BlockExcEngine, address: BlockAddress
): Future[?!BlockDelivery] {.async: (raises: [CancelledError]).} =
@ -1023,7 +966,6 @@ proc selectRandom*(
proc new*(
    T: type BlockExcEngine,
    localStore: BlockStore,
    wallet: WalletRef,
    network: BlockExcNetwork,
    discovery: DiscoveryEngine,
    advertiser: Advertiser,
@ -1041,7 +983,6 @@ proc new*(
    peers: peerStore,
    pendingBlocks: pendingBlocks,
    network: network,
    wallet: wallet,
    concurrentTasks: concurrentTasks,
    trackedFutures: TrackedFutures(),
    maxBlocksPerMessage: maxBlocksPerMessage,
@ -1066,16 +1007,6 @@ proc new*(
  ): Future[void] {.async: (raises: []).} =
    self.blocksDeliveryHandler(peer, blocksDelivery)

  proc accountHandler(
      peer: PeerId, account: Account
  ): Future[void] {.async: (raises: []).} =
    self.accountHandler(peer, account)

  proc paymentHandler(
      peer: PeerId, payment: SignedState
  ): Future[void] {.async: (raises: []).} =
    self.paymentHandler(peer, payment)

  proc peerAddedHandler(
      peer: PeerId
  ): Future[void] {.async: (raises: [CancelledError]).} =
@ -1090,8 +1021,6 @@ proc new*(
    onWantList: blockWantListHandler,
    onBlocksDelivery: blocksDeliveryHandler,
    onPresence: blockPresenceHandler,
    onAccount: accountHandler,
    onPayment: paymentHandler,
    onPeerJoined: peerAddedHandler,
    onPeerDeparted: peerDepartedHandler,
  )

@ -1,46 +0,0 @@
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/math
import pkg/nitro
import pkg/questionable/results
import ../peers

export nitro
export results

const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals

func openLedgerChannel*(
    wallet: WalletRef, hub: EthAddress, asset: EthAddress
): ?!ChannelId =
  wallet.openLedgerChannel(hub, ChainId, asset, AmountPerChannel)

func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
  if channel =? peer.paymentChannel:
    success channel
  elif account =? peer.account:
    let channel = ?wallet.openLedgerChannel(account.address, Asset)
    peer.paymentChannel = channel.some
    success channel
  else:
    failure "no account set for peer"

func pay*(wallet: WalletRef, peer: BlockExcPeerCtx, amount: UInt256): ?!SignedState =
  if account =? peer.account:
    let asset = Asset
    let receiver = account.address
    let channel = ?wallet.getOrOpenChannel(peer)
    wallet.pay(channel, asset, receiver, amount)
  else:
    failure "no account set for peer"
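To make the removed flow concrete, here is a sketch of how the engine used this API, mirroring `payForBlocks` in the engine diff above (names such as `wallet`, `peerCtx`, `blocksDelivery`, and `network` come from that surrounding code):

```nim
# Hypothetical caller (cf. payForBlocks): price the delivered blocks via
# the peer's presence table, then pay over the peer's Nitro channel.
# `pay` opens a ledger channel on first use via getOrOpenChannel.
let price = peerCtx.price(blocksDelivery.mapIt(it.address))
if payment =? wallet.pay(peerCtx, price):
  await network.request.sendPayment(peerCtx.id, payment)
```
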
@ -20,12 +20,11 @@ import pkg/questionable/results
import ../../blocktype as bt
import ../../logutils
import ../protobuf/blockexc as pb
import ../protobuf/payments
import ../../utils/trackedfutures

import ./networkpeer

export networkpeer, payments
export networkpeer

logScope:
  topics = "codex blockexcnetwork"
@ -40,16 +39,12 @@ type
    proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
  BlockPresenceHandler* =
    proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
  AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).}
  PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).}
  PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}

  BlockExcHandlers* = object
    onWantList*: WantListHandler
    onBlocksDelivery*: BlocksDeliveryHandler
    onPresence*: BlockPresenceHandler
    onAccount*: AccountHandler
    onPayment*: PaymentHandler
    onPeerJoined*: PeerEventHandler
    onPeerDeparted*: PeerEventHandler
    onPeerDropped*: PeerEventHandler
@ -72,18 +67,12 @@ type
  PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
    async: (raises: [CancelledError])
  .}
  AccountSender* =
    proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
  PaymentSender* =
    proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}

  BlockExcRequest* = object
    sendWantList*: WantListSender
    sendWantCancellations*: WantCancellationSender
    sendBlocksDelivery*: BlocksDeliverySender
    sendPresence*: PresenceSender
    sendAccount*: AccountSender
    sendPayment*: PaymentSender

  BlockExcNetwork* = ref object of LPProtocol
    peers*: Table[PeerId, NetworkPeer]
@ -207,40 +196,6 @@ proc sendBlockPresence*(

  b.send(id, Message(blockPresences: @presence))

proc handleAccount(
    network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async: (raises: []).} =
  ## Handle account info
  ##

  if not network.handlers.onAccount.isNil:
    await network.handlers.onAccount(peer.id, account)

proc sendAccount*(
    b: BlockExcNetwork, id: PeerId, account: Account
) {.async: (raw: true, raises: [CancelledError]).} =
  ## Send account info to remote
  ##

  b.send(id, Message(account: AccountMessage.init(account)))

proc sendPayment*(
    b: BlockExcNetwork, id: PeerId, payment: SignedState
) {.async: (raw: true, raises: [CancelledError]).} =
  ## Send payment to remote
  ##

  b.send(id, Message(payment: StateChannelUpdate.init(payment)))

proc handlePayment(
    network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async: (raises: []).} =
  ## Handle payment
  ##

  if not network.handlers.onPayment.isNil:
    await network.handlers.onPayment(peer.id, payment)

proc rpcHandler(
    self: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: []).} =
@ -255,12 +210,6 @@ proc rpcHandler(
  if msg.blockPresences.len > 0:
    self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))

  if account =? Account.init(msg.account):
    self.trackedFutures.track(self.handleAccount(peer, account))

  if payment =? SignedState.init(msg.payment):
    self.trackedFutures.track(self.handlePayment(peer, payment))

proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
  ## Creates or retrieves a BlockExcNetwork Peer
  ##
@ -413,23 +362,11 @@ proc new*(
  ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
    self.sendBlockPresence(id, presence)

  proc sendAccount(
      id: PeerId, account: Account
  ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
    self.sendAccount(id, account)

  proc sendPayment(
      id: PeerId, payment: SignedState
  ): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
    self.sendPayment(id, payment)

  self.request = BlockExcRequest(
    sendWantList: sendWantList,
    sendWantCancellations: sendWantCancellations,
    sendBlocksDelivery: sendBlocksDelivery,
    sendPresence: sendPresence,
    sendAccount: sendAccount,
    sendPayment: sendPayment,
  )

  self.init()

@ -13,18 +13,14 @@ import std/sets

import pkg/libp2p
import pkg/chronos
import pkg/nitro
import pkg/questionable

import ../protobuf/blockexc
import ../protobuf/payments
import ../protobuf/presence

import ../../blocktype
import ../../logutils

export payments, nitro

const
  MinRefreshInterval = 1.seconds
  MaxRefreshBackoff = 36 # 36 seconds
@ -32,14 +28,12 @@ const

type BlockExcPeerCtx* = ref object of RootObj
  id*: PeerId
  blocks*: Table[BlockAddress, Presence] # remote peer have list including price
  blocks*: Table[BlockAddress, Presence] # remote peer have list
  wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
  exchanged*: int # times peer has exchanged with us
  refreshInProgress*: bool # indicates if a refresh is in progress
  lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
  refreshBackoff*: int = 1 # backoff factor for refresh requests
  account*: ?Account # ethereum account of this peer
  paymentChannel*: ?ChannelId # payment channel id
  blocksSent*: HashSet[BlockAddress] # blocks sent to peer
  blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
  lastExchange*: Moment # last time peer has sent us a block
@ -105,14 +99,6 @@ func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) =
  self.cleanPresence(@[address])

func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
  var price = 0.u256
  for a in addresses:
    self.blocks.withValue(a, precense):
      price += precense[].price

  price

proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
  ## Adds a block to the set of blocks that have been requested from this peer
  ## (its request schedule).

@ -17,7 +17,6 @@ import ../../blocktype
export Message, protobufEncode, protobufDecode
export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate

proc hash*(e: WantListEntry): Hash =
  hash(e.address)

@ -51,10 +51,6 @@ type
  BlockPresence* = object
    address*: BlockAddress
    `type`*: BlockPresenceType
    price*: seq[byte] # Amount of assets to pay for the block (UInt256)

  AccountMessage* = object
    address*: seq[byte] # Ethereum address to which payments should be made

  StateChannelUpdate* = object
    update*: seq[byte] # Signed Nitro state, serialized as JSON
@ -64,8 +60,6 @@ type
    payload*: seq[BlockDelivery]
    blockPresences*: seq[BlockPresence]
    pendingBytes*: uint
    account*: AccountMessage
    payment*: StateChannelUpdate

#
# Encoding Message into seq[byte] in Protobuf format
@ -115,19 +109,6 @@ proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) =
  var ipb = initProtoBuffer()
  ipb.write(1, value.address)
  ipb.write(2, value.`type`.uint)
  ipb.write(3, value.price)
  ipb.finish()
  pb.write(field, ipb)

proc write*(pb: var ProtoBuffer, field: int, value: AccountMessage) =
  var ipb = initProtoBuffer()
  ipb.write(1, value.address)
  ipb.finish()
  pb.write(field, ipb)

proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
  var ipb = initProtoBuffer()
  ipb.write(1, value.update)
  ipb.finish()
  pb.write(field, ipb)

@ -135,12 +116,10 @@ proc protobufEncode*(value: Message): seq[byte] =
  var ipb = initProtoBuffer()
  ipb.write(1, value.wantList)
  for v in value.payload:
    ipb.write(3, v)
    ipb.write(3, v) # is this meant to be 2?
  for v in value.blockPresences:
    ipb.write(4, v)
  ipb.write(5, value.pendingBytes)
  ipb.write(6, value.account)
  ipb.write(7, value.payment)
  ipb.finish()
  ipb.buffer

@ -240,19 +219,6 @@ proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence]
  value.address = ?BlockAddress.decode(ipb)
  if ?pb.getField(2, field):
    value.`type` = BlockPresenceType(field)
  discard ?pb.getField(3, value.price)
  ok(value)

proc decode*(_: type AccountMessage, pb: ProtoBuffer): ProtoResult[AccountMessage] =
  var value = AccountMessage()
  discard ?pb.getField(1, value.address)
  ok(value)

proc decode*(
    _: type StateChannelUpdate, pb: ProtoBuffer
): ProtoResult[StateChannelUpdate] =
  var value = StateChannelUpdate()
  discard ?pb.getField(1, value.update)
  ok(value)

proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
@ -263,15 +229,11 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
    sublist: seq[seq[byte]]
  if ?pb.getField(1, ipb):
    value.wantList = ?WantList.decode(ipb)
  if ?pb.getRepeatedField(3, sublist):
  if ?pb.getRepeatedField(3, sublist): # meant to be 2?
    for item in sublist:
      value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
  if ?pb.getRepeatedField(4, sublist):
    for item in sublist:
      value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
  discard ?pb.getField(5, value.pendingBytes)
  if ?pb.getField(6, ipb):
    value.account = ?AccountMessage.decode(ipb)
  if ?pb.getField(7, ipb):
    value.payment = ?StateChannelUpdate.decode(ipb)
  ok(value)

@ -38,21 +38,10 @@ message Message {
  message BlockPresence {
    bytes cid = 1;
    BlockPresenceType type = 2;
    bytes price = 3; // Amount of assets to pay for the block (UInt256)
  }

  message AccountMessage {
    bytes address = 1; // Ethereum address to which payments should be made
  }

  message StateChannelUpdate {
    bytes update = 1; // Signed Nitro state, serialized as JSON
  }

  Wantlist wantlist = 1;
  repeated Block payload = 3;
  repeated Block payload = 3; // what happened to 2?
  repeated BlockPresence blockPresences = 4;
  int32 pendingBytes = 5;
  AccountMessage account = 6;
  StateChannelUpdate payment = 7;
}

@ -1,38 +0,0 @@
{.push raises: [].}

import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import ./blockexc

export AccountMessage
export StateChannelUpdate

export stint
export nitro

type Account* = object
  address*: EthAddress

func init*(_: type AccountMessage, account: Account): AccountMessage =
  AccountMessage(address: @(account.address.toArray))

func parse(_: type EthAddress, bytes: seq[byte]): ?EthAddress =
  var address: array[20, byte]
  if bytes.len != address.len:
    return EthAddress.none
  for i in 0 ..< address.len:
    address[i] = bytes[i]
  EthAddress(address).some

func init*(_: type Account, message: AccountMessage): ?Account =
  without address =? EthAddress.parse(message.address):
    return none Account
  some Account(address: address)

func init*(_: type StateChannelUpdate, state: SignedState): StateChannelUpdate =
  StateChannelUpdate(update: state.toJson.toBytes)

proc init*(_: type SignedState, update: StateChannelUpdate): ?SignedState =
  SignedState.fromJson(string.fromBytes(update.update))
@ -17,7 +17,6 @@ type
  Presence* = object
    address*: BlockAddress
    have*: bool
    price*: UInt256

func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
  if bytes.len > 32:
@ -25,18 +24,12 @@ func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
  UInt256.fromBytesBE(bytes).some

func init*(_: type Presence, message: PresenceMessage): ?Presence =
  without price =? UInt256.parse(message.price):
    return none Presence

  some Presence(
    address: message.address,
    have: message.`type` == BlockPresenceType.Have,
    price: price,
    address: message.address, have: message.`type` == BlockPresenceType.Have
  )

func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
  PresenceMessage(
    address: presence.address,
    `type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave,
    price: @(presence.price.toBytesBE),
  )

117
codex/codex.nim
@ -20,10 +20,8 @@ import pkg/presto
|
||||
import pkg/libp2p
|
||||
import pkg/confutils
|
||||
import pkg/confutils/defs
|
||||
import pkg/nitro
|
||||
import pkg/stew/io2
|
||||
import pkg/datastore
|
||||
import pkg/ethers except Rng
|
||||
import pkg/stew/io2
|
||||
|
||||
import ./node
|
||||
@ -31,15 +29,10 @@ import ./conf
|
||||
import ./rng as random
|
||||
import ./rest/api
|
||||
import ./stores
|
||||
import ./slots
|
||||
import ./blockexchange
|
||||
import ./utils/fileutils
|
||||
import ./erasure
|
||||
import ./discovery
|
||||
import ./contracts
|
||||
import ./systemclock
|
||||
import ./contracts/clock
|
||||
import ./contracts/deployment
|
||||
import ./utils/addrutils
|
||||
import ./namespaces
|
||||
import ./codextypes
|
||||
@ -60,7 +53,6 @@ type
|
||||
isStarted: bool
|
||||
|
||||
CodexPrivateKey* = libp2p.PrivateKey # alias
|
||||
EthWallet = ethers.Wallet
|
||||
|
||||
func config*(self: CodexServer): CodexConf =
|
||||
return self.config
|
||||
@ -71,103 +63,6 @@ func node*(self: CodexServer): CodexNodeRef =
|
||||
func repoStore*(self: CodexServer): RepoStore =
|
||||
return self.repoStore
|
||||
|
||||
proc waitForSync(provider: Provider): Future[void] {.async.} =
|
||||
var sleepTime = 1
|
||||
trace "Checking sync state of Ethereum provider..."
|
||||
while await provider.isSyncing:
|
||||
notice "Waiting for Ethereum provider to sync..."
|
||||
await sleepAsync(sleepTime.seconds)
|
||||
if sleepTime < 10:
|
||||
inc sleepTime
|
||||
trace "Ethereum provider is synced."
|
||||
|
||||
proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
|
||||
## bootstrap interactions and return contracts
|
||||
## using clients, hosts, validators pairings
|
||||
##
|
||||
let
|
||||
config = s.config
|
||||
repo = s.repoStore
|
||||
|
||||
if config.persistence:
|
||||
if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
|
||||
error "Persistence enabled, but no Ethereum account was set"
|
||||
quit QuitFailure
|
||||
|
||||
let provider = JsonRpcProvider.new(
|
||||
config.ethProvider, maxPriorityFeePerGas = config.maxPriorityFeePerGas.u256
|
||||
)
|
||||
await waitForSync(provider)
|
||||
var signer: Signer
|
||||
if account =? config.ethAccount:
|
||||
signer = provider.getSigner(account)
|
||||
elif keyFile =? config.ethPrivateKey:
|
||||
without isSecure =? checkSecureFile(keyFile):
|
||||
error "Could not check file permissions: does Ethereum private key file exist?"
|
||||
quit QuitFailure
|
||||
if not isSecure:
|
||||
error "Ethereum private key file does not have safe file permissions"
|
||||
quit QuitFailure
|
||||
without key =? keyFile.readAllChars():
|
||||
error "Unable to read Ethereum private key file"
|
||||
quit QuitFailure
|
||||
without wallet =? EthWallet.new(key.strip(), provider):
|
||||
error "Invalid Ethereum private key in file"
|
||||
quit QuitFailure
|
||||
signer = wallet
|
||||
|
||||
let deploy = Deployment.new(provider, config.marketplaceAddress)
|
||||
without marketplaceAddress =? await deploy.address(Marketplace):
|
||||
error "No Marketplace address was specified or there is no known address for the current network"
|
||||
quit QuitFailure
|
||||
|
||||
let marketplace = Marketplace.new(marketplaceAddress, signer)
|
||||
let market = OnChainMarket.new(
|
||||
marketplace, config.rewardRecipient, config.marketplaceRequestCacheSize
|
||||
)
|
||||
let clock = OnChainClock.new(provider)
|
||||
|
||||
var client: ?ClientInteractions
|
||||
var host: ?HostInteractions
|
||||
var validator: ?ValidatorInteractions
|
||||
|
||||
if config.validator or config.persistence:
|
||||
s.codexNode.clock = clock
|
||||
else:
|
||||
s.codexNode.clock = SystemClock()
|
||||
|
||||
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
|
||||
# and hence the proof failure will always be 0.
|
||||
when storage_enable_proof_failures:
|
||||
let proofFailures = config.simulateProofFailures
|
||||
if proofFailures > 0:
|
||||
warn "Enabling proof failure simulation!"
|
||||
else:
|
||||
let proofFailures = 0
|
||||
if config.simulateProofFailures > 0:
|
||||
warn "Proof failure simulation is not enabled for this build! Configuration ignored"
|
||||
|
||||
if error =? (await market.loadConfig()).errorOption:
|
||||
fatal "Cannot load market configuration", error = error.msg
|
||||
quit QuitFailure
|
||||
|
||||
let purchasing = Purchasing.new(market, clock)
|
||||
let sales = Sales.new(market, clock, repo, proofFailures)
|
||||
client = some ClientInteractions.new(clock, purchasing)
|
||||
host = some HostInteractions.new(clock, sales)
|
||||
|
||||
if config.validator:
|
||||
without validationConfig =?
|
||||
ValidationConfig.init(
|
||||
config.validatorMaxSlots, config.validatorGroups, config.validatorGroupIndex
|
||||
), err:
|
||||
error "Invalid validation parameters", err = err.msg
|
||||
quit QuitFailure
|
||||
let validation = Validation.new(clock, market, validationConfig)
|
||||
validator = some ValidatorInteractions.new(clock, validation)
|
||||
|
||||
s.codexNode.contracts = (client, host, validator)
|
||||
|
proc start*(s: CodexServer) {.async.} =
  if s.isStarted:
    warn "Storage server already started, skipping"
@ -187,7 +82,6 @@ proc start*(s: CodexServer) {.async.} =
  s.codexNode.discovery.updateAnnounceRecord(announceAddrs)
  s.codexNode.discovery.updateDhtRecord(discoveryAddrs)

  await s.bootstrapInteractions()
  await s.codexNode.start()

  if s.restServer != nil:
@ -304,7 +198,6 @@ proc new*(
      store = discoveryStore,
    )

    wallet = WalletRef.new(EthPrivateKey.random())
    network = BlockExcNetwork.new(switch)

    repoData =
@ -349,23 +242,15 @@ proc new*(
    blockDiscovery =
      DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
    engine = BlockExcEngine.new(
      repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks
      repoStore, network, blockDiscovery, advertiser, peerStore, pendingBlocks
    )
    store = NetworkStore.new(engine, repoStore)
    prover =
      if config.prover:
        let backend =
          config.initializeBackend().expect("Unable to create prover backend.")
        some Prover.new(store, backend, config.numProofSamples, taskPool)
      else:
        none Prover

    codexNode = CodexNodeRef.new(
      switch = switch,
      networkStore = store,
      engine = engine,
      discovery = discovery,
      prover = prover,
      taskPool = taskPool,
    )

@ -26,34 +26,15 @@ export tables
const
  # Size of blocks for storage / network exchange
  DefaultBlockSize* = NBytes 1024 * 64
  DefaultCellSize* = NBytes 2048

  # Proving defaults
  DefaultMaxSlotDepth* = 32
  DefaultMaxDatasetDepth* = 8
  DefaultBlockDepth* = 5
  DefaultCellElms* = 67
  DefaultSamplesNum* = 5

  # hashes
  Sha256HashCodec* = multiCodec("sha2-256")
  Sha512HashCodec* = multiCodec("sha2-512")
  Pos2Bn128SpngCodec* = multiCodec("poseidon2-alt_bn_128-sponge-r2")
  Pos2Bn128MrklCodec* = multiCodec("poseidon2-alt_bn_128-merkle-2kb")

  ManifestCodec* = multiCodec("codex-manifest")
  DatasetRootCodec* = multiCodec("codex-root")
  BlockCodec* = multiCodec("codex-block")
  SlotRootCodec* = multiCodec("codex-slot-root")
  SlotProvingRootCodec* = multiCodec("codex-proving-root")
  CodexSlotCellCodec* = multiCodec("codex-slot-cell")

  CodexHashesCodecs* = [Sha256HashCodec, Pos2Bn128SpngCodec, Pos2Bn128MrklCodec]

  CodexPrimitivesCodecs* = [
    ManifestCodec, DatasetRootCodec, BlockCodec, SlotRootCodec, SlotProvingRootCodec,
    CodexSlotCellCodec,
  ]
  CodexPrimitivesCodecs* = [ManifestCodec, DatasetRootCodec, BlockCodec]

proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
  ## Initialize padding blocks table
@ -66,8 +47,7 @@ proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
  let
    emptyData: seq[byte] = @[]
    PadHashes = {
      Sha256HashCodec: ?MultiHash.digest($Sha256HashCodec, emptyData).mapFailure,
      Sha512HashCodec: ?MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
      Sha256HashCodec: ?MultiHash.digest($Sha256HashCodec, emptyData).mapFailure
    }.toTable

  var table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()

222
codex/conf.nim
@ -31,7 +31,6 @@ import pkg/metrics
import pkg/metrics/chronos_httpserver
import pkg/stew/byteutils
import pkg/libp2p
import pkg/ethers
import pkg/questionable
import pkg/questionable/results
import pkg/stew/base64
@ -45,16 +44,13 @@ import ./utils
import ./nat
import ./utils/natutils

from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
from ./validationconfig import MaxSlots, ValidationGroups
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries

export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots

export
  DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
  DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas, DefaultBlockRetries
  DefaultBlockRetries

type ThreadCount* = distinct Natural

@ -73,7 +69,6 @@ proc defaultDataDir*(): string =

const
  storage_enable_api_debug_peers* {.booldefine.} = false
  storage_enable_proof_failures* {.booldefine.} = false
  storage_enable_log_counter* {.booldefine.} = false

  DefaultThreadCount* = ThreadCount(0)
@ -83,10 +78,6 @@ type
    noCmd
    persistence

  PersistenceCmd* {.pure.} = enum
    noCmd
    prover

  LogKind* {.pure.} = enum
    Auto = "auto"
    Colors = "colors"
@ -286,204 +277,12 @@ type
      desc: "Logs to file", defaultValue: string.none, name: "log-file", hidden
    .}: Option[string]

    case cmd* {.defaultValue: noCmd, command.}: StartUpCmd
    of persistence:
      ethProvider* {.
        desc: "The URL of the JSON-RPC API of the Ethereum node",
        defaultValue: "ws://localhost:8545",
        name: "eth-provider"
      .}: string

      ethAccount* {.
        desc: "The Ethereum account that is used for storage contracts",
        defaultValue: EthAddress.none,
        defaultValueDesc: "",
        name: "eth-account"
      .}: Option[EthAddress]

      ethPrivateKey* {.
        desc: "File containing Ethereum private key for storage contracts",
        defaultValue: string.none,
        defaultValueDesc: "",
        name: "eth-private-key"
      .}: Option[string]

      marketplaceAddress* {.
        desc: "Address of deployed Marketplace contract",
        defaultValue: EthAddress.none,
        defaultValueDesc: "",
        name: "marketplace-address"
      .}: Option[EthAddress]

      # TODO: should go behind a feature flag
      simulateProofFailures* {.
        desc: "Simulates proof failures once every N proofs. 0 = disabled.",
        defaultValue: 0,
        name: "simulate-proof-failures",
        hidden
      .}: int

      validator* {.
        desc: "Enables validator, requires an Ethereum node",
        defaultValue: false,
        name: "validator"
      .}: bool

      validatorMaxSlots* {.
        desc: "Maximum number of slots that the validator monitors",
        longDesc:
          "If set to 0, the validator will not limit " &
          "the maximum number of slots it monitors",
        defaultValue: 1000,
        name: "validator-max-slots"
      .}: MaxSlots

      validatorGroups* {.
        desc: "Slot validation groups",
        longDesc:
          "A number indicating total number of groups into " &
          "which the whole slot id space will be divided. " &
          "The value must be in the range [2, 65535]. " &
          "If not provided, the validator will observe " &
          "the whole slot id space and the value of " &
          "the --validator-group-index parameter will be ignored. " &
          "Powers of twos are advised for even distribution",
        defaultValue: ValidationGroups.none,
        name: "validator-groups"
      .}: Option[ValidationGroups]

      validatorGroupIndex* {.
        desc: "Slot validation group index",
        longDesc:
          "The value provided must be in the range " &
          "[0, validatorGroups). Ignored when --validator-groups " &
          "is not provided. Only slot ids satisfying condition " &
          "[(slotId mod validationGroups) == groupIndex] will be " &
          "observed by the validator",
        defaultValue: 0,
        name: "validator-group-index"
      .}: uint16

      rewardRecipient* {.
        desc: "Address to send payouts to (eg rewards and refunds)",
        name: "reward-recipient"
      .}: Option[EthAddress]

      marketplaceRequestCacheSize* {.
        desc:
          "Maximum number of StorageRequests kept in memory." &
          "Reduces fetching of StorageRequest data from the contract.",
        defaultValue: DefaultRequestCacheSize,
        defaultValueDesc: $DefaultRequestCacheSize,
        name: "request-cache-size",
        hidden
      .}: uint16

      maxPriorityFeePerGas* {.
        desc:
          "Sets the default maximum priority fee per gas for Ethereum EIP-1559 transactions, in wei, when not provided by the network.",
        defaultValue: DefaultMaxPriorityFeePerGas,
        defaultValueDesc: $DefaultMaxPriorityFeePerGas,
        name: "max-priority-fee-per-gas",
        hidden
      .}: uint64

      case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
      of PersistenceCmd.prover:
        circuitDir* {.
          desc: "Directory where Storage will store proof circuit data",
          defaultValue: defaultDataDir() / "circuits",
          defaultValueDesc: "data/circuits",
          abbr: "cd",
          name: "circuit-dir"
        .}: OutDir

        circomR1cs* {.
          desc: "The r1cs file for the storage circuit",
          defaultValue: defaultDataDir() / "circuits" / "proof_main.r1cs",
          defaultValueDesc: "data/circuits/proof_main.r1cs",
          name: "circom-r1cs"
        .}: InputFile

        circomWasm* {.
          desc: "The wasm file for the storage circuit",
          defaultValue: defaultDataDir() / "circuits" / "proof_main.wasm",
          defaultValueDesc: "data/circuits/proof_main.wasm",
          name: "circom-wasm"
        .}: InputFile

        circomZkey* {.
          desc: "The zkey file for the storage circuit",
          defaultValue: defaultDataDir() / "circuits" / "proof_main.zkey",
          defaultValueDesc: "data/circuits/proof_main.zkey",
          name: "circom-zkey"
        .}: InputFile

        # TODO: should probably be hidden and behind a feature flag
        circomNoZkey* {.
          desc: "Ignore the zkey file - use only for testing!",
          defaultValue: false,
          name: "circom-no-zkey"
        .}: bool

        numProofSamples* {.
          desc: "Number of samples to prove",
          defaultValue: DefaultSamplesNum,
          defaultValueDesc: $DefaultSamplesNum,
          name: "proof-samples"
        .}: int

        maxSlotDepth* {.
          desc: "The maximum depth of the slot tree",
          defaultValue: DefaultMaxSlotDepth,
          defaultValueDesc: $DefaultMaxSlotDepth,
          name: "max-slot-depth"
        .}: int

        maxDatasetDepth* {.
          desc: "The maximum depth of the dataset tree",
          defaultValue: DefaultMaxDatasetDepth,
          defaultValueDesc: $DefaultMaxDatasetDepth,
          name: "max-dataset-depth"
        .}: int

        maxBlockDepth* {.
          desc: "The maximum depth of the network block merkle tree",
          defaultValue: DefaultBlockDepth,
          defaultValueDesc: $DefaultBlockDepth,
          name: "max-block-depth"
        .}: int

        maxCellElms* {.
          desc: "The maximum number of elements in a cell",
          defaultValue: DefaultCellElms,
          defaultValueDesc: $DefaultCellElms,
          name: "max-cell-elements"
        .}: int
      of PersistenceCmd.noCmd:
        discard
    of StartUpCmd.noCmd:
      discard # end of persistence

  EthAddress* = ethers.Address

logutils.formatIt(LogFormat.textLines, EthAddress):
  it.short0xHexLog
logutils.formatIt(LogFormat.json, EthAddress):
  %it

func defaultAddress*(conf: CodexConf): IpAddress =
  result = static parseIpAddress("127.0.0.1")

func defaultNatConfig*(): NatConfig =
  result = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)

func persistence*(self: CodexConf): bool =
  self.cmd == StartUpCmd.persistence

func prover*(self: CodexConf): bool =
  self.persistence and self.persistenceCmd == PersistenceCmd.prover

proc getCodexVersion(): string =
  let tag = strip(staticExec("git describe --tags --abbrev=0"))
  if tag.isEmptyOrWhitespace:
@ -495,23 +294,17 @@ proc getCodexRevision(): string =
  var res = strip(staticExec("git rev-parse --short HEAD"))
  return res

proc getCodexContractsRevision(): string =
  let res =
    strip(staticExec("git rev-parse --short HEAD:vendor/logos-storage-contracts-eth"))
  return res

proc getNimBanner(): string =
  staticExec("nim --version | grep Version")

const
  codexVersion* = getCodexVersion()
  codexRevision* = getCodexRevision()
  codexContractsRevision* = getCodexContractsRevision()
  nimBanner* = getNimBanner()

  codexFullVersion* =
    "Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision &
    "\p" & "Storage contracts revision: " & codexContractsRevision & "\p" & nimBanner
    "\p"

proc parseCmdArg*(
    T: typedesc[MultiAddress], input: string
@ -593,9 +386,6 @@ proc parseCmdArg*(T: type NatConfig, p: string): T =
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
  return @[]

proc parseCmdArg*(T: type EthAddress, address: string): T =
  EthAddress.init($address).get()

func parse*(T: type NBytes, p: string): Result[NBytes, string] =
  var num = 0'i64
  let count = parseSize(p, num, alwaysBin = true)
@ -618,11 +408,6 @@ proc parseCmdArg*(T: type Duration, val: string): T =
    quit QuitFailure
  dur

proc readValue*(
    r: var TomlReader, val: var EthAddress
) {.raises: [SerializationError, IOError].} =
  val = EthAddress.init(r.readValue(string)).get()

proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
  without uri =? r.readValue(string).catch, err:
    error "invalid SignedPeerRecord configuration value", error = err.msg
@ -687,9 +472,6 @@ proc readValue*(
    raise newException(SerializationError, err.msg)

# no idea why confutils needs this:
proc completeCmdArg*(T: type EthAddress, val: string): seq[string] =
  discard

proc completeCmdArg*(T: type NBytes, val: string): seq[string] =
  discard

@ -1,8 +1,2 @@
const ContentIdsExts = [
  multiCodec("codex-root"),
  multiCodec("codex-manifest"),
  multiCodec("codex-block"),
  multiCodec("codex-slot-root"),
  multiCodec("codex-proving-root"),
  multiCodec("codex-slot-cell"),
]
const ContentIdsExts =
  [multiCodec("codex-root"), multiCodec("codex-manifest"), multiCodec("codex-block")]

@ -1,11 +0,0 @@
import contracts/requests
import contracts/marketplace
import contracts/market
import contracts/interactions
import contracts/provider

export requests
export marketplace
export market
export interactions
export provider
@ -1,148 +0,0 @@
Logos Storage Contracts in Nim
==============================

Nim API for the [Logos Storage smart contracts][1].

Usage
-----

For a global overview of the steps involved in starting and fulfilling a
storage contract, see [Logos Storage Contracts][1].

Smart contract
--------------

Connecting to the smart contract on an Ethereum node:

```nim
import codex/contracts
import ethers

let address = # fill in address where the contract was deployed
let provider = JsonRpcProvider.new("ws://localhost:8545")
let marketplace = Marketplace.new(address, provider)
```

Set up the client and host so that they can sign transactions; here we use
the first two accounts on the Ethereum node:

```nim
let accounts = await provider.listAccounts()
let client = provider.getSigner(accounts[0])
let host = provider.getSigner(accounts[1])
```

Storage requests
----------------

Creating a request for storage:

```nim
let request: StorageRequest = (
  client: # address of the client requesting storage
  duration: # duration of the contract in seconds
  size: # size in bytes
  contentHash: # SHA256 hash of the content that's going to be stored
  proofProbability: # require a storage proof roughly once every N periods
  maxPrice: # maximum price the client is willing to pay
  expiry: # expiration time of the request (in unix time)
  nonce: # random nonce to differentiate between similar requests
)
```

When a client wants to submit this request to the network, it needs to pay the
maximum price to the smart contract in advance. The difference between the
maximum price and the offered price will be reimbursed later.
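
Preparing the payment amounts to an ERC-20 token approval. A minimal sketch,
assuming the contract exposes its payment token through a `token()` getter and
using the `Erc20Token` helper from nim-ethers (the names here are illustrative,
not necessarily this contract's exact API):

```nim
import pkg/ethers/erc20

# Approve the maximum price so the contract can pull the funds
# when the request is submitted.
let tokenAddress = await marketplace.token()
let token = Erc20Token.new(tokenAddress, client)
discard await token.approve(marketplace.address, request.maxPrice)
```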

Once the payment has been prepared, the client can submit the request to the
network:

```nim
await storage
  .connect(client)
  .requestStorage(request)
```

Storage offers
--------------

Creating a storage offer:

```nim
let offer: StorageOffer = (
  host: # address of the host that is offering storage
  requestId: request.id,
  price: # offered price (in number of tokens)
  expiry: # expiration time of the offer (in unix time)
)
```

A host submits an offer:

```nim
await storage
  .connect(host)
  .offerStorage(offer)
```

The client selects an offer:

```nim
await storage
  .connect(client)
  .selectOffer(offer.id)
```

Starting and finishing a storage contract
-----------------------------------------

The host whose offer got selected can start the storage contract once it has
received the data that needs to be stored:

```nim
await storage
  .connect(host)
  .startContract(offer.id)
```

Once the storage contract is finished, the host can release payment:

```nim
await storage
  .connect(host)
  .finishContract(id)
```

Storage proofs
--------------

Time is divided into periods, and in each period a storage proof may be
required from the host. The odds of requiring a storage proof are negotiated
through the storage request. For more details about the timing of storage
proofs, please refer to the [design document][2].
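
As a rough illustration of the period arithmetic described above (a sketch
based on this description, not the contract's exact API): with integer
division, every timestamp inside the same window maps to the same period
number.

```nim
# Hypothetical helpers; `period` is the period length in seconds,
# as negotiated in the storage request.
type Period = uint64

func periodOf(timestamp: uint64, period: uint64): Period =
  # Integer division: all timestamps in [n * period, (n + 1) * period)
  # belong to period n.
  timestamp div period

func periodEnd(p: Period, period: uint64): uint64 =
  # A required proof must be submitted before the period ends.
  (p + 1) * period
```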

At the start of each period of time, the host can check whether a storage proof
is required:

```nim
let isProofRequired = await storage.isProofRequired(offer.id)
```

If a proof is required, the host can submit it before the end of the period:

```nim
await storage
  .connect(host)
  .submitProof(id, proof)
```

If a proof is not submitted, then a validator can mark a proof as missing:

```nim
await storage
  .connect(validator)
  .markProofAsMissing(id, period)
```

[1]: https://github.com/logos-storage/logos-storage-contracts-eth/
[2]: https://github.com/logos-storage/logos-storage-research/blob/master/design/storage-proof-timing.md
@ -1,82 +0,0 @@
{.push raises: [].}

import std/times
import pkg/ethers
import pkg/questionable
import pkg/chronos
import pkg/stint
import ../clock
import ../conf
import ../utils/trackedfutures

export clock

logScope:
  topics = "contracts clock"

type OnChainClock* = ref object of Clock
  provider: Provider
  subscription: Subscription
  offset: times.Duration
  blockNumber: UInt256
  started: bool
  newBlock: AsyncEvent
  trackedFutures: TrackedFutures

proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
  OnChainClock(
    provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
  )

proc update(clock: OnChainClock, blck: Block) =
  if number =? blck.number and number > clock.blockNumber:
    let blockTime = initTime(blck.timestamp.truncate(int64), 0)
    let computerTime = getTime()
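    # The offset captures how far the latest block timestamp is ahead of or
    # behind the local wall clock; now() adds it to the local time below.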
    clock.offset = blockTime - computerTime
    clock.blockNumber = number
    trace "updated clock",
      blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
    clock.newBlock.fire()

proc update(clock: OnChainClock) {.async: (raises: []).} =
  try:
    if latest =? (await clock.provider.getBlock(BlockTag.latest)):
      clock.update(latest)
  except CatchableError as error:
    debug "error updating clock: ", error = error.msg

method start*(clock: OnChainClock) {.async.} =
  if clock.started:
    return

  proc onBlock(blckResult: ?!Block) =
    if eventError =? blckResult.errorOption:
      error "There was an error in block subscription", msg = eventError.msg
      return

    # ignore block parameter; hardhat may call this with pending blocks
    clock.trackedFutures.track(clock.update())

  await clock.update()

  clock.subscription = await clock.provider.subscribe(onBlock)
  clock.started = true

method stop*(clock: OnChainClock) {.async.} =
  if not clock.started:
    return

  await clock.subscription.unsubscribe()
  await clock.trackedFutures.cancelTracked()
  clock.started = false

method now*(clock: OnChainClock): SecondsSince1970 =
  doAssert clock.started, "clock should be started before calling now()"
  return toUnix(getTime() + clock.offset)

method waitUntil*(
    clock: OnChainClock, time: SecondsSince1970
) {.async: (raises: [CancelledError]).} =
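  # Wake on each new block (which may advance the clock) or when the remaining
  # time elapses, re-checking the target time on every iteration.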
  while (let difference = time - clock.now(); difference > 0):
    clock.newBlock.clear()
    discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))
@ -1,104 +0,0 @@
import pkg/contractabi
import pkg/ethers/contracts/fields
import pkg/questionable/results

export contractabi

const DefaultRequestCacheSize* = 128.uint16
const DefaultMaxPriorityFeePerGas* = 1_000_000_000.uint64

type
  MarketplaceConfig* = object
    collateral*: CollateralConfig
    proofs*: ProofConfig
    reservations*: SlotReservationsConfig
    requestDurationLimit*: uint64

  CollateralConfig* = object
    repairRewardPercentage*: uint8
      # percentage of remaining collateral a slot has after it has been freed
    maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
    slashPercentage*: uint8 # percentage of the collateral that is slashed
    validatorRewardPercentage*: uint8
      # percentage of the slashed amount going to the validators

  ProofConfig* = object
    period*: uint64 # proof requirements are calculated per period (in seconds)
    timeout*: uint64 # mark proofs as missing before the timeout (in seconds)
    downtime*: uint8 # ignore this many recent blocks for proof requirements
    downtimeProduct*: uint8
      # Ensures the pointer does not remain in downtime for many consecutive
      # periods. For each period increase, move the pointer `pointerProduct`
      # blocks. Should be a prime number to ensure there are no cycles.
    zkeyHash*: string # hash of the zkey file which is linked to the verifier

  SlotReservationsConfig* = object
    maxReservations*: uint8

func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
  ProofConfig(
    period: tupl[0],
    timeout: tupl[1],
    downtime: tupl[2],
    downtimeProduct: tupl[3],
    zkeyHash: tupl[4],
  )

func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig =
  SlotReservationsConfig(maxReservations: tupl[0])

func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
  CollateralConfig(
    repairRewardPercentage: tupl[0],
    maxNumberOfSlashes: tupl[1],
    slashPercentage: tupl[2],
    validatorRewardPercentage: tupl[3],
  )

func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
  MarketplaceConfig(
    collateral: tupl[0],
    proofs: tupl[1],
    reservations: tupl[2],
    requestDurationLimit: tupl[3],
  )

func solidityType*(_: type SlotReservationsConfig): string =
  solidityType(SlotReservationsConfig.fieldTypes)

func solidityType*(_: type ProofConfig): string =
  solidityType(ProofConfig.fieldTypes)

func solidityType*(_: type CollateralConfig): string =
  solidityType(CollateralConfig.fieldTypes)

func solidityType*(_: type MarketplaceConfig): string =
  solidityType(MarketplaceConfig.fieldTypes)

func encode*(encoder: var AbiEncoder, slot: SlotReservationsConfig) =
  encoder.write(slot.fieldValues)

func encode*(encoder: var AbiEncoder, slot: ProofConfig) =
  encoder.write(slot.fieldValues)

func encode*(encoder: var AbiEncoder, slot: CollateralConfig) =
  encoder.write(slot.fieldValues)

func encode*(encoder: var AbiEncoder, slot: MarketplaceConfig) =
  encoder.write(slot.fieldValues)

func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T =
  let tupl = ?decoder.read(ProofConfig.fieldTypes)
  success ProofConfig.fromTuple(tupl)

func decode*(decoder: var AbiDecoder, T: type SlotReservationsConfig): ?!T =
  let tupl = ?decoder.read(SlotReservationsConfig.fieldTypes)
  success SlotReservationsConfig.fromTuple(tupl)

func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T =
  let tupl = ?decoder.read(CollateralConfig.fieldTypes)
  success CollateralConfig.fromTuple(tupl)

func decode*(decoder: var AbiDecoder, T: type MarketplaceConfig): ?!T =
  let tupl = ?decoder.read(MarketplaceConfig.fieldTypes)
  success MarketplaceConfig.fromTuple(tupl)
@ -1,51 +0,0 @@
import std/os
import std/tables
import pkg/ethers
import pkg/questionable

import ../conf
import ../logutils
import ./marketplace

type Deployment* = ref object
  provider: Provider
  marketplaceAddressOverride: ?Address

const knownAddresses = {
  # Hardhat localhost network
  "31337":
    {"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44")}.toTable,
  # Taiko Alpha-3 Testnet
  "167005":
    {"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
  # Codex Testnet - Jun 19 2025 13:11:56 PM (+00:00 UTC)
  "789987":
    {"Marketplace": Address.init("0x5378a4EA5dA2a548ce22630A3AE74b052000C62D")}.toTable,
  # Linea (Status)
  "1660990954":
    {"Marketplace": Address.init("0x34F606C65869277f236ce07aBe9af0B8c88F486B")}.toTable,
}.toTable

proc getKnownAddress(T: type, chainId: UInt256): ?Address =
  let id = chainId.toString(10)
  notice "Looking for well-known contract address with ChainID ", chainId = id

  if not (id in knownAddresses):
    return none Address

  return knownAddresses[id].getOrDefault($T, Address.none)

proc new*(
    _: type Deployment,
    provider: Provider,
    marketplaceAddressOverride: ?Address = none Address,
): Deployment =
  Deployment(provider: provider, marketplaceAddressOverride: marketplaceAddressOverride)

proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
  when contract is Marketplace:
    if address =? deployment.marketplaceAddressOverride:
      return some address

  let chainId = await deployment.provider.getChainId()
  return contract.getKnownAddress(chainId)
@ -1,9 +0,0 @@
import ./interactions/interactions
import ./interactions/hostinteractions
import ./interactions/clientinteractions
import ./interactions/validatorinteractions

export interactions
export hostinteractions
export clientinteractions
export validatorinteractions
@ -1,26 +0,0 @@
import pkg/ethers

import ../../purchasing
import ../../logutils
import ../market
import ../clock
import ./interactions

export purchasing
export logutils

type ClientInteractions* = ref object of ContractInteractions
  purchasing*: Purchasing

proc new*(
    _: type ClientInteractions, clock: OnChainClock, purchasing: Purchasing
): ClientInteractions =
  ClientInteractions(clock: clock, purchasing: purchasing)

proc start*(self: ClientInteractions) {.async.} =
  await procCall ContractInteractions(self).start()
  await self.purchasing.start()

proc stop*(self: ClientInteractions) {.async.} =
  await self.purchasing.stop()
  await procCall ContractInteractions(self).stop()
@ -1,24 +0,0 @@
import pkg/chronos

import ../../logutils
import ../../sales
import ./interactions

export sales
export logutils

type HostInteractions* = ref object of ContractInteractions
  sales*: Sales

proc new*(_: type HostInteractions, clock: Clock, sales: Sales): HostInteractions =
  ## Create a new HostInteractions instance
  ##
  HostInteractions(clock: clock, sales: sales)

method start*(self: HostInteractions) {.async.} =
  await procCall ContractInteractions(self).start()
  await self.sales.start()

method stop*(self: HostInteractions) {.async.} =
  await self.sales.stop()
  await procCall ContractInteractions(self).stop()
@ -1,15 +0,0 @@
import pkg/ethers
import ../clock
import ../marketplace
import ../market

export clock

type ContractInteractions* = ref object of RootObj
  clock*: Clock

method start*(self: ContractInteractions) {.async, base.} =
  discard

method stop*(self: ContractInteractions) {.async, base.} =
  discard
@ -1,20 +0,0 @@
import ./interactions
import ../../validation

export validation

type ValidatorInteractions* = ref object of ContractInteractions
  validation: Validation

proc new*(
    _: type ValidatorInteractions, clock: OnChainClock, validation: Validation
): ValidatorInteractions =
  ValidatorInteractions(clock: clock, validation: validation)

proc start*(self: ValidatorInteractions) {.async.} =
  await procCall ContractInteractions(self).start()
  await self.validation.start()

proc stop*(self: ValidatorInteractions) {.async.} =
  await self.validation.stop()
  await procCall ContractInteractions(self).stop()
@ -1,680 +0,0 @@
|
||||
import std/strformat
|
||||
import std/strutils
|
||||
import pkg/ethers
|
||||
import pkg/questionable
|
||||
import pkg/lrucache
|
||||
import ../utils/exceptions
|
||||
import ../logutils
|
||||
import ../market
|
||||
import ./marketplace
|
||||
import ./proofs
|
||||
import ./provider
|
||||
|
||||
export market
|
||||
|
||||
logScope:
|
||||
topics = "marketplace onchain market"
|
||||
|
||||
type
|
||||
OnChainMarket* = ref object of Market
|
||||
contract: Marketplace
|
||||
signer: Signer
|
||||
rewardRecipient: ?Address
|
||||
configuration: ?MarketplaceConfig
|
||||
requestCache: LruCache[string, StorageRequest]
|
||||
allowanceLock: AsyncLock
|
||||
|
||||
MarketSubscription = market.Subscription
|
||||
EventSubscription = ethers.Subscription
|
||||
OnChainMarketSubscription = ref object of MarketSubscription
|
||||
eventSubscription: EventSubscription
|
||||
|
||||
func new*(
|
||||
_: type OnChainMarket,
|
||||
contract: Marketplace,
|
||||
rewardRecipient = Address.none,
|
||||
requestCacheSize: uint16 = DefaultRequestCacheSize,
|
||||
): OnChainMarket =
|
||||
without signer =? contract.signer:
|
||||
raiseAssert("Marketplace contract should have a signer")
|
||||
|
||||
var requestCache = newLruCache[string, StorageRequest](int(requestCacheSize))
|
||||
|
||||
OnChainMarket(
|
||||
contract: contract,
|
||||
signer: signer,
|
||||
rewardRecipient: rewardRecipient,
|
||||
requestCache: requestCache,
|
||||
)
|
||||
|
||||
proc raiseMarketError(message: string) {.raises: [MarketError].} =
|
||||
raise newException(MarketError, message)
|
||||
|
||||
func prefixWith(suffix, prefix: string, separator = ": "): string =
|
||||
if prefix.len > 0:
|
||||
return &"{prefix}{separator}{suffix}"
|
||||
else:
|
||||
return suffix
|
||||
|
||||
template convertEthersError(msg: string = "", body) =
|
||||
try:
|
||||
body
|
||||
except EthersError as error:
|
||||
raiseMarketError(error.msgDetail.prefixWith(msg))
|
||||
|
||||
proc config(
|
||||
market: OnChainMarket
|
||||
): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
without resolvedConfig =? market.configuration:
|
||||
if err =? (await market.loadConfig()).errorOption:
|
||||
raiseMarketError(err.msg)
|
||||
|
||||
without config =? market.configuration:
|
||||
raiseMarketError("Failed to access to config from the Marketplace contract")
|
||||
|
||||
return config
|
||||
|
||||
return resolvedConfig
|
||||
|
||||
template withAllowanceLock*(market: OnChainMarket, body: untyped) =
|
||||
if market.allowanceLock.isNil:
|
||||
market.allowanceLock = newAsyncLock()
|
||||
await market.allowanceLock.acquire()
|
||||
try:
|
||||
body
|
||||
finally:
|
||||
try:
|
||||
market.allowanceLock.release()
|
||||
except AsyncLockError as error:
|
||||
raise newException(Defect, error.msg, error)
|
||||
|
||||
proc approveFunds(
|
||||
market: OnChainMarket, amount: UInt256
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
debug "Approving tokens", amount
|
||||
convertEthersError("Failed to approve funds"):
|
||||
let tokenAddress = await market.contract.token()
|
||||
let token = Erc20Token.new(tokenAddress, market.signer)
|
||||
let owner = await market.signer.getAddress()
|
||||
let spender = market.contract.address
|
||||
market.withAllowanceLock:
|
||||
let allowance = await token.allowance(owner, spender)
|
||||
discard await token.approve(spender, allowance + amount).confirm(1)
|
||||
|
||||
method loadConfig*(
|
||||
market: OnChainMarket
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
without config =? market.configuration:
|
||||
let fetchedConfig = await market.contract.configuration()
|
||||
|
||||
market.configuration = some fetchedConfig
|
||||
|
||||
return success()
|
||||
except EthersError as err:
|
||||
return failure newException(
|
||||
MarketError,
|
||||
"Failed to fetch the config from the Marketplace contract: " & err.msg,
|
||||
)
|
||||
|
||||
method getZkeyHash*(
|
||||
market: OnChainMarket
|
||||
): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
let config = await market.config()
|
||||
return some config.proofs.zkeyHash
|
||||
|
||||
method getSigner*(
|
||||
market: OnChainMarket
|
||||
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get signer address"):
|
||||
return await market.signer.getAddress()
|
||||
|
||||
method periodicity*(
|
||||
market: OnChainMarket
|
||||
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get Marketplace config"):
|
||||
let config = await market.config()
|
||||
let period = config.proofs.period
|
||||
return Periodicity(seconds: period)
|
||||
|
||||
method proofTimeout*(
|
||||
market: OnChainMarket
|
||||
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get Marketplace config"):
|
||||
let config = await market.config()
|
||||
return config.proofs.timeout
|
||||
|
||||
method repairRewardPercentage*(
|
||||
market: OnChainMarket
|
||||
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get Marketplace config"):
|
||||
let config = await market.config()
|
||||
return config.collateral.repairRewardPercentage
|
||||
|
||||
method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
|
||||
convertEthersError("Failed to get Marketplace config"):
|
||||
let config = await market.config()
|
||||
return config.requestDurationLimit
|
||||
|
||||
method proofDowntime*(
|
||||
market: OnChainMarket
|
||||
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get Marketplace config"):
|
||||
let config = await market.config()
|
||||
return config.proofs.downtime
|
||||
|
||||
method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
|
||||
convertEthersError("Failed to get slot pointer"):
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.getPointer(slotId, overrides)
|
||||
|
||||
method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
|
||||
convertEthersError("Failed to get my requests"):
|
||||
return await market.contract.myRequests
|
||||
|
||||
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
|
||||
convertEthersError("Failed to get my slots"):
|
||||
let slots = await market.contract.mySlots()
|
||||
debug "Fetched my slots", numSlots = len(slots)
|
||||
|
||||
return slots
|
||||
|
||||
method requestStorage(
|
||||
market: OnChainMarket, request: StorageRequest
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to request storage"):
|
||||
debug "Requesting storage"
|
||||
await market.approveFunds(request.totalPrice())
|
||||
discard await market.contract.requestStorage(request).confirm(1)
|
||||
|
||||
method getRequest*(
|
||||
market: OnChainMarket, id: RequestId
|
||||
): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
let key = $id
|
||||
|
||||
if key in market.requestCache:
|
||||
return some market.requestCache[key]
|
||||
|
||||
let request = await market.contract.getRequest(id)
|
||||
market.requestCache[key] = request
|
||||
return some request
|
||||
except Marketplace_UnknownRequest, KeyError:
|
||||
warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
|
||||
return none StorageRequest
|
||||
except EthersError as e:
|
||||
error "Cannot retrieve the request", error = e.msg
|
||||
return none StorageRequest
|
||||
|
||||
method requestState*(
|
||||
market: OnChainMarket, requestId: RequestId
|
||||
): Future[?RequestState] {.async.} =
|
||||
convertEthersError("Failed to get request state"):
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return some await market.contract.requestState(requestId, overrides)
|
||||
except Marketplace_UnknownRequest:
|
||||
return none RequestState
|
||||
|
||||
method slotState*(
|
||||
market: OnChainMarket, slotId: SlotId
|
||||
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.slotState(slotId, overrides)
|
||||
|
||||
method getRequestEnd*(
|
||||
market: OnChainMarket, id: RequestId
|
||||
): Future[SecondsSince1970] {.async.} =
|
||||
convertEthersError("Failed to get request end"):
|
||||
return await market.contract.requestEnd(id)
|
||||
|
||||
method requestExpiresAt*(
|
||||
market: OnChainMarket, id: RequestId
|
||||
): Future[SecondsSince1970] {.async.} =
|
||||
convertEthersError("Failed to get request expiry"):
|
||||
return await market.contract.requestExpiry(id)
|
||||
|
||||
method getHost(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get slot's host"):
|
||||
let slotId = slotId(requestId, slotIndex)
|
||||
let address = await market.contract.getHost(slotId)
|
||||
if address != Address.default:
|
||||
return some address
|
||||
else:
|
||||
return none Address
|
||||
|
||||
method currentCollateral*(
|
||||
market: OnChainMarket, slotId: SlotId
|
||||
): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
|
||||
convertEthersError("Failed to get slot's current collateral"):
|
||||
return await market.contract.currentCollateral(slotId)
|
||||
|
||||
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
|
||||
convertEthersError("Failed to get active slot"):
|
||||
try:
|
||||
return some await market.contract.getActiveSlot(slotId)
|
||||
except Marketplace_SlotIsFree:
|
||||
return none Slot
|
||||
|
||||
method fillSlot(
|
||||
market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
slotIndex: uint64,
|
||||
proof: Groth16Proof,
|
||||
collateral: UInt256,
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to fill slot"):
|
||||
logScope:
|
||||
requestId
|
||||
slotIndex
|
||||
|
||||
try:
|
||||
await market.approveFunds(collateral)
|
||||
|
||||
# Add 10% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the last one to fill a slot in this request
|
||||
trace "estimating gas for fillSlot"
|
||||
let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
|
||||
let gasLimit = (gas * 110) div 100
|
||||
let overrides = TransactionOverrides(gasLimit: some gasLimit)
|
||||
|
||||
trace "calling fillSlot on contract", estimatedGas = gas, gasLimit = gasLimit
|
||||
discard await market.contract
|
||||
.fillSlot(requestId, slotIndex, proof, overrides)
|
||||
.confirm(1)
|
||||
trace "fillSlot transaction completed"
|
||||
except Marketplace_SlotNotFree as parent:
|
||||
raise newException(
|
||||
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
|
||||
parent,
|
||||
)
|
||||
|
||||
method freeSlot*(
|
||||
market: OnChainMarket, slotId: SlotId
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to free slot"):
|
||||
try:
|
||||
var freeSlot: Future[Confirmable]
|
||||
if rewardRecipient =? market.rewardRecipient:
|
||||
# If --reward-recipient specified, use it as the reward recipient, and use
|
||||
# the SP's address as the collateral recipient
|
||||
let collateralRecipient = await market.getSigner()
|
||||
|
||||
# Add 200% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the one to make the request fail
|
||||
let gas = await market.contract.estimateGas.freeSlot(
|
||||
slotId, rewardRecipient, collateralRecipient
|
||||
)
|
||||
let gasLimit = gas * 3
|
||||
let overrides = TransactionOverrides(gasLimit: some gasLimit)
|
||||
|
||||
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
|
||||
|
||||
freeSlot = market.contract.freeSlot(
|
||||
slotId,
|
||||
rewardRecipient, # --reward-recipient
|
||||
collateralRecipient, # SP's address
|
||||
overrides,
|
||||
)
|
||||
else:
|
||||
# Otherwise, use the SP's address as both the reward and collateral
|
||||
# recipient (the contract will use msg.sender for both)
|
||||
|
||||
# Add 200% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the one to make the request fail
|
||||
let gas = await market.contract.estimateGas.freeSlot(slotId)
|
||||
let gasLimit = gas * 3
|
||||
let overrides = TransactionOverrides(gasLimit: some (gasLimit))
|
||||
|
||||
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
|
||||
|
||||
freeSlot = market.contract.freeSlot(slotId, overrides)
|
||||
|
||||
discard await freeSlot.confirm(1)
|
||||
except Marketplace_SlotIsFree as parent:
|
||||
raise newException(
|
||||
SlotStateMismatchError, "Failed to free slot, slot is already free", parent
|
||||
)
|
||||
|
||||
method withdrawFunds(
|
||||
market: OnChainMarket, requestId: RequestId
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to withdraw funds"):
|
||||
discard await market.contract.withdrawFunds(requestId).confirm(1)
|
||||
|
||||
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
|
||||
convertEthersError("Failed to get proof requirement"):
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.isProofRequired(id, overrides)
|
||||
except Marketplace_SlotIsFree:
|
||||
return false
|
||||
|
||||
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
|
||||
convertEthersError("Failed to get future proof requirement"):
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.willProofBeRequired(id, overrides)
|
||||
except Marketplace_SlotIsFree:
|
||||
return false
|
||||
|
||||
method getChallenge*(
|
||||
market: OnChainMarket, id: SlotId
|
||||
): Future[ProofChallenge] {.async.} =
|
||||
convertEthersError("Failed to get proof challenge"):
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.getChallenge(id, overrides)
|
||||
|
||||
method submitProof*(
|
||||
market: OnChainMarket, id: SlotId, proof: Groth16Proof
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to submit proof"):
|
||||
try:
|
||||
discard await market.contract.submitProof(id, proof).confirm(1)
|
||||
except Proofs_InvalidProof as parent:
|
||||
raise newException(
|
||||
ProofInvalidError, "Failed to submit proof because the proof is invalid", parent
|
||||
)
|
||||
|
||||
method markProofAsMissing*(
|
||||
market: OnChainMarket, id: SlotId, period: Period
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to mark proof as missing"):
|
||||
# Add 50% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the one to make the request fail
|
||||
let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
|
||||
let gasLimit = (gas * 150) div 100
|
||||
let overrides = TransactionOverrides(gasLimit: some gasLimit)
|
||||
|
||||
trace "calling markProofAsMissing on contract",
|
||||
estimatedGas = gas, gasLimit = gasLimit
|
||||
|
||||
discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
|
||||
|
||||
method canMarkProofAsMissing*(
|
||||
market: OnChainMarket, id: SlotId, period: Period
|
||||
): Future[bool] {.async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
discard await market.contract.canMarkProofAsMissing(id, period, overrides)
|
||||
return true
|
||||
except EthersError as e:
|
||||
trace "Proof cannot be marked as missing", msg = e.msg
|
||||
return false
|
||||
|
||||
method reserveSlot*(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to reserve slot"):
|
||||
try:
|
||||
# Add 25% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the last one that is allowed to reserve the slot
|
||||
let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
|
||||
let gasLimit = (gas * 125) div 100
|
||||
let overrides = TransactionOverrides(gasLimit: some gasLimit)
|
||||
|
||||
trace "calling reserveSlot on contract", estimatedGas = gas, gasLimit = gasLimit
|
||||
|
||||
discard
|
||||
await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
|
||||
except SlotReservations_ReservationNotAllowed:
|
||||
raise newException(
|
||||
SlotReservationNotAllowedError,
|
||||
"Failed to reserve slot because reservation is not allowed",
|
||||
)
|
||||
|
||||
method canReserveSlot*(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
): Future[bool] {.async.} =
|
||||
convertEthersError("Unable to determine if slot can be reserved"):
|
||||
return await market.contract.canReserveSlot(requestId, slotIndex)
|
||||
|
||||
method subscribeRequests*(
|
||||
market: OnChainMarket, callback: OnRequest
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in Request subscription", msg = eventErr.msg
|
||||
return
|
||||
|
||||
callback(event.requestId, event.ask, event.expiry)
|
||||
|
||||
convertEthersError("Failed to subscribe to StorageRequested events"):
|
||||
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeSlotFilled*(
|
||||
market: OnChainMarket, callback: OnSlotFilled
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in SlotFilled subscription", msg = eventErr.msg
|
||||
return
|
||||
|
||||
callback(event.requestId, event.slotIndex)
|
||||
|
||||
convertEthersError("Failed to subscribe to SlotFilled events"):
|
||||
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeSlotFilled*(
|
||||
market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
slotIndex: uint64,
|
||||
callback: OnSlotFilled,
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: uint64) =
|
||||
if eventRequestId == requestId and eventSlotIndex == slotIndex:
|
||||
callback(requestId, slotIndex)
|
||||
|
||||
convertEthersError("Failed to subscribe to SlotFilled events"):
|
||||
return await market.subscribeSlotFilled(onSlotFilled)
|
||||
|
||||
method subscribeSlotFreed*(
|
||||
market: OnChainMarket, callback: OnSlotFreed
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in SlotFreed subscription", msg = eventErr.msg
|
||||
return
|
||||
|
||||
callback(event.requestId, event.slotIndex)
|
||||
|
||||
convertEthersError("Failed to subscribe to SlotFreed events"):
|
||||
let subscription = await market.contract.subscribe(SlotFreed, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeSlotReservationsFull*(
|
||||
market: OnChainMarket, callback: OnSlotReservationsFull
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in SlotReservationsFull subscription",
|
||||
msg = eventErr.msg
|
||||
return
|
||||
|
||||
callback(event.requestId, event.slotIndex)
|
||||
|
||||
convertEthersError("Failed to subscribe to SlotReservationsFull events"):
|
||||
let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeFulfillment(
|
||||
market: OnChainMarket, callback: OnFulfillment
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
|
||||
return
|
||||
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError("Failed to subscribe to RequestFulfilled events"):
|
||||
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeFulfillment(
|
||||
market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
|
||||
return
|
||||
|
||||
if event.requestId == requestId:
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError("Failed to subscribe to RequestFulfilled events"):
|
||||
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
method subscribeRequestCancelled*(
|
||||
market: OnChainMarket, callback: OnRequestCancelled
|
||||
): Future[MarketSubscription] {.async.} =
|
||||
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
|
||||
without event =? eventResult, eventErr:
|
||||
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
|
||||
return
|
||||
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError("Failed to subscribe to RequestCancelled events"):
|
||||
    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeRequestCancelled*(
    market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestCancelled subscription", msg = eventErr.msg
      return

    if event.requestId == requestId:
      callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestCancelled events"):
    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeRequestFailed*(
    market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFailed subscription", msg = eventErr.msg
      return

    callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestFailed events"):
    let subscription = await market.contract.subscribe(RequestFailed, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeRequestFailed*(
    market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFailed subscription", msg = eventErr.msg
      return

    if event.requestId == requestId:
      callback(event.requestId)

  convertEthersError("Failed to subscribe to RequestFailed events"):
    let subscription = await market.contract.subscribe(RequestFailed, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeProofSubmission*(
    market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
      return

    callback(event.id)

  convertEthersError("Failed to subscribe to ProofSubmitted events"):
    let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
  await subscription.eventSubscription.unsubscribe()

method queryPastSlotFilledEvents*(
    market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} =
  convertEthersError("Failed to get past SlotFilled events from block"):
    return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)

method queryPastSlotFilledEvents*(
    market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} =
  convertEthersError("Failed to get past SlotFilled events"):
    let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)

    return await market.queryPastSlotFilledEvents(fromBlock)

method queryPastSlotFilledEvents*(
    market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} =
  convertEthersError("Failed to get past SlotFilled events from time"):
    let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
    return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))

method queryPastStorageRequestedEvents*(
    market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} =
  convertEthersError("Failed to get past StorageRequested events from block"):
    return
      await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)

method queryPastStorageRequestedEvents*(
    market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} =
  convertEthersError("Failed to get past StorageRequested events"):
    let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)

    return await market.queryPastStorageRequestedEvents(fromBlock)

method slotCollateral*(
    market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
  let slotid = slotId(requestId, slotIndex)

  try:
    let slotState = await market.slotState(slotid)

    without request =? await market.getRequest(requestId):
      return failure newException(
        MarketError, "Failure calculating the slotCollateral, cannot get the request"
      )

    return market.slotCollateral(request.ask.collateralPerSlot, slotState)
  except MarketError as error:
    error "Error when trying to calculate the slotCollateral", error = error.msg
    return failure error

method slotCollateral*(
    market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.raises: [].} =
  if slotState == SlotState.Repair:
    without repairRewardPercentage =?
      market.configuration .? collateral .? repairRewardPercentage:
      return failure newException(
        MarketError,
        "Failure calculating the slotCollateral, cannot get the reward percentage",
      )

    return success (
      collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(
        100.u256
      )
    )

  return success(collateralPerSlot)
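# Worked example of the Repair branch above (hypothetical values, not taken
# from any deployed configuration): with collateralPerSlot = 1000 and
# repairRewardPercentage = 10, filling a slot in the Repair state requires
# 1000 - (1000 * 10) div 100 = 900, i.e. the collateral is discounted by
# exactly the configured repair reward percentage.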
@ -1,198 +0,0 @@
import pkg/ethers
import pkg/ethers/erc20
import pkg/json_rpc/rpcclient
import pkg/stint
import pkg/chronos
import ../clock
import ./requests
import ./proofs
import ./config

export stint
export ethers except `%`, `%*`, toJson
export erc20 except `%`, `%*`, toJson
export config
export requests

type
  Marketplace* = ref object of Contract

  Marketplace_RepairRewardPercentageTooHigh* = object of SolidityError
  Marketplace_SlashPercentageTooHigh* = object of SolidityError
  Marketplace_MaximumSlashingTooHigh* = object of SolidityError
  Marketplace_InvalidExpiry* = object of SolidityError
  Marketplace_InvalidMaxSlotLoss* = object of SolidityError
  Marketplace_InsufficientSlots* = object of SolidityError
  Marketplace_InvalidClientAddress* = object of SolidityError
  Marketplace_RequestAlreadyExists* = object of SolidityError
  Marketplace_InvalidSlot* = object of SolidityError
  Marketplace_SlotNotFree* = object of SolidityError
  Marketplace_InvalidSlotHost* = object of SolidityError
  Marketplace_AlreadyPaid* = object of SolidityError
  Marketplace_TransferFailed* = object of SolidityError
  Marketplace_UnknownRequest* = object of SolidityError
  Marketplace_InvalidState* = object of SolidityError
  Marketplace_StartNotBeforeExpiry* = object of SolidityError
  Marketplace_SlotNotAcceptingProofs* = object of SolidityError
  Marketplace_SlotIsFree* = object of SolidityError
  Marketplace_ReservationRequired* = object of SolidityError
  Marketplace_NothingToWithdraw* = object of SolidityError
  Marketplace_InsufficientDuration* = object of SolidityError
  Marketplace_InsufficientProofProbability* = object of SolidityError
  Marketplace_InsufficientCollateral* = object of SolidityError
  Marketplace_InsufficientReward* = object of SolidityError
  Marketplace_InvalidCid* = object of SolidityError
  Marketplace_DurationExceedsLimit* = object of SolidityError
  Proofs_InsufficientBlockHeight* = object of SolidityError
  Proofs_InvalidProof* = object of SolidityError
  Proofs_ProofAlreadySubmitted* = object of SolidityError
  Proofs_PeriodNotEnded* = object of SolidityError
  Proofs_ValidationTimedOut* = object of SolidityError
  Proofs_ProofNotMissing* = object of SolidityError
  Proofs_ProofNotRequired* = object of SolidityError
  Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
  Periods_InvalidSecondsPerPeriod* = object of SolidityError
  SlotReservations_ReservationNotAllowed* = object of SolidityError

proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.}
proc currentCollateral*(
  marketplace: Marketplace, id: SlotId
): UInt256 {.contract, view.}

proc requestStorage*(
  marketplace: Marketplace, request: StorageRequest
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
    Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
    Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration,
    Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral,
    Marketplace_InsufficientReward, Marketplace_InvalidCid,
  ]
.}

proc fillSlot*(
  marketplace: Marketplace, requestId: RequestId, slotIndex: uint64, proof: Groth16Proof
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidSlot, Marketplace_ReservationRequired, Marketplace_SlotNotFree,
    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest,
  ]
.}

proc withdrawFunds*(
  marketplace: Marketplace, requestId: RequestId
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidClientAddress, Marketplace_InvalidState,
    Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
  ]
.}

proc withdrawFunds*(
  marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidClientAddress, Marketplace_InvalidState,
    Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
  ]
.}

proc freeSlot*(
  marketplace: Marketplace, id: SlotId
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
  ]
.}

proc freeSlot*(
  marketplace: Marketplace,
  id: SlotId,
  rewardRecipient: Address,
  collateralRecipient: Address,
): Confirmable {.
  contract,
  errors: [
    Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
  ]
.}

proc getRequest*(
  marketplace: Marketplace, id: RequestId
): StorageRequest {.contract, view, errors: [Marketplace_UnknownRequest].}

proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
proc getActiveSlot*(
  marketplace: Marketplace, id: SlotId
): Slot {.contract, view, errors: [Marketplace_SlotIsFree].}

proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
proc requestState*(
  marketplace: Marketplace, requestId: RequestId
): RequestState {.contract, view, errors: [Marketplace_UnknownRequest].}

proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
proc requestEnd*(
  marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}

proc requestExpiry*(
  marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}

proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc getChallenge*(
  marketplace: Marketplace, id: SlotId
): array[32, byte] {.contract, view.}

proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}

proc submitProof*(
  marketplace: Marketplace, id: SlotId, proof: Groth16Proof
): Confirmable {.
  contract,
  errors:
    [Proofs_ProofAlreadySubmitted, Proofs_InvalidProof, Marketplace_UnknownRequest]
.}

proc markProofAsMissing*(
  marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
  contract,
  errors: [
    Marketplace_SlotNotAcceptingProofs, Marketplace_StartNotBeforeExpiry,
    Proofs_PeriodNotEnded, Proofs_ValidationTimedOut, Proofs_ProofNotMissing,
    Proofs_ProofNotRequired, Proofs_ProofAlreadyMarkedMissing,
  ]
.}

proc canMarkProofAsMissing*(
  marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
  contract,
  errors: [
    Marketplace_SlotNotAcceptingProofs, Proofs_PeriodNotEnded,
    Proofs_ValidationTimedOut, Proofs_ProofNotMissing, Proofs_ProofNotRequired,
    Proofs_ProofAlreadyMarkedMissing,
  ]
.}

proc reserveSlot*(
  marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): Confirmable {.contract.}

proc canReserveSlot*(
  marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): bool {.contract, view.}
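# A minimal usage sketch (hypothetical; `provider` and `address` are assumed
# to come from the caller's environment, and `Marketplace.new` is the usual
# nim-ethers contract constructor). The `{.contract.}` pragma turns each
# declaration above into a generated ABI call, so view functions read like
# plain procs:
proc printMySlots(provider: Provider, address: Address) {.async.} =
  let marketplace = Marketplace.new(address, provider)
  let slots = await marketplace.mySlots() # an eth_call under the hood
  echo "filled slots: ", slots.len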
@ -1,46 +0,0 @@
import pkg/stint
import pkg/contractabi
import pkg/ethers/contracts/fields

type
  Groth16Proof* = object
    a*: G1Point
    b*: G2Point
    c*: G1Point

  G1Point* = object
    x*: UInt256
    y*: UInt256

  # A field element F_{p^2} encoded as `real + i * imag`
  Fp2Element* = object
    real*: UInt256
    imag*: UInt256

  G2Point* = object
    x*: Fp2Element
    y*: Fp2Element

func solidityType*(_: type G1Point): string =
  solidityType(G1Point.fieldTypes)

func solidityType*(_: type Fp2Element): string =
  solidityType(Fp2Element.fieldTypes)

func solidityType*(_: type G2Point): string =
  solidityType(G2Point.fieldTypes)

func solidityType*(_: type Groth16Proof): string =
  solidityType(Groth16Proof.fieldTypes)

func encode*(encoder: var AbiEncoder, point: G1Point) =
  encoder.write(point.fieldValues)

func encode*(encoder: var AbiEncoder, element: Fp2Element) =
  encoder.write(element.fieldValues)

func encode*(encoder: var AbiEncoder, point: G2Point) =
  encoder.write(point.fieldValues)

func encode*(encoder: var AbiEncoder, proof: Groth16Proof) =
  encoder.write(proof.fieldValues)
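# Encoding sketch: since every type above delegates to `fieldValues`, a whole
# proof serializes as nested static tuples - 8 words of 32 bytes in total.
# The point values below are hypothetical, for illustration only:
proc exampleProofEncoding(): seq[byte] =
  let proof = Groth16Proof(
    a: G1Point(x: 1.u256, y: 2.u256),
    b: G2Point(
      x: Fp2Element(real: 3.u256, imag: 4.u256),
      y: Fp2Element(real: 5.u256, imag: 6.u256),
    ),
    c: G1Point(x: 7.u256, y: 8.u256),
  )
  AbiEncoder.encode(proof) # 256 bytes: a.x, a.y, b.x.real, ..., c.y.imag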
@ -1,123 +0,0 @@
import pkg/ethers/provider
import pkg/chronos
import pkg/questionable

import ../logutils

from ../clock import SecondsSince1970

logScope:
  topics = "marketplace onchain provider"

proc raiseProviderError(message: string) {.raises: [ProviderError].} =
  raise newException(ProviderError, message)

proc blockNumberAndTimestamp*(
    provider: Provider, blockTag: BlockTag
): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError, CancelledError]).} =
  without latestBlock =? await provider.getBlock(blockTag):
    raiseProviderError("Could not get latest block")

  without latestBlockNumber =? latestBlock.number:
    raiseProviderError("Could not get latest block number")

  return (latestBlockNumber, latestBlock.timestamp)

proc binarySearchFindClosestBlock(
    provider: Provider, epochTime: int, low: UInt256, high: UInt256
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
  let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low))
  let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high))
  if abs(lowTimestamp.truncate(int) - epochTime) <
      abs(highTimestamp.truncate(int) - epochTime):
    return low
  else:
    return high

proc binarySearchBlockNumberForEpoch(
    provider: Provider,
    epochTime: UInt256,
    latestBlockNumber: UInt256,
    earliestBlockNumber: UInt256,
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
  var low = earliestBlockNumber
  var high = latestBlockNumber

  while low <= high:
    if low == 0 and high == 0:
      return low
    let mid = (low + high) div 2
    let (midBlockNumber, midBlockTimestamp) =
      await provider.blockNumberAndTimestamp(BlockTag.init(mid))

    if midBlockTimestamp < epochTime:
      low = mid + 1
    elif midBlockTimestamp > epochTime:
      high = mid - 1
    else:
      return midBlockNumber
  # NOTICE that, by the way the binary search is implemented, when it finishes
  # low is always greater than high - this is why we pass high where
  # intuitively we would use low:
  await provider.binarySearchFindClosestBlock(
    epochTime.truncate(int), low = high, high = low
  )

proc blockNumberForEpoch*(
    provider: Provider, epochTime: SecondsSince1970
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
  let epochTimeUInt256 = epochTime.u256
  let (latestBlockNumber, latestBlockTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.latest)
  let (earliestBlockNumber, earliestBlockTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.earliest)

  # Initially we used the average block time to predict
  # the number of blocks we need to look back in order to find
  # the block number corresponding to the given epoch time.
  # That estimation can be highly inaccurate if the block time
  # was changing in the past or is fluctuating, and therefore
  # we used that information initially only to find out
  # whether the available history is long enough for an effective search.
  # It turns out we do not have to do that. There is an easier way.
  #
  # First we check if the given epoch time equals the timestamp of either
  # the earliest or the latest block. If it does, we just return the
  # block number of that block.
  #
  # Otherwise, if the earliest available block is not the genesis block,
  # we check the timestamp of that earliest block, and if it is greater
  # than the epoch time, we issue a warning and return
  # that earliest block number.
  # In all other cases - when the earliest block is not the genesis
  # block but its timestamp is not greater than the requested epoch time, or
  # when the earliest available block is the genesis block
  # (which means we have the whole history available) - we proceed with
  # the binary search.
  #
  # An additional benefit of this method is that we do not have to rely
  # on the average block time, which not only makes the whole thing
  # more reliable, but also easier to test.

  # Are we lucky today?
  if earliestBlockTimestamp == epochTimeUInt256:
    return earliestBlockNumber
  if latestBlockTimestamp == epochTimeUInt256:
    return latestBlockNumber

  if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
    let availableHistoryInDays =
      (latestBlockTimestamp - earliestBlockTimestamp) div 1.days.secs.u256
    warn "Short block history detected.",
      earliestBlockTimestamp = earliestBlockTimestamp, days = availableHistoryInDays
    return earliestBlockNumber

  return await provider.binarySearchBlockNumberForEpoch(
    epochTimeUInt256, latestBlockNumber, earliestBlockNumber
  )

proc pastBlockTag*(
    provider: Provider, blocksAgo: int
): Future[BlockTag] {.async: (raises: [ProviderError, CancelledError]).} =
  let head = await provider.getBlockNumber()
  return BlockTag.init(head - blocksAgo.abs.u256)
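# Worked example of the search above (hypothetical chain): with block
# timestamps [block 0: 100, block 1: 110, block 2: 120, block 3: 130] and
# epochTime = 113, the loop ends with low = 2 and high = 1;
# binarySearchFindClosestBlock then compares |110 - 113| against |120 - 113|
# and returns block 1, the block whose timestamp is closest to the epoch.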
@ -1,206 +0,0 @@
import std/hashes
import std/sequtils
import std/typetraits
import pkg/contractabi
import pkg/nimcrypto/keccak
import pkg/ethers/contracts/fields
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/libp2p/[cid, multicodec]
import ../logutils
import ../utils/json
from ../errors import mapFailure

export contractabi

type
  StorageRequest* = object
    client* {.serialize.}: Address
    ask* {.serialize.}: StorageAsk
    content* {.serialize.}: StorageContent
    expiry* {.serialize.}: uint64
    nonce*: Nonce

  StorageAsk* = object
    proofProbability* {.serialize.}: UInt256
    pricePerBytePerSecond* {.serialize.}: UInt256
    collateralPerByte* {.serialize.}: UInt256
    slots* {.serialize.}: uint64
    slotSize* {.serialize.}: uint64
    duration* {.serialize.}: uint64
    maxSlotLoss* {.serialize.}: uint64

  StorageContent* = object
    cid* {.serialize.}: Cid
    merkleRoot*: array[32, byte]

  Slot* = object
    request* {.serialize.}: StorageRequest
    slotIndex* {.serialize.}: uint64

  SlotId* = distinct array[32, byte]
  RequestId* = distinct array[32, byte]
  Nonce* = distinct array[32, byte]
  RequestState* {.pure.} = enum
    New
    Started
    Cancelled
    Finished
    Failed

  SlotState* {.pure.} = enum
    Free
    Filled
    Finished
    Failed
    Paid
    Cancelled
    Repair

proc `==`*(x, y: Nonce): bool {.borrow.}
proc `==`*(x, y: RequestId): bool {.borrow.}
proc `==`*(x, y: SlotId): bool {.borrow.}
proc hash*(x: SlotId): Hash {.borrow.}
proc hash*(x: Nonce): Hash {.borrow.}
proc hash*(x: Address): Hash {.borrow.}

func toArray*(id: RequestId | SlotId | Nonce): array[32, byte] =
  array[32, byte](id)

proc `$`*(id: RequestId | SlotId | Nonce): string =
  id.toArray.toHex

proc fromHex*(T: type RequestId, hex: string): T =
  T array[32, byte].fromHex(hex)

proc fromHex*(T: type SlotId, hex: string): T =
  T array[32, byte].fromHex(hex)

proc fromHex*(T: type Nonce, hex: string): T =
  T array[32, byte].fromHex(hex)

proc fromHex*[T: distinct](_: type T, hex: string): T =
  type baseType = T.distinctBase
  T baseType.fromHex(hex)

proc toHex*[T: distinct](id: T): string =
  type baseType = T.distinctBase
  baseType(id).toHex

logutils.formatIt(LogFormat.textLines, Nonce):
  it.short0xHexLog
logutils.formatIt(LogFormat.textLines, RequestId):
  it.short0xHexLog
logutils.formatIt(LogFormat.textLines, SlotId):
  it.short0xHexLog
logutils.formatIt(LogFormat.json, Nonce):
  it.to0xHexLog
logutils.formatIt(LogFormat.json, RequestId):
  it.to0xHexLog
logutils.formatIt(LogFormat.json, SlotId):
  it.to0xHexLog

func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
  StorageRequest(
    client: tupl[0], ask: tupl[1], content: tupl[2], expiry: tupl[3], nonce: tupl[4]
  )

func fromTuple(_: type Slot, tupl: tuple): Slot =
  Slot(request: tupl[0], slotIndex: tupl[1])

func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
  StorageAsk(
    proofProbability: tupl[0],
    pricePerBytePerSecond: tupl[1],
    collateralPerByte: tupl[2],
    slots: tupl[3],
    slotSize: tupl[4],
    duration: tupl[5],
    maxSlotLoss: tupl[6],
  )

func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
  StorageContent(cid: tupl[0], merkleRoot: tupl[1])

func solidityType*(_: type Cid): string =
  solidityType(seq[byte])

func solidityType*(_: type StorageContent): string =
  solidityType(StorageContent.fieldTypes)

func solidityType*(_: type StorageAsk): string =
  solidityType(StorageAsk.fieldTypes)

func solidityType*(_: type StorageRequest): string =
  solidityType(StorageRequest.fieldTypes)

# Note: it seems to be ok to ignore the vbuffer offset for now
func encode*(encoder: var AbiEncoder, cid: Cid) =
  encoder.write(cid.data.buffer)

func encode*(encoder: var AbiEncoder, content: StorageContent) =
  encoder.write(content.fieldValues)

func encode*(encoder: var AbiEncoder, ask: StorageAsk) =
  encoder.write(ask.fieldValues)

func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) =
  encoder.write(id.toArray)

func encode*(encoder: var AbiEncoder, request: StorageRequest) =
  encoder.write(request.fieldValues)

func encode*(encoder: var AbiEncoder, slot: Slot) =
  encoder.write(slot.fieldValues)

func decode*(decoder: var AbiDecoder, T: type Cid): ?!T =
  let data = ?decoder.read(seq[byte])
  Cid.init(data).mapFailure

func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T =
  let tupl = ?decoder.read(StorageContent.fieldTypes)
  success StorageContent.fromTuple(tupl)

func decode*(decoder: var AbiDecoder, T: type StorageAsk): ?!T =
  let tupl = ?decoder.read(StorageAsk.fieldTypes)
  success StorageAsk.fromTuple(tupl)

func decode*(decoder: var AbiDecoder, T: type StorageRequest): ?!T =
  let tupl = ?decoder.read(StorageRequest.fieldTypes)
  success StorageRequest.fromTuple(tupl)

func decode*(decoder: var AbiDecoder, T: type Slot): ?!T =
  let tupl = ?decoder.read(Slot.fieldTypes)
  success Slot.fromTuple(tupl)

func id*(request: StorageRequest): RequestId =
  let encoding = AbiEncoder.encode((request,))
  RequestId(keccak256.digest(encoding).data)

func slotId*(requestId: RequestId, slotIndex: uint64): SlotId =
  let encoding = AbiEncoder.encode((requestId, slotIndex))
  SlotId(keccak256.digest(encoding).data)

func slotId*(request: StorageRequest, slotIndex: uint64): SlotId =
  slotId(request.id, slotIndex)

func id*(slot: Slot): SlotId =
  slotId(slot.request, slot.slotIndex)

func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 =
  ask.pricePerBytePerSecond * ask.slotSize.u256

func pricePerSlot*(ask: StorageAsk): UInt256 =
  ask.duration.u256 * ask.pricePerSlotPerSecond

func totalPrice*(ask: StorageAsk): UInt256 =
  ask.slots.u256 * ask.pricePerSlot

func totalPrice*(request: StorageRequest): UInt256 =
  request.ask.totalPrice

func collateralPerSlot*(ask: StorageAsk): UInt256 =
  ask.collateralPerByte * ask.slotSize.u256

func size*(ask: StorageAsk): uint64 =
  ask.slots * ask.slotSize
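# Worked pricing example (hypothetical numbers): for an ask with
# pricePerBytePerSecond = 2, slotSize = 1024, duration = 60 and slots = 3:
#   pricePerSlotPerSecond = 2 * 1024   = 2048
#   pricePerSlot          = 60 * 2048  = 122880
#   totalPrice            = 3 * 122880 = 368640
# and collateralPerSlot = collateralPerByte * 1024, analogously.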
@ -1,25 +0,0 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import ./erasure/erasure
import ./erasure/backends/leopard

export erasure

func leoEncoderProvider*(
    size, buffers, parity: int
): EncoderBackend {.raises: [Defect].} =
  ## create new Leo Encoder
  LeoEncoderBackend.new(size, buffers, parity)

func leoDecoderProvider*(
    size, buffers, parity: int
): DecoderBackend {.raises: [Defect].} =
  ## create new Leo Decoder
  LeoDecoderBackend.new(size, buffers, parity)
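# Hypothetical wiring sketch (`store` and `taskpool` are assumed to exist in
# the caller's scope): the two providers are handed to `Erasure.new`, which
# invokes them to build a fresh Leopard coder per encode/decode task:
#
#   let erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool)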
@ -1,44 +0,0 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [], gcsafe.}

import ../stores

type
  ErasureBackend* = ref object of RootObj
    blockSize*: int # block size in bytes
    buffers*: int # number of original pieces
    parity*: int # number of redundancy pieces

  EncoderBackend* = ref object of ErasureBackend
  DecoderBackend* = ref object of ErasureBackend

method release*(self: ErasureBackend) {.base, gcsafe.} =
  ## release the backend
  ##
  raiseAssert("not implemented!")

method encode*(
    self: EncoderBackend,
    buffers, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
    dataLen, parityLen: int,
): Result[void, cstring] {.base, gcsafe.} =
  ## encode buffers using a backend
  ##
  raiseAssert("not implemented!")

method decode*(
    self: DecoderBackend,
    buffers, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
    dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] {.base, gcsafe.} =
  ## decode buffers using a backend
  ##
  raiseAssert("not implemented!")
@ -1,79 +0,0 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/options

import pkg/leopard
import pkg/results

import ../backend

type
  LeoEncoderBackend* = ref object of EncoderBackend
    encoder*: Option[LeoEncoder]

  LeoDecoderBackend* = ref object of DecoderBackend
    decoder*: Option[LeoDecoder]

method encode*(
    self: LeoEncoderBackend,
    data, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
    dataLen, parityLen: int,
): Result[void, cstring] =
  ## Encode data using the Leopard backend

  if parityLen == 0:
    return ok()

  var encoder =
    if self.encoder.isNone:
      self.encoder = (?LeoEncoder.init(self.blockSize, self.buffers, self.parity)).some
      self.encoder.get()
    else:
      self.encoder.get()

  encoder.encode(data, parity, dataLen, parityLen)

method decode*(
    self: LeoDecoderBackend,
    data, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
    dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] =
  ## Decode data using the given Leopard backend

  var decoder =
    if self.decoder.isNone:
      self.decoder = (?LeoDecoder.init(self.blockSize, self.buffers, self.parity)).some
      self.decoder.get()
    else:
      self.decoder.get()

  decoder.decode(data, parity, recovered, dataLen, parityLen, recoveredLen)

method release*(self: LeoEncoderBackend) =
  if self.encoder.isSome:
    self.encoder.get().free()

method release*(self: LeoDecoderBackend) =
  if self.decoder.isSome:
    self.decoder.get().free()

proc new*(
    T: type LeoEncoderBackend, blockSize, buffers, parity: int
): LeoEncoderBackend =
  ## Create an instance of a Leopard Encoder backend
  ##
  LeoEncoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)

proc new*(
    T: type LeoDecoderBackend, blockSize, buffers, parity: int
): LeoDecoderBackend =
  ## Create an instance of a Leopard Decoder backend
  ##
  LeoDecoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)
@ -1,730 +0,0 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [], gcsafe.}

import std/[sugar, atomics, sequtils]

import pkg/chronos
import pkg/chronos/threadsync
import pkg/chronicles
import pkg/libp2p/[multicodec, cid, multihash]
import pkg/libp2p/protobuf/minprotobuf
import pkg/taskpools

import ../logutils
import ../manifest
import ../merkletree
import ../stores
import ../clock
import ../blocktype as bt
import ../utils
import ../utils/asynciter
import ../indexingstrategy
import ../errors
import ../utils/arrayutils

import pkg/stew/byteutils

import ./backend

export backend

logScope:
  topics = "codex erasure"

type
  ## Encode a manifest into one that is erasure protected.
  ##
  ## The new manifest has K `blocks` that are encoded into
  ## additional M `parity` blocks. The resulting dataset
  ## is padded with empty blocks if it doesn't have a square
  ## shape.
  ##
  ## NOTE: The padding blocks could be excluded
  ## from transmission, but they aren't for now.
  ##
  ## The resulting dataset is logically divided into rows
  ## where a row is made up of B blocks. There are then,
  ## K + M = N rows in total, each of length B blocks. Rows
  ## are assumed to be of the same number of (B) blocks.
  ##
  ## The encoding is systematic and the rows can be
  ## read sequentially by any node without decoding.
  ##
  ## Decoding is possible with any K rows or partial K
  ## columns (with up to M blocks missing per column),
  ## or any combination thereof.
  ##
  EncoderProvider* =
    proc(size, blocks, parity: int): EncoderBackend {.raises: [Defect], noSideEffect.}

  DecoderProvider* =
    proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.}

  Erasure* = ref object
    taskPool: Taskpool
    encoderProvider*: EncoderProvider
    decoderProvider*: DecoderProvider
    store*: BlockStore

  EncodingParams = object
    ecK: Natural
    ecM: Natural
    rounded: Natural
    steps: Natural
    blocksCount: Natural
    strategy: StrategyType

  ErasureError* = object of CodexError
  InsufficientBlocksError* = object of ErasureError
    # Minimum size, in bytes, that the dataset must have had
    # for the encoding request to have succeeded with the parameters
    # provided.
    minSize*: NBytes

  EncodeTask = object
    success: Atomic[bool]
    erasure: ptr Erasure
    blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
    parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
    blockSize, blocksLen, parityLen: int
    signal: ThreadSignalPtr

  DecodeTask = object
    success: Atomic[bool]
    erasure: ptr Erasure
    blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
    parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
    recovered: ptr UncheckedArray[ptr UncheckedArray[byte]]
    blockSize, blocksLen: int
    parityLen, recoveredLen: int
    signal: ThreadSignalPtr

func indexToPos(steps, idx, step: int): int {.inline.} =
  ## Convert an index to a position in the encoded
  ## dataset
  ## `idx`  - the index to convert
  ## `step` - the current step
  ## `pos`  - the position in the encoded dataset
  ##

  (idx - step) div steps
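# Worked example: with steps = 3 (a stepped strategy over 3 iterations),
# step = 1 selects dataset indices 1, 4, 7, ... and indexToPos maps them to
# contiguous row positions: indexToPos(3, 7, 1) = (7 - 1) div 3 = 2.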
proc getPendingBlocks(
    self: Erasure, manifest: Manifest, indices: seq[int]
): AsyncIter[(?!bt.Block, int)] =
  ## Get pending blocks iterator
  ##
  var pendingBlocks: seq[Future[(?!bt.Block, int)]] = @[]

  proc attachIndex(
      fut: Future[?!bt.Block], i: int
  ): Future[(?!bt.Block, int)] {.async.} =
    ## avoids closure capture issues
    return (await fut, i)

  for blockIndex in indices:
    # request blocks from the store
    let fut = self.store.getBlock(BlockAddress.init(manifest.treeCid, blockIndex))
    pendingBlocks.add(attachIndex(fut, blockIndex))

  proc isFinished(): bool =
    pendingBlocks.len == 0

  proc genNext(): Future[(?!bt.Block, int)] {.async.} =
    let completedFut = await one(pendingBlocks)
    if (let i = pendingBlocks.find(completedFut); i >= 0):
      pendingBlocks.del(i)
      return await completedFut
    else:
      let (_, index) = await completedFut
      raise newException(
        CatchableError,
        "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " &
          $index,
      )

  AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)

proc prepareEncodingData(
    self: Erasure,
    manifest: Manifest,
    params: EncodingParams,
    step: Natural,
    data: ref seq[seq[byte]],
    cids: ref seq[Cid],
    emptyBlock: seq[byte],
): Future[?!Natural] {.async.} =
  ## Prepare data for encoding
  ##

  let
    strategy = params.strategy.init(
      firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
    )
    indices = toSeq(strategy.getIndices(step))
    pendingBlocksIter =
      self.getPendingBlocks(manifest, indices.filterIt(it < manifest.blocksCount))

  var resolved = 0
  for fut in pendingBlocksIter:
    let (blkOrErr, idx) = await fut
    without blk =? blkOrErr, err:
      warn "Failed retrieving a block", treeCid = manifest.treeCid, idx, msg = err.msg
      return failure(err)

    let pos = indexToPos(params.steps, idx, step)
    shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
    cids[idx] = blk.cid

    resolved.inc()

  for idx in indices.filterIt(it >= manifest.blocksCount):
    let pos = indexToPos(params.steps, idx, step)
    trace "Padding with empty block", idx
    shallowCopy(data[pos], emptyBlock)
    without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec),
      err:
      return failure(err)
    cids[idx] = emptyBlockCid

  success(resolved.Natural)

proc prepareDecodingData(
    self: Erasure,
    encoded: Manifest,
    step: Natural,
    data: ref seq[seq[byte]],
    parityData: ref seq[seq[byte]],
    cids: ref seq[Cid],
    emptyBlock: seq[byte],
): Future[?!(Natural, Natural)] {.async.} =
  ## Prepare data for decoding
  ## `encoded`    - the encoded manifest
  ## `step`       - the current step
  ## `data`       - the data to be prepared
  ## `parityData` - the parityData to be prepared
  ## `cids`       - cids of prepared data
  ## `emptyBlock` - the empty block to be used for padding
  ##

  let
    strategy = encoded.protectedStrategy.init(
      firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
    )
    indices = toSeq(strategy.getIndices(step))
    pendingBlocksIter = self.getPendingBlocks(encoded, indices)

  var
    dataPieces = 0
    parityPieces = 0
    resolved = 0
  for fut in pendingBlocksIter:
    # Continue to receive blocks until we have just enough for decoding
    # or no more blocks can arrive
    if resolved >= encoded.ecK:
      break

    let (blkOrErr, idx) = await fut
    without blk =? blkOrErr, err:
      trace "Failed retrieving a block", idx, treeCid = encoded.treeCid, msg = err.msg
      continue

    let pos = indexToPos(encoded.steps, idx, step)

    logScope:
      cid = blk.cid
      idx = idx
      pos = pos
      step = step
      empty = blk.isEmpty

    cids[idx] = blk.cid
    if idx >= encoded.rounded:
      trace "Retrieved parity block"
      shallowCopy(
        parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data
      )
      parityPieces.inc
    else:
      trace "Retrieved data block"
      shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
      dataPieces.inc

    resolved.inc

  return success (dataPieces.Natural, parityPieces.Natural)

proc init*(
    _: type EncodingParams,
    manifest: Manifest,
    ecK: Natural,
    ecM: Natural,
    strategy: StrategyType,
): ?!EncodingParams =
  if ecK > manifest.blocksCount:
    let exc = (ref InsufficientBlocksError)(
      msg:
        "Unable to encode manifest, not enough blocks, ecK = " & $ecK &
        ", blocksCount = " & $manifest.blocksCount,
      minSize: ecK.NBytes * manifest.blockSize,
    )
    return failure(exc)

  let
    rounded = roundUp(manifest.blocksCount, ecK)
    steps = divUp(rounded, ecK)
    blocksCount = rounded + (steps * ecM)

  success EncodingParams(
    ecK: ecK,
    ecM: ecM,
    rounded: rounded,
    steps: steps,
    blocksCount: blocksCount,
    strategy: strategy,
  )
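# Worked example (hypothetical manifest): blocksCount = 10, ecK = 4, ecM = 2
# gives rounded = roundUp(10, 4) = 12, steps = divUp(12, 4) = 3 and
# blocksCount = 12 + 3 * 2 = 18, i.e. 2 padding blocks plus 2 parity blocks
# per step are appended to form the protected dataset.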
proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
  # Task suitable for running in taskpools - look, no GC!
  let encoder =
    task[].erasure.encoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
  defer:
    encoder.release()
    discard task[].signal.fireSync()

  if (
    let res =
      encoder.encode(task[].blocks, task[].parity, task[].blocksLen, task[].parityLen)
    res.isErr
  ):
    warn "Error from leopard encoder backend!", error = $res.error

    task[].success.store(false)
  else:
    task[].success.store(true)

proc asyncEncode*(
    self: Erasure,
    blockSize, blocksLen, parityLen: int,
    blocks: ref seq[seq[byte]],
    parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
  without threadPtr =? ThreadSignalPtr.new():
    return failure("Unable to create thread signal")

  defer:
    threadPtr.close().expect("closing once works")

  var data = makeUncheckedArray(blocks)

  defer:
    dealloc(data)

  ## Create an encode task with block data
  var task = EncodeTask(
    erasure: addr self,
    blockSize: blockSize,
    blocksLen: blocksLen,
    parityLen: parityLen,
    blocks: data,
    parity: parity,
    signal: threadPtr,
  )

  doAssert self.taskPool.numThreads > 1,
    "Must have at least one separate thread or signal will never be fired"
  self.taskPool.spawn leopardEncodeTask(self.taskPool, addr task)
  let threadFut = threadPtr.wait()

  if joinErr =? catch(await threadFut.join()).errorOption:
    if err =? catch(await noCancel threadFut).errorOption:
      return failure(err)
    if joinErr of CancelledError:
      raise (ref CancelledError) joinErr
    else:
      return failure(joinErr)

  if not task.success.load():
    return failure("Leopard encoding task failed")

  success()

proc encodeData(
    self: Erasure, manifest: Manifest, params: EncodingParams
): Future[?!Manifest] {.async.} =
  ## Encode blocks pointed to by the protected manifest
  ##
  ## `manifest` - the manifest to encode
  ##
  logScope:
    steps = params.steps
    rounded_blocks = params.rounded
    blocks_count = params.blocksCount
    ecK = params.ecK
    ecM = params.ecM

  var
    cids = seq[Cid].new()
    emptyBlock = newSeq[byte](manifest.blockSize.int)

  cids[].setLen(params.blocksCount)

  try:
    for step in 0 ..< params.steps:
      # TODO: Don't allocate a new seq every time, allocate once and zero out
      var
        data = seq[seq[byte]].new() # number of blocks to encode
        parity = createDoubleArray(params.ecM, manifest.blockSize.int)
      defer:
        freeDoubleArray(parity, params.ecM)

      data[].setLen(params.ecK)
      # TODO: this is a tight blocking loop so we sleep here to allow
      # other events to be processed, this should be addressed
      # by threading
      await sleepAsync(10.millis)

      without resolved =?
        (await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)),
        err:
        trace "Unable to prepare data", error = err.msg
        return failure(err)

      trace "Erasure coding data", data = data[].len

      try:
        if err =? (
          await self.asyncEncode(
            manifest.blockSize.int, params.ecK, params.ecM, data, parity
          )
        ).errorOption:
          return failure(err)
      except CancelledError as exc:
        raise exc

      var idx = params.rounded + step
      for j in 0 ..< params.ecM:
        var innerPtr: ptr UncheckedArray[byte] = parity[][j]
        without blk =? bt.Block.new(innerPtr.toOpenArray(0, manifest.blockSize.int - 1)),
          error:
          trace "Unable to create parity block", err = error.msg
          return failure(error)

        trace "Adding parity block", cid = blk.cid, idx
        cids[idx] = blk.cid
        if error =? (await self.store.putBlock(blk)).errorOption:
          warn "Unable to store block!", cid = blk.cid, msg = error.msg
          return failure("Unable to store block!")
        idx.inc(params.steps)

    without tree =? (await CodexTree.init(self.taskPool, cids[])), err:
      return failure(err)

    without treeCid =? tree.rootCid, err:
      return failure(err)

    if err =? (await self.store.putAllProofs(tree)).errorOption:
      return failure(err)

    let encodedManifest = Manifest.new(
      manifest = manifest,
      treeCid = treeCid,
      datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
      ecK = params.ecK,
      ecM = params.ecM,
      strategy = params.strategy,
    )

    trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
    success encodedManifest
  except CancelledError as exc:
    trace "Erasure coding encoding cancelled"
    raise exc # cancellation needs to be propagated
  except CatchableError as exc:
    trace "Erasure coding encoding error", exc = exc.msg
    return failure(exc)

proc encode*(
    self: Erasure,
    manifest: Manifest,
    blocks: Natural,
    parity: Natural,
    strategy = SteppedStrategy,
): Future[?!Manifest] {.async.} =
  ## Encode a manifest into one that is erasure protected.
  ##
  ## `manifest` - the original manifest to be encoded
  ## `blocks`   - the number of blocks to be encoded - K
  ## `parity`   - the number of parity blocks to generate - M
  ##

  without params =? EncodingParams.init(manifest, blocks.int, parity.int, strategy), err:
    return failure(err)

  without encodedManifest =? await self.encodeData(manifest, params), err:
    return failure(err)

  return success encodedManifest

proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
  # Task suitable for running in taskpools - look, no GC!
  let decoder =
    task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
  defer:
    decoder.release()
    discard task[].signal.fireSync()

  if (
    let res = decoder.decode(
      task[].blocks,
      task[].parity,
      task[].recovered,
      task[].blocksLen,
      task[].parityLen,
      task[].recoveredLen,
    )
    res.isErr
  ):
    warn "Error from leopard decoder backend!", error = $res.error
    task[].success.store(false)
  else:
    task[].success.store(true)

proc asyncDecode*(
    self: Erasure,
    blockSize, blocksLen, parityLen: int,
    blocks, parity: ref seq[seq[byte]],
    recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
  without threadPtr =? ThreadSignalPtr.new():
    return failure("Unable to create thread signal")

  defer:
    threadPtr.close().expect("closing once works")

  var
    blockData = makeUncheckedArray(blocks)
    parityData = makeUncheckedArray(parity)

  defer:
    dealloc(blockData)
    dealloc(parityData)

  ## Create a decode task with block data
  var task = DecodeTask(
    erasure: addr self,
    blockSize: blockSize,
    blocksLen: blocksLen,
    parityLen: parityLen,
    recoveredLen: blocksLen,
    blocks: blockData,
    parity: parityData,
    recovered: recovered,
    signal: threadPtr,
  )

  doAssert self.taskPool.numThreads > 1,
    "Must have at least one separate thread or signal will never be fired"
  self.taskPool.spawn leopardDecodeTask(self.taskPool, addr task)
  let threadFut = threadPtr.wait()

  if joinErr =? catch(await threadFut.join()).errorOption:
    if err =? catch(await noCancel threadFut).errorOption:
      return failure(err)
    if joinErr of CancelledError:
      raise (ref CancelledError) joinErr
    else:
      return failure(joinErr)

  if not task.success.load():
    return failure("Leopard decoding task failed")

  success()

proc decodeInternal(
    self: Erasure, encoded: Manifest
): Future[?!(ref seq[Cid], seq[Natural])] {.async.} =
  logScope:
    steps = encoded.steps
    rounded_blocks = encoded.rounded
    new_manifest = encoded.blocksCount

  var
    cids = seq[Cid].new()
    recoveredIndices = newSeq[Natural]()
    decoder = self.decoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
    emptyBlock = newSeq[byte](encoded.blockSize.int)

  cids[].setLen(encoded.blocksCount)
  try:
    for step in 0 ..< encoded.steps:
      # TODO: this is a tight blocking loop so we sleep here to allow
      # other events to be processed, this should be addressed
      # by threading
      await sleepAsync(10.millis)

      var
        data = seq[seq[byte]].new()
        parityData = seq[seq[byte]].new()
        recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)
      defer:
        freeDoubleArray(recovered, encoded.ecK)

      data[].setLen(encoded.ecK) # set len to K
      parityData[].setLen(encoded.ecM) # set len to M

      without (dataPieces, _) =? (
        await self.prepareDecodingData(
          encoded, step, data, parityData, cids, emptyBlock
        )
      ), err:
        trace "Unable to prepare data", error = err.msg
        return failure(err)

      if dataPieces >= encoded.ecK:
        trace "Retrieved all the required data blocks"
        continue

      trace "Erasure decoding data"
      try:
        if err =? (
          await self.asyncDecode(
            encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
          )
        ).errorOption:
          return failure(err)
      except CancelledError as exc:
        raise exc

      for i in 0 ..< encoded.ecK:
        let idx = i * encoded.steps + step
        if data[i].len <= 0 and not cids[idx].isEmpty:
          var innerPtr: ptr UncheckedArray[byte] = recovered[][i]

          without blk =? bt.Block.new(
            innerPtr.toOpenArray(0, encoded.blockSize.int - 1)
          ), error:
            trace "Unable to create block!", exc = error.msg
            return failure(error)

          trace "Recovered block", cid = blk.cid, index = i
          if error =? (await self.store.putBlock(blk)).errorOption:
            warn "Unable to store block!", cid = blk.cid, msg = error.msg
            return failure("Unable to store block!")

          self.store.completeBlock(BlockAddress.init(encoded.treeCid, idx), blk)

          cids[idx] = blk.cid
          recoveredIndices.add(idx)
  except CancelledError as exc:
    trace "Erasure coding decoding cancelled"
    raise exc # cancellation needs to be propagated
  except CatchableError as exc:
    trace "Erasure coding decoding error", exc = exc.msg
    return failure(exc)
  finally:
    decoder.release()

  return (cids, recoveredIndices).success

proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
  ## Decode a protected manifest into its original
  ## manifest
  ##
  ## `encoded` - the encoded (protected) manifest to
  ## be recovered
  ##

  without (cids, recoveredIndices) =? (await self.decodeInternal(encoded)), err:
    return failure(err)

  without tree =?
    (await CodexTree.init(self.taskPool, cids[0 ..< encoded.originalBlocksCount])), err:
    return failure(err)

  without treeCid =? tree.rootCid, err:
    return failure(err)

  if treeCid != encoded.originalTreeCid:
    return failure(
      "Original tree root differs from the tree root computed out of recovered data"
    )

  let idxIter =
    Iter[Natural].new(recoveredIndices).filter((i: Natural) => i < tree.leavesCount)

  if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
    return failure(err)

  let decoded = Manifest.new(encoded)

  return decoded.success

proc repair*(self: Erasure, encoded: Manifest): Future[?!void] {.async.} =
  ## Repair a protected manifest by reconstructing the full dataset
  ##
  ## `encoded` - the encoded (protected) manifest to
  ## be repaired
  ##

  without (cids, _) =? (await self.decodeInternal(encoded)), err:
    return failure(err)

  without tree =?
    (await CodexTree.init(self.taskPool, cids[0 ..< encoded.originalBlocksCount])), err:
    return failure(err)

  without treeCid =? tree.rootCid, err:
    return failure(err)

  if treeCid != encoded.originalTreeCid:
    return failure(
      "Original tree root differs from the tree root computed out of recovered data"
    )

  if err =? (await self.store.putAllProofs(tree)).errorOption:
    return failure(err)

  without repaired =? (
    await self.encode(
      Manifest.new(encoded), encoded.ecK, encoded.ecM, encoded.protectedStrategy
    )
  ), err:
    return failure(err)

  if repaired.treeCid != encoded.treeCid:
    return failure(
      "Original tree root differs from the repaired tree root encoded out of recovered data"
    )

  return success()

proc start*(self: Erasure) {.async.} =
  return

proc stop*(self: Erasure) {.async.} =
  return

proc new*(
    T: type Erasure,
    store: BlockStore,
    encoderProvider: EncoderProvider,
    decoderProvider: DecoderProvider,
    taskPool: Taskpool,
): Erasure =
  ## Create a new Erasure instance for encoding and decoding manifests
  ##
  Erasure(
    store: store,
    encoderProvider: encoderProvider,
    decoderProvider: decoderProvider,
    taskPool: taskPool,
  )
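# End-to-end sketch (hypothetical; `store`, `taskpool` and `manifest` are
# assumed to come from the caller): protect a dataset with K = 4 and M = 2,
# then recover the original manifest from whichever blocks remain fetchable:
#
#   let erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool)
#   without protected =? await erasure.encode(manifest, 4, 2), err:
#     return failure(err)
#   without recovered =? await erasure.decode(protected), err:
#     return failure(err)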
@ -1,126 +0,0 @@
import ./errors
import ./utils
import ./utils/asynciter

{.push raises: [].}

type
  StrategyType* = enum
    # Simplest approach:
    # 0 => 0, 1, 2
    # 1 => 3, 4, 5
    # 2 => 6, 7, 8
    LinearStrategy

    # Stepped indexing:
    # 0 => 0, 3, 6
    # 1 => 1, 4, 7
    # 2 => 2, 5, 8
    SteppedStrategy

  # Representing a strategy for grouping indices (of blocks usually).
  # Given an iteration-count as input, will produce a seq of
  # selected indices.
  IndexingError* = object of CodexError
  IndexingWrongIndexError* = object of IndexingError
  IndexingWrongIterationsError* = object of IndexingError
  IndexingWrongGroupCountError* = object of IndexingError
  IndexingWrongPadBlockCountError* = object of IndexingError

  IndexingStrategy* = object
    strategyType*: StrategyType # Indexing strategy algorithm
    firstIndex*: int # Lowest index that can be returned
    lastIndex*: int # Highest index that can be returned
    iterations*: int # Number of iteration steps (0 ..< iterations)
    step*: int # Step size between generated indices
    groupCount*: int # Number of groups to partition indices into
    padBlockCount*: int # Number of padding blocks to append per group

func checkIteration(
    self: IndexingStrategy, iteration: int
): void {.raises: [IndexingError].} =
  if iteration >= self.iterations:
    raise newException(
      IndexingError, "Indexing iteration can't be greater than or equal to iterations."
    )

func getIter(first, last, step: int): Iter[int] =
  {.cast(noSideEffect).}:
    Iter[int].new(first, last, step)

func getLinearIndices(self: IndexingStrategy, iteration: int): Iter[int] =
  let
    first = self.firstIndex + iteration * self.step
    last = min(first + self.step - 1, self.lastIndex)

  getIter(first, last, 1)

func getSteppedIndices(self: IndexingStrategy, iteration: int): Iter[int] =
  let
    first = self.firstIndex + iteration
    last = self.lastIndex

  getIter(first, last, self.iterations)

func getStrategyIndices(self: IndexingStrategy, iteration: int): Iter[int] =
  case self.strategyType
  of StrategyType.LinearStrategy:
    self.getLinearIndices(iteration)
  of StrategyType.SteppedStrategy:
    self.getSteppedIndices(iteration)

func getIndices*(
    self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
  self.checkIteration(iteration)
  {.cast(noSideEffect).}:
    Iter[int].new(
      iterator (): int {.gcsafe.} =
        for value in self.getStrategyIndices(iteration):
          yield value

        for i in 0 ..< self.padBlockCount:
          yield self.lastIndex + (iteration + 1) + i * self.groupCount
    )

func init*(
    strategy: StrategyType,
    firstIndex, lastIndex, iterations: int,
    groupCount = 0,
    padBlockCount = 0,
): IndexingStrategy {.raises: [IndexingError].} =
  if firstIndex > lastIndex:
    raise newException(
      IndexingWrongIndexError,
      "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex &
        ")",
    )

  if iterations <= 0:
    raise newException(
      IndexingWrongIterationsError,
      "iterations (" & $iterations & ") must be greater than zero.",
    )

  if padBlockCount < 0:
    raise newException(
      IndexingWrongPadBlockCountError,
      "padBlockCount (" & $padBlockCount & ") must be equal to or greater than zero.",
    )

  if padBlockCount > 0 and groupCount <= 0:
    raise newException(
      IndexingWrongGroupCountError,
      "groupCount (" & $groupCount & ") must be greater than zero.",
    )

  IndexingStrategy(
    strategyType: strategy,
    firstIndex: firstIndex,
    lastIndex: lastIndex,
    iterations: iterations,
    step: divUp((lastIndex - firstIndex + 1), iterations),
    groupCount: groupCount,
    padBlockCount: padBlockCount,
  )
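Editor's note: for intuition on the two grouping schemes described in the `StrategyType` comments above, here is a minimal, self-contained Nim sketch. The helper names are hypothetical and nothing here comes from the deleted module itself; it just reproduces the documented index layouts without the repo's `Iter[int]` machinery.

```nim
# Illustrative only: linear vs. stepped grouping, per the comments above.
proc linearGroup(iteration, step, firstIndex, lastIndex: int): seq[int] =
  # iteration 0 => 0,1,2; 1 => 3,4,5; 2 => 6,7,8 (for step = 3)
  let first = firstIndex + iteration * step
  for i in first .. min(first + step - 1, lastIndex):
    result.add(i)

proc steppedGroup(iteration, iterations, firstIndex, lastIndex: int): seq[int] =
  # iteration 0 => 0,3,6; 1 => 1,4,7; 2 => 2,5,8 (for iterations = 3)
  var i = firstIndex + iteration
  while i <= lastIndex:
    result.add(i)
    i += iterations

when isMainModule:
  doAssert linearGroup(1, 3, 0, 8) == @[3, 4, 5]
  doAssert steppedGroup(1, 3, 0, 8) == @[1, 4, 7]
```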
@ -25,32 +25,18 @@ import ./manifest
import ../errors
import ../blocktype
import ../logutils
import ../indexingstrategy

proc encode*(manifest: Manifest): ?!seq[byte] =
  ## Encode the manifest into a ``ManifestCodec``
  ## multicodec container (Dag-pb) for now
  ##

  ?manifest.verify()
  var pbNode = initProtoBuffer()

  # NOTE: The `Data` field in the `dag-pb`
  # contains the following protobuf `Message`
  #
  # ```protobuf
  #   Message VerificationInfo {
  #     bytes verifyRoot = 1;         # Decimal encoded field-element
  #     repeated bytes slotRoots = 2; # Decimal encoded field-elements
  #   }
  #   Message ErasureInfo {
  #     optional uint32 ecK = 1;                    # number of encoded blocks
  #     optional uint32 ecM = 2;                    # number of parity blocks
  #     optional bytes originalTreeCid = 3;         # cid of the original dataset
  #     optional uint32 originalDatasetSize = 4;    # size of the original dataset
  #     optional VerificationInfo verification = 5; # verification information
  #   }
  #
  #   Message Header {
  #     optional bytes treeCid = 1;       # cid (root) of the tree
  #     optional uint32 blockSize = 2;    # size of a single block
@ -58,9 +44,8 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
  #     optional codec: MultiCodec = 4;   # Dataset codec
  #     optional hcodec: MultiCodec = 5   # Multihash codec
  #     optional version: CidVersion = 6; # Cid version
  #     optional ErasureInfo erasure = 7; # erasure coding info
  #     optional filename: ?string = 8;   # original filename
  #     optional mimetype: ?string = 9;   # original mimetype
  #     optional filename: ?string = 7;   # original filename
  #     optional mimetype: ?string = 8;   # original mimetype
  #   }
  # ```
  #
@ -73,31 +58,11 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
  header.write(5, manifest.hcodec.uint32)
  header.write(6, manifest.version.uint32)

  if manifest.protected:
    var erasureInfo = initProtoBuffer()
    erasureInfo.write(1, manifest.ecK.uint32)
    erasureInfo.write(2, manifest.ecM.uint32)
    erasureInfo.write(3, manifest.originalTreeCid.data.buffer)
    erasureInfo.write(4, manifest.originalDatasetSize.uint64)
    erasureInfo.write(5, manifest.protectedStrategy.uint32)

    if manifest.verifiable:
      var verificationInfo = initProtoBuffer()
      verificationInfo.write(1, manifest.verifyRoot.data.buffer)
      for slotRoot in manifest.slotRoots:
        verificationInfo.write(2, slotRoot.data.buffer)
      verificationInfo.write(3, manifest.cellSize.uint32)
      verificationInfo.write(4, manifest.verifiableStrategy.uint32)
      erasureInfo.write(6, verificationInfo)

    erasureInfo.finish()
    header.write(7, erasureInfo)

  if manifest.filename.isSome:
    header.write(8, manifest.filename.get())
    header.write(7, manifest.filename.get())

  if manifest.mimetype.isSome:
    header.write(9, manifest.mimetype.get())
    header.write(8, manifest.mimetype.get())

  pbNode.write(1, header) # set the treeCid as the data field
  pbNode.finish()
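Editor's note: the hunk above renumbers the header fields once `ErasureInfo` is gone (filename moves 8 → 7, mimetype 9 → 8), which presumably means manifests written with the old layout will not decode correctly under the new one unless versioned elsewhere. A small hedged round-trip sketch of the new numbering, assuming the nim-libp2p minprotobuf API used throughout this file:

```nim
# Sketch only, not part of the diff: filename now lives in field 7,
# mimetype in field 8. Values here are made up for illustration.
import pkg/libp2p/protobuf/minprotobuf

var header = initProtoBuffer()
header.write(7, "data.bin")                 # filename (new field number)
header.write(8, "application/octet-stream") # mimetype (new field number)
header.finish()

var decoded = initProtoBuffer(header.buffer)
var filename, mimetype: string
doAssert decoded.getField(7, filename).isOk and filename == "data.bin"
doAssert decoded.getField(8, mimetype).isOk
```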
@ -111,22 +76,12 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
  var
    pbNode = initProtoBuffer(data)
    pbHeader: ProtoBuffer
    pbErasureInfo: ProtoBuffer
    pbVerificationInfo: ProtoBuffer
    treeCidBuf: seq[byte]
    originalTreeCid: seq[byte]
    datasetSize: uint64
    codec: uint32
    hcodec: uint32
    version: uint32
    blockSize: uint32
    originalDatasetSize: uint64
    ecK, ecM: uint32
    protectedStrategy: uint32
    verifyRoot: seq[byte]
    slotRoots: seq[seq[byte]]
    cellSize: uint32
    verifiableStrategy: uint32
    filename: string
    mimetype: string

@ -153,98 +108,27 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
  if pbHeader.getField(6, version).isErr:
    return failure("Unable to decode `version` from manifest!")

  if pbHeader.getField(7, pbErasureInfo).isErr:
    return failure("Unable to decode `erasureInfo` from manifest!")

  if pbHeader.getField(8, filename).isErr:
  if pbHeader.getField(7, filename).isErr:
    return failure("Unable to decode `filename` from manifest!")

  if pbHeader.getField(9, mimetype).isErr:
  if pbHeader.getField(8, mimetype).isErr:
    return failure("Unable to decode `mimetype` from manifest!")

  let protected = pbErasureInfo.buffer.len > 0
  var verifiable = false
  if protected:
    if pbErasureInfo.getField(1, ecK).isErr:
      return failure("Unable to decode `K` from manifest!")

    if pbErasureInfo.getField(2, ecM).isErr:
      return failure("Unable to decode `M` from manifest!")

    if pbErasureInfo.getField(3, originalTreeCid).isErr:
      return failure("Unable to decode `originalTreeCid` from manifest!")

    if pbErasureInfo.getField(4, originalDatasetSize).isErr:
      return failure("Unable to decode `originalDatasetSize` from manifest!")

    if pbErasureInfo.getField(5, protectedStrategy).isErr:
      return failure("Unable to decode `protectedStrategy` from manifest!")

    if pbErasureInfo.getField(6, pbVerificationInfo).isErr:
      return failure("Unable to decode `verificationInfo` from manifest!")

    verifiable = pbVerificationInfo.buffer.len > 0
    if verifiable:
      if pbVerificationInfo.getField(1, verifyRoot).isErr:
        return failure("Unable to decode `verifyRoot` from manifest!")

      if pbVerificationInfo.getRequiredRepeatedField(2, slotRoots).isErr:
        return failure("Unable to decode `slotRoots` from manifest!")

      if pbVerificationInfo.getField(3, cellSize).isErr:
        return failure("Unable to decode `cellSize` from manifest!")

      if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
        return failure("Unable to decode `verifiableStrategy` from manifest!")

  let treeCid = ?Cid.init(treeCidBuf).mapFailure

  var filenameOption = if filename.len == 0: string.none else: filename.some
  var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some

  let self =
    if protected:
      Manifest.new(
        treeCid = treeCid,
        datasetSize = datasetSize.NBytes,
        blockSize = blockSize.NBytes,
        version = CidVersion(version),
        hcodec = hcodec.MultiCodec,
        codec = codec.MultiCodec,
        ecK = ecK.int,
        ecM = ecM.int,
        originalTreeCid = ?Cid.init(originalTreeCid).mapFailure,
        originalDatasetSize = originalDatasetSize.NBytes,
        strategy = StrategyType(protectedStrategy),
        filename = filenameOption,
        mimetype = mimetypeOption,
      )
    else:
      Manifest.new(
        treeCid = treeCid,
        datasetSize = datasetSize.NBytes,
        blockSize = blockSize.NBytes,
        version = CidVersion(version),
        hcodec = hcodec.MultiCodec,
        codec = codec.MultiCodec,
        filename = filenameOption,
        mimetype = mimetypeOption,
      )

  ?self.verify()

  if verifiable:
    let
      verifyRootCid = ?Cid.init(verifyRoot).mapFailure
      slotRootCids = slotRoots.mapIt(?Cid.init(it).mapFailure)

    return Manifest.new(
      manifest = self,
      verifyRoot = verifyRootCid,
      slotRoots = slotRootCids,
      cellSize = cellSize.NBytes,
      strategy = StrategyType(verifiableStrategy),
    )
  let self = Manifest.new(
    treeCid = treeCid,
    datasetSize = datasetSize.NBytes,
    blockSize = blockSize.NBytes,
    version = CidVersion(version),
    hcodec = hcodec.MultiCodec,
    codec = codec.MultiCodec,
    filename = filenameOption,
    mimetype = mimetypeOption,
  )

  self.success
@ -20,7 +20,6 @@ import ../utils
import ../utils/json
import ../units
import ../blocktype
import ../indexingstrategy
import ../logutils

# TODO: Manifest should be reworked to more concrete types,
@ -35,24 +34,6 @@ type Manifest* = ref object of RootObj
  version: CidVersion # Cid version
  filename {.serialize.}: ?string # The filename of the content uploaded (optional)
  mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
  case protected {.serialize.}: bool # Protected datasets have erasure coded info
  of true:
    ecK: int # Number of blocks to encode
    ecM: int # Number of resulting parity blocks
    originalTreeCid: Cid # The original root of the dataset being erasure coded
    originalDatasetSize: NBytes
    protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
    case verifiable {.serialize.}: bool
    # Verifiable datasets can be used to generate storage proofs
    of true:
      verifyRoot: Cid # Root of the top level merkle tree built from slot roots
      slotRoots: seq[Cid] # Individual slot roots built from the original dataset blocks
      cellSize: NBytes # Size of each slot cell
      verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
    else:
      discard
  else:
    discard

############################################################
# Accessors
@ -73,54 +54,12 @@ func hcodec*(self: Manifest): MultiCodec =
func codec*(self: Manifest): MultiCodec =
  self.codec

func protected*(self: Manifest): bool =
  self.protected

func ecK*(self: Manifest): int =
  self.ecK

func ecM*(self: Manifest): int =
  self.ecM

func originalTreeCid*(self: Manifest): Cid =
  self.originalTreeCid

func originalBlocksCount*(self: Manifest): int =
  divUp(self.originalDatasetSize.int, self.blockSize.int)

func originalDatasetSize*(self: Manifest): NBytes =
  self.originalDatasetSize

func treeCid*(self: Manifest): Cid =
  self.treeCid

func blocksCount*(self: Manifest): int =
  divUp(self.datasetSize.int, self.blockSize.int)

func verifiable*(self: Manifest): bool =
  bool (self.protected and self.verifiable)

func verifyRoot*(self: Manifest): Cid =
  self.verifyRoot

func slotRoots*(self: Manifest): seq[Cid] =
  self.slotRoots

func numSlots*(self: Manifest): int =
  self.ecK + self.ecM

func cellSize*(self: Manifest): NBytes =
  self.cellSize

func protectedStrategy*(self: Manifest): StrategyType =
  self.protectedStrategy

func verifiableStrategy*(self: Manifest): StrategyType =
  self.verifiableStrategy

func numSlotBlocks*(self: Manifest): int =
  divUp(self.blocksCount, self.numSlots)

func filename*(self: Manifest): ?string =
  self.filename

@ -141,51 +80,16 @@ func isManifest*(mc: MultiCodec): ?!bool =
# Various sizes and verification
############################################################

func rounded*(self: Manifest): int =
  ## Number of data blocks in *protected* manifest including padding at the end
  roundUp(self.originalBlocksCount, self.ecK)

func steps*(self: Manifest): int =
  ## Number of EC groups in *protected* manifest
  divUp(self.rounded, self.ecK)
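Editor's note: a quick worked example of the padding arithmetic in `rounded` and `steps` above, with `divUp`/`roundUp` redefined locally so the sketch stands alone (the repo takes them from `../utils`); all concrete values are hypothetical.

```nim
# A 10-block dataset erasure-coded with ecK = 4 data / ecM = 2 parity blocks.
func divUp(a, b: int): int = (a + b - 1) div b
func roundUp(a, b: int): int = divUp(a, b) * b

let
  originalBlocksCount = 10
  ecK = 4
  ecM = 2
  rounded = roundUp(originalBlocksCount, ecK) # 12: padded to a multiple of ecK
  steps = divUp(rounded, ecK)                 # 3 EC groups
  blocksCount = steps * (ecK + ecM)           # 18: what verify() below expects

doAssert (rounded, steps, blocksCount) == (12, 3, 18)
```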
func verify*(self: Manifest): ?!void =
  ## Check manifest correctness
  ##

  if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
    return
      failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")

  return success()

func `==`*(a, b: Manifest): bool =
  (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
    (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
    (a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and
    (a.mimetype == b.mimetype) and (
      if a.protected:
        (a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and
          (a.originalDatasetSize == b.originalDatasetSize) and
          (a.protectedStrategy == b.protectedStrategy) and (a.verifiable == b.verifiable) and
          (
            if a.verifiable:
              (a.verifyRoot == b.verifyRoot) and (a.slotRoots == b.slotRoots) and
                (a.cellSize == b.cellSize) and (
                  a.verifiableStrategy == b.verifiableStrategy
                )
            else:
              true
          )
      else:
        true
    )
    (a.codec == b.codec) and (a.filename == b.filename) and (a.mimetype == b.mimetype)

func `$`*(self: Manifest): string =
  result =
    "treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " &
    $self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec &
    ", codec: " & $self.codec & ", protected: " & $self.protected
    ", codec: " & $self.codec

  if self.filename.isSome:
    result &= ", filename: " & $self.filename
@ -193,20 +97,6 @@ func `$`*(self: Manifest): string =
  if self.mimetype.isSome:
    result &= ", mimetype: " & $self.mimetype

  result &= (
    if self.protected:
      ", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " &
        $self.originalTreeCid & ", originalDatasetSize: " & $self.originalDatasetSize &
        ", verifiable: " & $self.verifiable & (
          if self.verifiable:
            ", verifyRoot: " & $self.verifyRoot & ", slotRoots: " & $self.slotRoots
          else:
            ""
        )
    else:
      ""
  )

  return result

############################################################
@ -221,7 +111,6 @@ func new*(
    version: CidVersion = CIDv1,
    hcodec = Sha256HashCodec,
    codec = BlockCodec,
    protected = false,
    filename: ?string = string.none,
    mimetype: ?string = string.none,
): Manifest =
@ -232,132 +121,10 @@ func new*(
    version: version,
    codec: codec,
    hcodec: hcodec,
    protected: protected,
    filename: filename,
    mimetype: mimetype,
  )

func new*(
    T: type Manifest,
    manifest: Manifest,
    treeCid: Cid,
    datasetSize: NBytes,
    ecK, ecM: int,
    strategy = SteppedStrategy,
): Manifest =
  ## Create an erasure protected dataset from an
  ## unprotected one
  ##

  Manifest(
    treeCid: treeCid,
    datasetSize: datasetSize,
    version: manifest.version,
    codec: manifest.codec,
    hcodec: manifest.hcodec,
    blockSize: manifest.blockSize,
    protected: true,
    ecK: ecK,
    ecM: ecM,
    originalTreeCid: manifest.treeCid,
    originalDatasetSize: manifest.datasetSize,
    protectedStrategy: strategy,
    filename: manifest.filename,
    mimetype: manifest.mimetype,
  )

func new*(T: type Manifest, manifest: Manifest): Manifest =
  ## Create an unprotected dataset from an
  ## erasure protected one
  ##

  Manifest(
    treeCid: manifest.originalTreeCid,
    datasetSize: manifest.originalDatasetSize,
    version: manifest.version,
    codec: manifest.codec,
    hcodec: manifest.hcodec,
    blockSize: manifest.blockSize,
    protected: false,
    filename: manifest.filename,
    mimetype: manifest.mimetype,
  )

func new*(
    T: type Manifest,
    treeCid: Cid,
    datasetSize: NBytes,
    blockSize: NBytes,
    version: CidVersion,
    hcodec: MultiCodec,
    codec: MultiCodec,
    ecK: int,
    ecM: int,
    originalTreeCid: Cid,
    originalDatasetSize: NBytes,
    strategy = SteppedStrategy,
    filename: ?string = string.none,
    mimetype: ?string = string.none,
): Manifest =
  Manifest(
    treeCid: treeCid,
    datasetSize: datasetSize,
    blockSize: blockSize,
    version: version,
    hcodec: hcodec,
    codec: codec,
    protected: true,
    ecK: ecK,
    ecM: ecM,
    originalTreeCid: originalTreeCid,
    originalDatasetSize: originalDatasetSize,
    protectedStrategy: strategy,
    filename: filename,
    mimetype: mimetype,
  )

func new*(
    T: type Manifest,
    manifest: Manifest,
    verifyRoot: Cid,
    slotRoots: openArray[Cid],
    cellSize = DefaultCellSize,
    strategy = LinearStrategy,
): ?!Manifest =
  ## Create a verifiable dataset from a
  ## protected one
  ##

  if not manifest.protected:
    return failure newException(
      CodexError, "Can create verifiable manifest only from protected manifest."
    )

  if slotRoots.len != manifest.numSlots:
    return failure newException(CodexError, "Wrong number of slot roots.")

  success Manifest(
    treeCid: manifest.treeCid,
    datasetSize: manifest.datasetSize,
    version: manifest.version,
    codec: manifest.codec,
    hcodec: manifest.hcodec,
    blockSize: manifest.blockSize,
    protected: true,
    ecK: manifest.ecK,
    ecM: manifest.ecM,
    originalTreeCid: manifest.originalTreeCid,
    originalDatasetSize: manifest.originalDatasetSize,
    protectedStrategy: manifest.protectedStrategy,
    verifiable: true,
    verifyRoot: verifyRoot,
    slotRoots: @slotRoots,
    cellSize: cellSize,
    verifiableStrategy: strategy,
    filename: manifest.filename,
    mimetype: manifest.mimetype,
  )

func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =
  ## Create a manifest instance from given data
  ##
313
codex/market.nim
@ -1,313 +0,0 @@
import pkg/chronos
import pkg/questionable
import pkg/ethers/erc20
import ./contracts/requests
import ./contracts/proofs
import ./clock
import ./errors
import ./periods

export chronos
export questionable
export requests
export proofs
export SecondsSince1970
export periods

type
  Market* = ref object of RootObj
  MarketError* = object of CodexError
  SlotStateMismatchError* = object of MarketError
  SlotReservationNotAllowedError* = object of MarketError
  ProofInvalidError* = object of MarketError
  Subscription* = ref object of RootObj
  OnRequest* =
    proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].}
  OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].}
  OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
  OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
  OnSlotReservationsFull* =
    proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
  OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].}
  OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].}
  OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].}
  ProofChallenge* = array[32, byte]

  # Marketplace events -- located here due to the Market abstraction
  MarketplaceEvent* = Event
  StorageRequested* = object of MarketplaceEvent
    requestId*: RequestId
    ask*: StorageAsk
    expiry*: uint64

  SlotFilled* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId
    slotIndex*: uint64

  SlotFreed* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId
    slotIndex*: uint64

  SlotReservationsFull* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId
    slotIndex*: uint64

  RequestFulfilled* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId

  RequestCancelled* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId

  RequestFailed* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId

  ProofSubmitted* = object of MarketplaceEvent
    id*: SlotId

method loadConfig*(
    market: Market
): Future[?!void] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method getZkeyHash*(
    market: Market
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method getSigner*(
    market: Market
): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method periodicity*(
    market: Market
): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method proofTimeout*(
    market: Market
): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method repairRewardPercentage*(
    market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
  raiseAssert("not implemented")

method proofDowntime*(
    market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
  raiseAssert("not implemented")

proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
  let downtime = await market.proofDowntime
  let pntr = await market.getPointer(slotId)
  return pntr < downtime

method requestStorage*(
    market: Market, request: StorageRequest
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
  raiseAssert("not implemented")

method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
  raiseAssert("not implemented")

method getRequest*(
    market: Market, id: RequestId
): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method requestState*(
    market: Market, requestId: RequestId
): Future[?RequestState] {.base, async.} =
  raiseAssert("not implemented")

method slotState*(
    market: Market, slotId: SlotId
): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method getRequestEnd*(
    market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
  raiseAssert("not implemented")

method requestExpiresAt*(
    market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
  raiseAssert("not implemented")

method getHost*(
    market: Market, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method currentCollateral*(
    market: Market, slotId: SlotId
): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} =
  raiseAssert("not implemented")

method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
  raiseAssert("not implemented")

method fillSlot*(
    market: Market,
    requestId: RequestId,
    slotIndex: uint64,
    proof: Groth16Proof,
    collateral: UInt256,
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method freeSlot*(
    market: Market, slotId: SlotId
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method withdrawFunds*(
    market: Market, requestId: RequestId
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method subscribeRequests*(
    market: Market, callback: OnRequest
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method isProofRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
  raiseAssert("not implemented")

method willProofBeRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
  raiseAssert("not implemented")

method getChallenge*(
    market: Market, id: SlotId
): Future[ProofChallenge] {.base, async.} =
  raiseAssert("not implemented")

method submitProof*(
    market: Market, id: SlotId, proof: Groth16Proof
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method markProofAsMissing*(
    market: Market, id: SlotId, period: Period
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method canMarkProofAsMissing*(
    market: Market, id: SlotId, period: Period
): Future[bool] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method reserveSlot*(
    market: Market, requestId: RequestId, slotIndex: uint64
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method canReserveSlot*(
    market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.base, async.} =
  raiseAssert("not implemented")

method subscribeFulfillment*(
    market: Market, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeFulfillment*(
    market: Market, requestId: RequestId, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeSlotFilled*(
    market: Market, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeSlotFilled*(
    market: Market, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeSlotFreed*(
    market: Market, callback: OnSlotFreed
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeSlotReservationsFull*(
    market: Market, callback: OnSlotReservationsFull
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeRequestCancelled*(
    market: Market, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeRequestCancelled*(
    market: Market, requestId: RequestId, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeRequestFailed*(
    market: Market, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeRequestFailed*(
    market: Market, requestId: RequestId, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method subscribeProofSubmission*(
    market: Market, callback: OnProofSubmitted
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

method unsubscribe*(subscription: Subscription) {.base, async.} =
  raiseAssert("not implemented")

method queryPastSlotFilledEvents*(
    market: Market, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.base, async.} =
  raiseAssert("not implemented")

method queryPastSlotFilledEvents*(
    market: Market, blocksAgo: int
): Future[seq[SlotFilled]] {.base, async.} =
  raiseAssert("not implemented")

method queryPastSlotFilledEvents*(
    market: Market, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.base, async.} =
  raiseAssert("not implemented")

method queryPastStorageRequestedEvents*(
    market: Market, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.base, async.} =
  raiseAssert("not implemented")

method queryPastStorageRequestedEvents*(
    market: Market, blocksAgo: int
): Future[seq[StorageRequested]] {.base, async.} =
  raiseAssert("not implemented")

method slotCollateral*(
    market: Market, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method slotCollateral*(
    market: Market, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.base, gcsafe, raises: [].} =
  raiseAssert("not implemented")
@ -1,10 +1,4 @@
import ./merkletree/merkletree
import ./merkletree/codex
import ./merkletree/poseidon2

export codex, poseidon2, merkletree

type
  SomeMerkleTree* = ByteTree | CodexTree | Poseidon2Tree
  SomeMerkleProof* = ByteProof | CodexProof | Poseidon2Proof
  SomeMerkleHash* = ByteHash | Poseidon2Hash
export codex, merkletree
@ -1,152 +0,0 @@
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/[sequtils, atomics]

import pkg/poseidon2
import pkg/taskpools
import pkg/chronos/threadsync
import pkg/constantine/math/io/io_fields
import pkg/constantine/platforms/abstractions
import pkg/questionable/results

import ../utils
import ../rng

import ./merkletree

export merkletree, poseidon2

const
  KeyNoneF = F.fromHex("0x0")
  KeyBottomLayerF = F.fromHex("0x1")
  KeyOddF = F.fromHex("0x2")
  KeyOddAndBottomLayerF = F.fromHex("0x3")

  Poseidon2Zero* = zero

type
  Bn254Fr* = F
  Poseidon2Hash* = Bn254Fr

  PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
    KeyNone
    KeyBottomLayer
    KeyOdd
    KeyOddAndBottomLayer

  Poseidon2Tree* = MerkleTree[Poseidon2Hash, PoseidonKeysEnum]
  Poseidon2Proof* = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]

proc len*(v: Poseidon2Hash): int =
  sizeof(v)

proc assign*(v: var openArray[byte], h: Poseidon2Hash) =
  doAssert v.len == sizeof(h)
  copyMem(addr v[0], addr h, sizeof(h))

proc assign*(h: var Poseidon2Hash, v: openArray[byte]) =
  doAssert v.len == sizeof(h)
  copyMem(addr h, addr v[0], sizeof(h))

proc `$`*(self: Poseidon2Tree): string =
  let root = if self.root.isOk: self.root.get.toHex else: "none"
  "Poseidon2Tree(" & " root: " & root & ", leavesCount: " & $self.leavesCount &
    ", levels: " & $self.levels & " )"

proc `$`*(self: Poseidon2Proof): string =
  "Poseidon2Proof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index &
    ", path: " & $self.path.mapIt(it.toHex) & " )"

func toArray32*(bytes: openArray[byte]): array[32, byte] =
  result[0 ..< bytes.len] = bytes[0 ..< bytes.len]

converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
  case key
  of KeyNone: KeyNoneF
  of KeyBottomLayer: KeyBottomLayerF
  of KeyOdd: KeyOddF
  of KeyOddAndBottomLayer: KeyOddAndBottomLayerF
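Editor's note: the four key constants above (0x0 through 0x3) act as domain separators for the keyed Poseidon2 compression. Judging by the enum names, the mapping can be read as two bits: bit 0 for "bottom layer", bit 1 for "odd (unpaired) node". A standalone sketch of that reading, with made-up names:

```nim
# Illustrative only: node position -> compression-key value.
type NodeKeyBits = enum
  BitsNone = 0
  BitsBottomLayer = 1
  BitsOdd = 2
  BitsOddAndBottomLayer = 3

func keyBitsFor(bottomLayer, oddNode: bool): NodeKeyBits =
  NodeKeyBits(ord(bottomLayer) or (ord(oddNode) shl 1))

doAssert keyBitsFor(true, false) == BitsBottomLayer
doAssert keyBitsFor(true, true) == BitsOddAndBottomLayer
```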
proc initTree(leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
  if leaves.len == 0:
    return failure "Empty leaves"

  let compressor = proc(
      x, y: Poseidon2Hash, key: PoseidonKeysEnum
  ): ?!Poseidon2Hash {.noSideEffect.} =
    success compress(x, y, key.toKey)

  var self = Poseidon2Tree()
  ?self.prepare(compressor, Poseidon2Zero, leaves)
  success self

func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
  let self = ?initTree(leaves)
  ?self.compute()

  success self

proc init*(
    _: type Poseidon2Tree, tp: Taskpool, leaves: seq[Poseidon2Hash]
): Future[?!Poseidon2Tree] {.async: (raises: [CancelledError]).} =
  let self = ?initTree(leaves)

  ?await self.compute(tp)

  success self

func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
  Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it)))

proc init*(
    _: type Poseidon2Tree, tp: Taskpool, leaves: seq[array[31, byte]]
): Future[?!Poseidon2Tree] {.async: (raises: [CancelledError]).} =
  await Poseidon2Tree.init(tp, leaves.mapIt(Poseidon2Hash.fromBytes(it)))

proc fromNodes*(
    _: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int
): ?!Poseidon2Tree =
  let compressor = proc(
      x, y: Poseidon2Hash, key: PoseidonKeysEnum
  ): ?!Poseidon2Hash {.noSideEffect.} =
    success compress(x, y, key.toKey)

  let self = Poseidon2Tree()
  ?self.fromNodes(compressor, Poseidon2Zero, nodes, nleaves)

  let
    index = Rng.instance.rand(nleaves - 1)
    proof = ?self.getProof(index)

  if not ?proof.verify(self.leaves[index], ?self.root): # sanity check
    return failure "Unable to verify tree built from nodes"

  success self

func init*(
    _: type Poseidon2Proof, index: int, nleaves: int, nodes: openArray[Poseidon2Hash]
): ?!Poseidon2Proof =
  if nodes.len == 0:
    return failure "Empty nodes"

  let compressor = proc(
      x, y: Poseidon2Hash, key: PoseidonKeysEnum
  ): ?!Poseidon2Hash {.noSideEffect.} =
    success compress(x, y, key.toKey)

  success Poseidon2Proof(
    compress: compressor,
    zero: Poseidon2Zero,
    index: index,
    nleaves: nleaves,
    path: @nodes,
  )
@ -1,11 +1,2 @@
const CodecExts = [
  ("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge
  ("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize
  ("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress
  ("codex-manifest", 0xCD01),
  ("codex-block", 0xCD02),
  ("codex-root", 0xCD03),
  ("codex-slot-root", 0xCD04),
  ("codex-proving-root", 0xCD05),
  ("codex-slot-cell", 0xCD06),
]
const CodecExts =
  [("codex-manifest", 0xCD01), ("codex-block", 0xCD02), ("codex-root", 0xCD03)]
@ -1,6 +1,5 @@
import blscurve/bls_public_exports
import pkg/constantine/hashes
import poseidon2

proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
  # Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
@ -9,16 +8,6 @@ proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]
  let digest = hashes.sha256.hash(data)
  copyMem(addr output[0], addr digest[0], 32)

proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = poseidon2.Sponge.digest(data).toBytes()
    copyMem(addr output[0], addr digest[0], uint(len(output)))

proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes()
    copyMem(addr output[0], addr digest[0], uint(len(output)))

const Sha2256MultiHash* = MHash(
  mcodec: multiCodec("sha2-256"),
  size: sha256.sizeDigest,
@ -26,15 +15,5 @@ const Sha2256MultiHash* = MHash(
)
const HashExts = [
  # override sha2-256 hash function
  Sha2256MultiHash,
  MHash(
    mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"),
    size: 32,
    coder: poseidon2_sponge_rate2,
  ),
  MHash(
    mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"),
    size: 32,
    coder: poseidon2_merkle_2kb_sponge,
  ),
  Sha2256MultiHash
]
384
codex/node.nim
@ -19,7 +19,6 @@ import pkg/taskpools
import pkg/questionable
import pkg/questionable/results
import pkg/chronos
import pkg/poseidon2

import pkg/libp2p/[switch, multicodec, multihash]
import pkg/libp2p/stream/bufferstream
@ -29,7 +28,6 @@ import pkg/libp2p/routing_record
import pkg/libp2p/signed_envelope

import ./chunker
import ./slots
import ./clock
import ./blocktype as bt
import ./manifest
@ -37,10 +35,7 @@ import ./merkletree
import ./stores
import ./blockexchange
import ./streams
import ./erasure
import ./discovery
import ./contracts
import ./indexingstrategy
import ./utils
import ./errors
import ./logutils
@ -58,23 +53,13 @@ const
  BatchRefillThreshold = 0.75 # Refill when 75% of window completes

type
  Contracts* =
    tuple[
      client: ?ClientInteractions,
      host: ?HostInteractions,
      validator: ?ValidatorInteractions,
    ]

  CodexNode* = object
    switch: Switch
    networkId: PeerId
    networkStore: NetworkStore
    engine: BlockExcEngine
    prover: ?Prover
    discovery: Discovery
    contracts*: Contracts
    clock*: Clock
    storage*: Contracts
    taskPool: Taskpool
    trackedFutures: TrackedFutures

@ -319,20 +304,6 @@ proc streamEntireDataset(

  var jobs: seq[Future[void]]
  let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
  if manifest.protected:
    # Retrieve, decode and save to the local store all EC groups
    proc erasureJob(): Future[void] {.async: (raises: []).} =
      try:
        # Spawn an erasure decoding job
        let erasure = Erasure.new(
          self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskPool
        )
        without _ =? (await erasure.decode(manifest)), error:
          error "Unable to erasure decode manifest", manifestCid, exc = error.msg
      except CatchableError as exc:
        trace "Error erasure decoding manifest", manifestCid, exc = exc.msg

    jobs.add(erasureJob())

  jobs.add(self.fetchDatasetAsync(manifest, fetchLocal = false))

@ -530,298 +501,11 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =

  onManifest(cid, manifest)

proc setupRequest(
    self: CodexNodeRef,
    cid: Cid,
    duration: uint64,
    proofProbability: UInt256,
    nodes: uint,
    tolerance: uint,
    pricePerBytePerSecond: UInt256,
    collateralPerByte: UInt256,
    expiry: uint64,
): Future[?!StorageRequest] {.async.} =
  ## Setup slots for a given dataset
  ##

  let
    ecK = nodes - tolerance
    ecM = tolerance

  logScope:
    cid = cid
    duration = duration
    nodes = nodes
    tolerance = tolerance
    pricePerBytePerSecond = pricePerBytePerSecond
    proofProbability = proofProbability
    collateralPerByte = collateralPerByte
    expiry = expiry
    ecK = ecK
    ecM = ecM

  trace "Setting up slots"

  without manifest =? await self.fetchManifest(cid), error:
    trace "Unable to fetch manifest for cid"
    return failure error

  # Erasure code the dataset according to provided parameters
  let erasure = Erasure.new(
    self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskPool
  )

  without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
    trace "Unable to erasure code dataset"
    return failure(error)

  without builder =?
    Poseidon2Builder.new(self.networkStore.localStore, encoded, self.taskPool), err:
    trace "Unable to create slot builder"
    return failure(err)

  without verifiable =? (await builder.buildManifest()), err:
    trace "Unable to build verifiable manifest"
    return failure(err)

  without manifestBlk =? await self.storeManifest(verifiable), err:
    trace "Unable to store verifiable manifest"
    return failure(err)

  let
    verifyRoot =
      if builder.verifyRoot.isNone:
        return failure("No slots root")
      else:
        builder.verifyRoot.get.toBytes

    request = StorageRequest(
      ask: StorageAsk(
        slots: verifiable.numSlots.uint64,
        slotSize: builder.slotBytes.uint64,
        duration: duration,
        proofProbability: proofProbability,
        pricePerBytePerSecond: pricePerBytePerSecond,
        collateralPerByte: collateralPerByte,
        maxSlotLoss: tolerance,
      ),
      content: StorageContent(cid: manifestBlk.cid, merkleRoot: verifyRoot),
      expiry: expiry,
    )

  trace "Request created", request = $request
  success request
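Editor's note: a quick worked example of the `nodes`/`tolerance` to `ecK`/`ecM` mapping at the top of `setupRequest` above; the concrete values are hypothetical.

```nim
# nodes = 5, tolerance = 2: the dataset is split into ecK = 3 data slots
# plus ecM = 2 parity slots, so up to `tolerance` slots can be lost while
# the dataset remains recoverable.
let
  nodes = 5'u
  tolerance = 2'u
  ecK = nodes - tolerance # 3
  ecM = tolerance         # 2

doAssert ecK + ecM == nodes
```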
proc requestStorage*(
    self: CodexNodeRef,
    cid: Cid,
    duration: uint64,
    proofProbability: UInt256,
    nodes: uint,
    tolerance: uint,
    pricePerBytePerSecond: UInt256,
    collateralPerByte: UInt256,
    expiry: uint64,
): Future[?!PurchaseId] {.async.} =
  ## Initiate a request for storage sequence; this might
  ## be a multistep procedure.
  ##

  logScope:
    cid = cid
    duration = duration
    nodes = nodes
    tolerance = tolerance
    pricePerBytePerSecond = pricePerBytePerSecond
    proofProbability = proofProbability
    collateralPerByte = collateralPerByte
    expiry = expiry
    now = self.clock.now

  trace "Received a request for storage!"

  without contracts =? self.contracts.client:
    trace "Purchasing not available"
    return failure "Purchasing not available"

  without request =? (
    await self.setupRequest(
      cid, duration, proofProbability, nodes, tolerance, pricePerBytePerSecond,
      collateralPerByte, expiry,
    )
  ), err:
    trace "Unable to setup request"
    return failure err

  let purchase = await contracts.purchasing.purchase(request)
  success purchase.id

proc onStore(
    self: CodexNodeRef,
    request: StorageRequest,
    expiry: SecondsSince1970,
    slotIdx: uint64,
    blocksCb: BlocksCb,
    isRepairing: bool = false,
): Future[?!void] {.async: (raises: [CancelledError]).} =
  ## store data in local storage
  ##

  let cid = request.content.cid

  logScope:
    cid = $cid
    slotIdx = slotIdx

  trace "Received a request to store a slot"

  without manifest =? (await self.fetchManifest(cid)), err:
    trace "Unable to fetch manifest for cid", cid, err = err.msg
    return failure(err)

  without builder =?
    Poseidon2Builder.new(
      self.networkStore, manifest, self.taskPool, manifest.verifiableStrategy
    ), err:
    trace "Unable to create slots builder", err = err.msg
    return failure(err)

  if slotIdx > manifest.slotRoots.high.uint64:
    trace "Slot index not in manifest", slotIdx
    return failure(newException(CodexError, "Slot index not in manifest"))

  proc updateExpiry(
      blocks: seq[bt.Block]
  ): Future[?!void] {.async: (raises: [CancelledError]).} =
    trace "Updating expiry for blocks", blocks = blocks.len

    let ensureExpiryFutures =
      blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))

    let res = await allFinishedFailed[?!void](ensureExpiryFutures)
    if res.failure.len > 0:
      trace "Some blocks failed to update expiry", len = res.failure.len
      return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")

    if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
      trace "Unable to process blocks", err = err.msg
      return failure(err)

    return success()

  if slotIdx > int.high.uint64:
    error "Cannot cast slot index to int", slotIndex = slotIdx
    return

  if isRepairing:
    trace "start repairing slot", slotIdx
    try:
      let erasure = Erasure.new(
        self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskPool
      )
      if err =? (await erasure.repair(manifest)).errorOption:
        error "Unable to erasure decode repairing manifest",
          cid = manifest.treeCid, exc = err.msg
        return failure(err)
    except CatchableError as exc:
      error "Error erasure decoding repairing manifest",
        cid = manifest.treeCid, exc = exc.msg
      return failure(exc.msg)
  else:
    without indexer =?
      manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
      err:
      trace "Unable to create indexing strategy from protected manifest", err = err.msg
      return failure(err)

    without blksIter =? indexer.getIndices(slotIdx.int).catch, err:
      trace "Unable to get indices from strategy", err = err.msg
      return failure(err)

    if err =? (
      await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
    ).errorOption:
      trace "Unable to fetch blocks", err = err.msg
      return failure(err)

  without slotRoot =? (await builder.buildSlot(slotIdx.int)), err:
    trace "Unable to build slot", err = err.msg
    return failure(err)

  if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]:
    trace "Slot root mismatch",
      manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
    return failure(newException(CodexError, "Slot root mismatch"))

  trace "Slot successfully retrieved and reconstructed"

  return success()

proc onProve(
    self: CodexNodeRef, slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async: (raises: [CancelledError]).} =
  ## Generates a proof for a given slot and challenge
  ##

  let
    cidStr = $slot.request.content.cid
    slotIdx = slot.slotIndex

  logScope:
    cid = cidStr
    slot = slotIdx
    challenge = challenge

  trace "Received proof challenge"

  if prover =? self.prover:
    trace "Prover enabled"

    without cid =? Cid.init(cidStr).mapFailure, err:
      error "Unable to parse Cid", cid, err = err.msg
      return failure(err)

    without manifest =? await self.fetchManifest(cid), err:
      error "Unable to fetch manifest for cid", err = err.msg
      return failure(err)

    when defined(verify_circuit):
      without (inputs, proof) =? await prover.prove(slotIdx.int, manifest, challenge),
        err:
        error "Unable to generate proof", err = err.msg
        return failure(err)

      without checked =? await prover.verify(proof, inputs), err:
        error "Unable to verify proof", err = err.msg
        return failure(err)

      if not checked:
        error "Proof verification failed"
        return failure("Proof verification failed")

      trace "Proof verified successfully"
    else:
      without (_, proof) =? await prover.prove(slotIdx.int, manifest, challenge), err:
        error "Unable to generate proof", err = err.msg
        return failure(err)

    let groth16Proof = proof.toGroth16Proof()
    trace "Proof generated successfully", groth16Proof

    success groth16Proof
  else:
    warn "Prover not enabled"
    failure "Prover not enabled"

proc onExpiryUpdate(
    self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raises: [CancelledError]).} =
  return await self.updateExpiry(rootCid, expiry)

proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) =
  # TODO: remove data from local storage
  discard

proc start*(self: CodexNodeRef) {.async.} =
  if not self.engine.isNil:
    await self.engine.start()
@ -832,57 +516,6 @@ proc start*(self: CodexNodeRef) {.async.} =
  if not self.clock.isNil:
    await self.clock.start()

  if hostContracts =? self.contracts.host:
    hostContracts.sales.onStore = proc(
        request: StorageRequest,
        expiry: SecondsSince1970,
        slot: uint64,
        onBatch: BatchProc,
        isRepairing: bool = false,
    ): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
      self.onStore(request, expiry, slot, onBatch, isRepairing)

    hostContracts.sales.onExpiryUpdate = proc(
        rootCid: Cid, expiry: SecondsSince1970
    ): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
      self.onExpiryUpdate(rootCid, expiry)

    hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) =
      # TODO: remove data from local storage
      self.onClear(request, slotIndex)

    hostContracts.sales.onProve = proc(
        slot: Slot, challenge: ProofChallenge
    ): Future[?!Groth16Proof] {.async: (raw: true, raises: [CancelledError]).} =
      # TODO: generate proof
      self.onProve(slot, challenge)

    try:
      await hostContracts.start()
    except CancelledError as error:
      raise error
    except CatchableError as error:
      error "Unable to start host contract interactions", error = error.msg
      self.contracts.host = HostInteractions.none

  if clientContracts =? self.contracts.client:
    try:
      await clientContracts.start()
    except CancelledError as error:
      raise error
    except CatchableError as error:
      error "Unable to start client contract interactions: ", error = error.msg
      self.contracts.client = ClientInteractions.none

  if validatorContracts =? self.contracts.validator:
    try:
      await validatorContracts.start()
    except CancelledError as error:
      raise error
    except CatchableError as error:
      error "Unable to start validator contract interactions: ", error = error.msg
      self.contracts.validator = ValidatorInteractions.none

  self.networkId = self.switch.peerInfo.peerId
  notice "Started Storage node", id = self.networkId, addrs = self.switch.peerInfo.addrs

@ -897,15 +530,6 @@ proc stop*(self: CodexNodeRef) {.async.} =
  if not self.discovery.isNil:
    await self.discovery.stop()

  if clientContracts =? self.contracts.client:
    await clientContracts.stop()

  if hostContracts =? self.contracts.host:
    await hostContracts.stop()

  if validatorContracts =? self.contracts.validator:
    await validatorContracts.stop()

  if not self.clock.isNil:
    await self.clock.stop()

@ -919,9 +543,7 @@ proc new*(
    networkStore: NetworkStore,
    engine: BlockExcEngine,
    discovery: Discovery,
    taskPool: Taskpool,
    prover = Prover.none,
    contracts = Contracts.default,
    taskpool: Taskpool,
): CodexNodeRef =
  ## Create a new instance of a Codex node; call `start` to run it
  ##
@ -930,10 +552,8 @@ proc new*(
    switch: switch,
    networkStore: networkStore,
    engine: engine,
    prover: prover,
    discovery: discovery,
    taskPool: taskPool,
    contracts: contracts,
    taskPool: taskpool,
    trackedFutures: TrackedFutures(),
  )
@ -1,17 +0,0 @@
import pkg/stint

type
  Periodicity* = object
    seconds*: uint64

  Period* = uint64
  Timestamp* = uint64

func periodOf*(periodicity: Periodicity, timestamp: Timestamp): Period =
  timestamp div periodicity.seconds

func periodStart*(periodicity: Periodicity, period: Period): Timestamp =
  period * periodicity.seconds

func periodEnd*(periodicity: Periodicity, period: Period): Timestamp =
  periodicity.periodStart(period + 1)
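Editor's note: a worked example of the period arithmetic in the file above, redefined locally so it runs standalone; the 10-second period is a hypothetical value.

```nim
# With 10-second periods, timestamp 25 falls in period 2, spanning [20, 30).
type Periodicity = object
  seconds: uint64

func periodOf(p: Periodicity, t: uint64): uint64 = t div p.seconds
func periodStart(p: Periodicity, period: uint64): uint64 = period * p.seconds
func periodEnd(p: Periodicity, period: uint64): uint64 = p.periodStart(period + 1)

let p = Periodicity(seconds: 10)
doAssert p.periodOf(25) == 2
doAssert p.periodStart(2) == 20
doAssert p.periodEnd(2) == 30
```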
@ -1,74 +0,0 @@
|
||||
import std/tables
|
||||
import pkg/stint
|
||||
import pkg/chronos
|
||||
import pkg/questionable
|
||||
import pkg/nimcrypto
|
||||
import ./market
|
||||
import ./clock
|
||||
import ./purchasing/purchase
|
||||
|
||||
export questionable
|
||||
export chronos
|
||||
export market
|
||||
export purchase
|
||||
|
||||
type
|
||||
Purchasing* = ref object
|
||||
market*: Market
|
||||
clock: Clock
|
||||
purchases: Table[PurchaseId, Purchase]
|
||||
proofProbability*: UInt256
|
||||
|
||||
PurchaseTimeout* = Timeout
|
||||
|
||||
const DefaultProofProbability = 100.u256
|
||||
|
||||
proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
|
||||
Purchasing(market: market, clock: clock, proofProbability: DefaultProofProbability)
|
||||
|
||||
proc load*(purchasing: Purchasing) {.async.} =
|
||||
let market = purchasing.market
|
||||
let requestIds = await market.myRequests()
|
||||
for requestId in requestIds:
|
||||
let purchase = Purchase.new(requestId, purchasing.market, purchasing.clock)
|
||||
purchase.load()
|
||||
purchasing.purchases[purchase.id] = purchase
|
||||
|
||||
proc start*(purchasing: Purchasing) {.async.} =
|
||||
await purchasing.load()
|
||||
|
||||
proc stop*(purchasing: Purchasing) {.async.} =
|
||||
discard
|
||||
|
||||
proc populate*(
|
||||
purchasing: Purchasing, request: StorageRequest
|
||||
): Future[StorageRequest] {.async.} =
|
||||
result = request
|
||||
if result.ask.proofProbability == 0.u256:
|
||||
result.ask.proofProbability = purchasing.proofProbability
|
||||
if result.nonce == Nonce.default:
|
||||
var id = result.nonce.toArray
|
||||
doAssert randomBytes(id) == 32
|
||||
result.nonce = Nonce(id)
|
||||
result.client = await purchasing.market.getSigner()
|
||||
|
||||
proc purchase*(
|
||||
purchasing: Purchasing, request: StorageRequest
|
||||
): Future[Purchase] {.async.} =
|
||||
let request = await purchasing.populate(request)
|
||||
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
|
||||
purchase.start()
|
||||
purchasing.purchases[purchase.id] = purchase
|
||||
return purchase
|
||||
|
||||
func getPurchase*(purchasing: Purchasing, id: PurchaseId): ?Purchase =
|
||||
if purchasing.purchases.hasKey(id):
|
||||
some purchasing.purchases[id]
|
||||
else:
|
||||
none Purchase
|
||||
|
||||
func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] =
|
||||
var pIds: seq[PurchaseId] = @[]
|
||||
for key in purchasing.purchases.keys:
|
||||
pIds.add(key)
|
||||
return pIds
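
A hedged sketch of driving this API end to end, inside some async proc; the
market and clock instances are assumed to be constructed elsewhere, and a real
StorageRequest would carry many more populated fields:

  # Assumes `market: Market` and `clock: Clock` exist (hypothetical setup).
  let purchasing = Purchasing.new(market, clock)
  await purchasing.start()                  # restores previously submitted purchases

  var request: StorageRequest               # illustrative, mostly-default request
  let purchase = await purchasing.purchase(request)
  await purchase.wait()                     # resolves when the purchase finishes or fails

  if p =? purchasing.getPurchase(purchase.id):
    echo "purchase state: ", p.state |? "none"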

@ -1,74 +0,0 @@
import ./statemachine
import ./states/pending
import ./states/unknown
import ./purchaseid

# Purchase is implemented as a state machine.
#
# It can either be a new (pending) purchase that still needs to be submitted
# on-chain, or it is a purchase that was previously submitted on-chain, and
# we're just restoring its (unknown) state after a node restart.
#
#                                                                 |
#                                                                 v
#                       ------------------------------------- unknown
#                      |                                       /    /
#                      v                                      v    /
#   pending ----> submitted ----> started ---------> finished <----/
#                    \               \                             /
#                     \               ------------> failed <------/
#                      \                                         /
#                       --> cancelled <-------------------------

export Purchase
export purchaseid
export statemachine

func new*(
    _: type Purchase, requestId: RequestId, market: Market, clock: Clock
): Purchase =
  ## create a new instance of a Purchase
  ##
  var purchase = Purchase.new()
  {.cast(noSideEffect).}:
    purchase.future = newFuture[void]()
  purchase.requestId = requestId
  purchase.market = market
  purchase.clock = clock

  return purchase

func new*(
    _: type Purchase, request: StorageRequest, market: Market, clock: Clock
): Purchase =
  ## Create a new purchase using the given market and clock
  let purchase = Purchase.new(request.id, market, clock)
  purchase.request = some request
  return purchase

proc start*(purchase: Purchase) =
  purchase.start(PurchasePending())

proc load*(purchase: Purchase) =
  purchase.start(PurchaseUnknown())

proc wait*(purchase: Purchase) {.async.} =
  await purchase.future

func id*(purchase: Purchase): PurchaseId =
  PurchaseId(purchase.requestId)

func finished*(purchase: Purchase): bool =
  purchase.future.finished

func error*(purchase: Purchase): ?(ref CatchableError) =
  if purchase.future.failed:
    some purchase.future.error
  else:
    none (ref CatchableError)

func state*(purchase: Purchase): ?string =
  proc description(state: State): string =
    $state

  purchase.query(description)
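
A small sketch contrasting the two entry points; `market`, `clock`, and the
request/requestId values are assumed to come from the caller:

  # Fresh purchase: submits the request on-chain via the pending state.
  let fresh = Purchase.new(request, market, clock)
  fresh.start()    # pending -> submitted -> started -> ...

  # After a node restart: recover the on-chain state via the unknown state.
  let restored = Purchase.new(requestId, market, clock)
  restored.load()  # unknown -> submitted | started | finished | failed | cancelled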

@ -1,14 +0,0 @@
import std/hashes
import ../logutils

type PurchaseId* = distinct array[32, byte]

logutils.formatIt(LogFormat.textLines, PurchaseId):
  it.short0xHexLog
logutils.formatIt(LogFormat.json, PurchaseId):
  it.to0xHexLog

proc hash*(x: PurchaseId): Hash {.borrow.}
proc `==`*(x, y: PurchaseId): bool {.borrow.}
proc toHex*(x: PurchaseId): string =
  array[32, byte](x).toHex
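
Because PurchaseId is a distinct array type, it borrows hash and equality so it
can key a Table while staying type-incompatible with raw byte arrays. A minimal
sketch (the byte values are illustration only):

  var bytes: array[32, byte]          # all zeroes except the last byte
  bytes[31] = 1
  let id = PurchaseId(bytes)          # explicit conversion is required
  assert id == PurchaseId(bytes)      # borrowed `==`
  echo id.toHex                       # hex rendering used by the REST API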

@ -1,19 +0,0 @@
import ../utils/asyncstatemachine
import ../market
import ../clock
import ../errors

export market
export clock
export asyncstatemachine

type
  Purchase* = ref object of Machine
    future*: Future[void]
    market*: Market
    clock*: Clock
    requestId*: RequestId
    request*: ?StorageRequest

  PurchaseState* = ref object of State
  PurchaseError* = object of CodexError
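
Every purchase state below follows the same shape: subclass PurchaseState,
stringify itself for the REST API, and implement `run` to return the next state
(or nothing to stay put). A skeletal sketch, with PurchaseExample as a made-up
name rather than a state from the codebase:

  type PurchaseExample = ref object of PurchaseState

  method `$`*(state: PurchaseExample): string =
    "example"

  method run*(
      state: PurchaseExample, machine: Machine
  ): Future[?State] {.async: (raises: []).} =
    let purchase = Purchase(machine)  # downcast to reach market/clock/future
    # ... do this state's work, then hand over to whichever state comes next
    return some State(PurchaseExample())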

@ -1,35 +0,0 @@
import pkg/metrics

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./error

declareCounter(codex_purchases_cancelled, "codex purchases cancelled")

logScope:
  topics = "marketplace purchases cancelled"

type PurchaseCancelled* = ref object of PurchaseState

method `$`*(state: PurchaseCancelled): string =
  "cancelled"

method run*(
    state: PurchaseCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_cancelled.inc()
  let purchase = Purchase(machine)

  try:
    warn "Request cancelled, withdrawing remaining funds",
      requestId = purchase.requestId
    await purchase.market.withdrawFunds(purchase.requestId)

    let error = newException(Timeout, "Purchase cancelled due to timeout")
    purchase.future.fail(error)
  except CancelledError as e:
    trace "PurchaseCancelled.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseCancelled.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))
@ -1,26 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../utils/exceptions
import ../../logutils

declareCounter(codex_purchases_error, "codex purchases error")

logScope:
  topics = "marketplace purchases errored"

type PurchaseErrored* = ref object of PurchaseState
  error*: ref CatchableError

method `$`*(state: PurchaseErrored): string =
  "errored"

method run*(
    state: PurchaseErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_error.inc()
  let purchase = Purchase(machine)

  error "Purchasing error",
    error = state.error.msgDetail, requestId = purchase.requestId

  purchase.future.fail(state.error)

@ -1,30 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../logutils
import ../../utils/exceptions
import ./error

declareCounter(codex_purchases_failed, "codex purchases failed")

type PurchaseFailed* = ref object of PurchaseState

method `$`*(state: PurchaseFailed): string =
  "failed"

method run*(
    state: PurchaseFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_failed.inc()
  let purchase = Purchase(machine)

  try:
    warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
    await purchase.market.withdrawFunds(purchase.requestId)
  except CancelledError as e:
    trace "PurchaseFailed.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseFailed.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

  let error = newException(PurchaseError, "Purchase failed")
  return some State(PurchaseErrored(error: error))

@ -1,33 +0,0 @@
import pkg/metrics

import ../statemachine
import ../../utils/exceptions
import ../../logutils
import ./error

declareCounter(codex_purchases_finished, "codex purchases finished")

logScope:
  topics = "marketplace purchases finished"

type PurchaseFinished* = ref object of PurchaseState

method `$`*(state: PurchaseFinished): string =
  "finished"

method run*(
    state: PurchaseFinished, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_finished.inc()
  let purchase = Purchase(machine)
  try:
    info "Purchase finished, withdrawing remaining funds",
      requestId = purchase.requestId
    await purchase.market.withdrawFunds(purchase.requestId)

    purchase.future.complete()
  except CancelledError as e:
    trace "PurchaseFinished.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseFinished.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

@ -1,28 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./submitted
import ./error

declareCounter(codex_purchases_pending, "codex purchases pending")

type PurchasePending* = ref object of PurchaseState

method `$`*(state: PurchasePending): string =
  "pending"

method run*(
    state: PurchasePending, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_pending.inc()
  let purchase = Purchase(machine)
  try:
    let request = !purchase.request
    await purchase.market.requestStorage(request)
    return some State(PurchaseSubmitted())
  except CancelledError as e:
    trace "PurchasePending.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchasePending.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

@ -1,54 +0,0 @@
import pkg/metrics

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./finished
import ./failed
import ./error

declareCounter(codex_purchases_started, "codex purchases started")

logScope:
  topics = "marketplace purchases started"

type PurchaseStarted* = ref object of PurchaseState

method `$`*(state: PurchaseStarted): string =
  "started"

method run*(
    state: PurchaseStarted, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_started.inc()
  let purchase = Purchase(machine)

  let clock = purchase.clock
  let market = purchase.market
  info "All required slots filled, purchase started", requestId = purchase.requestId

  let failed = newFuture[void]()
  proc callback(_: RequestId) =
    failed.complete()

  var ended: Future[void]
  try:
    let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)

    # Ensure that we're past the request end by waiting an additional second
    ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
    let fut = await one(ended, failed)
    await subscription.unsubscribe()
    if fut.id == failed.id:
      ended.cancelSoon()
      return some State(PurchaseFailed())
    else:
      failed.cancelSoon()
      return some State(PurchaseFinished())
  except CancelledError as e:
    ended.cancelSoon()
    failed.cancelSoon()
    trace "PurchaseStarted.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseStarted.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))
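
The core of this state is a race between two futures: request failure
(signalled by a subscription callback) and request end (a clock wait).
Whichever completes first decides the next state, and the loser must be
cancelled so it doesn't leak. A distilled sketch of that pattern with chronos,
using stand-in futures rather than the market calls above:

  import pkg/chronos

  proc race() {.async.} =
    let ended = sleepAsync(1.seconds)   # stand-in for the clock wait
    let failed = newFuture[void]()      # stand-in for the failure signal
    let winner = await one(ended, failed)
    if winner.id == failed.id:
      ended.cancelSoon()                # always cancel the losing future
    else:
      failed.cancelSoon()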

@ -1,56 +0,0 @@
import pkg/metrics

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./started
import ./cancelled
import ./error

logScope:
  topics = "marketplace purchases submitted"

declareCounter(codex_purchases_submitted, "codex purchases submitted")

type PurchaseSubmitted* = ref object of PurchaseState

method `$`*(state: PurchaseSubmitted): string =
  "submitted"

method run*(
    state: PurchaseSubmitted, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_submitted.inc()
  let purchase = Purchase(machine)
  let request = !purchase.request
  let market = purchase.market
  let clock = purchase.clock

  info "Request submitted, waiting for slots to be filled",
    requestId = purchase.requestId

  proc wait() {.async.} =
    let done = newAsyncEvent()
    proc callback(_: RequestId) =
      done.fire()

    let subscription = await market.subscribeFulfillment(request.id, callback)
    await done.wait()
    await subscription.unsubscribe()

  proc withTimeout(future: Future[void]) {.async.} =
    let expiry = (await market.requestExpiresAt(request.id)) + 1
    trace "waiting for request fulfillment or expiry", expiry
    await future.withTimeout(clock, expiry)

  try:
    await wait().withTimeout()
  except Timeout:
    return some State(PurchaseCancelled())
  except CancelledError as e:
    trace "PurchaseSubmitted.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseSubmitted.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

  return some State(PurchaseStarted())
@ -1,44 +0,0 @@
import pkg/metrics
import ../../utils/exceptions
import ../../logutils
import ../statemachine
import ./submitted
import ./started
import ./cancelled
import ./finished
import ./failed
import ./error

declareCounter(codex_purchases_unknown, "codex purchases unknown")

type PurchaseUnknown* = ref object of PurchaseState

method `$`*(state: PurchaseUnknown): string =
  "unknown"

method run*(
    state: PurchaseUnknown, machine: Machine
): Future[?State] {.async: (raises: []).} =
  try:
    codex_purchases_unknown.inc()
    let purchase = Purchase(machine)
    if (request =? await purchase.market.getRequest(purchase.requestId)) and
        (requestState =? await purchase.market.requestState(purchase.requestId)):
      purchase.request = some request

      case requestState
      of RequestState.New:
        return some State(PurchaseSubmitted())
      of RequestState.Started:
        return some State(PurchaseStarted())
      of RequestState.Cancelled:
        return some State(PurchaseCancelled())
      of RequestState.Finished:
        return some State(PurchaseFinished())
      of RequestState.Failed:
        return some State(PurchaseFailed())
  except CancelledError as e:
    trace "PurchaseUnknown.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseUnknown.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

@ -30,8 +30,6 @@ import ../logutils
import ../node
import ../blocktype
import ../conf
import ../contracts
import ../erasure/erasure
import ../manifest
import ../streams/asyncstreamwrapper
import ../stores
@ -116,9 +114,7 @@ proc retrieveCid(
  # For erasure-coded datasets, we need to return the _original_ length; i.e.,
  # the length of the non-erasure-coded dataset, as that's what we will be
  # returning to the client.
  let contentLength =
    if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize
  resp.setHeader("Content-Length", $(contentLength.int))
  resp.setHeader("Content-Length", $(manifest.datasetSize.int))

  await resp.prepare(HttpResponseStreamType.Plain)
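
In other words: a protected dataset occupies more bytes on the network than the
client uploaded, but the GET response must advertise the original size. A hedged
numeric sketch, with all sizes illustrative and the 1.5x factor assuming the
expansion is roughly nodes / (nodes - tolerance):

  let original = 64 * 1024 * 1024                  # bytes the client uploaded
  let expansion = 3 / (3 - 1)                      # e.g. nodes = 3, tolerance = 1
  let onNetwork = int(original.float * expansion)  # ~96 MiB stored across hosts
  # The response header still reports `original`, never `onNetwork`.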

@ -388,459 +384,6 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) =
    )
    return RestApiResponse.response($json, contentType = "application/json")

proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
  let allowedOrigin = router.allowedOrigin

  router.api(MethodGet, "/api/storage/v1/sales/slots") do() -> RestApiResponse:
    var headers = buildCorsHeaders("GET", allowedOrigin)

    ## Returns active slots for the host
    try:
      without contracts =? node.contracts.host:
        return RestApiResponse.error(
          Http503, "Persistence is not enabled", headers = headers
        )

      let json = %(await contracts.sales.mySlots())
      return RestApiResponse.response(
        $json, contentType = "application/json", headers = headers
      )
    except CatchableError as exc:
      trace "Exception processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

  router.api(MethodGet, "/api/storage/v1/sales/slots/{slotId}") do(
    slotId: SlotId
  ) -> RestApiResponse:
    ## Returns active slot with id {slotId} for the host. Returns 404 if the
    ## slot is not active for the host.
    var headers = buildCorsHeaders("GET", allowedOrigin)

    without contracts =? node.contracts.host:
      return
        RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)

    without slotId =? slotId.tryGet.catch, error:
      return RestApiResponse.error(Http400, error.msg, headers = headers)

    without agent =? await contracts.sales.activeSale(slotId):
      return
        RestApiResponse.error(Http404, "Provider not filling slot", headers = headers)

    let restAgent = RestSalesAgent(
      state: agent.state() |? "none",
      slotIndex: agent.data.slotIndex,
      requestId: agent.data.requestId,
      request: agent.data.request,
      reservation: agent.data.reservation,
    )

    return RestApiResponse.response(
      restAgent.toJson, contentType = "application/json", headers = headers
    )

  router.api(MethodGet, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
    ## Returns storage that is for sale
    var headers = buildCorsHeaders("GET", allowedOrigin)

    try:
      without contracts =? node.contracts.host:
        return RestApiResponse.error(
          Http503, "Persistence is not enabled", headers = headers
        )

      without avails =? (await contracts.sales.context.reservations.all(Availability)),
        err:
        return RestApiResponse.error(Http500, err.msg, headers = headers)

      let json = %avails
      return RestApiResponse.response(
        $json, contentType = "application/json", headers = headers
      )
    except CatchableError as exc:
      trace "Exception processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

  router.rawApi(MethodPost, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
    ## Add available storage to sell.
    ## Every time Availability's offer finishes, its capacity is
    ## returned to the availability.
    ##
    ## totalSize - size of available storage in bytes
    ## duration - maximum time the storage should be sold for (in seconds)
    ## minPricePerBytePerSecond - minimal price per byte paid (in amount of
    ## tokens) to be matched against the request's pricePerBytePerSecond
    ## totalCollateral - total collateral (in amount of
    ## tokens) that can be distributed among matching requests

    var headers = buildCorsHeaders("POST", allowedOrigin)

    try:
      without contracts =? node.contracts.host:
        return RestApiResponse.error(
          Http503, "Persistence is not enabled", headers = headers
        )

      let body = await request.getBody()

      without restAv =? RestAvailability.fromJson(body), error:
        return RestApiResponse.error(Http400, error.msg, headers = headers)

      let reservations = contracts.sales.context.reservations

      if restAv.totalSize == 0:
        return RestApiResponse.error(
          Http422, "Total size must be larger than zero", headers = headers
        )

      if restAv.duration == 0:
        return RestApiResponse.error(
          Http422, "duration must be larger than zero", headers = headers
        )

      if restAv.minPricePerBytePerSecond == 0:
        return RestApiResponse.error(
          Http422,
          "minPricePerBytePerSecond must be larger than zero",
          headers = headers,
        )

      if restAv.totalCollateral == 0:
        return RestApiResponse.error(
          Http422, "totalCollateral must be larger than zero", headers = headers
        )

      if not reservations.hasAvailable(restAv.totalSize):
        return
          RestApiResponse.error(Http422, "Not enough storage quota", headers = headers)

      without availability =? (
        await reservations.createAvailability(
          restAv.totalSize,
          restAv.duration,
          restAv.minPricePerBytePerSecond,
          restAv.totalCollateral,
          enabled = restAv.enabled |? true,
          until = restAv.until |? 0,
        )
      ), error:
        if error of CancelledError:
          raise error
        if error of UntilOutOfBoundsError:
          return RestApiResponse.error(Http422, error.msg)

        return RestApiResponse.error(Http500, error.msg, headers = headers)

      return RestApiResponse.response(
        availability.toJson,
        Http201,
        contentType = "application/json",
        headers = headers,
      )
    except CatchableError as exc:
      trace "Exception processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

  router.api(MethodOptions, "/api/storage/v1/sales/availability/{id}") do(
    id: AvailabilityId, resp: HttpResponseRef
  ) -> RestApiResponse:
    if corsOrigin =? allowedOrigin:
      resp.setCorsHeaders("PATCH", corsOrigin)

    resp.status = Http204
    await resp.sendBody("")

  router.rawApi(MethodPatch, "/api/storage/v1/sales/availability/{id}") do(
    id: AvailabilityId
  ) -> RestApiResponse:
    ## Updates Availability.
    ## The new parameters will only be considered for new requests.
    ## Existing Requests linked to this Availability will continue as is.
    ##
    ## totalSize - size of available storage in bytes.
    ##             When decreasing the size, the lower limit is
    ##             the current `totalSize - freeSize`.
    ## duration - maximum time the storage should be sold for (in seconds)
    ## minPricePerBytePerSecond - minimal price per byte paid (in amount of
    ## tokens) to be matched against the request's pricePerBytePerSecond
    ## totalCollateral - total collateral (in amount of
    ## tokens) that can be distributed among matching requests

    try:
      without contracts =? node.contracts.host:
        return RestApiResponse.error(Http503, "Persistence is not enabled")

      without id =? id.tryGet.catch, error:
        return RestApiResponse.error(Http400, error.msg)
      without keyId =? id.key.tryGet.catch, error:
        return RestApiResponse.error(Http400, error.msg)

      let
        body = await request.getBody()
        reservations = contracts.sales.context.reservations

      type OptRestAvailability = Optionalize(RestAvailability)
      without restAv =? OptRestAvailability.fromJson(body), error:
        return RestApiResponse.error(Http400, error.msg)

      without availability =? (await reservations.get(keyId, Availability)), error:
        if error of NotExistsError:
          return RestApiResponse.error(Http404, "Availability not found")

        return RestApiResponse.error(Http500, error.msg)

      if isSome restAv.freeSize:
        return RestApiResponse.error(Http422, "Updating freeSize is not allowed")

      if size =? restAv.totalSize:
        if size == 0:
          return RestApiResponse.error(Http422, "Total size must be larger than zero")

        # we don't allow lowering the totalSize below the currently utilized size
        if size < (availability.totalSize - availability.freeSize):
          return RestApiResponse.error(
            Http422,
            "New totalSize must be larger than current totalSize - freeSize, which is currently: " &
              $(availability.totalSize - availability.freeSize),
          )

        if not reservations.hasAvailable(size):
          return RestApiResponse.error(Http422, "Not enough storage quota")

        availability.freeSize += size - availability.totalSize
        availability.totalSize = size

      if duration =? restAv.duration:
        availability.duration = duration

      if minPricePerBytePerSecond =? restAv.minPricePerBytePerSecond:
        availability.minPricePerBytePerSecond = minPricePerBytePerSecond

      if totalCollateral =? restAv.totalCollateral:
        availability.totalCollateral = totalCollateral

      if until =? restAv.until:
        availability.until = until

      if enabled =? restAv.enabled:
        availability.enabled = enabled

      if err =? (await reservations.update(availability)).errorOption:
        if err of CancelledError:
          raise err
        if err of UntilOutOfBoundsError:
          return RestApiResponse.error(Http422, err.msg)
        else:
          return RestApiResponse.error(Http500, err.msg)

      return RestApiResponse.response(Http204)
    except CatchableError as exc:
      trace "Exception processing request", exc = exc.msg
      return RestApiResponse.error(Http500)

  router.rawApi(MethodGet, "/api/storage/v1/sales/availability/{id}/reservations") do(
    id: AvailabilityId
  ) -> RestApiResponse:
    ## Gets Availability's reservations.
    var headers = buildCorsHeaders("GET", allowedOrigin)

    try:
      without contracts =? node.contracts.host:
        return RestApiResponse.error(
          Http503, "Persistence is not enabled", headers = headers
        )

      without id =? id.tryGet.catch, error:
        return RestApiResponse.error(Http400, error.msg, headers = headers)
      without keyId =? id.key.tryGet.catch, error:
        return RestApiResponse.error(Http400, error.msg, headers = headers)

      let reservations = contracts.sales.context.reservations
      let market = contracts.sales.context.market

      if error =? (await reservations.get(keyId, Availability)).errorOption:
        if error of NotExistsError:
          return
            RestApiResponse.error(Http404, "Availability not found", headers = headers)
        else:
          return RestApiResponse.error(Http500, error.msg, headers = headers)

      without availabilitysReservations =? (await reservations.all(Reservation, id)),
        err:
        return RestApiResponse.error(Http500, err.msg, headers = headers)

      # TODO: Expand this structure with information about the linked StorageRequest not only RequestID
      return RestApiResponse.response(
        availabilitysReservations.toJson,
        contentType = "application/json",
        headers = headers,
      )
    except CatchableError as exc:
      trace "Exception processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
  let allowedOrigin = router.allowedOrigin

  router.rawApi(MethodPost, "/api/storage/v1/storage/request/{cid}") do(
    cid: Cid
  ) -> RestApiResponse:
    var headers = buildCorsHeaders("POST", allowedOrigin)

    ## Create a request for storage
    ##
    ## cid - the cid of a previously uploaded dataset
    ## duration - the duration of the request in seconds
    ## proofProbability - how often storage proofs are required
    ## pricePerBytePerSecond - the amount of tokens paid per byte per second to hosts the client is willing to pay
    ## expiry - specifies threshold in seconds from now when the request expires if the Request does not find requested amount of nodes to host the data
    ## nodes - number of nodes the content should be stored on
    ## tolerance - allowed number of nodes that can be lost before content is lost
    ## collateralPerByte - requested collateral per byte from hosts when they fill slot
    try:
      without contracts =? node.contracts.client:
        return RestApiResponse.error(
          Http503, "Persistence is not enabled", headers = headers
        )

      without cid =? cid.tryGet.catch, error:
        return RestApiResponse.error(Http400, error.msg, headers = headers)

      let body = await request.getBody()

      without params =? StorageRequestParams.fromJson(body), error:
        return RestApiResponse.error(Http400, error.msg, headers = headers)

      let expiry = params.expiry

      if expiry <= 0 or expiry >= params.duration:
        return RestApiResponse.error(
          Http422,
          "Expiry must be greater than zero and less than the request's duration",
          headers = headers,
        )

      if params.proofProbability <= 0:
        return RestApiResponse.error(
          Http422, "Proof probability must be greater than zero", headers = headers
        )

      if params.collateralPerByte <= 0:
        return RestApiResponse.error(
          Http422, "Collateral per byte must be greater than zero", headers = headers
        )

      if params.pricePerBytePerSecond <= 0:
        return RestApiResponse.error(
          Http422,
          "Price per byte per second must be greater than zero",
          headers = headers,
        )

      let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit
      if params.duration > requestDurationLimit:
        return RestApiResponse.error(
          Http422,
          "Duration exceeds limit of " & $requestDurationLimit & " seconds",
          headers = headers,
        )

      let nodes = params.nodes |? 3
      let tolerance = params.tolerance |? 1

      if tolerance == 0:
        return RestApiResponse.error(
          Http422, "Tolerance needs to be bigger than zero", headers = headers
        )

      # prevent underflow
      if tolerance > nodes:
        return RestApiResponse.error(
          Http422,
          "Invalid parameters: `tolerance` cannot be greater than `nodes`",
          headers = headers,
        )

      let ecK = nodes - tolerance
      let ecM = tolerance # for readability

      # ensure the leopard constraint of 1 < K ≥ M
      if ecK <= 1 or ecK < ecM:
        return RestApiResponse.error(
          Http422,
          "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance) ≥ tolerance`",
          headers = headers,
        )

      without purchaseId =?
        await node.requestStorage(
          cid, params.duration, params.proofProbability, nodes, tolerance,
          params.pricePerBytePerSecond, params.collateralPerByte, expiry,
        ), error:
        if error of InsufficientBlocksError:
          return RestApiResponse.error(
            Http422,
            "Dataset too small for erasure parameters, need at least " &
              $(ref InsufficientBlocksError)(error).minSize.int & " bytes",
            headers = headers,
          )

        return RestApiResponse.error(Http500, error.msg, headers = headers)

      return RestApiResponse.response(purchaseId.toHex)
    except CatchableError as exc:
      trace "Exception processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)
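
A worked example of the erasure-parameter check above: with the defaults
nodes = 3 and tolerance = 1, ecK = 2 and ecM = 1, which passes since 2 > 1 and
2 ≥ 1; nodes = 4, tolerance = 3 fails because ecK = 1, and nodes = 5,
tolerance = 3 fails because ecK = 2 < ecM = 3. A minimal sketch of the same
predicate (the helper name is made up for illustration):

  func validEcParams(nodes, tolerance: uint): bool =
    ## Mirrors the handler's check: 1 < (nodes - tolerance) and
    ## (nodes - tolerance) >= tolerance; assumes tolerance <= nodes.
    let ecK = nodes - tolerance
    let ecM = tolerance
    ecK > 1 and ecK >= ecM

  assert validEcParams(3, 1)          # the defaults above
  assert not validEcParams(4, 3)      # ecK = 1
  assert not validEcParams(5, 3)      # ecK = 2 < ecM = 3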

  router.api(MethodGet, "/api/storage/v1/storage/purchases/{id}") do(
    id: PurchaseId
  ) -> RestApiResponse:
    var headers = buildCorsHeaders("GET", allowedOrigin)

    try:
      without contracts =? node.contracts.client:
        return RestApiResponse.error(
          Http503, "Persistence is not enabled", headers = headers
        )

      without id =? id.tryGet.catch, error:
        return RestApiResponse.error(Http400, error.msg, headers = headers)

      without purchase =? contracts.purchasing.getPurchase(id):
        return RestApiResponse.error(Http404, headers = headers)

      let json =
        %RestPurchase(
          state: purchase.state |? "none",
          error: purchase.error .? msg,
          request: purchase.request,
          requestId: purchase.requestId,
        )

      return RestApiResponse.response(
        $json, contentType = "application/json", headers = headers
      )
    except CatchableError as exc:
      trace "Exception processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

  router.api(MethodGet, "/api/storage/v1/storage/purchases") do() -> RestApiResponse:
    var headers = buildCorsHeaders("GET", allowedOrigin)

    try:
      without contracts =? node.contracts.client:
        return RestApiResponse.error(
          Http503, "Persistence is not enabled", headers = headers
        )

      let purchaseIds = contracts.purchasing.getPurchaseIds()
      return RestApiResponse.response(
        $ %purchaseIds, contentType = "application/json", headers = headers
      )
    except CatchableError as exc:
      trace "Exception processing request", exc = exc.msg
      return RestApiResponse.error(Http500, headers = headers)

proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
  let allowedOrigin = router.allowedOrigin

@ -946,11 +489,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
        "",
      "announceAddresses": node.discovery.announceAddrs,
      "table": table,
      "storage": {
        "version": $codexVersion,
        "revision": $codexRevision,
        "contracts": $codexContractsRevision,
      },
      "storage": {"version": $codexVersion, "revision": $codexRevision},
    }

    # return pretty json for human readability
@ -1016,8 +555,6 @@ proc initRestApi*(
  var router = RestRouter.init(validate, corsAllowedOrigin)

  initDataApi(node, repoStore, router)
  initSalesApi(node, router)
  initPurchasingApi(node, router)
  initNodeApi(node, conf, router)
  initDebugApi(node, conf, router)

@ -17,8 +17,6 @@ import pkg/stew/byteutils
import pkg/results
import pkg/stint

import ../sales
import ../purchasing
import ../utils/stintutils

proc encodeString*(cid: type Cid): Result[string, cstring] =
@ -82,11 +80,6 @@ proc decodeString*(
  except ValueError as e:
    err e.msg.cstring

proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](
    _: type T, value: string
): Result[T, cstring] =
  array[32, byte].decodeString(value).map(id => T(id))

proc decodeString*(t: typedesc[string], value: string): Result[string, cstring] =
  ok(value)
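
A quick sketch of round-tripping an id through these coders; the 64-character
hex string is a hypothetical value, not one from the codebase:

  import std/strutils

  let hex = repeat('0', 62) & "2a"          # hypothetical 32-byte id, hex-encoded
  let decoded = PurchaseId.decodeString(hex)  # Result[PurchaseId, cstring]
  assert decoded.isOk
  echo decoded.get.toHex                    # renders the id back as hex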

@ -3,8 +3,6 @@ import pkg/stew/byteutils
import pkg/libp2p
import pkg/codexdht/discv5/node as dn
import pkg/codexdht/discv5/routing_table as rt
import ../sales
import ../purchasing
import ../utils/json
import ../manifest
import ../units
@ -12,37 +10,6 @@ import ../units
export json

type
  StorageRequestParams* = object
    duration* {.serialize.}: uint64
    proofProbability* {.serialize.}: UInt256
    pricePerBytePerSecond* {.serialize.}: UInt256
    collateralPerByte* {.serialize.}: UInt256
    expiry* {.serialize.}: uint64
    nodes* {.serialize.}: ?uint
    tolerance* {.serialize.}: ?uint

  RestPurchase* = object
    requestId* {.serialize.}: RequestId
    request* {.serialize.}: ?StorageRequest
    state* {.serialize.}: string
    error* {.serialize.}: ?string

  RestAvailability* = object
    totalSize* {.serialize.}: uint64
    duration* {.serialize.}: uint64
    minPricePerBytePerSecond* {.serialize.}: UInt256
    totalCollateral* {.serialize.}: UInt256
    freeSize* {.serialize.}: ?uint64
    enabled* {.serialize.}: ?bool
    until* {.serialize.}: ?SecondsSince1970

  RestSalesAgent* = object
    state* {.serialize.}: string
    requestId* {.serialize.}: RequestId
    slotIndex* {.serialize.}: uint64
    request* {.serialize.}: ?StorageRequest
    reservation* {.serialize.}: ?Reservation

  RestContent* = object
    cid* {.serialize.}: Cid
    manifest* {.serialize.}: Manifest
@ -106,13 +73,5 @@ proc init*(_: type RestPeerRecord, peerRecord: PeerRecord): RestPeerRecord =
proc init*(_: type RestNodeId, id: NodeId): RestNodeId =
  RestNodeId(id: id)

proc `%`*(obj: StorageRequest | Slot): JsonNode =
  let jsonObj = newJObject()
  for k, v in obj.fieldPairs:
    jsonObj[k] = %v
  jsonObj["id"] = %(obj.id)

  return jsonObj

proc `%`*(obj: RestNodeId): JsonNode =
  % $obj.id

555
codex/sales.nim
@ -1,555 +0,0 @@
import std/sequtils
import std/sugar
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/datastore
import ./market
import ./clock
import ./stores
import ./contracts/requests
import ./contracts/marketplace
import ./logutils
import ./sales/salescontext
import ./sales/salesagent
import ./sales/statemachine
import ./sales/slotqueue
import ./sales/states/preparing
import ./sales/states/unknown
import ./utils/trackedfutures
import ./utils/exceptions

## Sales holds a list of available storage that it may sell.
##
## When storage is requested on the market that matches availability, the Sales
## object will instruct the Logos Storage node to persist the requested data. Once the
## data has been persisted, it uploads a proof of storage to the market in an
## attempt to win a storage contract.
##
##    Node                      Sales                     Market
##     |                          |                          |
##     | -- add availability -->  |                          |
##     |                          | <-- storage request ---  |
##     | <----- store data -----  |                          |
##     | -----------------------> |                          |
##     |                          |                          |
##     | <----- prove data -----  |                          |
##     | -----------------------> |                          |
##     |                          | ---- storage proof --->  |

export stint
export reservations
export salesagent
export salescontext

logScope:
  topics = "sales marketplace"

type Sales* = ref object
  context*: SalesContext
  agents*: seq[SalesAgent]
  running: bool
  subscriptions: seq[market.Subscription]
  trackedFutures: TrackedFutures

proc `onStore=`*(sales: Sales, onStore: OnStore) =
  sales.context.onStore = some onStore

proc `onClear=`*(sales: Sales, onClear: OnClear) =
  sales.context.onClear = some onClear

proc `onSale=`*(sales: Sales, callback: OnSale) =
  sales.context.onSale = some callback

proc `onProve=`*(sales: Sales, callback: OnProve) =
  sales.context.onProve = some callback

proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) =
  sales.context.onExpiryUpdate = some callback

proc onStore*(sales: Sales): ?OnStore =
  sales.context.onStore

proc onClear*(sales: Sales): ?OnClear =
  sales.context.onClear

proc onSale*(sales: Sales): ?OnSale =
  sales.context.onSale

proc onProve*(sales: Sales): ?OnProve =
  sales.context.onProve

proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate =
  sales.context.onExpiryUpdate

proc new*(_: type Sales, market: Market, clock: Clock, repo: RepoStore): Sales =
  Sales.new(market, clock, repo, 0)

proc new*(
    _: type Sales,
    market: Market,
    clock: Clock,
    repo: RepoStore,
    simulateProofFailures: int,
): Sales =
  let reservations = Reservations.new(repo)
  Sales(
    context: SalesContext(
      market: market,
      clock: clock,
      reservations: reservations,
      slotQueue: SlotQueue.new(),
      simulateProofFailures: simulateProofFailures,
    ),
    trackedFutures: TrackedFutures.new(),
    subscriptions: @[],
  )

proc remove(sales: Sales, agent: SalesAgent) {.async: (raises: []).} =
  await agent.stop()

  if sales.running:
    sales.agents.keepItIf(it != agent)

proc cleanUp(
    sales: Sales, agent: SalesAgent, reprocessSlot: bool, returnedCollateral: ?UInt256
) {.async: (raises: []).} =
  let data = agent.data

  logScope:
    topics = "sales cleanUp"
    requestId = data.requestId
    slotIndex = data.slotIndex
    reservationId = data.reservation .? id |? ReservationId.default
    availabilityId = data.reservation .? availabilityId |? AvailabilityId.default

  trace "cleaning up sales agent"

  # if reservation for the SalesAgent was not created, then it means
  # that the cleanUp was called before the sales process really started, so
  # there are not really any bytes to be returned
  if request =? data.request and reservation =? data.reservation:
    if returnErr =? (
      await noCancel sales.context.reservations.returnBytesToAvailability(
        reservation.availabilityId, reservation.id, request.ask.slotSize
      )
    ).errorOption:
      error "failure returning bytes",
        error = returnErr.msg, bytes = request.ask.slotSize

  # delete reservation and return reservation bytes back to the availability
  if reservation =? data.reservation and
      deleteErr =? (
        await noCancel sales.context.reservations.deleteReservation(
          reservation.id, reservation.availabilityId, returnedCollateral
        )
      ).errorOption:
    error "failure deleting reservation", error = deleteErr.msg

  # Re-add items back into the queue to prevent small availabilities from
  # draining the queue. Seen items will be ordered last.
  if reprocessSlot and request =? data.request and var item =? agent.data.slotQueueItem:
    let queue = sales.context.slotQueue
    item.seen = true
    trace "pushing ignored item to queue, marked as seen"
    if err =? queue.push(item).errorOption:
      error "failed to re-add slot to queue", errorType = $(type err), error = err.msg

  let fut = sales.remove(agent)
  sales.trackedFutures.track(fut)

proc filled(sales: Sales, request: StorageRequest, slotIndex: uint64) =
  if onSale =? sales.context.onSale:
    onSale(request, slotIndex)

proc processSlot(
    sales: Sales, item: SlotQueueItem
) {.async: (raises: [CancelledError]).} =
  debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex

  let agent = newSalesAgent(
    sales.context, item.requestId, item.slotIndex, none StorageRequest, some item
  )

  let completed = newAsyncEvent()

  agent.onCleanUp = proc(
      reprocessSlot = false, returnedCollateral = UInt256.none
  ) {.async: (raises: []).} =
    trace "slot cleanup"
    await sales.cleanUp(agent, reprocessSlot, returnedCollateral)
    completed.fire()

  agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) =
    trace "slot filled"
    sales.filled(request, slotIndex)
    completed.fire()

  agent.start(SalePreparing())
  sales.agents.add agent

  trace "waiting for slot processing to complete"
  await completed.wait()
  trace "slot processing completed"

proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.} =
  let reservations = sales.context.reservations
  without reservs =? await reservations.all(Reservation):
    return

  let unused = reservs.filter(
    r => (
      let slotId = slotId(r.requestId, r.slotIndex)
      not activeSlots.any(slot => slot.id == slotId)
    )
  )

  if unused.len == 0:
    return

  info "Found unused reservations for deletion", unused = unused.len

  for reservation in unused:
    logScope:
      reservationId = reservation.id
      availabilityId = reservation.availabilityId

    if err =? (
      await reservations.deleteReservation(reservation.id, reservation.availabilityId)
    ).errorOption:
      error "Failed to delete unused reservation", error = err.msg
    else:
      trace "Deleted unused reservation"

proc mySlots*(sales: Sales): Future[seq[Slot]] {.async.} =
  let market = sales.context.market
  let slotIds = await market.mySlots()
  var slots: seq[Slot] = @[]

  info "Loading active slots", slotsCount = len(slotIds)
  for slotId in slotIds:
    if slot =? (await market.getActiveSlot(slotId)):
      slots.add slot

  return slots

proc activeSale*(sales: Sales, slotId: SlotId): Future[?SalesAgent] {.async.} =
  for agent in sales.agents:
    if slotId(agent.data.requestId, agent.data.slotIndex) == slotId:
      return some agent

  return none SalesAgent

proc load*(sales: Sales) {.async.} =
  let activeSlots = await sales.mySlots()

  await sales.deleteInactiveReservations(activeSlots)

  for slot in activeSlots:
    let agent =
      newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)

    agent.onCleanUp = proc(
        reprocessSlot = false, returnedCollateral = UInt256.none
    ) {.async: (raises: []).} =
      await sales.cleanUp(agent, reprocessSlot, returnedCollateral)

    # There is no need to assign agent.onFilled as slots loaded from `mySlots`
    # are inherently already filled and so assigning agent.onFilled would be
    # superfluous.

    agent.start(SaleUnknown())
    sales.agents.add agent

proc OnAvailabilitySaved(
    sales: Sales, availability: Availability
) {.async: (raises: []).} =
  ## When availabilities are modified or added, the queue should be unpaused if
  ## it was paused and any slots in the queue should have their `seen` flag
  ## cleared.
  let queue = sales.context.slotQueue

  queue.clearSeenFlags()
  if queue.paused:
    trace "unpausing queue after new availability added"
    queue.unpause()

proc onStorageRequested(
    sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64
) {.raises: [].} =
  logScope:
    topics = "marketplace sales onStorageRequested"
    requestId
    slots = ask.slots
    expiry

  let slotQueue = sales.context.slotQueue

  trace "storage requested, adding slots to queue"

  let market = sales.context.market

  without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free),
    err:
    error "Request failure, unable to calculate collateral", error = err.msg
    return

  without items =? SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err:
    if err of SlotsOutOfRangeError:
      warn "Too many slots, cannot add to queue"
    else:
      warn "Failed to create slot queue items from request", error = err.msg
    return

  for item in items:
    # continue on failure
    if err =? slotQueue.push(item).errorOption:
      if err of SlotQueueItemExistsError:
        error "Failed to push item to queue because it already exists"
      elif err of QueueNotRunningError:
        warn "Failed to push item to queue because queue is not running"
      else:
        warn "Error adding request to SlotQueue", error = err.msg

proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
  logScope:
    topics = "marketplace sales onSlotFreed"
    requestId
    slotIndex

  trace "slot freed, adding to queue"

  proc addSlotToQueue() {.async: (raises: []).} =
    let context = sales.context
    let market = context.market
    let queue = context.slotQueue

    try:
      without request =? (await market.getRequest(requestId)), err:
        error "unknown request in contract", error = err.msgDetail
        return

      # Take the repairing state into consideration to calculate the collateral.
      # This is particularly needed because it will affect the priority in the queue
      # and we want to give the user the ability to tweak the parameters.
      # Adding the repairing state directly in the queue priority calculation
      # would not allow this flexibility.
      without collateral =?
        market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
        error "Failed to add freed slot to queue: unable to calculate collateral",
          error = err.msg
        return

      if slotIndex > uint16.high.uint64:
        error "Cannot cast slot index to uint16, value = ", slotIndex
        return

      without slotQueueItem =?
        SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
        err:
        warn "Too many slots, cannot add to queue", error = err.msgDetail
        return

      if err =? queue.push(slotQueueItem).errorOption:
        if err of SlotQueueItemExistsError:
          error "Failed to push item to queue because it already exists",
            error = err.msgDetail
        elif err of QueueNotRunningError:
          warn "Failed to push item to queue because queue is not running",
            error = err.msgDetail
    except CancelledError as e:
      trace "sales.addSlotToQueue was cancelled"

  # We could get rid of this by adding the storage ask in the SlotFreed event,
  # so we would not need to call getRequest to get the collateralPerSlot.
  let fut = addSlotToQueue()
  sales.trackedFutures.track(fut)

proc subscribeRequested(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

  proc onStorageRequested(
      requestId: RequestId, ask: StorageAsk, expiry: uint64
  ) {.raises: [].} =
    sales.onStorageRequested(requestId, ask, expiry)

  try:
    let sub = await market.subscribeRequests(onStorageRequested)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to storage request events", msg = e.msg

proc subscribeCancellation(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onCancelled(requestId: RequestId) =
    trace "request cancelled (via contract RequestCancelled event), removing all request slots from queue"
    queue.delete(requestId)

  try:
    let sub = await market.subscribeRequestCancelled(onCancelled)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to cancellation events", msg = e.msg

proc subscribeFulfilled*(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onFulfilled(requestId: RequestId) =
    trace "request fulfilled, removing all request slots from queue"
    queue.delete(requestId)

    for agent in sales.agents:
      agent.onFulfilled(requestId)

  try:
    let sub = await market.subscribeFulfillment(onFulfilled)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to storage fulfilled events", msg = e.msg

proc subscribeFailure(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onFailed(requestId: RequestId) =
    trace "request failed, removing all request slots from queue"
    queue.delete(requestId)

    for agent in sales.agents:
      agent.onFailed(requestId)

  try:
    let sub = await market.subscribeRequestFailed(onFailed)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to storage failure events", msg = e.msg

proc subscribeSlotFilled(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onSlotFilled(requestId: RequestId, slotIndex: uint64) =
    if slotIndex > uint16.high.uint64:
      error "Cannot cast slot index to uint16, value = ", slotIndex
      return

    trace "slot filled, removing from slot queue", requestId, slotIndex
    queue.delete(requestId, slotIndex.uint16)

    for agent in sales.agents:
      agent.onSlotFilled(requestId, slotIndex)

  try:
    let sub = await market.subscribeSlotFilled(onSlotFilled)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to slot filled events", msg = e.msg

proc subscribeSlotFreed(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

  proc onSlotFreed(requestId: RequestId, slotIndex: uint64) =
    sales.onSlotFreed(requestId, slotIndex)

  try:
    let sub = await market.subscribeSlotFreed(onSlotFreed)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to slot freed events", msg = e.msg

proc subscribeSlotReservationsFull(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onSlotReservationsFull(requestId: RequestId, slotIndex: uint64) =
    if slotIndex > uint16.high.uint64:
      error "Cannot cast slot index to uint16, value = ", slotIndex
      return

    trace "reservations for slot full, removing from slot queue", requestId, slotIndex
    queue.delete(requestId, slotIndex.uint16)

  try:
    let sub = await market.subscribeSlotReservationsFull(onSlotReservationsFull)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to slot reservations full events", msg = e.msg

proc startSlotQueue(sales: Sales) =
  let slotQueue = sales.context.slotQueue
  let reservations = sales.context.reservations

  slotQueue.onProcessSlot = proc(item: SlotQueueItem) {.async: (raises: []).} =
    trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
    try:
      await sales.processSlot(item)
    except CancelledError:
      discard

  slotQueue.start()

  proc OnAvailabilitySaved(availability: Availability) {.async: (raises: []).} =
    if availability.enabled:
      await sales.OnAvailabilitySaved(availability)

  reservations.OnAvailabilitySaved = OnAvailabilitySaved

proc subscribe(sales: Sales) {.async.} =
  await sales.subscribeRequested()
  await sales.subscribeFulfilled()
  await sales.subscribeFailure()
  await sales.subscribeSlotFilled()
  await sales.subscribeSlotFreed()
  await sales.subscribeCancellation()
  await sales.subscribeSlotReservationsFull()

proc unsubscribe(sales: Sales) {.async.} =
  for sub in sales.subscriptions:
    try:
      await sub.unsubscribe()
    except CancelledError as error:
      raise error
    except CatchableError as e:
      error "Unable to unsubscribe from subscription", error = e.msg

proc start*(sales: Sales) {.async.} =
  await sales.load()
  sales.startSlotQueue()
  await sales.subscribe()
  sales.running = true

proc stop*(sales: Sales) {.async.} =
  trace "stopping sales"
  sales.running = false
  await sales.context.slotQueue.stop()
  await sales.unsubscribe()
  await sales.trackedFutures.cancelTracked()

  for agent in sales.agents:
    await agent.stop()

  sales.agents = @[]
|
||||
@ -1,759 +0,0 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
##
##                                                       +--------------------------------------+
##                                                       |             RESERVATION              |
## +---------------------------------------------------+ |--------------------------------------|
## |                   AVAILABILITY                    | | ReservationId  | id             | PK |
## |---------------------------------------------------| |--------------------------------------|
## | AvailabilityId | id                          | PK |<-||-------o<-| AvailabilityId | availabilityId | FK |
## |---------------------------------------------------| |--------------------------------------|
## | UInt256        | totalSize                   |    | | UInt256        | size           |    |
## |---------------------------------------------------| |--------------------------------------|
## | UInt256        | freeSize                    |    | | UInt256        | slotIndex      |    |
## |---------------------------------------------------| +--------------------------------------+
## | UInt256        | duration                    |    |
## |---------------------------------------------------|
## | UInt256        | minPricePerBytePerSecond    |    |
## |---------------------------------------------------|
## | UInt256        | totalCollateral             |    |
## |---------------------------------------------------|
## | UInt256        | totalRemainingCollateral    |    |
## +---------------------------------------------------+

{.push raises: [], gcsafe.}

import std/sequtils
import std/sugar
import std/typetraits
import std/times
import pkg/chronos
import pkg/datastore
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/stew/byteutils
import ../codextypes
import ../logutils
import ../clock
import ../stores
import ../market
import ../contracts/requests
import ../utils/json
import ../units

export requests
export logutils

from nimcrypto import randomBytes

logScope:
  topics = "marketplace sales reservations"

type
  AvailabilityId* = distinct array[32, byte]
  ReservationId* = distinct array[32, byte]
  SomeStorableObject = Availability | Reservation
  SomeStorableId = AvailabilityId | ReservationId
  Availability* = ref object
    id* {.serialize.}: AvailabilityId
    totalSize* {.serialize.}: uint64
    freeSize* {.serialize.}: uint64
    duration* {.serialize.}: uint64
    minPricePerBytePerSecond* {.serialize.}: UInt256
    totalCollateral {.serialize.}: UInt256
    totalRemainingCollateral* {.serialize.}: UInt256
    # If set to false, the availability will not accept new slots.
    # If disabled, it will not impact any existing slots that are already being hosted.
    enabled* {.serialize.}: bool
    # Specifies the latest timestamp after which the availability will no longer host any slots.
    # If set to 0, there are no restrictions.
    until* {.serialize.}: SecondsSince1970

  Reservation* = ref object
    id* {.serialize.}: ReservationId
    availabilityId* {.serialize.}: AvailabilityId
    size* {.serialize.}: uint64
    requestId* {.serialize.}: RequestId
    slotIndex* {.serialize.}: uint64
    validUntil* {.serialize.}: SecondsSince1970

  Reservations* = ref object of RootObj
    availabilityLock: AsyncLock
      # Lock protecting assertions about an availability's sizes when searching for a matching availability
    repo: RepoStore
    OnAvailabilitySaved: ?OnAvailabilitySaved

  GetNext* = proc(): Future[?seq[byte]] {.async: (raises: [CancelledError]), closure.}
  IterDispose* = proc(): Future[?!void] {.async: (raises: [CancelledError]), closure.}
  OnAvailabilitySaved* =
    proc(availability: Availability): Future[void] {.async: (raises: []).}
  StorableIter* = ref object
    finished*: bool
    next*: GetNext
    dispose*: IterDispose

  ReservationsError* = object of CodexError
  ReserveFailedError* = object of ReservationsError
  ReleaseFailedError* = object of ReservationsError
  DeleteFailedError* = object of ReservationsError
  GetFailedError* = object of ReservationsError
  NotExistsError* = object of ReservationsError
  SerializationError* = object of ReservationsError
  UpdateFailedError* = object of ReservationsError
  BytesOutOfBoundsError* = object of ReservationsError
  UntilOutOfBoundsError* = object of ReservationsError

const
  SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module
  ReservationsKey = (SalesKey / "reservations").tryGet

proc hash*(x: AvailabilityId): Hash {.borrow.}
proc all*(
    self: Reservations, T: type SomeStorableObject
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}

proc all*(
    self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}

template withLock(lock, body) =
  try:
    await lock.acquire()
    body
  finally:
    if lock.locked:
      lock.release()

proc new*(T: type Reservations, repo: RepoStore): Reservations =
  T(availabilityLock: newAsyncLock(), repo: repo)

proc init*(
    _: type Availability,
    totalSize: uint64,
    freeSize: uint64,
    duration: uint64,
    minPricePerBytePerSecond: UInt256,
    totalCollateral: UInt256,
    enabled: bool,
    until: SecondsSince1970,
): Availability =
  var id: array[32, byte]
  doAssert randomBytes(id) == 32
  Availability(
    id: AvailabilityId(id),
    totalSize: totalSize,
    freeSize: freeSize,
    duration: duration,
    minPricePerBytePerSecond: minPricePerBytePerSecond,
    totalCollateral: totalCollateral,
    totalRemainingCollateral: totalCollateral,
    enabled: enabled,
    until: until,
  )

func totalCollateral*(self: Availability): UInt256 {.inline.} =
  return self.totalCollateral

proc `totalCollateral=`*(self: Availability, value: UInt256) {.inline.} =
  self.totalCollateral = value
  self.totalRemainingCollateral = value

proc init*(
    _: type Reservation,
    availabilityId: AvailabilityId,
    size: uint64,
    requestId: RequestId,
    slotIndex: uint64,
    validUntil: SecondsSince1970,
): Reservation =
  var id: array[32, byte]
  doAssert randomBytes(id) == 32
  Reservation(
    id: ReservationId(id),
    availabilityId: availabilityId,
    size: size,
    requestId: requestId,
    slotIndex: slotIndex,
    validUntil: validUntil,
  )

func toArray(id: SomeStorableId): array[32, byte] =
  array[32, byte](id)

proc `==`*(x, y: AvailabilityId): bool {.borrow.}
proc `==`*(x, y: ReservationId): bool {.borrow.}
proc `==`*(x, y: Reservation): bool =
  x.id == y.id

proc `==`*(x, y: Availability): bool =
  x.id == y.id

proc `$`*(id: SomeStorableId): string =
  id.toArray.toHex

proc toErr[E1: ref CatchableError, E2: ReservationsError](
    e1: E1, _: type E2, msg: string = e1.msg
): ref E2 =
  return newException(E2, msg, e1)

logutils.formatIt(LogFormat.textLines, SomeStorableId):
  it.short0xHexLog
logutils.formatIt(LogFormat.json, SomeStorableId):
  it.to0xHexLog

proc `OnAvailabilitySaved=`*(
    self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved
) =
  self.OnAvailabilitySaved = some OnAvailabilitySaved

func key*(id: AvailabilityId): ?!Key =
  ## sales / reservations / <availabilityId>
  (ReservationsKey / $id)

func key*(reservationId: ReservationId, availabilityId: AvailabilityId): ?!Key =
  ## sales / reservations / <availabilityId> / <reservationId>
  (availabilityId.key / $reservationId)

func key*(availability: Availability): ?!Key =
  return availability.id.key

func maxCollateralPerByte*(availability: Availability): UInt256 =
  # If freeSize happens to be zero, by convention maxCollateralPerByte
  # equals totalRemainingCollateral.
  if availability.freeSize == 0.uint64:
    return availability.totalRemainingCollateral

  return availability.totalRemainingCollateral div availability.freeSize.stuint(256)
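
# Worked example of the convention above (an illustrative sketch, not part of
# the original module): 1000 remaining collateral spread over 250 free bytes
# caps collateral at 1000 div 250 = 4 per byte; once freeSize reaches 0, the
# full remainder applies.
when isMainModule:
  let example = Availability(totalRemainingCollateral: 1000.u256, freeSize: 250)
  doAssert example.maxCollateralPerByte == 4.u256
  example.freeSize = 0
  doAssert example.maxCollateralPerByte == 1000.u256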

func key*(reservation: Reservation): ?!Key =
  return key(reservation.id, reservation.availabilityId)

func available*(self: Reservations): uint =
  self.repo.available.uint

func hasAvailable*(self: Reservations, bytes: uint): bool =
  self.repo.available(bytes.NBytes)

proc exists*(
    self: Reservations, key: Key
): Future[bool] {.async: (raises: [CancelledError]).} =
  let exists = await self.repo.metaDs.ds.contains(key)
  return exists

iterator items(self: StorableIter): auto =
  while not self.finished:
    yield self.next()

proc getImpl(
    self: Reservations, key: Key
): Future[?!seq[byte]] {.async: (raises: [CancelledError]).} =
  if not await self.exists(key):
    let err =
      newException(NotExistsError, "object with key " & $key & " does not exist")
    return failure(err)

  without serialized =? await self.repo.metaDs.ds.get(key), error:
    return failure(error.toErr(GetFailedError))

  return success serialized

proc get*(
    self: Reservations, key: Key, T: type SomeStorableObject
): Future[?!T] {.async: (raises: [CancelledError]).} =
  without serialized =? await self.getImpl(key), error:
    return failure(error)

  without obj =? T.fromJson(serialized), error:
    return failure(error.toErr(SerializationError))

  return success obj

proc updateImpl(
    self: Reservations, obj: SomeStorableObject
): Future[?!void] {.async: (raises: [CancelledError]).} =
  trace "updating " & $(obj.type), id = obj.id

  without key =? obj.key, error:
    return failure(error)

  if err =? (await self.repo.metaDs.ds.put(key, @(obj.toJson.toBytes))).errorOption:
    return failure(err.toErr(UpdateFailedError))

  return success()

proc updateAvailability(
    self: Reservations, obj: Availability
): Future[?!void] {.async: (raises: [CancelledError]).} =
  logScope:
    availabilityId = obj.id

  if obj.until < 0:
    let error =
      newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
    return failure(error)

  without key =? obj.key, error:
    return failure(error)

  without oldAvailability =? await self.get(key, Availability), err:
    if err of NotExistsError:
      trace "Creating new Availability"
      let res = await self.updateImpl(obj)
      # inform subscribers that Availability has been added
      if OnAvailabilitySaved =? self.OnAvailabilitySaved:
        await OnAvailabilitySaved(obj)
      return res
    else:
      return failure(err)

  if obj.until > 0:
    without allReservations =? await self.all(Reservation, obj.id), error:
      error.msg = "Error updating reservation: " & error.msg
      return failure(error)

    let requestEnds = allReservations.mapIt(it.validUntil)

    if requestEnds.len > 0 and requestEnds.max > obj.until:
      let error = newException(
        UntilOutOfBoundsError,
        "Until parameter must be greater than or equal to the longest currently hosted slot",
      )
      return failure(error)

  # Sizing of the availability changed, we need to adjust the repo reservation accordingly
  if oldAvailability.totalSize != obj.totalSize:
    trace "totalSize changed, updating repo reservation"
    if oldAvailability.totalSize < obj.totalSize: # storage added
      if reserveErr =? (
        await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).NBytes)
      ).errorOption:
        return failure(reserveErr.toErr(ReserveFailedError))
    elif oldAvailability.totalSize > obj.totalSize: # storage removed
      if reserveErr =? (
        await self.repo.release((oldAvailability.totalSize - obj.totalSize).NBytes)
      ).errorOption:
        return failure(reserveErr.toErr(ReleaseFailedError))

  let res = await self.updateImpl(obj)

  if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
      oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
      oldAvailability.totalRemainingCollateral < obj.totalRemainingCollateral:
    # availability updated
    # inform subscribers that Availability has been modified (with increased
    # size)
    if OnAvailabilitySaved =? self.OnAvailabilitySaved:
      await OnAvailabilitySaved(obj)
  return res

proc update*(
    self: Reservations, obj: Reservation
): Future[?!void] {.async: (raises: [CancelledError]).} =
  return await self.updateImpl(obj)

proc update*(
    self: Reservations, obj: Availability
): Future[?!void] {.async: (raises: [CancelledError]).} =
  try:
    withLock(self.availabilityLock):
      return await self.updateAvailability(obj)
  except AsyncLockError as e:
    error "Lock error when trying to update the availability", err = e.msg
    return failure(e)

proc delete(
    self: Reservations, key: Key
): Future[?!void] {.async: (raises: [CancelledError]).} =
  trace "deleting object", key

  if not await self.exists(key):
    return success()

  if err =? (await self.repo.metaDs.ds.delete(key)).errorOption:
    return failure(err.toErr(DeleteFailedError))

  return success()

proc deleteReservation*(
    self: Reservations,
    reservationId: ReservationId,
    availabilityId: AvailabilityId,
    returnedCollateral: ?UInt256 = UInt256.none,
): Future[?!void] {.async: (raises: [CancelledError]).} =
  logScope:
    reservationId
    availabilityId

  trace "deleting reservation"

  without key =? key(reservationId, availabilityId), error:
    return failure(error)

  try:
    withLock(self.availabilityLock):
      without reservation =? (await self.get(key, Reservation)), error:
        if error of NotExistsError:
          return success()
        else:
          return failure(error)

      without availabilityKey =? availabilityId.key, error:
        return failure(error)

      without var availability =? await self.get(availabilityKey, Availability), error:
        return failure(error)

      if reservation.size > 0.uint64:
        trace "returning remaining reservation bytes to availability",
          size = reservation.size
        availability.freeSize += reservation.size

      if collateral =? returnedCollateral:
        availability.totalRemainingCollateral += collateral

      if updateErr =? (await self.updateAvailability(availability)).errorOption:
        return failure(updateErr)

      if err =? (await self.repo.metaDs.ds.delete(key)).errorOption:
        return failure(err.toErr(DeleteFailedError))

      return success()
  except AsyncLockError as e:
    error "Lock error when trying to delete the availability", err = e.msg
    return failure(e)

# TODO: add support for deleting availabilities
# To delete, must not have any active sales.

proc createAvailability*(
    self: Reservations,
    size: uint64,
    duration: uint64,
    minPricePerBytePerSecond: UInt256,
    totalCollateral: UInt256,
    enabled: bool,
    until: SecondsSince1970,
): Future[?!Availability] {.async: (raises: [CancelledError]).} =
  trace "creating availability",
    size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until

  if until < 0:
    let error =
      newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
    return failure(error)

  let availability = Availability.init(
    size, size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
  )
  let bytes = availability.freeSize

  if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
    return failure(reserveErr.toErr(ReserveFailedError))

  if updateErr =? (await self.update(availability)).errorOption:
    # rollback the reserve
    trace "rolling back reserve"
    if rollbackErr =? (await self.repo.release(bytes.NBytes)).errorOption:
      rollbackErr.parent = updateErr
      return failure(rollbackErr)

    return failure(updateErr)

  return success(availability)
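
# Usage sketch for the reserve-then-update flow above (illustrative only;
# `repo` stands in for an already-initialized RepoStore):
#
#   let reservations = Reservations.new(repo)
#   let availability = (
#     await reservations.createAvailability(
#       size = 1024'u64 * 1024'u64, # 1 MiB taken from the repo quota
#       duration = 3600'u64,
#       minPricePerBytePerSecond = 1.u256,
#       totalCollateral = 100.u256,
#       enabled = true,
#       until = 0.SecondsSince1970, # 0 = no time restriction
#     )
#   ).tryGet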

method createReservation*(
    self: Reservations,
    availabilityId: AvailabilityId,
    slotSize: uint64,
    requestId: RequestId,
    slotIndex: uint64,
    collateralPerByte: UInt256,
    validUntil: SecondsSince1970,
): Future[?!Reservation] {.async: (raises: [CancelledError]), base.} =
  try:
    withLock(self.availabilityLock):
      without availabilityKey =? availabilityId.key, error:
        return failure(error)

      without availability =? await self.get(availabilityKey, Availability), error:
        return failure(error)

      # Check that the found availability has enough free space after the lock
      # has been acquired, to prevent asynchronous Availability modifications
      if availability.freeSize < slotSize:
        let error = newException(
          BytesOutOfBoundsError,
          "trying to reserve an amount of bytes that is greater than the free size of the Availability",
        )
        return failure(error)

      trace "Creating reservation",
        availabilityId, slotSize, requestId, slotIndex, validUntil = validUntil

      let reservation =
        Reservation.init(availabilityId, slotSize, requestId, slotIndex, validUntil)

      if createResErr =? (await self.update(reservation)).errorOption:
        return failure(createResErr)

      # reduce availability freeSize by the slot size, which is now accounted for in
      # the newly created Reservation
      availability.freeSize -= slotSize

      # adjust the remaining totalRemainingCollateral
      availability.totalRemainingCollateral -= slotSize.u256 * collateralPerByte

      # update availability with reduced size
      trace "Updating availability with reduced size", freeSize = availability.freeSize
      if updateErr =? (await self.updateAvailability(availability)).errorOption:
        trace "Updating availability failed, rolling back reservation creation"

        without key =? reservation.key, keyError:
          keyError.parent = updateErr
          return failure(keyError)

        # rollback the reservation creation
        if rollbackErr =? (await self.delete(key)).errorOption:
          rollbackErr.parent = updateErr
          return failure(rollbackErr)

        return failure(updateErr)

      trace "Reservation successfully created"
      return success(reservation)
  except AsyncLockError as e:
    error "Lock error when trying to create the reservation", err = e.msg
    return failure(e)

proc returnBytesToAvailability*(
    self: Reservations,
    availabilityId: AvailabilityId,
    reservationId: ReservationId,
    bytes: uint64,
): Future[?!void] {.async: (raises: [CancelledError]).} =
  logScope:
    reservationId
    availabilityId
  try:
    withLock(self.availabilityLock):
      without key =? key(reservationId, availabilityId), error:
        return failure(error)

      without var reservation =? (await self.get(key, Reservation)), error:
        return failure(error)

      # We are ignoring bytes that are still present in the Reservation because
      # they will be returned to Availability through `deleteReservation`.
      let bytesToBeReturned = bytes - reservation.size

      if bytesToBeReturned == 0:
        trace "No bytes are returned",
          requestSizeBytes = bytes, returningBytes = bytesToBeReturned
        return success()

      trace "Returning bytes",
        requestSizeBytes = bytes, returningBytes = bytesToBeReturned

      # First let's see if we can re-reserve the bytes; if the Repo's quota
      # is depleted then we will fail fast as there is nothing to be done atm.
      if reserveErr =? (await self.repo.reserve(bytesToBeReturned.NBytes)).errorOption:
        return failure(reserveErr.toErr(ReserveFailedError))

      without availabilityKey =? availabilityId.key, error:
        return failure(error)

      without var availability =? await self.get(availabilityKey, Availability), error:
        return failure(error)

      availability.freeSize += bytesToBeReturned

      # Update availability with returned size
      if updateErr =? (await self.updateAvailability(availability)).errorOption:
        trace "Rolling back returning bytes"
        if rollbackErr =? (await self.repo.release(bytesToBeReturned.NBytes)).errorOption:
          rollbackErr.parent = updateErr
          return failure(rollbackErr)

        return failure(updateErr)

      return success()
  except AsyncLockError as e:
    error "Lock error when returning bytes to the availability", err = e.msg
    return failure(e)

proc release*(
    self: Reservations,
    reservationId: ReservationId,
    availabilityId: AvailabilityId,
    bytes: uint,
): Future[?!void] {.async: (raises: [CancelledError]).} =
  logScope:
    topics = "release"
    bytes
    reservationId
    availabilityId

  trace "releasing bytes and updating reservation"

  without key =? key(reservationId, availabilityId), error:
    return failure(error)

  without var reservation =? (await self.get(key, Reservation)), error:
    return failure(error)

  if reservation.size < bytes:
    let error = newException(
      BytesOutOfBoundsError,
      "trying to release an amount of bytes that is greater than the total size of the Reservation",
    )
    return failure(error)

  if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption:
    return failure(releaseErr.toErr(ReleaseFailedError))

  reservation.size -= bytes

  # persist partially used Reservation with updated size
  if err =? (await self.update(reservation)).errorOption:
    # rollback release if an update error encountered
    trace "rolling back release"
    if rollbackErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
      rollbackErr.parent = err
      return failure(rollbackErr)
    return failure(err)

  return success()

proc storables(
    self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!StorableIter] {.async: (raises: [CancelledError]).} =
  var iter = StorableIter()
  let query = Query.init(queryKey)
  when T is Availability:
    # should indicate key length of 4, but let the .key logic determine it
    without defaultKey =? AvailabilityId.default.key, error:
      return failure(error)
  elif T is Reservation:
    # should indicate key length of 5, but let the .key logic determine it
    without defaultKey =? key(ReservationId.default, AvailabilityId.default), error:
      return failure(error)
  else:
    raiseAssert "unknown type"

  without results =? await self.repo.metaDs.ds.query(query), error:
    return failure(error)

  # /sales/reservations
  proc next(): Future[?seq[byte]] {.async: (raises: [CancelledError]).} =
    await idleAsync()
    iter.finished = results.finished
    if not results.finished and res =? (await results.next()) and res.data.len > 0 and
        key =? res.key and key.namespaces.len == defaultKey.namespaces.len:
      return some res.data

    return none seq[byte]

  proc dispose(): Future[?!void] {.async: (raises: [CancelledError]).} =
    return await results.dispose()

  iter.next = next
  iter.dispose = dispose
  return success iter

proc allImpl(
    self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
  var ret: seq[T] = @[]

  without storables =? (await self.storables(T, queryKey)), error:
    return failure(error)

  for storable in storables.items:
    try:
      without bytes =? (await storable):
        continue

      without obj =? T.fromJson(bytes), error:
        error "json deserialization error",
          json = string.fromBytes(bytes), error = error.msg
        continue

      ret.add obj
    except CancelledError as err:
      raise err
    except CatchableError as err:
      error "Error when retrieving storable", error = err.msg
      continue

  return success(ret)

proc all*(
    self: Reservations, T: type SomeStorableObject
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
  return await self.allImpl(T)

proc all*(
    self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
  without key =? key(availabilityId):
    return failure("no key")

  return await self.allImpl(T, key)

proc findAvailability*(
    self: Reservations,
    size, duration: uint64,
    pricePerBytePerSecond, collateralPerByte: UInt256,
    validUntil: SecondsSince1970,
): Future[?Availability] {.async: (raises: [CancelledError]).} =
  without storables =? (await self.storables(Availability)), e:
    error "failed to get all storables", error = e.msg
    return none Availability

  for item in storables.items:
    if bytes =? (await item) and availability =? Availability.fromJson(bytes):
      if availability.enabled and size <= availability.freeSize and
          duration <= availability.duration and
          collateralPerByte <= availability.maxCollateralPerByte and
          pricePerBytePerSecond >= availability.minPricePerBytePerSecond and
          (availability.until == 0 or availability.until >= validUntil):
        trace "availability matched",
          id = availability.id,
          enabled = availability.enabled,
          size,
          availFreeSize = availability.freeSize,
          duration,
          availDuration = availability.duration,
          pricePerBytePerSecond,
          availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
          collateralPerByte,
          availMaxCollateralPerByte = availability.maxCollateralPerByte,
          until = availability.until

        # TODO: As soon as we're on ARC-ORC, we can use destructors
        # to automatically dispose our iterators when they fall out of scope.
        # For now:
        if err =? (await storables.dispose()).errorOption:
          error "failed to dispose storables iter", error = err.msg
          return none Availability
        return some availability

      trace "availability did not match",
        id = availability.id,
        enabled = availability.enabled,
        size,
        availFreeSize = availability.freeSize,
        duration,
        availDuration = availability.duration,
        pricePerBytePerSecond,
        availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
        collateralPerByte,
        availMaxCollateralPerByte = availability.maxCollateralPerByte,
        until = availability.until
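
# Matching sketch (illustrative): a slot needing 512 bytes for 600 seconds,
# paying 2 per byte per second and requiring 1 collateral per byte, matches an
# enabled availability when freeSize >= 512, duration >= 600,
# minPricePerBytePerSecond <= 2, maxCollateralPerByte >= 1, and
# (until == 0 or until >= validUntil).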
@ -1,152 +0,0 @@
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import ../contracts/requests
import ../errors
import ../logutils
import ../utils/exceptions
import ./statemachine
import ./salescontext
import ./salesdata
import ./reservations
import ./slotqueue

export reservations

logScope:
  topics = "marketplace sales"

type
  SalesAgent* = ref object of Machine
    context*: SalesContext
    data*: SalesData
    subscribed: bool
    # Slot-level callbacks.
    onCleanUp*: OnCleanUp
    onFilled*: ?OnFilled

  OnCleanUp* = proc(reprocessSlot = false, returnedCollateral = UInt256.none) {.
    async: (raises: [])
  .}
  OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}

  SalesAgentError = object of CodexError
  AllSlotsFilledError* = object of SalesAgentError

func `==`*(a, b: SalesAgent): bool =
  a.data.requestId == b.data.requestId and a.data.slotIndex == b.data.slotIndex

proc newSalesAgent*(
    context: SalesContext,
    requestId: RequestId,
    slotIndex: uint64,
    request: ?StorageRequest,
    slotQueueItem = SlotQueueItem.none,
): SalesAgent =
  var agent = SalesAgent.new()
  agent.context = context
  agent.data = SalesData(
    requestId: requestId,
    slotIndex: slotIndex,
    request: request,
    slotQueueItem: slotQueueItem,
  )
  return agent

proc retrieveRequest*(agent: SalesAgent) {.async.} =
  let data = agent.data
  let market = agent.context.market
  if data.request.isNone:
    data.request = await market.getRequest(data.requestId)

proc retrieveRequestState*(agent: SalesAgent): Future[?RequestState] {.async.} =
  let data = agent.data
  let market = agent.context.market
  return await market.requestState(data.requestId)

func state*(agent: SalesAgent): ?string =
  proc description(state: State): string =
    $state

  agent.query(description)

proc subscribeCancellation(agent: SalesAgent) {.async.} =
  let data = agent.data
  let clock = agent.context.clock

  proc onCancelled() {.async: (raises: []).} =
    without request =? data.request:
      return

    try:
      let market = agent.context.market
      let expiry = await market.requestExpiresAt(data.requestId)

      while true:
        let deadline = max(clock.now, expiry) + 1
        trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline
        await clock.waitUntil(deadline)

        without state =? await agent.retrieveRequestState():
          error "Unknown request", requestId = data.requestId
          return

        case state
        of New:
          discard
        of RequestState.Cancelled:
          agent.schedule(cancelledEvent(request))
          break
        of RequestState.Started, RequestState.Finished, RequestState.Failed:
          break

        debug "The request is not yet cancelled, even though it should be. Waiting for some more time.",
          currentState = state, now = clock.now
    except CancelledError:
      trace "Waiting for expiry to lapse was cancelled", requestId = data.requestId
    except CatchableError as e:
      error "Error while waiting for expiry to lapse", error = e.msgDetail

  data.cancelled = onCancelled()

method onFulfilled*(
    agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, raises: [].} =
  let cancelled = agent.data.cancelled
  if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished:
    cancelled.cancelSoon()

method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, raises: [].} =
  without request =? agent.data.request:
    return
  if agent.data.requestId == requestId:
    agent.schedule(failedEvent(request))

method onSlotFilled*(
    agent: SalesAgent, requestId: RequestId, slotIndex: uint64
) {.base, gcsafe, raises: [].} =
  if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
    agent.schedule(slotFilledEvent(requestId, slotIndex))

proc subscribe*(agent: SalesAgent) {.async.} =
  if agent.subscribed:
    return

  await agent.subscribeCancellation()
  agent.subscribed = true

proc unsubscribe*(agent: SalesAgent) {.async: (raises: []).} =
  if not agent.subscribed:
    return

  let data = agent.data
  if not data.cancelled.isNil and not data.cancelled.finished:
    await data.cancelled.cancelAndWait()
    data.cancelled = nil

  agent.subscribed = false

proc stop*(agent: SalesAgent) {.async: (raises: []).} =
  await Machine(agent).stop()
  await agent.unsubscribe()
@ -1,42 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import pkg/libp2p/cid

import ../market
import ../clock
import ./slotqueue
import ./reservations
import ../blocktype as bt

type
  SalesContext* = ref object
    market*: Market
    clock*: Clock
    # Sales-level callbacks. Closure will be overwritten each time a slot is
    # processed.
    onStore*: ?OnStore
    onClear*: ?OnClear
    onSale*: ?OnSale
    onProve*: ?OnProve
    onExpiryUpdate*: ?OnExpiryUpdate
    reservations*: Reservations
    slotQueue*: SlotQueue
    simulateProofFailures*: int

  BlocksCb* =
    proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
  OnStore* = proc(
    request: StorageRequest,
    expiry: SecondsSince1970,
    slot: uint64,
    blocksCb: BlocksCb,
    isRepairing: bool,
  ): Future[?!void] {.async: (raises: [CancelledError]).}
  OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
    async: (raises: [CancelledError])
  .}
  OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {.
    async: (raises: [CancelledError])
  .}
  OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
  OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
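
# Wiring sketch (illustrative; the real assignments happen in node setup code
# elsewhere in the repository):
#
#   context.onStore = some proc(
#       request: StorageRequest,
#       expiry: SecondsSince1970,
#       slot: uint64,
#       blocksCb: BlocksCb,
#       isRepairing: bool,
#   ): Future[?!void] {.async: (raises: [CancelledError]).} =
#     # fetch the slot's data and report persisted batches via blocksCb
#     return success()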
@ -1,14 +0,0 @@
import pkg/chronos
import ../contracts/requests
import ../market
import ./reservations
import ./slotqueue

type SalesData* = ref object
  requestId*: RequestId
  ask*: StorageAsk
  request*: ?StorageRequest
  slotIndex*: uint64
  cancelled*: Future[void]
  reservation*: ?Reservation
  slotQueueItem*: ?SlotQueueItem
@ -1,408 +0,0 @@
import std/sequtils
import std/tables
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import ../errors
import ../logutils
import ../rng
import ../utils
import ../contracts/requests
import ../utils/asyncheapqueue
import ../utils/trackedfutures

logScope:
  topics = "marketplace slotqueue"

type
  OnProcessSlot* = proc(item: SlotQueueItem): Future[void] {.async: (raises: []).}

  # Non-ref obj copies value when assigned, preventing accidental modification
  # of values which could cause an incorrect order (eg
  # ``slotQueue[1].collateral = 1`` would update ``collateral`` without
  # re-establishing the heap invariant; with a non-ref object, the compiler
  # rejects that statement).
  SlotQueueItem* = object
    requestId: RequestId
    slotIndex: uint16
    slotSize: uint64
    duration: uint64
    pricePerBytePerSecond: UInt256
    collateral: UInt256 # Collateral computed
    expiry: ?uint64
    seen: bool

  # No need to subtract 1 to prevent overflow when adding 1 (to always allow a
  # push), because AsyncHeapQueue's size is of type `int`, which is larger
  # than `uint16`
  SlotQueueSize = range[1'u16 .. uint16.high]

  SlotQueue* = ref object
    maxWorkers: int
    onProcessSlot: ?OnProcessSlot
    queue: AsyncHeapQueue[SlotQueueItem]
    running: bool
    trackedFutures: TrackedFutures
    unpaused: AsyncEvent

  SlotQueueError = object of CodexError
  SlotQueueItemExistsError* = object of SlotQueueError
  SlotQueueItemNotExistsError* = object of SlotQueueError
  SlotsOutOfRangeError* = object of SlotQueueError
  QueueNotRunningError* = object of SlotQueueError

# Number of concurrent workers used for processing SlotQueueItems
const DefaultMaxWorkers = 3

# Cap slot queue size to prevent unbounded growth and make sifting more
# efficient. Max size is not equivalent to the number of slots a host can
# service, which is limited by host availabilities and new requests circulating
# the network. Additionally, each new request/slot in the network will be
# included in the queue if it is higher priority than any of the existing
# items. Older slots should be unfillable over time as other hosts fill the
# slots.
const DefaultMaxSize = 128'u16

proc profitability(item: SlotQueueItem): UInt256 =
  StorageAsk(
    duration: item.duration,
    pricePerBytePerSecond: item.pricePerBytePerSecond,
    slotSize: item.slotSize,
  ).pricePerSlot
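
# Worked example (illustrative, assuming pricePerSlot multiplies duration,
# price per byte per second, and slot size): an item with duration 3600,
# pricePerBytePerSecond 1 and slotSize 1024 has profitability
# 3600 * 1 * 1024 = 3_686_400.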

proc `<`*(a, b: SlotQueueItem): bool =
  # for A to have a higher priority than B (in a min queue), A must be less than
  # B.
  var scoreA: uint8 = 0
  var scoreB: uint8 = 0

  proc addIf(score: var uint8, condition: bool, addition: int) =
    if condition:
      score += 1'u8 shl addition

  scoreA.addIf(a.seen < b.seen, 4)
  scoreB.addIf(a.seen > b.seen, 4)

  scoreA.addIf(a.profitability > b.profitability, 3)
  scoreB.addIf(a.profitability < b.profitability, 3)

  scoreA.addIf(a.collateral < b.collateral, 2)
  scoreB.addIf(a.collateral > b.collateral, 2)

  if expiryA =? a.expiry and expiryB =? b.expiry:
    scoreA.addIf(expiryA > expiryB, 1)
    scoreB.addIf(expiryA < expiryB, 1)

  return scoreA > scoreB
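
# Worked example of the bit-weighted scoring above (illustrative): let A be
# unseen with lower profitability and B seen with higher profitability. A
# scores bit 4 (unseen beats seen), so scoreA = 0b10000 = 16; B scores bit 3
# (higher profitability), so scoreB = 0b01000 = 8. A still wins: `seen`
# dominates profitability, which dominates collateral, which dominates expiry.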

proc `==`*(a, b: SlotQueueItem): bool =
  a.requestId == b.requestId and a.slotIndex == b.slotIndex

proc new*(
    _: type SlotQueue,
    maxWorkers = DefaultMaxWorkers,
    maxSize: SlotQueueSize = DefaultMaxSize,
): SlotQueue =
  if maxWorkers <= 0:
    raise newException(ValueError, "maxWorkers must be positive")
  if maxWorkers.uint16 > maxSize:
    raise newException(ValueError, "maxWorkers must not exceed maxSize")

  SlotQueue(
    maxWorkers: maxWorkers,
    # Add 1 to always allow for an extra item to be pushed onto the queue
    # temporarily. After push (and sort), the bottom-most item will be deleted
    queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1),
    running: false,
    trackedFutures: TrackedFutures.new(),
    unpaused: newAsyncEvent(),
  )
  # avoid instantiating `workers` in constructor to avoid side effects in
  # `newAsyncQueue` procedure
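
# Usage sketch (illustrative; `request` and `collateral` are assumed to be
# built elsewhere):
#
#   let queue = SlotQueue.new(maxWorkers = 2, maxSize = 64'u16)
#   queue.onProcessSlot = proc(item: SlotQueueItem) {.async: (raises: []).} =
#     trace "processing", reqId = item.requestId, slotIdx = item.slotIndex
#   queue.start()
#   for item in SlotQueueItem.init(request, collateral):
#     discard queue.push(item)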

proc init*(
    _: type SlotQueueItem,
    requestId: RequestId,
    slotIndex: uint16,
    ask: StorageAsk,
    expiry: ?uint64,
    collateral: UInt256,
    seen = false,
): SlotQueueItem =
  SlotQueueItem(
    requestId: requestId,
    slotIndex: slotIndex,
    slotSize: ask.slotSize,
    duration: ask.duration,
    pricePerBytePerSecond: ask.pricePerBytePerSecond,
    collateral: collateral,
    expiry: expiry,
    seen: seen,
  )

proc init*(
    _: type SlotQueueItem,
    requestId: RequestId,
    slotIndex: uint16,
    ask: StorageAsk,
    expiry: uint64,
    collateral: UInt256,
    seen = false,
): SlotQueueItem =
  SlotQueueItem.init(requestId, slotIndex, ask, some expiry, collateral, seen)

proc init*(
    _: type SlotQueueItem,
    request: StorageRequest,
    slotIndex: uint16,
    collateral: UInt256,
): SlotQueueItem =
  SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral)

proc init*(
    _: type SlotQueueItem,
    requestId: RequestId,
    ask: StorageAsk,
    expiry: ?uint64,
    collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
  if not ask.slots.inRange:
    raise newException(SlotsOutOfRangeError, "Too many slots")

  var i = 0'u16
  proc initSlotQueueItem(): SlotQueueItem =
    let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral)
    inc i
    return item

  var items = newSeqWith(ask.slots.int, initSlotQueueItem())
  Rng.instance.shuffle(items)
  return items

proc init*(
    _: type SlotQueueItem,
    requestId: RequestId,
    ask: StorageAsk,
    expiry: uint64,
    collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
  SlotQueueItem.init(requestId, ask, some expiry, collateral)

proc init*(
    _: type SlotQueueItem, request: StorageRequest, collateral: UInt256
): seq[SlotQueueItem] =
  return SlotQueueItem.init(request.id, request.ask, uint64.none, collateral)

proc inRange*(val: SomeUnsignedInt): bool =
  val.uint16 in SlotQueueSize.low .. SlotQueueSize.high

proc requestId*(self: SlotQueueItem): RequestId =
  self.requestId

proc slotIndex*(self: SlotQueueItem): uint16 =
  self.slotIndex

proc slotSize*(self: SlotQueueItem): uint64 =
  self.slotSize

proc duration*(self: SlotQueueItem): uint64 =
  self.duration

proc pricePerBytePerSecond*(self: SlotQueueItem): UInt256 =
  self.pricePerBytePerSecond

proc collateralPerByte*(self: SlotQueueItem): UInt256 =
  # return the stored collateral; referring to `self.collateralPerByte` here
  # would recurse into this accessor
  self.collateral

proc seen*(self: SlotQueueItem): bool =
  self.seen

proc `seen=`*(self: var SlotQueueItem, seen: bool) =
  self.seen = seen

proc running*(self: SlotQueue): bool =
  self.running

proc len*(self: SlotQueue): int =
  self.queue.len

proc size*(self: SlotQueue): int =
  self.queue.size - 1

proc paused*(self: SlotQueue): bool =
  not self.unpaused.isSet

proc `$`*(self: SlotQueue): string =
  $self.queue

proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) =
  self.onProcessSlot = some onProcessSlot

proc contains*(self: SlotQueue, item: SlotQueueItem): bool =
  self.queue.contains(item)

proc pause*(self: SlotQueue) =
  # set unpaused flag to false -- coroutines will block on unpaused.wait()
  self.unpaused.clear()

proc unpause*(self: SlotQueue) =
  # set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
  self.unpaused.fire()

proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} =
  logScope:
    requestId = item.requestId
    slotIndex = item.slotIndex
    seen = item.seen

  trace "pushing item to queue"

  if not self.running:
    let err = newException(QueueNotRunningError, "queue not running")
    return failure(err)

  if self.contains(item):
    let err = newException(SlotQueueItemExistsError, "item already exists")
    return failure(err)

  if err =? self.queue.pushNoWait(item).mapFailure.errorOption:
    return failure(err)

  if self.queue.full():
    # delete the last item
    self.queue.del(self.queue.size - 1)

  doAssert self.queue.len <= self.queue.size - 1

  # when slots are pushed to the queue, the queue should be unpaused if it was
  # paused
  if self.paused and not item.seen:
    trace "unpausing queue after new slot pushed"
    self.unpause()

  return success()
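
# Capacity note (an illustrative summary of the branch above): the heap is
# sized maxSize + 1, so a push onto a full queue succeeds first and the
# bottom-most item is deleted afterwards; a full queue can therefore still
# accept an item that outranks the ones it holds.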

proc push*(self: SlotQueue, items: seq[SlotQueueItem]): ?!void =
  for item in items:
    if err =? self.push(item).errorOption:
      return failure(err)

  return success()

proc findByRequest(self: SlotQueue, requestId: RequestId): seq[SlotQueueItem] =
  var items: seq[SlotQueueItem] = @[]
  for item in self.queue.items:
    if item.requestId == requestId:
      items.add item
  return items

proc delete*(self: SlotQueue, item: SlotQueueItem) =
  logScope:
    requestId = item.requestId
    slotIndex = item.slotIndex

  trace "removing item from queue"

  if not self.running:
    trace "cannot delete item from queue, queue not running"
    return

  self.queue.delete(item)

proc delete*(self: SlotQueue, requestId: RequestId, slotIndex: uint16) =
  let item = SlotQueueItem(requestId: requestId, slotIndex: slotIndex)
  self.delete(item)

proc delete*(self: SlotQueue, requestId: RequestId) =
  let items = self.findByRequest(requestId)
  for item in items:
    self.delete(item)

proc `[]`*(self: SlotQueue, i: Natural): SlotQueueItem =
  self.queue[i]

proc clearSeenFlags*(self: SlotQueue) =
  # Enumerate all items in the queue, overwriting each item with `seen = false`.
  # To avoid issues with new queue items being pushed to the queue while all
  # items are being iterated (eg if a new storage request comes in and pushes
  # new slots to the queue), this routine must remain synchronous.

  if self.queue.empty:
    return

  for item in self.queue.mitems:
    item.seen = false # does not maintain the heap invariant

  # force heap reshuffling to maintain the heap invariant
  doAssert self.queue.update(self.queue[0]), "slot queue failed to reshuffle"

  trace "all 'seen' flags cleared"

proc runWorker(self: SlotQueue) {.async: (raises: []).} =
  trace "slot queue worker loop started"
  while self.running:
    try:
      if self.paused:
        trace "Queue is paused, waiting for new slots or availabilities to be modified/added"

        # block until unpaused is true/fired, ie wait for queue to be unpaused
        await self.unpaused.wait()

      let item = await self.queue.pop() # if queue empty, wait here for new items

      logScope:
        reqId = item.requestId
        slotIdx = item.slotIndex
        seen = item.seen

      if not self.running: # may have changed after waiting for pop
        trace "not running, exiting"
        break

      # If, upon processing a slot, the slot item already has a `seen` flag set,
      # the queue should be paused.
      if item.seen:
        trace "processing already seen item, pausing queue",
          reqId = item.requestId, slotIdx = item.slotIndex
        self.pause()
        # put item back in queue so that if other items are pushed while paused,
        # it will be sorted accordingly. Otherwise, this item would be processed
        # immediately (with priority over other items) once unpaused
        trace "readding seen item back into the queue"
        discard self.push(item) # on error, drop the item and continue
        continue

      trace "processing item"
      without onProcessSlot =? self.onProcessSlot:
        raiseAssert "slot queue onProcessSlot not set"

      await onProcessSlot(item)
    except CancelledError:
      trace "slot queue worker cancelled"
      break
    except CatchableError as e: # raised from self.queue.pop()
      warn "slot queue worker error encountered during processing", error = e.msg
  trace "slot queue worker loop stopped"

proc start*(self: SlotQueue) =
  if self.running:
    return

  trace "starting slot queue"

  self.running = true

  # Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its
  # task, a new worker will be pushed to the queue
  for i in 0 ..< self.maxWorkers:
    let worker = self.runWorker()
    self.trackedFutures.track(worker)

proc stop*(self: SlotQueue) {.async.} =
  if not self.running:
    return

  trace "stopping slot queue"

  self.running = false

  await self.trackedFutures.cancelTracked()
@ -1,41 +0,0 @@
import pkg/questionable
import ../errors
import ../utils/asyncstatemachine
import ../market
import ../clock
import ../contracts/requests

export market
export clock
export asyncstatemachine

type
  SaleState* = ref object of State
  SaleError* = object of CodexError

method onCancelled*(
    state: SaleState, request: StorageRequest
): ?State {.base, raises: [].} =
  discard

method onFailed*(
    state: SaleState, request: StorageRequest
): ?State {.base, raises: [].} =
  discard

method onSlotFilled*(
    state: SaleState, requestId: RequestId, slotIndex: uint64
): ?State {.base, raises: [].} =
  discard

proc cancelledEvent*(request: StorageRequest): Event =
  return proc(state: State): ?State =
    SaleState(state).onCancelled(request)

proc failedEvent*(request: StorageRequest): Event =
  return proc(state: State): ?State =
    SaleState(state).onFailed(request)

proc slotFilledEvent*(requestId: RequestId, slotIndex: uint64): Event =
  return proc(state: State): ?State =
    SaleState(state).onSlotFilled(requestId, slotIndex)
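
# Dispatch sketch (illustrative): these constructors wrap a request into an
# Event closure that the state machine applies to whatever SaleState is
# current, e.g. `agent.schedule(cancelledEvent(request))` ends up invoking the
# current state's onCancelled to choose the next state.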
@ -1,62 +0,0 @@
import ../../logutils
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errored

logScope:
  topics = "marketplace sales cancelled"

type SaleCancelled* = ref object of SaleState

method `$`*(state: SaleCancelled): string =
  "SaleCancelled"

proc slotIsFilledByMe(
    market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async: (raises: [CancelledError, MarketError]).} =
  let host = await market.getHost(requestId, slotIndex)
  let me = await market.getSigner()

  return host == me.some

method run*(
    state: SaleCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let agent = SalesAgent(machine)
  let data = agent.data
  let market = agent.context.market

  without request =? data.request:
    raiseAssert "no sale request"

  try:
    var returnedCollateral = UInt256.none

    if await slotIsFilledByMe(market, data.requestId, data.slotIndex):
      debug "Collecting collateral and partial payout",
        requestId = data.requestId, slotIndex = data.slotIndex

      let slot = Slot(request: request, slotIndex: data.slotIndex)
      let currentCollateral = await market.currentCollateral(slot.id)

      try:
        await market.freeSlot(slot.id)
      except SlotStateMismatchError as e:
        warn "Failed to free slot because slot is already free", error = e.msg

      returnedCollateral = currentCollateral.some

    if onClear =? agent.context.onClear and request =? data.request:
      onClear(request, data.slotIndex)

    if onCleanUp =? agent.onCleanUp:
      await onCleanUp(reprocessSlot = false, returnedCollateral = returnedCollateral)

    warn "Sale cancelled due to timeout",
      requestId = data.requestId, slotIndex = data.slotIndex
  except CancelledError as e:
    trace "SaleCancelled.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleCancelled.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,96 +0,0 @@
import pkg/questionable
import pkg/questionable/results

import ../../blocktype as bt
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./cancelled
import ./failed
import ./filled
import ./initialproving
import ./errored

type SaleDownloading* = ref object of SaleState

logScope:
  topics = "marketplace sales downloading"

method `$`*(state: SaleDownloading): string =
  "SaleDownloading"

method onCancelled*(state: SaleDownloading, request: StorageRequest): ?State =
  return some State(SaleCancelled())

method onFailed*(state: SaleDownloading, request: StorageRequest): ?State =
  return some State(SaleFailed())

method onSlotFilled*(
    state: SaleDownloading, requestId: RequestId, slotIndex: uint64
): ?State =
  return some State(SaleFilled())

method run*(
    state: SaleDownloading, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let agent = SalesAgent(machine)
  let data = agent.data
  let context = agent.context
  let market = context.market
  let reservations = context.reservations

  without onStore =? context.onStore:
    raiseAssert "onStore callback not set"

  without request =? data.request:
    raiseAssert "no sale request"

  without reservation =? data.reservation:
    raiseAssert("no reservation")

  logScope:
    requestId = request.id
    slotIndex = data.slotIndex
    reservationId = reservation.id
    availabilityId = reservation.availabilityId

  proc onBlocks(
      blocks: seq[bt.Block]
  ): Future[?!void] {.async: (raises: [CancelledError]).} =
    # release batches of blocks as they are written to disk and
    # update availability size
    var bytes: uint = 0
    for blk in blocks:
      if not blk.cid.isEmpty:
        bytes += blk.data.len.uint

    trace "Releasing batch of bytes written to disk", bytes
    return await reservations.release(reservation.id, reservation.availabilityId, bytes)
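
  # Note: presumably `onStore` invokes `onBlocks` once per persisted batch, so
  # the reservation shrinks incrementally during the download rather than in a
  # single release at the end.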

  try:
    let requestId = request.id
    let slotId = slotId(requestId, data.slotIndex)
    let requestState = await market.requestState(requestId)
    let isRepairing = (await market.slotState(slotId)) == SlotState.Repair

    trace "Retrieving expiry"
    var expiry: SecondsSince1970
    if state =? requestState and state == RequestState.Started:
      expiry = await market.getRequestEnd(requestId)
    else:
      expiry = await market.requestExpiresAt(requestId)

    trace "Starting download"
    if err =?
        (await onStore(request, expiry, data.slotIndex, onBlocks, isRepairing)).errorOption:
      return some State(SaleErrored(error: err, reprocessSlot: false))

    trace "Download complete"
    return some State(SaleInitialProving())
  except CancelledError as e:
    trace "SaleDownloading.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleDownloading.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,40 +0,0 @@
import pkg/questionable
import pkg/questionable/results

import ../statemachine
import ../salesagent
import ../../logutils
import ../../utils/exceptions

logScope:
  topics = "marketplace sales errored"

type SaleErrored* = ref object of SaleState
  error*: ref CatchableError
  reprocessSlot*: bool

method `$`*(state: SaleErrored): string =
  "SaleErrored"

method run*(
    state: SaleErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let agent = SalesAgent(machine)
  let data = agent.data
  let context = agent.context

  error "Sale error",
    error = state.error.msgDetail,
    requestId = data.requestId,
    slotIndex = data.slotIndex

  try:
    if onClear =? context.onClear and request =? data.request:
      onClear(request, data.slotIndex)

    if onCleanUp =? agent.onCleanUp:
      await onCleanUp(reprocessSlot = state.reprocessSlot)
  except CancelledError as e:
    trace "SaleErrored.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleErrored.run", error = e.msgDetail
@ -1,40 +0,0 @@
import ../../logutils
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errored

logScope:
  topics = "marketplace sales failed"

type
  SaleFailed* = ref object of SaleState
  SaleFailedError* = object of SaleError

method `$`*(state: SaleFailed): string =
  "SaleFailed"

method run*(
    state: SaleFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let data = SalesAgent(machine).data
  let market = SalesAgent(machine).context.market

  without request =? data.request:
    raiseAssert "no sale request"

  try:
    let slot = Slot(request: request, slotIndex: data.slotIndex)
    debug "Removing slot from mySlots",
      requestId = data.requestId, slotIndex = data.slotIndex

    await market.freeSlot(slot.id)

    let error = newException(SaleFailedError, "Sale failed")
    return some State(SaleErrored(error: error))
  except CancelledError as e:
    trace "SaleFailed.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleFailed.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,77 +0,0 @@
import pkg/questionable
import pkg/questionable/results

import ../../conf
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errored
import ./cancelled
import ./failed
import ./proving

when storage_enable_proof_failures:
  import ./provingsimulated

logScope:
  topics = "marketplace sales filled"

type
  SaleFilled* = ref object of SaleState
  HostMismatchError* = object of CatchableError

method onCancelled*(state: SaleFilled, request: StorageRequest): ?State =
  return some State(SaleCancelled())

method onFailed*(state: SaleFilled, request: StorageRequest): ?State =
  return some State(SaleFailed())

method `$`*(state: SaleFilled): string =
  "SaleFilled"

method run*(
    state: SaleFilled, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let agent = SalesAgent(machine)
  let data = agent.data
  let context = agent.context
  let market = context.market

  try:
    let host = await market.getHost(data.requestId, data.slotIndex)
    let me = await market.getSigner()

    if host == me.some:
      info "Slot successfully filled",
        requestId = data.requestId, slotIndex = data.slotIndex

      without request =? data.request:
        raiseAssert "no sale request"

      if onFilled =? agent.onFilled:
        onFilled(request, data.slotIndex)

      without onExpiryUpdate =? context.onExpiryUpdate:
        raiseAssert "onExpiryUpdate callback not set"

      let requestEnd = await market.getRequestEnd(data.requestId)
      if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption:
        return some State(SaleErrored(error: err))

      when storage_enable_proof_failures:
        if context.simulateProofFailures > 0:
          info "Proving with failure rate", rate = context.simulateProofFailures
          return some State(
            SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures)
          )

      return some State(SaleProving())
    else:
      let error = newException(HostMismatchError, "Slot filled by other host")
      return some State(SaleErrored(error: error))
  except CancelledError as e:
    trace "SaleFilled.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleFilled.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,63 +0,0 @@
import pkg/stint
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./filled
import ./cancelled
import ./failed
import ./ignored
import ./errored

logScope:
  topics = "marketplace sales filling"

type SaleFilling* = ref object of SaleState
  proof*: Groth16Proof

method `$`*(state: SaleFilling): string =
  "SaleFilling"

method onCancelled*(state: SaleFilling, request: StorageRequest): ?State =
  return some State(SaleCancelled())

method onFailed*(state: SaleFilling, request: StorageRequest): ?State =
  return some State(SaleFailed())

method run*(
    state: SaleFilling, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let data = SalesAgent(machine).data
  let market = SalesAgent(machine).context.market

  without (request =? data.request):
    raiseAssert "Request not set"

  logScope:
    requestId = data.requestId
    slotIndex = data.slotIndex

  try:
    without collateral =? await market.slotCollateral(data.requestId, data.slotIndex),
        err:
      error "Failure attempting to fill slot: unable to calculate collateral",
        error = err.msg
      return some State(SaleErrored(error: err))

    debug "Filling slot"
    try:
      await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
    except SlotStateMismatchError as e:
      debug "Slot is already filled, ignoring slot"
      return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
    except MarketError as e:
      return some State(SaleErrored(error: e))
    # other CatchableErrors are handled "automatically" by the SaleState

    return some State(SaleFilled())
  except CancelledError as e:
    trace "SaleFilling.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleFilling.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,48 +0,0 @@
import pkg/chronos

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./cancelled
import ./failed
import ./errored

logScope:
  topics = "marketplace sales finished"

type SaleFinished* = ref object of SaleState
  returnedCollateral*: ?UInt256

method `$`*(state: SaleFinished): string =
  "SaleFinished"

method onCancelled*(state: SaleFinished, request: StorageRequest): ?State =
  return some State(SaleCancelled())

method onFailed*(state: SaleFinished, request: StorageRequest): ?State =
  return some State(SaleFailed())

method run*(
    state: SaleFinished, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let agent = SalesAgent(machine)
  let data = agent.data

  without request =? data.request:
    raiseAssert "no sale request"

  info "Slot finished and paid out",
    requestId = data.requestId, slotIndex = data.slotIndex

  try:
    if onClear =? agent.context.onClear:
      onClear(request, data.slotIndex)

    if onCleanUp =? agent.onCleanUp:
      await onCleanUp(returnedCollateral = state.returnedCollateral)
  except CancelledError as e:
    trace "SaleFinished.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleFinished.run in onCleanUp callback", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,51 +0,0 @@
import pkg/chronos

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errored

logScope:
  topics = "marketplace sales ignored"

# Ignored slots could mean there was no availability or that the slot could
# not be reserved.

type SaleIgnored* = ref object of SaleState
  reprocessSlot*: bool # re-add slot to queue with `seen` flag
  returnsCollateral*: bool # return collateral when a reservation was created

method `$`*(state: SaleIgnored): string =
  "SaleIgnored"

method run*(
    state: SaleIgnored, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let agent = SalesAgent(machine)
  let data = agent.data
  let market = agent.context.market

  without request =? data.request:
    raiseAssert "no sale request"

  var returnedCollateral = UInt256.none

  try:
    if state.returnsCollateral:
      # The returnedCollateral is needed because a reservation could
      # be created and the collateral assigned to that reservation.
      # The returnedCollateral will be used in the cleanup function
      # and be passed to the deleteReservation function.
      let slot = Slot(request: request, slotIndex: data.slotIndex)
      returnedCollateral = request.ask.collateralPerSlot.some

    if onCleanUp =? agent.onCleanUp:
      await onCleanUp(
        reprocessSlot = state.reprocessSlot, returnedCollateral = returnedCollateral
      )
  except CancelledError as e:
    trace "SaleIgnored.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleIgnored.run in onCleanUp", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,71 +0,0 @@
import pkg/questionable/results
import ../../clock
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./filling
import ./cancelled
import ./errored
import ./failed

logScope:
  topics = "marketplace sales initial-proving"

type SaleInitialProving* = ref object of SaleState

method `$`*(state: SaleInitialProving): string =
  "SaleInitialProving"

method onCancelled*(state: SaleInitialProving, request: StorageRequest): ?State =
  return some State(SaleCancelled())

method onFailed*(state: SaleInitialProving, request: StorageRequest): ?State =
  return some State(SaleFailed())

proc waitUntilNextPeriod(clock: Clock, periodicity: Periodicity) {.async.} =
  trace "Waiting until next period"
  let period = periodicity.periodOf(clock.now().Timestamp)
  let periodEnd = periodicity.periodEnd(period)
  await clock.waitUntil((periodEnd + 1).toSecondsSince1970)

proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.async.} =
  let periodicity = await market.periodicity()
  let downtime = await market.proofDowntime()
  await clock.waitUntilNextPeriod(periodicity)
  while (await market.getPointer(slotId)) > (256 - downtime):
    await clock.waitUntilNextPeriod(periodicity)
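
# Note: `getPointer` appears to index into a 256-value challenge window;
# values above `256 - downtime` fall in the band where the challenge may still
# change, so the loop above waits for a period whose challenge will remain
# stable throughout.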

method run*(
    state: SaleInitialProving, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let data = SalesAgent(machine).data
  let context = SalesAgent(machine).context
  let market = context.market
  let clock = context.clock

  without request =? data.request:
    raiseAssert "no sale request"

  without onProve =? context.onProve:
    raiseAssert "onProve callback not set"

  try:
    debug "Waiting for a proof challenge that is valid for the entire period"
    let slot = Slot(request: request, slotIndex: data.slotIndex)
    await waitForStableChallenge(market, clock, slot.id)

    debug "Generating initial proof", requestId = data.requestId
    let challenge = await context.market.getChallenge(slot.id)
    without proof =? (await onProve(slot, challenge)), err:
      error "Failed to generate initial proof", error = err.msg
      return some State(SaleErrored(error: err))

    debug "Finished proof calculation", requestId = data.requestId

    return some State(SaleFilling(proof: proof))
  except CancelledError as e:
    trace "SaleInitialProving.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleInitialProving.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,46 +0,0 @@
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./cancelled
import ./failed
import ./finished
import ./errored

logScope:
  topics = "marketplace sales payout"

type SalePayout* = ref object of SaleState

method `$`*(state: SalePayout): string =
  "SalePayout"

method onCancelled*(state: SalePayout, request: StorageRequest): ?State =
  return some State(SaleCancelled())

method onFailed*(state: SalePayout, request: StorageRequest): ?State =
  return some State(SaleFailed())

method run*(
    state: SalePayout, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let data = SalesAgent(machine).data
  let market = SalesAgent(machine).context.market

  without request =? data.request:
    raiseAssert "no sale request"

  try:
    let slot = Slot(request: request, slotIndex: data.slotIndex)
    debug "Collecting finished slot's reward",
      requestId = data.requestId, slotIndex = data.slotIndex
    let currentCollateral = await market.currentCollateral(slot.id)
    await market.freeSlot(slot.id)

    return some State(SaleFinished(returnedCollateral: some currentCollateral))
  except CancelledError as e:
    trace "SalePayout.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SalePayout.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,110 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import pkg/metrics

import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./cancelled
import ./failed
import ./filled
import ./ignored
import ./slotreserving
import ./errored

declareCounter(
  codex_reservations_availability_mismatch, "codex reservations availability_mismatch"
)

type SalePreparing* = ref object of SaleState

logScope:
  topics = "marketplace sales preparing"

method `$`*(state: SalePreparing): string =
  "SalePreparing"

method onCancelled*(state: SalePreparing, request: StorageRequest): ?State =
  return some State(SaleCancelled())

method onFailed*(state: SalePreparing, request: StorageRequest): ?State =
  return some State(SaleFailed())

method onSlotFilled*(
    state: SalePreparing, requestId: RequestId, slotIndex: uint64
): ?State =
  return some State(SaleFilled())

method run*(
    state: SalePreparing, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let agent = SalesAgent(machine)
  let data = agent.data
  let context = agent.context
  let market = context.market
  let reservations = context.reservations

  try:
    await agent.retrieveRequest()
    await agent.subscribe()

    without request =? data.request:
      error "request could not be retrieved", id = data.requestId
      let error = newException(SaleError, "request could not be retrieved")
      return some State(SaleErrored(error: error))

    let slotId = slotId(data.requestId, data.slotIndex)
    let state = await market.slotState(slotId)
    if state != SlotState.Free and state != SlotState.Repair:
      return some State(SaleIgnored(reprocessSlot: false))

    # TODO: Once implemented, check to ensure the host is allowed to fill the slot,
    # due to the [sliding window mechanism](https://github.com/logos-storage/logos-storage-research/blob/master/design/marketplace.md#dispersal)

    logScope:
      slotIndex = data.slotIndex
      slotSize = request.ask.slotSize
      duration = request.ask.duration
      pricePerBytePerSecond = request.ask.pricePerBytePerSecond
      collateralPerByte = request.ask.collateralPerByte

    let requestEnd = await market.getRequestEnd(data.requestId)

    without availability =?
        await reservations.findAvailability(
          request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond,
          request.ask.collateralPerByte, requestEnd,
        ):
      debug "No availability found for request, ignoring"

      return some State(SaleIgnored(reprocessSlot: true))

    info "Availability found for request, creating reservation"

    without reservation =?
        await noCancel reservations.createReservation(
          availability.id, request.ask.slotSize, request.id, data.slotIndex,
          request.ask.collateralPerByte, requestEnd,
        ), error:
      trace "Creation of reservation failed"
      # Race condition:
      # reservations.findAvailability (line 64) is no guarantee: we can never
      # know for certain that a reservation can be created until we have it.
      # Should createReservation fail because there's no space, we proceed to SaleIgnored.
      if error of BytesOutOfBoundsError:
        # Let's monitor how often this happens; if it is frequent, we can
        # handle it more intelligently.
        codex_reservations_availability_mismatch.inc()
        return some State(SaleIgnored(reprocessSlot: true))

      return some State(SaleErrored(error: error))

    trace "Reservation created successfully"

    data.reservation = some reservation
    return some State(SaleSlotReserving())
  except CancelledError as e:
    trace "SalePreparing.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SalePreparing.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,166 +0,0 @@
import std/options
import pkg/questionable/results
import ../../clock
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ../salescontext
import ./cancelled
import ./failed
import ./errored
import ./payout

logScope:
  topics = "marketplace sales proving"

type
  SlotFreedError* = object of CatchableError
  SlotNotFilledError* = object of CatchableError
  SaleProving* = ref object of SaleState
    loop: Future[void]

method prove*(
    state: SaleProving,
    slot: Slot,
    challenge: ProofChallenge,
    onProve: OnProve,
    market: Market,
    currentPeriod: Period,
) {.base, async.} =
  try:
    without proof =? (await onProve(slot, challenge)), err:
      error "Failed to generate proof", error = err.msg
      # In this state, there's nothing we can do except try again next time.
      return
    debug "Submitting proof", currentPeriod = currentPeriod, slotId = slot.id
    await market.submitProof(slot.id, proof)
  except CancelledError as error:
    trace "Submitting proof cancelled"
    raise error
  except CatchableError as e:
    error "Submitting proof failed", msg = e.msgDetail

proc proveLoop(
    state: SaleProving,
    market: Market,
    clock: Clock,
    request: StorageRequest,
    slotIndex: uint64,
    onProve: OnProve,
) {.async.} =
  let slot = Slot(request: request, slotIndex: slotIndex)
  let slotId = slot.id

  logScope:
    period = currentPeriod
    requestId = request.id
    slotIndex
    slotId = slot.id

  proc getCurrentPeriod(): Future[Period] {.async.} =
    let periodicity = await market.periodicity()
    return periodicity.periodOf(clock.now().Timestamp)

  proc waitUntilPeriod(period: Period) {.async.} =
    let periodicity = await market.periodicity()
    # Ensure that we're past the period boundary by waiting an additional second
    await clock.waitUntil((periodicity.periodStart(period) + 1).toSecondsSince1970)
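
  # Note: both helpers re-read `periodicity` from the market on every call; a
  # variant that assumed an immutable proof period could hoist that lookup out
  # of the loop, at the cost of missing on-chain parameter changes.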

  while true:
    let currentPeriod = await getCurrentPeriod()
    let slotState = await market.slotState(slot.id)

    case slotState
    of SlotState.Filled:
      debug "Proving for new period", period = currentPeriod
      if (await market.isProofRequired(slotId)) or
          (await market.willProofBeRequired(slotId)):
        let challenge = await market.getChallenge(slotId)
        debug "Proof is required", period = currentPeriod, challenge = challenge
        await state.prove(slot, challenge, onProve, market, currentPeriod)
    of SlotState.Cancelled:
      debug "Slot reached cancelled state"
      # do nothing, let onCancelled callback take care of it
    of SlotState.Repair:
      warn "Slot was forcibly freed"
      let message = "Slot was forcibly freed and host was removed from its hosting"
      raise newException(SlotFreedError, message)
    of SlotState.Failed:
      debug "Slot reached failed state"
      # do nothing, let onFailed callback take care of it
    of SlotState.Finished:
      debug "Slot reached finished state", period = currentPeriod
      return # exit the loop
    else:
      let message = "Slot is not in Filled state, but in state: " & $slotState
      raise newException(SlotNotFilledError, message)

    debug "waiting until next period"
    await waitUntilPeriod(currentPeriod + 1)

method `$`*(state: SaleProving): string =
  "SaleProving"

method onCancelled*(state: SaleProving, request: StorageRequest): ?State =
  # state.loop cancellation happens automatically when run is cancelled due to
  # state change
  return some State(SaleCancelled())

method onFailed*(state: SaleProving, request: StorageRequest): ?State =
  # state.loop cancellation happens automatically when run is cancelled due to
  # state change
  return some State(SaleFailed())

method run*(
    state: SaleProving, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let data = SalesAgent(machine).data
  let context = SalesAgent(machine).context

  without request =? data.request:
    raiseAssert "no sale request"

  without onProve =? context.onProve:
    raiseAssert "onProve callback not set"

  without market =? context.market:
    raiseAssert("market not set")

  without clock =? context.clock:
    raiseAssert("clock not set")

  try:
    debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex
    try:
      let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve)
      state.loop = loop
      await loop
    except CancelledError as e:
      trace "proving loop cancelled"
      discard
    except CatchableError as e:
      error "Proving failed",
        msg = e.msg, typ = $(type e), stack = e.getStackTrace(), error = e.msgDetail
      return some State(SaleErrored(error: e))
    finally:
      # Cleanup of the proving loop
      debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex

      if not state.loop.isNil:
        if not state.loop.finished:
          try:
            await state.loop.cancelAndWait()
          except CancelledError:
            discard
          except CatchableError as e:
            error "Error during cancellation of proving loop", msg = e.msg

        state.loop = nil

    return some State(SalePayout())
  except CancelledError as e:
    trace "SaleProving.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleProving.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,57 +0,0 @@
import ../../conf
when storage_enable_proof_failures:
  import std/strutils
  import pkg/stint
  import pkg/ethers

  import ../../contracts/marketplace
  import ../../contracts/requests
  import ../../logutils
  import ../../market
  import ../../utils/exceptions
  import ../salescontext
  import ./proving
  import ./errored

  logScope:
    topics = "marketplace sales simulated-proving"

  type SaleProvingSimulated* = ref object of SaleProving
    failEveryNProofs*: int
    proofCount: int

  proc onSubmitProofError(error: ref CatchableError, period: Period, slotId: SlotId) =
    error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail

  method prove*(
      state: SaleProvingSimulated,
      slot: Slot,
      challenge: ProofChallenge,
      onProve: OnProve,
      market: Market,
      currentPeriod: Period,
  ) {.async.} =
    try:
      trace "Processing proving in simulated mode"
      state.proofCount += 1
      if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0:
        state.proofCount = 0

        try:
          warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id
          await market.submitProof(slot.id, Groth16Proof.default)
        except ProofInvalidError as e:
          discard # expected
        except CancelledError as error:
          raise error
        except CatchableError as e:
          onSubmitProofError(e, currentPeriod, slot.id)
      else:
        await procCall SaleProving(state).prove(
          slot, challenge, onProve, market, currentPeriod
        )
    except CancelledError as e:
      trace "Submitting INVALID proof cancelled", error = e.msgDetail
      raise e
    except CatchableError as e:
      error "Submitting INVALID proof failed", error = e.msgDetail
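
# Note: with failEveryNProofs = N > 0, every Nth proof is replaced by
# Groth16Proof.default, which the contract is expected to reject; the counter
# then resets, so simulated failures recur periodically rather than once.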
@ -1,65 +0,0 @@
import pkg/questionable
import pkg/metrics

import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./cancelled
import ./failed
import ./ignored
import ./downloading
import ./errored

type SaleSlotReserving* = ref object of SaleState

logScope:
  topics = "marketplace sales reserving"

method `$`*(state: SaleSlotReserving): string =
  "SaleSlotReserving"

method onCancelled*(state: SaleSlotReserving, request: StorageRequest): ?State =
  return some State(SaleCancelled())

method onFailed*(state: SaleSlotReserving, request: StorageRequest): ?State =
  return some State(SaleFailed())

method run*(
    state: SaleSlotReserving, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let agent = SalesAgent(machine)
  let data = agent.data
  let context = agent.context
  let market = context.market

  logScope:
    requestId = data.requestId
    slotIndex = data.slotIndex

  try:
    let canReserve = await market.canReserveSlot(data.requestId, data.slotIndex)
    if canReserve:
      try:
        trace "Reserving slot"
        await market.reserveSlot(data.requestId, data.slotIndex)
      except SlotReservationNotAllowedError as e:
        debug "Slot cannot be reserved, ignoring", error = e.msg
        return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
      except MarketError as e:
        return some State(SaleErrored(error: e))
      # other CatchableErrors are handled "automatically" by the SaleState

      trace "Slot successfully reserved"
      return some State(SaleDownloading())
    else:
      # do not re-add this slot to the queue, and return bytes from Reservation to
      # the Availability
      debug "Slot cannot be reserved, ignoring"
      return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
  except CancelledError as e:
    trace "SaleSlotReserving.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleSlotReserving.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,73 +0,0 @@
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./filled
import ./finished
import ./failed
import ./errored
import ./proving
import ./cancelled
import ./payout

logScope:
  topics = "marketplace sales unknown"

type
  SaleUnknown* = ref object of SaleState
  SaleUnknownError* = object of CatchableError
  UnexpectedSlotError* = object of SaleUnknownError

method `$`*(state: SaleUnknown): string =
  "SaleUnknown"

method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State =
  return some State(SaleCancelled())

method onFailed*(state: SaleUnknown, request: StorageRequest): ?State =
  return some State(SaleFailed())

method run*(
    state: SaleUnknown, machine: Machine
): Future[?State] {.async: (raises: []).} =
  let agent = SalesAgent(machine)
  let data = agent.data
  let market = agent.context.market

  try:
    await agent.retrieveRequest()
    await agent.subscribe()

    without request =? data.request:
      error "request could not be retrieved", id = data.requestId
      let error = newException(SaleError, "request could not be retrieved")
      return some State(SaleErrored(error: error))

    let slotId = slotId(data.requestId, data.slotIndex)
    let slotState = await market.slotState(slotId)

    case slotState
    of SlotState.Free:
      let error =
        newException(UnexpectedSlotError, "Slot state on chain should not be 'free'")
      return some State(SaleErrored(error: error))
    of SlotState.Filled:
      return some State(SaleFilled())
    of SlotState.Finished:
      return some State(SalePayout())
    of SlotState.Paid:
      return some State(SaleFinished())
    of SlotState.Failed:
      return some State(SaleFailed())
    of SlotState.Cancelled:
      return some State(SaleCancelled())
    of SlotState.Repair:
      let error = newException(
        SlotFreedError, "Slot was forcibly freed and host was removed from its hosting"
      )
      return some State(SaleErrored(error: error))
  except CancelledError as e:
    trace "SaleUnknown.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during SaleUnknown.run", error = e.msgDetail
    return some State(SaleErrored(error: e))
@ -1,6 +0,0 @@
import ./slots/builder
import ./slots/sampler
import ./slots/proofs
import ./slots/types

export builder, sampler, proofs, types
@ -1,8 +0,0 @@
import ./builder/builder
import ./converters

import ../merkletree

export builder, converters

type Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash]
@ -1,437 +0,0 @@
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/math
import std/sequtils
import std/sugar

import pkg/libp2p
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/constantine/math/io/io_fields
import pkg/taskpools

import ../../logutils
import ../../utils
import ../../stores
import ../../manifest
import ../../merkletree
import ../../utils/poseidon2digest
import ../../utils/asynciter
import ../../indexingstrategy

import ../converters

export converters, asynciter, poseidon2digest

logScope:
  topics = "codex slotsbuilder"

type SlotsBuilder*[T, H] = ref object of RootObj
  store: BlockStore
  manifest: Manifest # current manifest
  strategy: IndexingStrategy # indexing strategy
  cellSize: NBytes # cell size
  numSlotBlocks: Natural
    # number of blocks per slot (should yield a power of two number of cells)
  slotRoots: seq[H] # roots of the slots
  emptyBlock: seq[byte] # empty block
  verifiableTree: ?T # verification tree (dataset tree)
  emptyDigestTree: T # empty digest tree for empty blocks
  taskPool: Taskpool

func verifiable*[T, H](self: SlotsBuilder[T, H]): bool {.inline.} =
  ## Returns true if the slots are verifiable.
  ##

  self.manifest.verifiable

func slotRoots*[T, H](self: SlotsBuilder[T, H]): seq[H] {.inline.} =
  ## Returns the slot roots.
  ##

  self.slotRoots

func verifyTree*[T, H](self: SlotsBuilder[T, H]): ?T {.inline.} =
  ## Returns the slots tree (verification tree).
  ##

  self.verifiableTree

func verifyRoot*[T, H](self: SlotsBuilder[T, H]): ?H {.inline.} =
  ## Returns the slots root (verification root).
  ##

  if tree =? self.verifyTree and root =? tree.root:
    return some root

func numSlots*[T, H](self: SlotsBuilder[T, H]): Natural =
  ## Number of slots.
  ##

  self.manifest.numSlots

func numSlotBlocks*[T, H](self: SlotsBuilder[T, H]): Natural =
  ## Number of blocks per slot.
  ##

  self.numSlotBlocks

func numBlocks*[T, H](self: SlotsBuilder[T, H]): Natural =
  ## Number of blocks.
  ##

  self.numSlotBlocks * self.manifest.numSlots

func slotBytes*[T, H](self: SlotsBuilder[T, H]): NBytes =
  ## Number of bytes per slot.
  ##

  (self.manifest.blockSize.int * self.numSlotBlocks).NBytes

func numBlockCells*[T, H](self: SlotsBuilder[T, H]): Natural =
  ## Number of cells per block.
  ##

  (self.manifest.blockSize div self.cellSize).Natural

func cellSize*[T, H](self: SlotsBuilder[T, H]): NBytes =
  ## Cell size.
  ##

  self.cellSize

func numSlotCells*[T, H](self: SlotsBuilder[T, H]): Natural =
  ## Number of cells per slot.
  ##

  self.numBlockCells * self.numSlotBlocks

func slotIndicesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] =
  ## Returns the slot indices.
  ##

  self.strategy.getIndices(slot).catch

func slotIndices*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] =
  ## Returns the slot indices.
  ##

  if iter =? self.strategy.getIndices(slot).catch:
    return toSeq(iter)

func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest =
  ## Returns the manifest.
  ##

  self.manifest

proc buildBlockTree*[T, H](
    self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural
): Future[?!(seq[byte], T)] {.async: (raises: [CancelledError]).} =
  ## Build the block digest tree and return a tuple with the
  ## block data and the tree.
  ##

  logScope:
    blkIdx = blkIdx
    slotPos = slotPos
    numSlotBlocks = self.manifest.numSlotBlocks
    cellSize = self.cellSize

  trace "Building block tree"

  if slotPos > (self.manifest.numSlotBlocks - 1):
    # pad blocks are 0 byte blocks
    trace "Returning empty digest tree for pad block"
    return success (self.emptyBlock, self.emptyDigestTree)

  without blk =? await self.store.getBlock(self.manifest.treeCid, blkIdx), err:
    error "Failed to get block CID for tree at index", err = err.msg
    return failure(err)

  if blk.isEmpty:
    success (self.emptyBlock, self.emptyDigestTree)
  else:
    without tree =? T.digestTree(blk.data, self.cellSize.int), err:
      error "Failed to create digest for block", err = err.msg
      return failure(err)

    success (blk.data, tree)

proc getBlockDigest*[T, H](
    self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural
): Future[?!H] {.async: (raises: [CancelledError]).} =
  logScope:
    blkIdx = blkIdx
    slotPos = slotPos
    numSlotBlocks = self.manifest.numSlotBlocks
    cellSize = self.cellSize

  trace "Building block tree"

  if slotPos > (self.manifest.numSlotBlocks - 1):
    # pad blocks are 0 byte blocks
    trace "Returning empty digest tree for pad block"
    return self.emptyDigestTree.root

  without blk =? await self.store.getBlock(self.manifest.treeCid, blkIdx), err:
    error "Failed to get block CID for tree at index", err = err.msg
    return failure(err)

  if blk.isEmpty:
    return self.emptyDigestTree.root

  without dg =? (await T.digest(self.taskPool, blk.data, self.cellSize.int)), err:
    error "Failed to create digest for block", err = err.msg
    return failure(err)

  return success dg

proc getCellHashes*[T, H](
    self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!seq[H]] {.async: (raises: [CancelledError, IndexingError]).} =
  ## Collect the digests of all blocks in a slot and return
  ## their hashes.
  ##

  let
    treeCid = self.manifest.treeCid
    blockCount = self.manifest.blocksCount
    numberOfSlots = self.manifest.numSlots

  logScope:
    treeCid = treeCid
    origBlockCount = blockCount
    numberOfSlots = numberOfSlots
    slotIndex = slotIndex

  let hashes = collect(newSeq):
    for i, blkIdx in self.strategy.getIndices(slotIndex):
      logScope:
        blkIdx = blkIdx
        pos = i

      trace "Getting block CID for tree at index"
      without digest =? (await self.getBlockDigest(blkIdx, i)), err:
        error "Failed to get block CID for tree at index", err = err.msg
        return failure(err)

      trace "Get block digest", digest = digest.toHex
      digest

  success hashes
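
# Note: the indexing strategy decides which dataset blocks belong to a slot,
# so the digests above are gathered in strategy order, the same order in which
# the slot tree built below expects its leaves.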

proc buildSlotTree*[T, H](
    self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!T] {.async: (raises: [CancelledError]).} =
  ## Build the slot tree from the block digest hashes
  ## and return the tree.

  try:
    without cellHashes =? (await self.getCellHashes(slotIndex)), err:
      error "Failed to select slot blocks", err = err.msg
      return failure(err)

    T.init(cellHashes)
  except IndexingError as err:
    error "Failed to build slot tree", err = err.msg
    return failure(err)

proc buildSlot*[T, H](
    self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!H] {.async: (raises: [CancelledError]).} =
  ## Build a slot tree and store the proofs in
  ## the block store.
  ##

  logScope:
    cid = self.manifest.treeCid
    slotIndex = slotIndex

  trace "Building slot tree"

  without tree =? (await self.buildSlotTree(slotIndex)) and
      treeCid =? tree.root .? toSlotCid, err:
    error "Failed to build slot tree", err = err.msg
    return failure(err)

  trace "Storing slot tree", treeCid, slotIndex, leaves = tree.leavesCount
  for i, leaf in tree.leaves:
    without cellCid =? leaf.toCellCid, err:
      error "Failed to get CID for slot cell", err = err.msg
      return failure(err)

    without proof =? tree.getProof(i) and encodableProof =? proof.toEncodableProof, err:
      error "Failed to get proof for slot tree", err = err.msg
      return failure(err)

    if err =?
        (await self.store.putCidAndProof(treeCid, i, cellCid, encodableProof)).errorOption:
      error "Failed to store slot tree", err = err.msg
      return failure(err)

  tree.root()

func buildVerifyTree*[T, H](self: SlotsBuilder[T, H], slotRoots: openArray[H]): ?!T =
  T.init(@slotRoots)

proc buildSlots*[T, H](
    self: SlotsBuilder[T, H]
): Future[?!void] {.async: (raises: [CancelledError]).} =
  ## Build all slot trees and store them in the block store.
  ##

  logScope:
    cid = self.manifest.treeCid
    blockCount = self.manifest.blocksCount

  trace "Building slots"

  if self.slotRoots.len == 0:
    self.slotRoots = collect(newSeq):
      for i in 0 ..< self.manifest.numSlots:
        without slotRoot =? (await self.buildSlot(i)), err:
          error "Failed to build slot", err = err.msg, index = i
          return failure(err)
        slotRoot

  without tree =? self.buildVerifyTree(self.slotRoots) and root =? tree.root, err:
    error "Failed to build slot roots tree", err = err.msg
    return failure(err)

  if verifyTree =? self.verifyTree and verifyRoot =? verifyTree.root:
    if not bool(verifyRoot == root): # TODO: `!=` doesn't work for SecretBool
      return failure "Existing slots root doesn't match reconstructed root."

  self.verifiableTree = some tree

  success()

proc buildManifest*[T, H](
    self: SlotsBuilder[T, H]
): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
  if err =? (await self.buildSlots()).errorOption:
    error "Failed to build slot roots", err = err.msg
    return failure(err)

  without rootCids =? self.slotRoots.toSlotCids(), err:
    error "Failed to map slot roots to CIDs", err = err.msg
    return failure(err)

  without rootProvingCidRes =? self.verifyRoot .? toVerifyCid() and
      rootProvingCid =? rootProvingCidRes, err:
    error "Failed to map slot roots to CIDs", err = err.msg
    return failure(err)

  Manifest.new(
    self.manifest, rootProvingCid, rootCids, self.cellSize, self.strategy.strategyType
  )

proc new*[T, H](
    _: type SlotsBuilder[T, H],
    store: BlockStore,
    manifest: Manifest,
    taskPool: Taskpool,
    strategy = LinearStrategy,
    cellSize = DefaultCellSize,
): ?!SlotsBuilder[T, H] =
  if not manifest.protected:
    trace "Manifest is not protected."
    return failure("Manifest is not protected.")

  logScope:
    blockSize = manifest.blockSize
    strategy = strategy
    cellSize = cellSize

  if (manifest.blocksCount mod manifest.numSlots) != 0:
    const msg = "Number of blocks must be divisible by number of slots."
    trace msg
    return failure(msg)

  let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize
  if (manifest.blockSize mod cellSize) != 0.NBytes:
    const msg = "Block size must be divisible by cell size."
    trace msg
    return failure(msg)

  let
    numSlotBlocks = manifest.numSlotBlocks
    numBlockCells = (manifest.blockSize div cellSize).int # number of cells per block
    numSlotCells = manifest.numSlotBlocks * numBlockCells
      # number of uncorrected slot cells
    pow2SlotCells = nextPowerOfTwo(numSlotCells) # pow2 cells per slot
    numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks
      # pad blocks per slot

    numSlotBlocksTotal =
      # pow2 blocks per slot
      if numPadSlotBlocks > 0:
        numPadSlotBlocks + numSlotBlocks
      else:
        numSlotBlocks

    numBlocksTotal = numSlotBlocksTotal * manifest.numSlots # total number of blocks

    emptyBlock = newSeq[byte](manifest.blockSize.int)
    emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int)

    strategy =
      ?strategy.init(
        0,
        manifest.blocksCount - 1,
        manifest.numSlots,
        manifest.numSlots,
        numPadSlotBlocks,
      ).catch
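
  # Worked example (hypothetical values): with blockSize = 64 KiB and
  # cellSize = 2 KiB, numBlockCells = 32; a manifest with 3 blocks per slot
  # gives numSlotCells = 96 and pow2SlotCells = 128, so numPadSlotBlocks = 1
  # empty block pads each slot up to 4 blocks.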

  logScope:
    numSlotBlocks = numSlotBlocks
    numBlockCells = numBlockCells
    numSlotCells = numSlotCells
    pow2SlotCells = pow2SlotCells
    numPadSlotBlocks = numPadSlotBlocks
    numBlocksTotal = numBlocksTotal
    numSlotBlocksTotal = numSlotBlocksTotal
    strategy = strategy.strategyType

  trace "Creating slots builder"

  var self = SlotsBuilder[T, H](
    store: store,
    manifest: manifest,
    strategy: strategy,
    cellSize: cellSize,
    emptyBlock: emptyBlock,
    numSlotBlocks: numSlotBlocksTotal,
    emptyDigestTree: emptyDigestTree,
    taskPool: taskPool,
  )

  if manifest.verifiable:
    if manifest.slotRoots.len == 0 or manifest.slotRoots.len != manifest.numSlots:
      return failure "Manifest is verifiable but slot roots are missing or invalid."

    let
      slotRoots = manifest.slotRoots.mapIt((?it.fromSlotCid()))
      tree = ?self.buildVerifyTree(slotRoots)
      expectedRoot = ?manifest.verifyRoot.fromVerifyCid()
      verifyRoot = ?tree.root

    if verifyRoot != expectedRoot:
      return failure "Existing slots root doesn't match reconstructed root."

    self.slotRoots = slotRoots
    self.verifiableTree = some tree

  success self
@ -1,82 +0,0 @@
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/sequtils

import pkg/libp2p
import pkg/stew/arrayops
import pkg/questionable
import pkg/questionable/results
import pkg/poseidon2
import pkg/poseidon2/io

import ../codextypes
import ../merkletree
import ../errors
import ../utils/digest

func toCid(hash: Poseidon2Hash, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Cid =
  let
    mhash = ?MultiHash.init(mcodec, hash.toBytes).mapFailure
    treeCid = ?Cid.init(CIDv1, cidCodec, mhash).mapFailure
  success treeCid

proc toPoseidon2Hash(
    cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec
): ?!Poseidon2Hash =
  if cid.cidver != CIDv1:
    return failure("Unexpected CID version")

  if cid.mcodec != cidCodec:
    return failure(
      "Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec
    )

  let
    mhash = ?cid.mhash.mapFailure
    bytes: array[32, byte] = array[32, byte].initCopyFrom(mhash.digestBytes())
    hash = ?Poseidon2Hash.fromBytes(bytes).toFailure

  success hash

func toCellCid*(hash: Poseidon2Hash): ?!Cid =
  toCid(hash, Pos2Bn128MrklCodec, CodexSlotCellCodec)

func fromCellCid*(cid: Cid): ?!Poseidon2Hash =
  toPoseidon2Hash(cid, Pos2Bn128MrklCodec, CodexSlotCellCodec)

func toSlotCid*(hash: Poseidon2Hash): ?!Cid =
  toCid(hash, multiCodec("identity"), SlotRootCodec)

func toSlotCids*(slotRoots: openArray[Poseidon2Hash]): ?!seq[Cid] =
  success slotRoots.mapIt(?it.toSlotCid)

func fromSlotCid*(cid: Cid): ?!Poseidon2Hash =
  toPoseidon2Hash(cid, multiCodec("identity"), SlotRootCodec)

func toVerifyCid*(hash: Poseidon2Hash): ?!Cid =
  toCid(hash, multiCodec("identity"), SlotProvingRootCodec)

func fromVerifyCid*(cid: Cid): ?!Poseidon2Hash =
  toPoseidon2Hash(cid, multiCodec("identity"), SlotProvingRootCodec)

func toEncodableProof*(proof: Poseidon2Proof): ?!CodexProof =
  let encodableProof = CodexProof(
    mcodec: multiCodec("identity"),
    index: proof.index,
    nleaves: proof.nleaves,
    path: proof.path.mapIt(@(it.toBytes)),
  )

  success encodableProof

func toVerifiableProof*(proof: CodexProof): ?!Poseidon2Proof =
  let nodes = proof.path.mapIt(?Poseidon2Hash.fromBytes(it.toArray32).toFailure)

  Poseidon2Proof.init(index = proof.index, nleaves = proof.nleaves, nodes = nodes)
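
# A minimal round-trip sketch (assuming a hypothetical Poseidon2Hash value
# `someHash`):
#   let cid = ?someHash.toSlotCid
#   doAssert (?cid.fromSlotCid) == someHash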
@ -1,5 +0,0 @@
import ./proofs/backends
import ./proofs/prover
import ./proofs/backendfactory

export circomcompat, prover, backendfactory
@ -1,82 +0,0 @@
import os
import strutils
import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/confutils/defs
import pkg/stew/io2
import pkg/ethers

import ../../conf
import ./backends
import ./backendutils

proc initializeFromConfig(config: CodexConf, utils: BackendUtils): ?!AnyBackend =
  if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) or
      not endsWith($config.circomR1cs, ".r1cs"):
    return failure("Circom R1CS file not accessible")

  if not fileAccessible($config.circomWasm, {AccessFlags.Read}) or
      not endsWith($config.circomWasm, ".wasm"):
    return failure("Circom wasm file not accessible")

  if not fileAccessible($config.circomZkey, {AccessFlags.Read}) or
      not endsWith($config.circomZkey, ".zkey"):
    return failure("Circom zkey file not accessible")

  trace "Initialized prover backend from cli config"
  success(
    utils.initializeCircomBackend(
      $config.circomR1cs, $config.circomWasm, $config.circomZkey
    )
  )

proc r1csFilePath(config: CodexConf): string =
  config.circuitDir / "proof_main.r1cs"

proc wasmFilePath(config: CodexConf): string =
  config.circuitDir / "proof_main.wasm"

proc zkeyFilePath(config: CodexConf): string =
  config.circuitDir / "proof_main.zkey"

proc initializeFromCircuitDirFiles(
    config: CodexConf, utils: BackendUtils
): ?!AnyBackend {.gcsafe.} =
  if fileExists(config.r1csFilePath) and fileExists(config.wasmFilePath) and
      fileExists(config.zkeyFilePath):
    trace "Initialized prover backend from local files"
    return success(
      utils.initializeCircomBackend(
        config.r1csFilePath, config.wasmFilePath, config.zkeyFilePath
      )
    )

  failure("Circuit files not found")
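
# Note: initialization is attempted in order: explicit CLI paths first, then
# the conventional file names under `circuitDir`; only when both fail is the
# download hint below printed and a failure returned.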

proc suggestDownloadTool(config: CodexConf) =
  without address =? config.marketplaceAddress:
    raise (ref Defect)(
      msg: "Proving backend initializing while marketplace address not set."
    )

  let
    tokens = ["cirdl", "\"" & $config.circuitDir & "\"", config.ethProvider, $address]
    instructions = "'./" & tokens.join(" ") & "'"

  warn "Proving circuit files are not found. Please run the following to download them:",
    instructions

proc initializeBackend*(
    config: CodexConf, utils: BackendUtils = BackendUtils()
): ?!AnyBackend =
  without backend =? initializeFromConfig(config, utils), cliErr:
    info "Could not initialize prover backend from CLI options...", msg = cliErr.msg
    without backend =? initializeFromCircuitDirFiles(config, utils), localErr:
      info "Could not initialize prover backend from circuit dir files...",
        msg = localErr.msg
      suggestDownloadTool(config)
      return failure("CircuitFilesNotFound")
    # Unexpected: value of backend does not survive leaving each scope. (definition does though...)
    return success(backend)
  return success(backend)
@ -1,5 +0,0 @@
import ./backends/circomcompat

export circomcompat

type AnyBackend* = CircomCompat
@ -1,240 +0,0 @@
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/sugar

import pkg/chronos
import pkg/questionable/results
import pkg/circomcompat

import ../../types
import ../../../stores
import ../../../contracts

import ./converters

export circomcompat, converters

type
  CircomCompat* = object
    slotDepth: int # max depth of the slot tree
    datasetDepth: int # max depth of dataset tree
    blkDepth: int # depth of the block merkle tree (pow2 for now)
    cellElms: int # number of field elements per cell
    numSamples: int # number of samples per slot
    r1csPath: string # path to the r1cs file
    wasmPath: string # path to the wasm file
    zkeyPath: string # path to the zkey file
    backendCfg: ptr CircomBn254Cfg
    vkp*: ptr CircomKey

  NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H]

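# A minimal sketch, not from this commit: what {.borrow: `.`.} on a distinct
# type buys here - field access carries over, but padded inputs cannot be
# mixed up with raw ones at compile time. Payload/Normalized are illustrative
# stand-ins, not names from this codebase.
type
  Payload = object
    value: int

  Normalized {.borrow: `.`.} = distinct Payload

let n = Normalized(Payload(value: 42))
assert n.value == 42 # `.` is borrowed; the two types still stay distinct
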
func normalizeInput*[H](
    self: CircomCompat, input: ProofInputs[H]
): NormalizedProofInputs[H] =
  ## Parameters in CIRCOM circuits are statically sized and must be properly
  ## padded before they can be passed on to the circuit. This function takes
  ## variable length parameters and performs that padding.
  ##
  ## The output from this function can be JSON-serialized and used as direct
  ## inputs to the CIRCOM circuit for testing and debugging when one wishes
  ## to bypass the Rust FFI.

  let normSamples = collect:
    for sample in input.samples:
      var merklePaths = sample.merklePaths
      merklePaths.setLen(self.slotDepth)
      Sample[H](cellData: sample.cellData, merklePaths: merklePaths)

  var normSlotProof = input.slotProof
  normSlotProof.setLen(self.datasetDepth)

  NormalizedProofInputs[H] ProofInputs[H](
    entropy: input.entropy,
    datasetRoot: input.datasetRoot,
    slotIndex: input.slotIndex,
    slotRoot: input.slotRoot,
    nCellsPerSlot: input.nCellsPerSlot,
    nSlotsPerDataSet: input.nSlotsPerDataSet,
    slotProof: normSlotProof,
    samples: normSamples,
  )

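# A minimal sketch, not from this commit: the padding above relies on
# seq.setLen, which zero-fills when it grows a sequence - exactly what a
# statically sized circuit input needs.
var merklePath = @[1'u8, 2, 3]
merklePath.setLen(5)
assert merklePath == @[1'u8, 2, 3, 0, 0] # grown entries are default-initialized
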
proc release*(self: CircomCompat) =
  ## Release the ctx
  ##

  if not isNil(self.backendCfg):
    self.backendCfg.unsafeAddr.release_cfg()

  if not isNil(self.vkp):
    self.vkp.unsafeAddr.release_key()

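# A minimal sketch, not from this commit: prove() below pairs a raw FFI
# pointer with `defer`, so the release runs on every exit path (early return,
# raise, or normal fall-through). acquireCtx/releaseCtx are hypothetical
# stand-ins for init_circom_compat/release_circom_compat.
proc acquireCtx(): ptr int =
  create(int) # zero-initialized heap allocation

proc releaseCtx(p: ptr int) =
  dealloc(p)

proc work(fail: bool) =
  var ctx: ptr int
  defer:
    if ctx != nil:
      ctx.releaseCtx()
  ctx = acquireCtx()
  if fail:
    return # defer still releases ctx
  ctx[] = 1

work(true)
work(false)
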
proc prove[H](self: CircomCompat, input: NormalizedProofInputs[H]): ?!CircomProof =
  doAssert input.samples.len == self.numSamples, "Number of samples does not match"

  doAssert input.slotProof.len <= self.datasetDepth,
    "Slot proof is too deep - dataset has more slots than what we can handle?"

  doAssert input.samples.allIt(
    block:
      (
        it.merklePaths.len <= self.slotDepth + self.blkDepth and
        it.cellData.len == self.cellElms
      )
  ), "Merkle paths too deep or cells too big for circuit"

  # TODO: All parameters should match circom's static parameters
  var ctx: ptr CircomCompatCtx

  defer:
    if ctx != nil:
      ctx.addr.release_circom_compat()

  if init_circom_compat(self.backendCfg, addr ctx) != ERR_OK or ctx == nil:
    raiseAssert("failed to initialize CircomCompat ctx")

  var
    entropy = input.entropy.toBytes
    dataSetRoot = input.datasetRoot.toBytes
    slotRoot = input.slotRoot.toBytes

  if ctx.push_input_u256_array("entropy".cstring, entropy[0].addr, entropy.len.uint32) !=
      ERR_OK:
    return failure("Failed to push entropy")

  if ctx.push_input_u256_array(
    "dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32
  ) != ERR_OK:
    return failure("Failed to push data set root")

  if ctx.push_input_u256_array(
    "slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32
  ) != ERR_OK:
    return failure("Failed to push slot root")

  if ctx.push_input_u32("nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK:
    return failure("Failed to push nCellsPerSlot")

  if ctx.push_input_u32("nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) !=
      ERR_OK:
    return failure("Failed to push nSlotsPerDataSet")

  if ctx.push_input_u32("slotIndex".cstring, input.slotIndex.uint32) != ERR_OK:
    return failure("Failed to push slotIndex")

  var slotProof = input.slotProof.mapIt(it.toBytes).concat

  doAssert(slotProof.len == self.datasetDepth)
  # arrays are always flattened
  if ctx.push_input_u256_array(
    "slotProof".cstring, slotProof[0].addr, uint (slotProof[0].len * slotProof.len)
  ) != ERR_OK:
    return failure("Failed to push slot proof")

  for s in input.samples:
    var
      merklePaths = s.merklePaths.mapIt(it.toBytes)
      data = s.cellData.mapIt(@(it.toBytes)).concat

    if ctx.push_input_u256_array(
      "merklePaths".cstring,
      merklePaths[0].addr,
      uint (merklePaths[0].len * merklePaths.len),
    ) != ERR_OK:
      return failure("Failed to push merkle paths")

    if ctx.push_input_u256_array("cellData".cstring, data[0].addr, data.len.uint) !=
        ERR_OK:
      return failure("Failed to push cell data")

  var proofPtr: ptr Proof = nil

  let proof =
    try:
      if (let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr); res != ERR_OK) or
          proofPtr == nil:
        return failure("Failed to prove - err code: " & $res)

      proofPtr[]
    finally:
      if proofPtr != nil:
        proofPtr.addr.release_proof()

  success proof

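# A minimal sketch, not from this commit: the pushes above flatten nested
# byte sequences into one contiguous buffer and size it as
# inner-length * outer-length - the same arithmetic as
# `uint (merklePaths[0].len * merklePaths.len)`.
import std/sequtils

let paths = @[@[1'u8, 2], @[3'u8, 4]]
let flat = paths.concat # one contiguous buffer for the FFI
assert flat == @[1'u8, 2, 3, 4]
assert flat.len == paths[0].len * paths.len
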
proc prove*[H](self: CircomCompat, input: ProofInputs[H]): ?!CircomProof =
  self.prove(self.normalizeInput(input))

proc verify*[H](
    self: CircomCompat, proof: CircomProof, inputs: ProofInputs[H]
): ?!bool =
  ## Verify a proof using a ctx
  ##

  var
    proofPtr = unsafeAddr proof
    inputs = inputs.toCircomInputs()

  try:
    let res = verify_circuit(proofPtr, inputs.addr, self.vkp)
    if res == ERR_OK:
      success true
    elif res == ERR_FAILED_TO_VERIFY_PROOF:
      success false
    else:
      failure("Failed to verify proof - err code: " & $res)
  finally:
    inputs.releaseCircomInputs()

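# A minimal sketch, not from this commit: verify* above distinguishes three
# FFI outcomes - proof valid, proof well-formed but invalid, and hard
# failure. Status/interpret are illustrative stand-ins for the circomcompat
# error codes, not names from that library.
type Status = enum
  StatusOk
  StatusInvalidProof
  StatusError

proc interpret(res: Status): bool =
  case res
  of StatusOk: true
  of StatusInvalidProof: false
  of StatusError: raise newException(CatchableError, "verification error")

assert interpret(StatusOk)
assert not interpret(StatusInvalidProof)
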
proc init*(
    _: type CircomCompat,
    r1csPath: string,
    wasmPath: string,
    zkeyPath: string = "",
    slotDepth = DefaultMaxSlotDepth,
    datasetDepth = DefaultMaxDatasetDepth,
    blkDepth = DefaultBlockDepth,
    cellElms = DefaultCellElms,
    numSamples = DefaultSamplesNum,
): CircomCompat =
  ## Create a new ctx
  ##

  var cfg: ptr CircomBn254Cfg
  var zkey = if zkeyPath.len > 0: zkeyPath.cstring else: nil

  if init_circom_config(r1csPath.cstring, wasmPath.cstring, zkey, cfg.addr) != ERR_OK or
      cfg == nil:
    if cfg != nil:
      cfg.addr.release_cfg()
    raiseAssert("failed to initialize circom compat config")

  var vkpPtr: ptr VerifyingKey = nil

  if cfg.get_verifying_key(vkpPtr.addr) != ERR_OK or vkpPtr == nil:
    if vkpPtr != nil:
      vkpPtr.addr.release_key()
    raiseAssert("Failed to get verifying key")

  CircomCompat(
    r1csPath: r1csPath,
    wasmPath: wasmPath,
    zkeyPath: zkeyPath,
    slotDepth: slotDepth,
    datasetDepth: datasetDepth,
    blkDepth: blkDepth,
    cellElms: cellElms,
    numSamples: numSamples,
    backendCfg: cfg,
    vkp: vkpPtr,
  )

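# A minimal sketch, not from this commit, of how a caller would construct
# and tear down the backend; the circuit file paths are hypothetical and
# this assumes the module above is importable.
proc example() =
  let backend = CircomCompat.init(
    "circuit_dir/proof_main.r1cs",
    "circuit_dir/proof_main.wasm",
    "circuit_dir/proof_main.zkey",
  )
  defer:
    backend.release() # frees backendCfg and the verifying key
  # ... prove/verify with `backend` here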