Mirror of https://github.com/logos-storage/logos-storage-nim.git (synced 2026-02-25 07:53:19 +00:00)

Merge branch 'master' into logging/repostore-timing

commit aeb75f4864

@@ -89,7 +89,7 @@ runs:
- name: Install gcc 14 on Linux
# We don't want to install gcc 14 for coverage (Ubuntu 20.04)
if : ${{ inputs.os == 'linux' && !inputs.coverage }}
if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
shell: ${{ inputs.shell }} {0}
run: |
# Add GCC-14 to alternatives

@@ -202,7 +202,7 @@ runs:
- name: Restore Nim toolchain binaries from cache
id: nim-cache
uses: actions/cache@v4
if : ${{ !inputs.coverage }}
if : ${{ inputs.coverage != 'true' }}
with:
path: NimBinaries
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}

4 .github/workflows/docker-reusable.yml vendored

@@ -94,11 +94,11 @@ jobs:
- target:
os: linux
arch: amd64
builder: ubuntu-22.04
builder: ubuntu-24.04
- target:
os: linux
arch: arm64
builder: ubuntu-22.04-arm
builder: ubuntu-24.04-arm
name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }}
runs-on: ${{ matrix.builder }}

16 .github/workflows/docs.yml vendored

@@ -2,17 +2,17 @@ name: OpenAPI
on:
push:
branches:
- 'master'
tags:
- "v*.*.*"
paths:
- 'openapi.yaml'
- '.github/workflows/docs.yml'
- "openapi.yaml"
- ".github/workflows/docs.yml"
pull_request:
branches:
- '**'
- "**"
paths:
- 'openapi.yaml'
- '.github/workflows/docs.yml'
- "openapi.yaml"
- ".github/workflows/docs.yml"
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:

@@ -40,7 +40,7 @@ jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/master'
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Checkout
uses: actions/checkout@v4

8 .github/workflows/nim-matrix.yml vendored

@@ -20,10 +20,10 @@ jobs:
uses: fabiocaccamo/create-matrix-action@v5
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
build:
needs: matrix

2 .gitignore vendored

@@ -45,3 +45,5 @@ docker/prometheus-data
.DS_Store
nim.cfg
tests/integration/logs
data/

10 .gitmodules vendored

@@ -221,3 +221,13 @@
[submodule "vendor/nph"]
path = vendor/nph
url = https://github.com/arnetheduck/nph.git
[submodule "vendor/nim-quic"]
path = vendor/nim-quic
url = https://github.com/vacp2p/nim-quic.git
ignore = untracked
branch = master
[submodule "vendor/nim-ngtcp2"]
path = vendor/nim-ngtcp2
url = https://github.com/vacp2p/nim-ngtcp2.git
ignore = untracked
branch = master

5 Makefile

@@ -229,6 +229,11 @@ nph/%: build-nph
echo -e $(FORMAT_MSG) "nph/$*" && \
$(NPH) $*
format:
$(NPH) *.nim
$(NPH) codex/
$(NPH) tests/
clean-nph:
rm -f $(NPH)

@@ -59,8 +59,8 @@ Feel free to dive in, contributions are welcomed! Open an issue or submit PRs.
### Linting and formatting
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is requrired to adhere to its styling.
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
If you are setting up fresh setup, in order to get `nph` run `make build-nph`.
In order to format files run `make nph/<file/folder you want to format>`.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior commiting them.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them.
If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension you can enable "Format On Save" (eq. the `nim.formatOnSave` property) that will format the files using `nph`.

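For reference, the nph workflow described in this README section (together with the `format` target added to the Makefile in this commit) boils down to a few make invocations; a minimal sketch, where `codex/` is just an example path:

    make build-nph             # one-time: build the nph formatter binary
    make nph/codex/            # format a specific file or folder
    make format                # added in this commit: runs nph over *.nim, codex/ and tests/
    make install-nph-commit    # optional: pre-commit hook that formats modified files
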
34 build.nims

@@ -4,7 +4,6 @@ import std/os except commandLineParams
### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
if not dirExists "build":
mkDir "build"

@@ -14,13 +13,15 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
for param in commandLineParams():
extra_params &= " " & param
else:
for i in 2..<paramCount():
for i in 2 ..< paramCount():
extra_params &= " " & paramStr(i)
let
# Place build output in 'build' folder, even if name includes a longer path.
outName = os.lastPathPart(name)
cmd = "nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir & name & ".nim"
cmd =
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
name & ".nim"
exec(cmd)

@@ -29,7 +30,8 @@ proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
exec "build/" & name
task codex, "build codex binary":
buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
buildBinary "codex",
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
task toolsCirdl, "build tools/cirdl binary":
buildBinary "tools/cirdl/cirdl"

@@ -41,7 +43,9 @@ task testContracts, "Build & run Codex Contract tests":
test "testContracts"
task testIntegration, "Run integration tests":
buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
buildBinary "codex",
params =
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
test "testIntegration"
# use params to enable logging from the integration test executable
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &

@@ -90,15 +94,25 @@ task coverage, "generates code coverage report":
var nimSrcs = " "
for f in walkDirRec("codex", {pcFile}):
if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell()
if f.endswith(".nim"):
nimSrcs.add " " & f.absolutePath.quoteShell()
echo "======== Running Tests ======== "
test "coverage", srcDir = "tests/", params = " --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
test "coverage",
srcDir = "tests/",
params =
" --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
exec("rm nimcache/coverage/*.c")
rmDir("coverage"); mkDir("coverage")
rmDir("coverage")
mkDir("coverage")
echo " ======== Running LCOV ======== "
exec("lcov --capture --directory nimcache/coverage --output-file coverage/coverage.info")
exec("lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " & nimSrcs)
exec(
"lcov --capture --directory nimcache/coverage --output-file coverage/coverage.info"
)
exec(
"lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " &
nimSrcs
)
echo " ======== Generating HTML coverage report ======== "
exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
echo " ======== Coverage report Done ======== "

39 codex.nim

@@ -38,33 +38,35 @@ when isMainModule:
when defined(posix):
import system/ansi_c
type
CodexStatus {.pure.} = enum
Stopped,
Stopping,
Running
type CodexStatus {.pure.} = enum
Stopped
Stopping
Running
let config = CodexConf.load(
version = codexFullVersion,
envVarsPrefix = "codex",
secondarySources = proc (config: CodexConf, sources: auto) {.gcsafe, raises: [ConfigurationError].} =
if configFile =? config.configFile:
sources.addConfigFile(Toml, configFile)
secondarySources = proc(
config: CodexConf, sources: auto
) {.gcsafe, raises: [ConfigurationError].} =
if configFile =? config.configFile:
sources.addConfigFile(Toml, configFile)
,
)
config.setupLogging()
config.setupMetrics()
if not(checkAndCreateDataDir((config.dataDir).string)):
if not (checkAndCreateDataDir((config.dataDir).string)):
# We are unable to access/create data folder or data folder's
# permissions are insecure.
quit QuitFailure
if config.prover() and not(checkAndCreateDataDir((config.circuitDir).string)):
if config.prover() and not (checkAndCreateDataDir((config.circuitDir).string)):
quit QuitFailure
trace "Data dir initialized", dir = $config.dataDir
if not(checkAndCreateDataDir((config.dataDir / "repo"))):
if not (checkAndCreateDataDir((config.dataDir / "repo"))):
# We are unable to access/create data folder or data folder's
# permissions are insecure.
quit QuitFailure

@@ -83,11 +85,12 @@ when isMainModule:
config.dataDir / config.netPrivKeyFile
privateKey = setupKey(keyPath).expect("Should setup private key!")
server = try:
CodexServer.new(config, privateKey)
except Exception as exc:
error "Failed to start Codex", msg = exc.msg
quit QuitFailure
server =
try:
CodexServer.new(config, privateKey)
except Exception as exc:
error "Failed to start Codex", msg = exc.msg
quit QuitFailure
## Ctrl+C handling
proc doShutdown() =

@@ -101,7 +104,9 @@ when isMainModule:
# workaround for https://github.com/nim-lang/Nim/issues/4057
try:
setupForeignThreadGc()
except Exception as exc: raiseAssert exc.msg # shouldn't happen
except Exception as exc:
raiseAssert exc.msg
# shouldn't happen
notice "Shutting down after having received SIGINT"
doShutdown()

@@ -41,80 +41,86 @@ type Advertiser* = ref object of RootObj
advertiserRunning*: bool # Indicates if discovery is running
concurrentAdvReqs: int # Concurrent advertise requests
advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle
advertiseLocalStoreLoop*: Future[void].Raising([]) # Advertise loop task handle
advertiseQueue*: AsyncQueue[Cid] # Advertise queue
trackedFutures*: TrackedFutures # Advertise tasks futures
advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} =
proc addCidToQueue(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
if cid notin b.advertiseQueue:
await b.advertiseQueue.put(cid)
trace "Advertising", cid
proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} =
proc advertiseBlock(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
without isM =? cid.isManifest, err:
warn "Unable to determine if cid is manifest"
return
if isM:
without blk =? await b.localStore.getBlock(cid), err:
error "Error retrieving manifest block", cid, err = err.msg
return
try:
if isM:
without blk =? await b.localStore.getBlock(cid), err:
error "Error retrieving manifest block", cid, err = err.msg
return
without manifest =? Manifest.decode(blk), err:
error "Unable to decode as manifest", err = err.msg
return
without manifest =? Manifest.decode(blk), err:
error "Unable to decode as manifest", err = err.msg
return
# announce manifest cid and tree cid
await b.addCidToQueue(cid)
await b.addCidToQueue(manifest.treeCid)
# announce manifest cid and tree cid
await b.addCidToQueue(cid)
await b.addCidToQueue(manifest.treeCid)
except CancelledError as exc:
trace "Cancelled advertise block", cid
raise exc
except CatchableError as e:
error "failed to advertise block", cid, error = e.msgDetail
proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning:
try:
if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
trace "Advertiser begins iterating blocks..."
for c in cids:
if cid =? await c:
await b.advertiseBlock(cid)
trace "Advertiser iterating blocks finished."
try:
while b.advertiserRunning:
try:
if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
trace "Advertiser begins iterating blocks..."
for c in cids:
if cid =? await c:
await b.advertiseBlock(cid)
trace "Advertiser iterating blocks finished."
except CatchableError as e:
error "Error in advertise local store loop", error = e.msgDetail
raiseAssert("Unexpected exception in advertiseLocalStoreLoop")
await sleepAsync(b.advertiseLocalStoreLoopSleep)
except CancelledError:
break # do not propagate as advertiseLocalStoreLoop was asyncSpawned
except CatchableError as e:
error "failed to advertise blocks in local store", error = e.msgDetail
except CancelledError:
warn "Cancelled advertise local store loop"
info "Exiting advertise task loop"
proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning:
try:
try:
while b.advertiserRunning:
let cid = await b.advertiseQueue.get()
if cid in b.inFlightAdvReqs:
continue
try:
let request = b.discovery.provide(cid)
let request = b.discovery.provide(cid)
b.inFlightAdvReqs[cid] = request
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
b.inFlightAdvReqs[cid] = request
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
await request
finally:
defer:
b.inFlightAdvReqs.del(cid)
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
except CancelledError:
trace "Advertise task cancelled"
return
except CatchableError as exc:
warn "Exception in advertise task runner", exc = exc.msg
await request
except CancelledError:
warn "Cancelled advertise task runner"
info "Exiting advertise task runner"
proc start*(b: Advertiser) {.async.} =
proc start*(b: Advertiser) {.async: (raises: []).} =
## Start the advertiser
##

@@ -134,13 +140,11 @@ proc start*(b: Advertiser) {.async.} =
for i in 0 ..< b.concurrentAdvReqs:
let fut = b.processQueueLoop()
b.trackedFutures.track(fut)
asyncSpawn fut
b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b)
b.trackedFutures.track(b.advertiseLocalStoreLoop)
asyncSpawn b.advertiseLocalStoreLoop
proc stop*(b: Advertiser) {.async.} =
proc stop*(b: Advertiser) {.async: (raises: []).} =
## Stop the advertiser
##

@@ -48,7 +48,7 @@ type DiscoveryEngine* = ref object of RootObj
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
discEngineRunning*: bool # Indicates if discovery is running
concurrentDiscReqs: int # Concurrent discovery requests
discoveryLoop*: Future[void] # Discovery loop task handle
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block

@@ -57,30 +57,21 @@ type DiscoveryEngine* = ref object of RootObj
# Inflight discovery requests
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
try:
try:
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
await b.discoveryQueue.put(cid)
except CancelledError:
trace "Discovery loop cancelled"
return
except CatchableError as exc:
warn "Exception in discovery loop", exc = exc.msg
try:
logScope:
sleep = b.discoveryLoopSleep
wanted = b.pendingBlocks.len
await sleepAsync(b.discoveryLoopSleep)
except CancelledError:
discard # do not propagate as discoveryQueueLoop was asyncSpawned
except CancelledError:
trace "Discovery loop cancelled"
proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
## Run discovery tasks
##
while b.discEngineRunning:
try:
try:
while b.discEngineRunning:
let cid = await b.discoveryQueue.get()
if cid in b.inFlightDiscReqs:

@@ -90,35 +81,28 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
let haves = b.peers.peersHave(cid)
if haves.len < b.minPeersPerBlock:
try:
let request = b.discovery.find(cid).wait(DefaultDiscoveryTimeout)
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
b.inFlightDiscReqs[cid] = request
defer:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
let peers = await request
if (await request.withTimeout(DefaultDiscoveryTimeout)) and
peers =? (await request).catch:
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
for i, f in dialed:
if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId)
finally:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
except CancelledError:
trace "Discovery task cancelled"
return
except CatchableError as exc:
warn "Exception in discovery task runner", exc = exc.msg
except Exception as e:
# Raised by b.discovery.removeProvider somehow...
# This should not be catchable, and we should never get here. Therefore,
# raise a Defect.
raiseAssert "Exception when removing provider"
except CancelledError:
trace "Discovery task cancelled"
return
info "Exiting discovery task runner"
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) =
for cid in cids:
if cid notin b.discoveryQueue:
try:

@@ -126,11 +110,11 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
except CatchableError as exc:
warn "Exception queueing discovery request", exc = exc.msg
proc start*(b: DiscoveryEngine) {.async.} =
proc start*(b: DiscoveryEngine) {.async: (raises: []).} =
## Start the discengine task
##
trace "Discovery engine start"
trace "Discovery engine starting"
if b.discEngineRunning:
warn "Starting discovery engine twice"

@@ -140,13 +124,13 @@ proc start*(b: DiscoveryEngine) {.async.} =
for i in 0 ..< b.concurrentDiscReqs:
let fut = b.discoveryTaskLoop()
b.trackedFutures.track(fut)
asyncSpawn fut
b.discoveryLoop = b.discoveryQueueLoop()
b.trackedFutures.track(b.discoveryLoop)
asyncSpawn b.discoveryLoop
proc stop*(b: DiscoveryEngine) {.async.} =
trace "Discovery engine started"
proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
## Stop the discovery engine
##

@@ -19,6 +19,7 @@ import pkg/metrics
import pkg/stint
import pkg/questionable
import ../../rng
import ../../stores/blockstore
import ../../blocktype
import ../../utils

@@ -67,12 +68,6 @@ const
DefaultMaxPeersPerRequest* = 10
DefaultTaskQueueSize = 100
DefaultConcurrentTasks = 10
# DefaultMaxRetries = 3
# DefaultConcurrentDiscRequests = 10
# DefaultConcurrentAdvertRequests = 10
# DefaultDiscoveryTimeout = 1.minutes
# DefaultMaxQueriedBlocksCache = 1000
# DefaultMinPeersPerBlock = 3
type
TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}

@@ -88,10 +83,8 @@ type
trackedFutures: TrackedFutures # Tracks futures of blockexc tasks
blockexcRunning: bool # Indicates if the blockexc task is running
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
peersPerRequest: int # Max number of peers to request from
wallet*: WalletRef # Nitro wallet for micropayments
pricing*: ?Pricing # Optional bandwidth pricing
blockFetchTimeout*: Duration # Timeout for fetching blocks over the network
discovery*: DiscoveryEngine
advertiser*: Advertiser

@@ -100,124 +93,150 @@ type
price*: UInt256
# attach task scheduler to engine
proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} =
b.taskQueue.pushOrUpdateNoWait(task).isOk()
proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, raises: [].} =
if self.taskQueue.pushOrUpdateNoWait(task).isOk():
trace "Task scheduled for peer", peer = task.id
else:
warn "Unable to schedule task for peer", peer = task.id
proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).}
proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).}
proc start*(b: BlockExcEngine) {.async.} =
proc start*(self: BlockExcEngine) {.async: (raises: []).} =
## Start the blockexc task
##
await b.discovery.start()
await b.advertiser.start()
await self.discovery.start()
await self.advertiser.start()
trace "Blockexc starting with concurrent tasks", tasks = b.concurrentTasks
if b.blockexcRunning:
trace "Blockexc starting with concurrent tasks", tasks = self.concurrentTasks
if self.blockexcRunning:
warn "Starting blockexc twice"
return
b.blockexcRunning = true
for i in 0 ..< b.concurrentTasks:
let fut = b.blockexcTaskRunner()
b.trackedFutures.track(fut)
asyncSpawn fut
self.blockexcRunning = true
for i in 0 ..< self.concurrentTasks:
let fut = self.blockexcTaskRunner()
self.trackedFutures.track(fut)
proc stop*(b: BlockExcEngine) {.async.} =
proc stop*(self: BlockExcEngine) {.async: (raises: []).} =
## Stop the blockexc blockexc
##
await b.discovery.stop()
await b.advertiser.stop()
await self.trackedFutures.cancelTracked()
await self.network.stop()
await self.discovery.stop()
await self.advertiser.stop()
trace "NetworkStore stop"
if not b.blockexcRunning:
if not self.blockexcRunning:
warn "Stopping blockexc without starting it"
return
b.blockexcRunning = false
await b.trackedFutures.cancelTracked()
self.blockexcRunning = false
trace "NetworkStore stopped"
proc sendWantHave(
b: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx]
): Future[void] {.async.} =
self: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx]
): Future[void] {.async: (raises: [CancelledError]).} =
for p in peers:
let toAsk = addresses.filterIt(it notin p.peerHave)
trace "Sending wantHave request", toAsk, peer = p.id
await b.network.request.sendWantList(p.id, toAsk, wantType = WantType.WantHave)
await self.network.request.sendWantList(p.id, toAsk, wantType = WantType.WantHave)
codex_block_exchange_want_have_lists_sent.inc()
proc sendWantBlock(
b: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx
): Future[void] {.async.} =
self: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
trace "Sending wantBlock request to", addresses, peer = blockPeer.id
await b.network.request.sendWantList(
await self.network.request.sendWantList(
blockPeer.id, addresses, wantType = WantType.WantBlock
) # we want this remote to send us a block
codex_block_exchange_want_block_lists_sent.inc()
proc monitorBlockHandle(
b: BlockExcEngine, handle: Future[Block], address: BlockAddress, peerId: PeerId
) {.async.} =
proc randomPeer(peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
Rng.instance.sample(peers)
proc downloadInternal(
self: BlockExcEngine, address: BlockAddress
) {.async: (raises: []).} =
logScope:
address = address
let handle = self.pendingBlocks.getWantHandle(address)
trace "Downloading block"
try:
discard await handle
while address in self.pendingBlocks:
logScope:
retries = self.pendingBlocks.retries(address)
interval = self.pendingBlocks.retryInterval
if self.pendingBlocks.retriesExhausted(address):
trace "Error retries exhausted"
handle.fail(newException(RetriesExhaustedError, "Error retries exhausted"))
break
trace "Running retry handle"
let peers = self.peers.getPeersForBlock(address)
logScope:
peersWith = peers.with.len
peersWithout = peers.without.len
trace "Peers for block"
if peers.with.len > 0:
self.pendingBlocks.setInFlight(address, true)
await self.sendWantBlock(@[address], peers.with.randomPeer)
else:
self.pendingBlocks.setInFlight(address, false)
if peers.without.len > 0:
await self.sendWantHave(@[address], peers.without)
self.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
await (handle or sleepAsync(self.pendingBlocks.retryInterval))
self.pendingBlocks.decRetries(address)
if handle.finished:
trace "Handle for block finished", failed = handle.failed
break
except CancelledError as exc:
trace "Block handle cancelled", address, peerId
trace "Block download cancelled"
if not handle.finished:
await handle.cancelAndWait()
except CatchableError as exc:
warn "Error block handle, disconnecting peer", address, exc = exc.msg, peerId
# TODO: really, this is just a quick and dirty way of
# preventing hitting the same "bad" peer every time, however,
# we might as well discover this on or next iteration, so
# it doesn't mean that we're never talking to this peer again.
# TODO: we need a lot more work around peer selection and
# prioritization
# drop unresponsive peer
await b.network.switch.disconnect(peerId)
b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
proc pickPseudoRandom(
address: BlockAddress, peers: seq[BlockExcPeerCtx]
): BlockExcPeerCtx =
return peers[hash(address) mod peers.len]
warn "Error downloadloading block", exc = exc.msg
if not handle.finished:
handle.fail(exc)
finally:
self.pendingBlocks.setInFlight(address, false)
proc requestBlock*(
b: BlockExcEngine, address: BlockAddress
): Future[?!Block] {.async.} =
let blockFuture = b.pendingBlocks.getWantHandle(address, b.blockFetchTimeout)
self: BlockExcEngine, address: BlockAddress
): Future[?!Block] {.async: (raises: [CancelledError]).} =
if address notin self.pendingBlocks:
self.trackedFutures.track(self.downloadInternal(address))
if not b.pendingBlocks.isInFlight(address):
let peers = b.peers.getPeersForBlock(address)
if peers.with.len == 0:
b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
else:
let selected = pickPseudoRandom(address, peers.with)
asyncSpawn b.monitorBlockHandle(blockFuture, address, selected.id)
b.pendingBlocks.setInFlight(address)
await b.sendWantBlock(@[address], selected)
await b.sendWantHave(@[address], peers.without)
# Don't let timeouts bubble up. We can't be too broad here or we break
# cancellations.
try:
success await blockFuture
except AsyncTimeoutError as err:
let handle = self.pendingBlocks.getWantHandle(address)
success await handle
except CancelledError as err:
warn "Block request cancelled", address
raise err
except CatchableError as err:
error "Block request failed", address, err = err.msg
failure err
proc requestBlock*(b: BlockExcEngine, cid: Cid): Future[?!Block] =
b.requestBlock(BlockAddress.init(cid))
proc requestBlock*(
self: BlockExcEngine, cid: Cid
): Future[?!Block] {.async: (raw: true, raises: [CancelledError]).} =
self.requestBlock(BlockAddress.init(cid))
proc blockPresenceHandler*(
b: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
) {.async.} =
self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
) {.async: (raises: []).} =
trace "Received block presence from peer", peer, blocks = blocks.mapIt($it)
let
peerCtx = b.peers.get(peer)
wantList = toSeq(b.pendingBlocks.wantList)
peerCtx = self.peers.get(peer)
ourWantList = toSeq(self.pendingBlocks.wantList)
if peerCtx.isNil:
return

@@ -228,82 +247,116 @@ proc blockPresenceHandler*(
let
peerHave = peerCtx.peerHave
dontWantCids = peerHave.filterIt(it notin wantList)
dontWantCids = peerHave.filterIt(it notin ourWantList)
if dontWantCids.len > 0:
peerCtx.cleanPresence(dontWantCids)
let wantCids = wantList.filterIt(it in peerHave)
if wantCids.len > 0:
trace "Peer has blocks in our wantList", peer, wants = wantCids
await b.sendWantBlock(wantCids, peerCtx)
# if none of the connected peers report our wants in their have list,
# fire up discovery
b.discovery.queueFindBlocksReq(
toSeq(b.pendingBlocks.wantListCids).filter do(cid: Cid) -> bool:
not b.peers.anyIt(cid in it.peerHaveCids)
let ourWantCids = ourWantList.filterIt(
it in peerHave and not self.pendingBlocks.retriesExhausted(it) and
not self.pendingBlocks.isInFlight(it)
)
proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
for address in ourWantCids:
self.pendingBlocks.setInFlight(address, true)
self.pendingBlocks.decRetries(address)
if ourWantCids.len > 0:
trace "Peer has blocks in our wantList", peer, wants = ourWantCids
if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption:
warn "Failed to send wantBlock to peer", peer, err = err.msg
proc scheduleTasks(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
let cids = blocksDelivery.mapIt(it.blk.cid)
# schedule any new peers to provide blocks to
for p in b.peers:
for p in self.peers:
for c in cids: # for each cid
# schedule a peer if it wants at least one cid
# and we have it in our local store
if c in p.peerWantsCids:
if await (c in b.localStore):
if b.scheduleTask(p):
trace "Task scheduled for peer", peer = p.id
else:
warn "Unable to schedule task for peer", peer = p.id
try:
if await (c in self.localStore):
# TODO: the try/except should go away once blockstore tracks exceptions
self.scheduleTask(p)
break
except CancelledError as exc:
warn "Checking local store canceled", cid = c, err = exc.msg
return
except CatchableError as exc:
error "Error checking local store for cid", cid = c, err = exc.msg
raiseAssert "Unexpected error checking local store for cid"
break # do next peer
proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
proc cancelBlocks(
self: BlockExcEngine, addrs: seq[BlockAddress]
) {.async: (raises: [CancelledError]).} =
## Tells neighboring peers that we're no longer interested in a block.
trace "Sending block request cancellations to peers",
addrs, peers = b.peers.mapIt($it.id)
##
let failed = (
await allFinished(
b.peers.mapIt(
b.network.request.sendWantCancellations(peer = it.id, addresses = addrs)
if self.peers.len == 0:
return
trace "Sending block request cancellations to peers",
addrs, peers = self.peers.peerIds
proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} =
await self.network.request.sendWantCancellations(
peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx)
)
return peerCtx
try:
let (succeededFuts, failedFuts) = await allFinishedFailed(
toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map(
processPeer
)
)
).filterIt(it.failed)
if failed.len > 0:
warn "Failed to send block request cancellations to peers", peers = failed.len
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx):
peerCtx.cleanPresence(addrs)
proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
b.pendingBlocks.resolve(blocksDelivery)
await b.scheduleTasks(blocksDelivery)
await b.cancelBlocks(blocksDelivery.mapIt(it.address))
if failedFuts.len > 0:
warn "Failed to send block request cancellations to peers", peers = failedFuts.len
else:
trace "Block request cancellations sent to peers", peers = self.peers.len
except CancelledError as exc:
warn "Error sending block request cancellations", error = exc.msg
raise exc
except CatchableError as exc:
warn "Error sending block request cancellations", error = exc.msg
proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} =
await b.resolveBlocks(
proc resolveBlocks*(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
self.pendingBlocks.resolve(blocksDelivery)
await self.scheduleTasks(blocksDelivery)
await self.cancelBlocks(blocksDelivery.mapIt(it.address))
proc resolveBlocks*(
self: BlockExcEngine, blocks: seq[Block]
) {.async: (raises: [CancelledError]).} =
await self.resolveBlocks(
blocks.mapIt(
BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid))
)
)
proc payForBlocks(
engine: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery]
) {.async.} =
self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
let
sendPayment = engine.network.request.sendPayment
sendPayment = self.network.request.sendPayment
price = peer.price(blocksDelivery.mapIt(it.address))
if payment =? engine.wallet.pay(peer, price):
if payment =? self.wallet.pay(peer, price):
trace "Sending payment for blocks", price, len = blocksDelivery.len
await sendPayment(peer.id, payment)
proc validateBlockDelivery(b: BlockExcEngine, bd: BlockDelivery): ?!void =
if bd.address notin b.pendingBlocks:
proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =
if bd.address notin self.pendingBlocks:
return failure("Received block is not currently a pending block")
if bd.address.leaf:

@@ -333,8 +386,8 @@ proc validateBlockDelivery(b: BlockExcEngine, bd: BlockDelivery): ?!void =
return success()
proc blocksDeliveryHandler*(
b: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async.} =
self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: []).} =
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))
var validatedBlocksDelivery: seq[BlockDelivery]

@@ -343,40 +396,50 @@ proc blocksDeliveryHandler*(
peer = peer
address = bd.address
if err =? b.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg
continue
if err =? (await b.localStore.putBlock(bd.blk)).errorOption:
error "Unable to store block", err = err.msg
continue
if bd.address.leaf:
without proof =? bd.proof:
error "Proof expected for a leaf block delivery"
try:
if err =? self.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg
continue
if err =? (
await b.localStore.putCidAndProof(
bd.address.treeCid, bd.address.index, bd.blk.cid, proof
)
).errorOption:
error "Unable to store proof and cid for a block"
if err =? (await self.localStore.putBlock(bd.blk)).errorOption:
error "Unable to store block", err = err.msg
continue
if bd.address.leaf:
without proof =? bd.proof:
warn "Proof expected for a leaf block delivery"
continue
if err =? (
await self.localStore.putCidAndProof(
bd.address.treeCid, bd.address.index, bd.blk.cid, proof
)
).errorOption:
warn "Unable to store proof and cid for a block"
continue
except CatchableError as exc:
warn "Error handling block delivery", error = exc.msg
continue
validatedBlocksDelivery.add(bd)
await b.resolveBlocks(validatedBlocksDelivery)
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
let peerCtx = b.peers.get(peer)
let peerCtx = self.peers.get(peer)
if peerCtx != nil:
await b.payForBlocks(peerCtx, blocksDelivery)
## shouldn't we remove them from the want-list instead of this:
peerCtx.cleanPresence(blocksDelivery.mapIt(it.address))
if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
warn "Error paying for blocks", err = err.msg
return
proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.async.} =
let peerCtx = b.peers.get(peer)
if err =? catch(await self.resolveBlocks(validatedBlocksDelivery)).errorOption:
warn "Error resolving blocks", err = err.msg
return
proc wantListHandler*(
self: BlockExcEngine, peer: PeerId, wantList: WantList
) {.async: (raises: []).} =
trace "Received want list from peer", peer, wantList = wantList.entries.len
let peerCtx = self.peers.get(peer)
if peerCtx.isNil:
return

@@ -385,113 +448,138 @@ proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.asy
presence: seq[BlockPresence]
schedulePeer = false
for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)
try:
for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)
logScope:
peer = peerCtx.id
address = e.address
wantType = $e.wantType
logScope:
peer = peerCtx.id
address = e.address
wantType = $e.wantType
if idx < 0: # Adding new entry to peer wants
let
have = await e.address in b.localStore
price = @(b.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if idx < 0: # Adding new entry to peer wants
let
have =
try:
await e.address in self.localStore
except CatchableError as exc:
# TODO: should not be necessary once we have proper exception tracking on the BlockStore interface
false
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
case e.wantType:
of WantType.WantHave:
if have:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
if e.sendDontHave:
if e.cancel:
trace "Received cancelation for untracked block, skipping",
address = e.address
continue
trace "Processing want list entry", wantList = $e
case e.wantType
of WantType.WantHave:
if have:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.DontHave, price: price
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
if e.sendDontHave:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.DontHave, price: price
)
)
codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock:
peerCtx.peerWants.add(e)
schedulePeer = true
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
trace "Canceled block request",
address = e.address, len = peerCtx.peerWants.len
else:
if e.wantType == WantType.WantBlock:
schedulePeer = true
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
trace "Updated block request",
address = e.address, len = peerCtx.peerWants.len
codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock:
peerCtx.peerWants.add(e)
schedulePeer = true
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
else:
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
await self.network.request.sendPresence(peer, presence)
if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
await b.network.request.sendPresence(peer, presence)
if schedulePeer:
self.scheduleTask(peerCtx)
except CancelledError as exc: #TODO: replace with CancelledError
warn "Error processing want list", error = exc.msg
if schedulePeer:
if not b.scheduleTask(peerCtx):
warn "Unable to schedule task for peer", peer
proc accountHandler*(engine: BlockExcEngine, peer: PeerId, account: Account) {.async.} =
let context = engine.peers.get(peer)
proc accountHandler*(
self: BlockExcEngine, peer: PeerId, account: Account
) {.async: (raises: []).} =
let context = self.peers.get(peer)
if context.isNil:
return
context.account = account.some
proc paymentHandler*(
engine: BlockExcEngine, peer: PeerId, payment: SignedState
) {.async.} =
self: BlockExcEngine, peer: PeerId, payment: SignedState
) {.async: (raises: []).} =
trace "Handling payments", peer
without context =? engine.peers.get(peer).option and account =? context.account:
without context =? self.peers.get(peer).option and account =? context.account:
trace "No context or account for peer", peer
return
if channel =? context.paymentChannel:
let sender = account.address
discard engine.wallet.acceptPayment(channel, Asset, sender, payment)
discard self.wallet.acceptPayment(channel, Asset, sender, payment)
else:
context.paymentChannel = engine.wallet.acceptChannel(payment).option
context.paymentChannel = self.wallet.acceptChannel(payment).option
proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
proc setupPeer*(
self: BlockExcEngine, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Perform initial setup, such as want
## list exchange
##
trace "Setting up peer", peer
if peer notin b.peers:
if peer notin self.peers:
trace "Setting up new peer", peer
b.peers.add(BlockExcPeerCtx(id: peer))
trace "Added peer", peers = b.peers.len
self.peers.add(BlockExcPeerCtx(id: peer))
trace "Added peer", peers = self.peers.len
# broadcast our want list, the other peer will do the same
if b.pendingBlocks.wantListLen > 0:
if self.pendingBlocks.wantListLen > 0:
trace "Sending our want list to a peer", peer
let cids = toSeq(b.pendingBlocks.wantList)
await b.network.request.sendWantList(peer, cids, full = true)
let cids = toSeq(self.pendingBlocks.wantList)
await self.network.request.sendWantList(peer, cids, full = true)
if address =? b.pricing .? address:
await b.network.request.sendAccount(peer, Account(address: address))
if address =? self.pricing .? address:
trace "Sending account to peer", peer
await self.network.request.sendAccount(peer, Account(address: address))
proc dropPeer*(b: BlockExcEngine, peer: PeerId) =
proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} =
## Cleanup disconnected peer
##
trace "Dropping peer", peer
# drop the peer from the peers table
b.peers.remove(peer)
self.peers.remove(peer)
proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
proc taskHandler*(
self: BlockExcEngine, task: BlockExcPeerCtx
) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} =
# Send to the peer blocks he wants to get,
# if they present in our local store

@@ -514,22 +602,25 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
if e.address.leaf:
(await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(await self.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(blkAndProof: (Block, CodexProof)) =>
BlockDelivery(
address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some
)
)
else:
(await b.localStore.getBlock(e.address)).map(
(await self.localStore.getBlock(e.address)).map(
(blk: Block) =>
BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
)
let
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
blocksDelivery =
blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get)
blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt:
if bd =? it.value:
bd
else:
raiseAssert "Unexpected error in local lookup"
# All the wants that failed local lookup must be set to not-in-flight again.
let

@@ -540,26 +631,23 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
if blocksDelivery.len > 0:
trace "Sending blocks to peer",
peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
await b.network.request.sendBlocksDelivery(task.id, blocksDelivery)
await self.network.request.sendBlocksDelivery(task.id, blocksDelivery)
codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
task.peerWants.keepItIf(it.address notin successAddresses)
proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).} =
proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
## process tasks
##
trace "Starting blockexc task runner"
while b.blockexcRunning:
try:
let peerCtx = await b.taskQueue.pop()
await b.taskHandler(peerCtx)
except CancelledError:
break # do not propagate as blockexcTaskRunner was asyncSpawned
except CatchableError as e:
error "error running block exchange task", error = e.msgDetail
try:
while self.blockexcRunning:
let peerCtx = await self.taskQueue.pop()
await self.taskHandler(peerCtx)
except CatchableError as exc:
error "error running block exchange task", error = exc.msg
info "Exiting blockexc task runner"

@@ -573,55 +661,59 @@ proc new*(
peerStore: PeerCtxStore,
pendingBlocks: PendingBlocksManager,
concurrentTasks = DefaultConcurrentTasks,
peersPerRequest = DefaultMaxPeersPerRequest,
blockFetchTimeout = DefaultBlockTimeout,
): BlockExcEngine =
## Create new block exchange engine instance
##
let engine = BlockExcEngine(
let self = BlockExcEngine(
localStore: localStore,
peers: peerStore,
pendingBlocks: pendingBlocks,
peersPerRequest: peersPerRequest,
network: network,
wallet: wallet,
concurrentTasks: concurrentTasks,
trackedFutures: TrackedFutures.new(),
trackedFutures: TrackedFutures(),
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
discovery: discovery,
advertiser: advertiser,
blockFetchTimeout: blockFetchTimeout,
)
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
await engine.setupPeer(peerId)
await self.setupPeer(peerId)
else:
engine.dropPeer(peerId)
self.dropPeer(peerId)
if not isNil(network.switch):
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} =
engine.wantListHandler(peer, wantList)
proc blockWantListHandler(
peer: PeerId, wantList: WantList
): Future[void] {.async: (raises: []).} =
self.wantListHandler(peer, wantList)
proc blockPresenceHandler(
peer: PeerId, presence: seq[BlockPresence]
): Future[void] {.gcsafe.} =
engine.blockPresenceHandler(peer, presence)
): Future[void] {.async: (raises: []).} =
self.blockPresenceHandler(peer, presence)
proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} =
engine.blocksDeliveryHandler(peer, blocksDelivery)
): Future[void] {.async: (raises: []).} =
self.blocksDeliveryHandler(peer, blocksDelivery)
proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} =
engine.accountHandler(peer, account)
proc accountHandler(
peer: PeerId, account: Account
): Future[void] {.async: (raises: []).} =
self.accountHandler(peer, account)
proc paymentHandler(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
engine.paymentHandler(peer, payment)
proc paymentHandler(
peer: PeerId, payment: SignedState
): Future[void] {.async: (raises: []).} =
self.paymentHandler(peer, payment)
network.handlers = BlockExcHandlers(
onWantList: blockWantListHandler,

@@ -631,4 +723,4 @@ proc new*(
onPayment: paymentHandler,
)
return engine
return self

@@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/math
import pkg/nitro
import pkg/questionable/results

@@ -15,9 +17,6 @@ import ../peers
export nitro
export results
push:
{.upraises: [].}
const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals

@ -7,13 +7,11 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/tables
import std/monotimes

import pkg/upraises

push:
{.upraises: [].}
import std/strutils

import pkg/chronos
import pkg/libp2p
@ -34,66 +32,76 @@ declareGauge(
codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us"
)

const DefaultBlockTimeout* = 10.minutes
const
DefaultBlockRetries* = 3000
DefaultRetryInterval* = 500.millis

type
RetriesExhaustedError* = object of CatchableError
BlockHandle* = Future[Block].Raising([CancelledError, RetriesExhaustedError])

BlockReq* = object
handle*: Future[Block]
handle*: BlockHandle
inFlight*: bool
blockRetries*: int
startTime*: int64

PendingBlocksManager* = ref object of RootObj
blockRetries*: int = DefaultBlockRetries
retryInterval*: Duration = DefaultRetryInterval
blocks*: Table[BlockAddress, BlockReq] # pending Block requests

proc updatePendingBlockGauge(p: PendingBlocksManager) =
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)

proc getWantHandle*(
p: PendingBlocksManager,
address: BlockAddress,
timeout = DefaultBlockTimeout,
inFlight = false,
): Future[Block] {.async.} =
self: PendingBlocksManager, address: BlockAddress, inFlight = false
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
## Add an event for a block
##

try:
if address notin p.blocks:
p.blocks[address] = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight,
startTime: getMonoTime().ticks,
)
self.blocks.withValue(address, blk):
return blk[].handle
do:
let blk = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight,
blockRetries: self.blockRetries,
startTime: getMonoTime().ticks,
)
self.blocks[address] = blk
let handle = blk.handle

p.updatePendingBlockGauge()
return await p.blocks[address].handle.wait(timeout)
except CancelledError as exc:
trace "Blocks cancelled", exc = exc.msg, address
raise exc
except CatchableError as exc:
error "Pending WANT failed or expired", exc = exc.msg
# no need to cancel, it is already cancelled by wait()
raise exc
finally:
p.blocks.del(address)
p.updatePendingBlockGauge()
proc cleanUpBlock(data: pointer) {.raises: [].} =
self.blocks.del(address)
self.updatePendingBlockGauge()

handle.addCallback(cleanUpBlock)
handle.cancelCallback = proc(data: pointer) {.raises: [].} =
if not handle.finished:
handle.removeCallback(cleanUpBlock)
cleanUpBlock(nil)

self.updatePendingBlockGauge()
return handle

proc getWantHandle*(
p: PendingBlocksManager, cid: Cid, timeout = DefaultBlockTimeout, inFlight = false
): Future[Block] =
p.getWantHandle(BlockAddress.init(cid), timeout, inFlight)
self: PendingBlocksManager, cid: Cid, inFlight = false
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
self.getWantHandle(BlockAddress.init(cid), inFlight)

proc resolve*(
p: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, raises: [].} =
## Resolve pending blocks
##

for bd in blocksDelivery:
p.blocks.withValue(bd.address, blockReq):
if not blockReq.handle.finished:
self.blocks.withValue(bd.address, blockReq):
if not blockReq[].handle.finished:
trace "Resolving pending block", address = bd.address
let
startTime = blockReq.startTime
startTime = blockReq[].startTime
stopTime = getMonoTime().ticks
retrievalDurationUs = (stopTime - startTime) div 1000

@ -106,52 +114,70 @@ proc resolve*(
else:
trace "Block handle already finished", address = bd.address

proc setInFlight*(p: PendingBlocksManager, address: BlockAddress, inFlight = true) =
func retries*(self: PendingBlocksManager, address: BlockAddress): int =
self.blocks.withValue(address, pending):
result = pending[].blockRetries
do:
result = 0

func decRetries*(self: PendingBlocksManager, address: BlockAddress) =
self.blocks.withValue(address, pending):
pending[].blockRetries -= 1

func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool =
self.blocks.withValue(address, pending):
result = pending[].blockRetries <= 0

func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) =
## Set inflight status for a block
##

p.blocks.withValue(address, pending):
self.blocks.withValue(address, pending):
pending[].inFlight = inFlight

proc isInFlight*(p: PendingBlocksManager, address: BlockAddress): bool =
func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block is in flight
##

p.blocks.withValue(address, pending):
self.blocks.withValue(address, pending):
result = pending[].inFlight

proc contains*(p: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in p.blocks
func contains*(self: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in self.blocks

proc contains*(p: PendingBlocksManager, address: BlockAddress): bool =
address in p.blocks
func contains*(self: PendingBlocksManager, address: BlockAddress): bool =
address in self.blocks

iterator wantList*(p: PendingBlocksManager): BlockAddress =
for a in p.blocks.keys:
iterator wantList*(self: PendingBlocksManager): BlockAddress =
for a in self.blocks.keys:
yield a

iterator wantListBlockCids*(p: PendingBlocksManager): Cid =
for a in p.blocks.keys:
iterator wantListBlockCids*(self: PendingBlocksManager): Cid =
for a in self.blocks.keys:
if not a.leaf:
yield a.cid

iterator wantListCids*(p: PendingBlocksManager): Cid =
iterator wantListCids*(self: PendingBlocksManager): Cid =
var yieldedCids = initHashSet[Cid]()
for a in p.blocks.keys:
for a in self.blocks.keys:
let cid = a.cidOrTreeCid
if cid notin yieldedCids:
yieldedCids.incl(cid)
yield cid

iterator wantHandles*(p: PendingBlocksManager): Future[Block] =
for v in p.blocks.values:
iterator wantHandles*(self: PendingBlocksManager): Future[Block] =
for v in self.blocks.values:
yield v.handle

proc wantListLen*(p: PendingBlocksManager): int =
p.blocks.len
proc wantListLen*(self: PendingBlocksManager): int =
self.blocks.len

func len*(p: PendingBlocksManager): int =
p.blocks.len
func len*(self: PendingBlocksManager): int =
self.blocks.len

func new*(T: type PendingBlocksManager): PendingBlocksManager =
PendingBlocksManager()
func new*(
T: type PendingBlocksManager,
retries = DefaultBlockRetries,
interval = DefaultRetryInterval,
): PendingBlocksManager =
PendingBlocksManager(blockRetries: retries, retryInterval: interval)

@ -21,26 +21,29 @@ import ../../blocktype as bt
import ../../logutils
import ../protobuf/blockexc as pb
import ../protobuf/payments
import ../../utils/trackedfutures

import ./networkpeer

export network, payments
export networkpeer, payments

logScope:
topics = "codex blockexcnetwork"

const
Codec* = "/codex/blockexc/1.0.0"
MaxInflight* = 100
DefaultMaxInflight* = 100

type
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
WantListHandler* =
proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).}
BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).}
BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).}
PaymentHandler* =
proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).}

BlockExcHandlers* = object
onWantList*: WantListHandler
@ -57,15 +60,20 @@ type
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] {.gcsafe.}
WantCancellationSender* =
proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
BlocksDeliverySender* =
proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
PresenceSender* =
proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
) {.async: (raises: [CancelledError]).}
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
async: (raises: [CancelledError])
.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
async: (raises: [CancelledError])
.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
async: (raises: [CancelledError])
.}
AccountSender* =
proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
PaymentSender* =
proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}

BlockExcRequest* = object
sendWantList*: WantListSender
@ -82,6 +90,8 @@ type
request*: BlockExcRequest
getConn: ConnProvider
inflightSema: AsyncSemaphore
maxInflight: int = DefaultMaxInflight
trackedFutures*: TrackedFutures = TrackedFutures()

proc peerId*(b: BlockExcNetwork): PeerId =
## Return peer id
@ -95,7 +105,9 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =

return b.peerId == peer

proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
proc send*(
b: BlockExcNetwork, id: PeerId, msg: pb.Message
) {.async: (raises: [CancelledError]).} =
## Send message to peer
##

@ -103,8 +115,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
trace "Unable to send, peer not found", peerId = id
return

let peer = b.peers[id]
try:
let peer = b.peers[id]

await b.inflightSema.acquire()
await peer.send(msg)
except CancelledError as error:
@ -114,7 +127,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
finally:
b.inflightSema.release()

proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} =
proc handleWantList(
b: BlockExcNetwork, peer: NetworkPeer, list: WantList
) {.async: (raises: []).} =
## Handle incoming want list
##

@ -130,7 +145,7 @@ proc sendWantList*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send a want message to peer
##

@ -151,14 +166,14 @@ proc sendWantList*(

proc sendWantCancellations*(
b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
## Informs a remote peer that we're no longer interested in a set of blocks
##
await b.sendWantList(id = id, addresses = addresses, cancel = true)

proc handleBlocksDelivery(
b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: []).} =
## Handle incoming blocks
##

@ -167,7 +182,7 @@ proc handleBlocksDelivery(

proc sendBlocksDelivery*(
b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send blocks to remote
##

@ -175,7 +190,7 @@ proc sendBlocksDelivery*(

proc handleBlockPresence(
b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
) {.async.} =
) {.async: (raises: []).} =
## Handle block presence
##

@ -184,7 +199,7 @@ proc handleBlockPresence(

proc sendBlockPresence*(
b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence]
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send presence to remote
##

@ -192,20 +207,24 @@ proc sendBlockPresence*(

proc handleAccount(
network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async.} =
) {.async: (raises: []).} =
## Handle account info
##

if not network.handlers.onAccount.isNil:
await network.handlers.onAccount(peer.id, account)

proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] =
proc sendAccount*(
b: BlockExcNetwork, id: PeerId, account: Account
) {.async: (raw: true, raises: [CancelledError]).} =
## Send account info to remote
##

b.send(id, Message(account: AccountMessage.init(account)))

proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] =
proc sendPayment*(
b: BlockExcNetwork, id: PeerId, payment: SignedState
) {.async: (raw: true, raises: [CancelledError]).} =
## Send payment to remote
##

@ -213,30 +232,32 @@ proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[

proc handlePayment(
network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async.} =
) {.async: (raises: []).} =
## Handle payment
##

if not network.handlers.onPayment.isNil:
await network.handlers.onPayment(peer.id, payment)

proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.raises: [].} =
proc rpcHandler(
b: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: []).} =
## handle rpc messages
##
if msg.wantList.entries.len > 0:
asyncSpawn b.handleWantList(peer, msg.wantList)
b.trackedFutures.track(b.handleWantList(peer, msg.wantList))

if msg.payload.len > 0:
asyncSpawn b.handleBlocksDelivery(peer, msg.payload)
b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload))

if msg.blockPresences.len > 0:
asyncSpawn b.handleBlockPresence(peer, msg.blockPresences)
b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences))

if account =? Account.init(msg.account):
asyncSpawn b.handleAccount(peer, account)
b.trackedFutures.track(b.handleAccount(peer, account))

if payment =? SignedState.init(msg.payment):
asyncSpawn b.handlePayment(peer, payment)
b.trackedFutures.track(b.handlePayment(peer, payment))

proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
## Creates or retrieves a BlockExcNetwork Peer
@ -245,8 +266,11 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
if peer in b.peers:
return b.peers.getOrDefault(peer, nil)

var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} =
var getConn: ConnProvider = proc(): Future[Connection] {.
async: (raises: [CancelledError])
.} =
try:
trace "Getting new connection stream", peer
return await b.switch.dial(peer, Codec)
except CancelledError as error:
raise error
@ -256,8 +280,8 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
if not isNil(b.getConn):
getConn = b.getConn

let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async.} =
b.rpcHandler(p, msg)
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await b.rpcHandler(p, msg)

# create new pubsub peer
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
@ -282,48 +306,65 @@ proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
trace "Skipping dialing self", peer = peer.peerId
return

if peer.peerId in b.peers:
trace "Already connected to peer", peer = peer.peerId
return

await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))

proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
## Cleanup disconnected peer
##

trace "Dropping peer", peer
b.peers.del(peer)

method init*(b: BlockExcNetwork) =
method init*(self: BlockExcNetwork) =
## Perform protocol initialization
##

proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
b.setupPeer(peerId)
self.setupPeer(peerId)
else:
b.dropPeer(peerId)
self.dropPeer(peerId)

b.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
b.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)

proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handler(
conn: Connection, proto: string
): Future[void] {.async: (raises: [CancelledError]).} =
let peerId = conn.peerId
let blockexcPeer = b.getOrCreatePeer(peerId)
let blockexcPeer = self.getOrCreatePeer(peerId)
await blockexcPeer.readLoop(conn) # attach read loop

b.handler = handle
b.codec = Codec
self.handler = handler
self.codec = Codec

proc stop*(self: BlockExcNetwork) {.async: (raises: []).} =
await self.trackedFutures.cancelTracked()

proc new*(
T: type BlockExcNetwork,
switch: Switch,
connProvider: ConnProvider = nil,
maxInflight = MaxInflight,
maxInflight = DefaultMaxInflight,
): BlockExcNetwork =
## Create a new BlockExcNetwork instance
##

let self = BlockExcNetwork(
switch: switch, getConn: connProvider, inflightSema: newAsyncSemaphore(maxInflight)
switch: switch,
getConn: connProvider,
inflightSema: newAsyncSemaphore(maxInflight),
maxInflight: maxInflight,
)

self.maxIncomingStreams = self.maxInflight

proc sendWantList(
id: PeerId,
cids: seq[BlockAddress],
@ -332,26 +373,32 @@ proc new*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)

proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantCancellations(id, addresses)

proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlocksDelivery(id, blocksDelivery)

proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
proc sendPresence(
id: PeerId, presence: seq[BlockPresence]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlockPresence(id, presence)

proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
proc sendAccount(
id: PeerId, account: Account
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendAccount(id, account)

proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
proc sendPayment(
id: PeerId, payment: SignedState
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendPayment(id, payment)

self.request = BlockExcRequest(

@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/upraises
push:
{.upraises: [].}
{.push raises: [].}

import pkg/chronos
import pkg/libp2p
@ -18,67 +16,81 @@ import ../protobuf/blockexc
import ../protobuf/message
import ../../errors
import ../../logutils
import ../../utils/trackedfutures

logScope:
topics = "codex blockexcnetworkpeer"

type
ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.}
const DefaultYieldInterval = 50.millis

RPCHandler* = proc(peer: NetworkPeer, msg: Message): Future[void] {.gcsafe.}
type
ConnProvider* =
proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).}

RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).}

NetworkPeer* = ref object of RootObj
id*: PeerId
handler*: RPCHandler
sendConn: Connection
getConn: ConnProvider
yieldInterval*: Duration = DefaultYieldInterval
trackedFutures: TrackedFutures

proc connected*(b: NetworkPeer): bool =
not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof)
proc connected*(self: NetworkPeer): bool =
not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)

proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
if isNil(conn):
trace "No connection to read from", peer = self.id
return

trace "Attaching read loop", peer = self.id, connId = conn.oid
try:
var nextYield = Moment.now() + self.yieldInterval
while not conn.atEof or not conn.closed:
if Moment.now() > nextYield:
nextYield = Moment.now() + self.yieldInterval
trace "Yielding in read loop",
peer = self.id, nextYield = nextYield, interval = self.yieldInterval
await sleepAsync(10.millis)

let
data = await conn.readLp(MaxMessageSize.int)
msg = Message.protobufDecode(data).mapFailure().tryGet()
await b.handler(b, msg)
trace "Received message", peer = self.id, connId = conn.oid
await self.handler(self, msg)
except CancelledError:
trace "Read loop cancelled"
except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg
finally:
trace "Detaching read loop", peer = self.id, connId = conn.oid
await conn.close()

proc connect*(b: NetworkPeer): Future[Connection] {.async.} =
if b.connected:
return b.sendConn
proc connect*(
self: NetworkPeer
): Future[Connection] {.async: (raises: [CancelledError]).} =
if self.connected:
trace "Already connected", peer = self.id, connId = self.sendConn.oid
return self.sendConn

b.sendConn = await b.getConn()
asyncSpawn b.readLoop(b.sendConn)
return b.sendConn
self.sendConn = await self.getConn()
self.trackedFutures.track(self.readLoop(self.sendConn))
return self.sendConn

proc send*(b: NetworkPeer, msg: Message) {.async.} =
let conn = await b.connect()
proc send*(
self: NetworkPeer, msg: Message
) {.async: (raises: [CancelledError, LPStreamError]).} =
let conn = await self.connect()

if isNil(conn):
warn "Unable to get send connection for peer message not sent", peer = b.id
warn "Unable to get send connection for peer message not sent", peer = self.id
return

trace "Sending message", peer = self.id, connId = conn.oid
await conn.writeLp(protobufEncode(msg))

proc broadcast*(b: NetworkPeer, msg: Message) =
proc sendAwaiter() {.async.} =
try:
await b.send(msg)
except CatchableError as exc:
warn "Exception broadcasting message to peer", peer = b.id, exc = exc.msg

asyncSpawn sendAwaiter()

func new*(
T: type NetworkPeer,
peer: PeerId,
@ -87,4 +99,9 @@ func new*(
): NetworkPeer =
doAssert(not isNil(connProvider), "should supply connection provider")

NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler)
NetworkPeer(
id: peer,
getConn: connProvider,
handler: rpcHandler,
trackedFutures: TrackedFutures(),
)

@ -7,14 +7,12 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/sequtils
import std/tables
import std/algorithm

import pkg/upraises

push:
{.upraises: [].}
import std/sequtils

import pkg/chronos
import pkg/libp2p
@ -33,9 +31,7 @@ type
PeerCtxStore* = ref object of RootObj
peers*: OrderedTable[PeerId, BlockExcPeerCtx]

PeersForBlock* = object of RootObj
with*: seq[BlockExcPeerCtx]
without*: seq[BlockExcPeerCtx]
PeersForBlock* = tuple[with: seq[BlockExcPeerCtx], without: seq[BlockExcPeerCtx]]

iterator items*(self: PeerCtxStore): BlockExcPeerCtx =
for p in self.peers.values:
@ -47,6 +43,9 @@ proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =

a.anyIt(it.id == b)

func peerIds*(self: PeerCtxStore): seq[PeerId] =
toSeq(self.peers.keys)

func contains*(self: PeerCtxStore, peerId: PeerId): bool =
peerId in self.peers

@ -75,7 +74,7 @@ func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid))

proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res = PeersForBlock()
var res: PeersForBlock = (@[], @[])
for peer in self:
if peer.peerHave.anyIt(it == address):
res.with.add(peer)

@ -97,7 +97,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
pb.write(field, ipb)

proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
var ipb = initProtoBuffer(maxSize = MaxBlockSize)
var ipb = initProtoBuffer()
ipb.write(1, value.blk.cid.data.buffer)
ipb.write(2, value.blk.data)
ipb.write(3, value.address)
@ -128,7 +128,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
pb.write(field, ipb)

proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer(maxSize = MaxMessageSize)
var ipb = initProtoBuffer()
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v)
@ -254,16 +254,14 @@ proc decode*(
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
var
value = Message()
pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
pb = initProtoBuffer(msg)
ipb: ProtoBuffer
sublist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist):
for item in sublist:
value.payload.add(
?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))
)
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
if ?pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))

@ -1,8 +1,9 @@
{.push raises: [].}

import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import pkg/upraises
import ./blockexc

export AccountMessage
@ -11,9 +12,6 @@ export StateChannelUpdate
export stint
export nitro

push:
{.upraises: [].}

type Account* = object
address*: EthAddress


@ -1,8 +1,9 @@
{.push raises: [].}

import libp2p
import pkg/stint
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./blockexc

import ../../blocktype
@ -11,9 +12,6 @@ export questionable
export stint
export BlockPresenceType

upraises.push:
{.upraises: [].}

type
PresenceMessage* = blockexc.BlockPresence
Presence* = object

@ -28,8 +28,11 @@ const DefaultChunkSize* = DefaultBlockSize

type
# default reader type
ChunkerError* = object of CatchableError
ChunkBuffer* = ptr UncheckedArray[byte]
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].}
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
gcsafe, async: (raises: [ChunkerError, CancelledError])
.}

# Reader that splits input data into fixed-size chunks
Chunker* = ref object
@ -74,7 +77,7 @@ proc new*(

proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
var res = 0
try:
while res < len:
@ -85,7 +88,7 @@ proc new*(
raise error
except LPStreamError as error:
error "LPStream error", err = error.msg
raise error
raise newException(ChunkerError, "LPStream error", error)
except CatchableError as exc:
error "CatchableError exception", exc = exc.msg
raise newException(Defect, exc.msg)
@ -102,7 +105,7 @@ proc new*(

proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
var total = 0
try:
while total < len:

@ -40,5 +40,8 @@ proc toSecondsSince1970*(bytes: seq[byte]): SecondsSince1970 =
let asUint = uint64.fromBytes(bytes)
cast[int64](asUint)

proc toSecondsSince1970*(num: uint64): SecondsSince1970 =
cast[int64](num)

proc toSecondsSince1970*(bigint: UInt256): SecondsSince1970 =
bigint.truncate(int64)

@ -11,8 +11,10 @@ import std/sequtils
import std/strutils
import std/os
import std/tables
import std/cpuinfo

import pkg/chronos
import pkg/taskpools
import pkg/presto
import pkg/libp2p
import pkg/confutils
@ -107,7 +109,9 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
quit QuitFailure

let marketplace = Marketplace.new(marketplaceAddress, signer)
let market = OnChainMarket.new(marketplace, config.rewardRecipient)
let market = OnChainMarket.new(
marketplace, config.rewardRecipient, config.marketplaceRequestCacheSize
)
let clock = OnChainClock.new(provider)

var client: ?ClientInteractions
@ -130,6 +134,10 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
if config.simulateProofFailures > 0:
warn "Proof failure simulation is not enabled for this build! Configuration ignored"

if error =? (await market.loadConfig()).errorOption:
fatal "Cannot load market configuration", error = error.msg
quit QuitFailure

let purchasing = Purchasing.new(market, clock)
let sales = Sales.new(market, clock, repo, proofFailures)
client = some ClientInteractions.new(clock, purchasing)
@ -169,14 +177,20 @@ proc start*(s: CodexServer) {.async.} =
proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node"

await allFuturesThrowing(
s.restServer.stop(),
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
let res = await noCancel allFinishedFailed(
@[
s.restServer.stop(),
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
]
)

if res.failure.len > 0:
error "Failed to stop codex node", failures = res.failure.len
raiseAssert "Failed to stop codex node"

proc new*(
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
): CodexServer =
@ -194,7 +208,18 @@ proc new*(
.withTcpTransport({ServerFlags.ReuseAddr})
.build()

var cache: CacheStore = nil
var
cache: CacheStore = nil
taskpool: Taskpool

try:
if config.numThreads == ThreadCount(0):
taskpool = Taskpool.new(numThreads = min(countProcessors(), 16))
else:
taskpool = Taskpool.new(numThreads = int(config.numThreads))
info "Threadpool started", numThreads = taskpool.numThreads
except CatchableError as exc:
raiseAssert("Failure in taskpool initialization:" & exc.msg)

if config.cacheSize > 0'nb:
cache = CacheStore.new(cacheSize = config.cacheSize)
@ -286,6 +311,7 @@ proc new*(
engine = engine,
discovery = discovery,
prover = prover,
taskPool = taskpool,
)

restServer = RestServerRef
@ -295,7 +321,7 @@ proc new*(
bufferSize = (1024 * 64),
maxRequestBodySize = int.high,
)
.expect("Should start rest server!")
.expect("Should create rest server!")

switch.mount(network)


@ -44,14 +44,19 @@ import ./utils
import ./nat
import ./utils/natutils

from ./contracts/config import DefaultRequestCacheSize
from ./validationconfig import MaxSlots, ValidationGroups

export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots

export
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval,
DefaultNumberOfBlocksToMaintainPerInterval
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
DefaultRequestCacheSize

type ThreadCount* = distinct Natural

proc `==`*(a, b: ThreadCount): bool {.borrow.}

proc defaultDataDir*(): string =
let dataDir =
@ -71,6 +76,7 @@ const

DefaultDataDir* = defaultDataDir()
DefaultCircuitDir* = defaultDataDir() / "circuits"
DefaultThreadCount* = ThreadCount(0)

type
StartUpCmd* {.pure.} = enum
@ -184,6 +190,13 @@ type
name: "max-peers"
.}: int

numThreads* {.
desc:
"Number of worker threads (\"0\" = use as many threads as there are CPU cores available)",
defaultValue: DefaultThreadCount,
name: "num-threads"
.}: ThreadCount

agentString* {.
defaultValue: "Codex",
desc: "Node agent string which is used as identifier in network",
@ -238,15 +251,15 @@ type
desc:
"Time interval in seconds - determines frequency of block " &
"maintenance cycle: how often blocks are checked " & "for expiration and cleanup",
defaultValue: DefaultBlockMaintenanceInterval,
defaultValueDesc: $DefaultBlockMaintenanceInterval,
defaultValue: DefaultBlockInterval,
defaultValueDesc: $DefaultBlockInterval,
name: "block-mi"
.}: Duration

blockMaintenanceNumberOfBlocks* {.
desc: "Number of blocks to check every maintenance cycle",
defaultValue: DefaultNumberOfBlocksToMaintainPerInterval,
defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval,
defaultValue: DefaultNumBlocksPerInterval,
defaultValueDesc: $DefaultNumBlocksPerInterval,
name: "block-mn"
.}: int

@ -347,6 +360,16 @@ type
name: "reward-recipient"
.}: Option[EthAddress]

marketplaceRequestCacheSize* {.
desc:
"Maximum number of StorageRequests kept in memory." &
"Reduces fetching of StorageRequest data from the contract.",
defaultValue: DefaultRequestCacheSize,
defaultValueDesc: $DefaultRequestCacheSize,
name: "request-cache-size",
hidden
.}: uint16

case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
of PersistenceCmd.prover:
circuitDir* {.
@ -482,6 +505,13 @@ proc parseCmdArg*(
quit QuitFailure
ma

proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} =
let count = parseInt(input)
if count != 0 and count < 2:
warn "Invalid number of threads", input = input
quit QuitFailure
ThreadCount(count)

proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
var res: SignedPeerRecord
try:
@ -579,6 +609,15 @@ proc readValue*(
quit QuitFailure
val = NBytes(value)

proc readValue*(
r: var TomlReader, val: var ThreadCount
) {.upraises: [SerializationError, IOError].} =
var str = r.readValue(string)
try:
val = parseCmdArg(ThreadCount, str)
except CatchableError as err:
raise newException(SerializationError, err.msg)

proc readValue*(
r: var TomlReader, val: var Duration
) {.upraises: [SerializationError, IOError].} =
@ -609,6 +648,9 @@ proc completeCmdArg*(T: type NBytes, val: string): seq[string] =
proc completeCmdArg*(T: type Duration, val: string): seq[string] =
discard

proc completeCmdArg*(T: type ThreadCount, val: string): seq[string] =
discard

# silly chronicles, colors is a compile-time property
proc stripAnsi*(v: string): string =
var

@ -5,6 +5,7 @@ import pkg/chronos
import pkg/stint
import ../clock
import ../conf
import ../utils/trackedfutures

export clock

@ -18,9 +19,12 @@ type OnChainClock* = ref object of Clock
blockNumber: UInt256
started: bool
newBlock: AsyncEvent
trackedFutures: TrackedFutures

proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
OnChainClock(provider: provider, newBlock: newAsyncEvent())
OnChainClock(
provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
)

proc update(clock: OnChainClock, blck: Block) =
if number =? blck.number and number > clock.blockNumber:
@ -32,15 +36,12 @@ proc update(clock: OnChainClock, blck: Block) =
blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
clock.newBlock.fire()

proc update(clock: OnChainClock) {.async.} =
proc update(clock: OnChainClock) {.async: (raises: []).} =
try:
if latest =? (await clock.provider.getBlock(BlockTag.latest)):
clock.update(latest)
except CancelledError as error:
raise error
except CatchableError as error:
debug "error updating clock: ", error = error.msg
discard

method start*(clock: OnChainClock) {.async.} =
if clock.started:
@ -52,7 +53,7 @@ method start*(clock: OnChainClock) {.async.} =
return

# ignore block parameter; hardhat may call this with pending blocks
asyncSpawn clock.update()
clock.trackedFutures.track(clock.update())

await clock.update()

@ -64,6 +65,7 @@ method stop*(clock: OnChainClock) {.async.} =
return

await clock.subscription.unsubscribe()
await clock.trackedFutures.cancelTracked()
clock.started = false

method now*(clock: OnChainClock): SecondsSince1970 =

@ -4,47 +4,66 @@ import pkg/questionable/results

export contractabi

const DefaultRequestCacheSize* = 128.uint16

type
MarketplaceConfig* = object
collateral*: CollateralConfig
proofs*: ProofConfig
reservations*: SlotReservationsConfig
requestDurationLimit*: uint64

CollateralConfig* = object
repairRewardPercentage*: uint8
# percentage of remaining collateral slot has after it has been freed
maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
slashCriterion*: uint16 # amount of proofs missed that lead to slashing
slashPercentage*: uint8 # percentage of the collateral that is slashed
validatorRewardPercentage*: uint8
# percentage of the slashed amount going to the validators

ProofConfig* = object
period*: UInt256 # proofs requirements are calculated per period (in seconds)
timeout*: UInt256 # mark proofs as missing before the timeout (in seconds)
period*: uint64 # proofs requirements are calculated per period (in seconds)
timeout*: uint64 # mark proofs as missing before the timeout (in seconds)
downtime*: uint8 # ignore this much recent blocks for proof requirements
downtimeProduct*: uint8
zkeyHash*: string # hash of the zkey file which is linked to the verifier
# Ensures the pointer does not remain in downtime for many consecutive
# periods. For each period increase, move the pointer `pointerProduct`
# blocks. Should be a prime number to ensure there are no cycles.
downtimeProduct*: uint8

SlotReservationsConfig* = object
maxReservations*: uint8

func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
ProofConfig(
period: tupl[0],
timeout: tupl[1],
downtime: tupl[2],
zkeyHash: tupl[3],
downtimeProduct: tupl[4],
downtimeProduct: tupl[3],
zkeyHash: tupl[4],
)

func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig =
SlotReservationsConfig(maxReservations: tupl[0])

func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
CollateralConfig(
repairRewardPercentage: tupl[0],
maxNumberOfSlashes: tupl[1],
slashCriterion: tupl[2],
slashPercentage: tupl[3],
slashPercentage: tupl[2],
validatorRewardPercentage: tupl[3],
)

func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
MarketplaceConfig(collateral: tupl[0], proofs: tupl[1])
MarketplaceConfig(
collateral: tupl[0],
proofs: tupl[1],
reservations: tupl[2],
requestDurationLimit: tupl[3],
)

func solidityType*(_: type SlotReservationsConfig): string =
solidityType(SlotReservationsConfig.fieldTypes)

func solidityType*(_: type ProofConfig): string =
solidityType(ProofConfig.fieldTypes)
@ -53,7 +72,10 @@ func solidityType*(_: type CollateralConfig): string =
solidityType(CollateralConfig.fieldTypes)

func solidityType*(_: type MarketplaceConfig): string =
solidityType(CollateralConfig.fieldTypes)
solidityType(MarketplaceConfig.fieldTypes)

func encode*(encoder: var AbiEncoder, slot: SlotReservationsConfig) =
encoder.write(slot.fieldValues)

func encode*(encoder: var AbiEncoder, slot: ProofConfig) =
encoder.write(slot.fieldValues)
@ -68,6 +90,10 @@ func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T =
let tupl = ?decoder.read(ProofConfig.fieldTypes)
success ProofConfig.fromTuple(tupl)

func decode*(decoder: var AbiDecoder, T: type SlotReservationsConfig): ?!T =
let tupl = ?decoder.read(SlotReservationsConfig.fieldTypes)
success SlotReservationsConfig.fromTuple(tupl)

func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T =
let tupl = ?decoder.read(CollateralConfig.fieldTypes)
success CollateralConfig.fromTuple(tupl)

@ -18,9 +18,9 @@ const knownAddresses = {
# Taiko Alpha-3 Testnet
"167005":
{"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
# Codex Testnet - Nov 25 2024 18:41:29 PM (+00:00 UTC)
# Codex Testnet - Feb 25 2025 07:24:19 AM (+00:00 UTC)
"789987":
{"Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648")}.toTable,
{"Marketplace": Address.init("0xfFaF679D5Cbfdd5Dbc9Be61C616ed115DFb597ed")}.toTable,
}.toTable

proc getKnownAddress(T: type, chainId: UInt256): ?Address =

@ -1,7 +1,9 @@
import std/strformat
import std/strutils
import pkg/ethers
import pkg/upraises
import pkg/questionable
import pkg/lrucache
import ../utils/exceptions
import ../logutils
import ../market
@ -20,6 +22,7 @@ type
signer: Signer
rewardRecipient: ?Address
configuration: ?MarketplaceConfig
requestCache: LruCache[string, StorageRequest]

MarketSubscription = market.Subscription
EventSubscription = ethers.Subscription
@ -27,128 +30,199 @@ type
eventSubscription: EventSubscription

func new*(
_: type OnChainMarket, contract: Marketplace, rewardRecipient = Address.none
_: type OnChainMarket,
contract: Marketplace,
rewardRecipient = Address.none,
requestCacheSize: uint16 = DefaultRequestCacheSize,
): OnChainMarket =
without signer =? contract.signer:
raiseAssert("Marketplace contract should have a signer")

OnChainMarket(contract: contract, signer: signer, rewardRecipient: rewardRecipient)
var requestCache = newLruCache[string, StorageRequest](int(requestCacheSize))

OnChainMarket(
contract: contract,
signer: signer,
rewardRecipient: rewardRecipient,
requestCache: requestCache,
)

proc raiseMarketError(message: string) {.raises: [MarketError].} =
raise newException(MarketError, message)

template convertEthersError(body) =
func prefixWith(suffix, prefix: string, separator = ": "): string =
if prefix.len > 0:
return &"{prefix}{separator}{suffix}"
else:
return suffix

template convertEthersError(msg: string = "", body) =
try:
body
except EthersError as error:
raiseMarketError(error.msgDetail)
raiseMarketError(error.msgDetail.prefixWith(msg))

proc config(market: OnChainMarket): Future[MarketplaceConfig] {.async.} =
proc config(
market: OnChainMarket
): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
without resolvedConfig =? market.configuration:
let fetchedConfig = await market.contract.configuration()
market.configuration = some fetchedConfig
return fetchedConfig
if err =? (await market.loadConfig()).errorOption:
raiseMarketError(err.msg)

without config =? market.configuration:
raiseMarketError("Failed to access to config from the Marketplace contract")

return config

return resolvedConfig

proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
proc approveFunds(
market: OnChainMarket, amount: UInt256
) {.async: (raises: [CancelledError, MarketError]).} =
debug "Approving tokens", amount
convertEthersError:
convertEthersError("Failed to approve funds"):
let tokenAddress = await market.contract.token()
let token = Erc20Token.new(tokenAddress, market.signer)
discard await token.increaseAllowance(market.contract.address(), amount).confirm(1)

method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} =
method loadConfig*(
market: OnChainMarket
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
without config =? market.configuration:
let fetchedConfig = await market.contract.configuration()

market.configuration = some fetchedConfig

return success()
except EthersError as err:
return failure newException(
MarketError,
"Failed to fetch the config from the Marketplace contract: " & err.msg,
)

method getZkeyHash*(
market: OnChainMarket
): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
let config = await market.config()
return some config.proofs.zkeyHash

method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
convertEthersError:
method getSigner*(
market: OnChainMarket
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get signer address"):
return await market.signer.getAddress()

method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} =
convertEthersError:
method periodicity*(
market: OnChainMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
let period = config.proofs.period
return Periodicity(seconds: period)

method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} =
convertEthersError:
method proofTimeout*(
market: OnChainMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.timeout

method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} =
convertEthersError:
let config = await market.contract.configuration()
method repairRewardPercentage*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.collateral.repairRewardPercentage

method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} =
convertEthersError:
method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.requestDurationLimit

method proofDowntime*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.downtime

method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
convertEthersError:
convertEthersError("Failed to get slot pointer"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getPointer(slotId, overrides)

method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
convertEthersError:
convertEthersError("Failed to get my requests"):
return await market.contract.myRequests

method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
convertEthersError:
convertEthersError("Failed to get my slots"):
let slots = await market.contract.mySlots()
debug "Fetched my slots", numSlots = len(slots)

return slots

method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} =
convertEthersError:
method requestStorage(
market: OnChainMarket, request: StorageRequest
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to request storage"):
debug "Requesting storage"
await market.approveFunds(request.totalPrice())
discard await market.contract.requestStorage(request).confirm(1)

method getRequest*(
market: OnChainMarket, id: RequestId
): Future[?StorageRequest] {.async.} =
convertEthersError:
try:
return some await market.contract.getRequest(id)
except Marketplace_UnknownRequest:
return none StorageRequest
): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
try:
let key = $id

if key in market.requestCache:
return some market.requestCache[key]

let request = await market.contract.getRequest(id)
market.requestCache[key] = request
return some request
except Marketplace_UnknownRequest, KeyError:
warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
return none StorageRequest
except EthersError as e:
error "Cannot retrieve the request", error = e.msg
return none StorageRequest

method requestState*(
|
||||
market: OnChainMarket, requestId: RequestId
|
||||
): Future[?RequestState] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get request state"):
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return some await market.contract.requestState(requestId, overrides)
|
||||
except Marketplace_UnknownRequest:
|
||||
return none RequestState
|
||||
|
||||
method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} =
|
||||
convertEthersError:
|
||||
method slotState*(
|
||||
market: OnChainMarket, slotId: SlotId
|
||||
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.slotState(slotId, overrides)
|
||||
|
||||
method getRequestEnd*(
|
||||
market: OnChainMarket, id: RequestId
|
||||
): Future[SecondsSince1970] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get request end"):
|
||||
return await market.contract.requestEnd(id)
|
||||
|
||||
method requestExpiresAt*(
|
||||
market: OnChainMarket, id: RequestId
|
||||
): Future[SecondsSince1970] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get request expiry"):
|
||||
return await market.contract.requestExpiry(id)
|
||||
|
||||
method getHost(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: UInt256
|
||||
): Future[?Address] {.async.} =
|
||||
convertEthersError:
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get slot's host"):
|
||||
let slotId = slotId(requestId, slotIndex)
|
||||
let address = await market.contract.getHost(slotId)
|
||||
if address != Address.default:
|
||||
@ -158,12 +232,12 @@ method getHost(
|
||||
|
||||
method currentCollateral*(
|
||||
market: OnChainMarket, slotId: SlotId
|
||||
): Future[UInt256] {.async.} =
|
||||
convertEthersError:
|
||||
): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
|
||||
convertEthersError("Failed to get slot's current collateral"):
|
||||
return await market.contract.currentCollateral(slotId)
|
||||
|
||||
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get active slot"):
|
||||
try:
|
||||
return some await market.contract.getActiveSlot(slotId)
|
||||
except Marketplace_SlotIsFree:
|
||||
@ -172,45 +246,60 @@ method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.as
|
||||
method fillSlot(
|
||||
market: OnChainMarket,
|
||||
requestId: RequestId,
|
||||
slotIndex: UInt256,
|
||||
slotIndex: uint64,
|
||||
proof: Groth16Proof,
|
||||
collateral: UInt256,
|
||||
) {.async.} =
|
||||
convertEthersError:
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to fill slot"):
|
||||
logScope:
|
||||
requestId
|
||||
slotIndex
|
||||
|
||||
await market.approveFunds(collateral)
|
||||
trace "calling fillSlot on contract"
|
||||
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
|
||||
trace "fillSlot transaction completed"
|
||||
try:
|
||||
await market.approveFunds(collateral)
|
||||
trace "calling fillSlot on contract"
|
||||
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
|
||||
trace "fillSlot transaction completed"
|
||||
except Marketplace_SlotNotFree as parent:
|
||||
raise newException(
|
||||
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
|
||||
parent,
|
||||
)
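
fillSlot now maps the contract's Marketplace_SlotNotFree into the typed SlotStateMismatchError while passing the original as the parent argument. Nim's newException accepts an optional parent exception, so callers can still reach the root cause; a standalone illustration:

try:
  raise newException(ValueError, "low-level contract error")
except ValueError as parent:
  let wrapped = newException(IOError, "high-level failure", parent)
  assert wrapped.parent of ValueError # the original stays reachable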

method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
convertEthersError:
var freeSlot: Future[Confirmable]
if rewardRecipient =? market.rewardRecipient:
# If --reward-recipient specified, use it as the reward recipient, and use
# the SP's address as the collateral recipient
let collateralRecipient = await market.getSigner()
freeSlot = market.contract.freeSlot(
slotId,
rewardRecipient, # --reward-recipient
collateralRecipient,
) # SP's address
else:
# Otherwise, use the SP's address as both the reward and collateral
# recipient (the contract will use msg.sender for both)
freeSlot = market.contract.freeSlot(slotId)
method freeSlot*(
market: OnChainMarket, slotId: SlotId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to free slot"):
try:
var freeSlot: Future[Confirmable]
if rewardRecipient =? market.rewardRecipient:
# If --reward-recipient specified, use it as the reward recipient, and use
# the SP's address as the collateral recipient
let collateralRecipient = await market.getSigner()
freeSlot = market.contract.freeSlot(
slotId,
rewardRecipient, # --reward-recipient
collateralRecipient,
) # SP's address
else:
# Otherwise, use the SP's address as both the reward and collateral
# recipient (the contract will use msg.sender for both)
freeSlot = market.contract.freeSlot(slotId)

discard await freeSlot.confirm(1)
discard await freeSlot.confirm(1)
except Marketplace_SlotIsFree as parent:
raise newException(
SlotStateMismatchError, "Failed to free slot, slot is already free", parent
)

method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} =
convertEthersError:
method withdrawFunds(
market: OnChainMarket, requestId: RequestId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to withdraw funds"):
discard await market.contract.withdrawFunds(requestId).confirm(1)

method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError:
convertEthersError("Failed to get proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.isProofRequired(id, overrides)
@ -218,7 +307,7 @@ method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async
return false

method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError:
convertEthersError("Failed to get future proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.willProofBeRequired(id, overrides)
@ -228,18 +317,20 @@ method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.a
method getChallenge*(
market: OnChainMarket, id: SlotId
): Future[ProofChallenge] {.async.} =
convertEthersError:
convertEthersError("Failed to get proof challenge"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getChallenge(id, overrides)

method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} =
convertEthersError:
method submitProof*(
market: OnChainMarket, id: SlotId, proof: Groth16Proof
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to submit proof"):
discard await market.contract.submitProof(id, proof).confirm(1)

method markProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
) {.async.} =
convertEthersError:
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to mark proof as missing"):
discard await market.contract.markProofAsMissing(id, period).confirm(1)

method canProofBeMarkedAsMissing*(
@ -256,22 +347,28 @@ method canProofBeMarkedAsMissing*(
return false

method reserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: UInt256
) {.async.} =
convertEthersError:
discard await market.contract
.reserveSlot(
requestId,
slotIndex,
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it
TransactionOverrides(gasLimit: some 100000.u256),
)
.confirm(1)
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to reserve slot"):
try:
discard await market.contract
.reserveSlot(
requestId,
slotIndex,
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it
TransactionOverrides(gasLimit: some 100000.u256),
)
.confirm(1)
except SlotReservations_ReservationNotAllowed:
raise newException(
SlotReservationNotAllowedError,
"Failed to reserve slot because reservation is not allowed",
)

method canReserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: UInt256
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async.} =
convertEthersError:
convertEthersError("Unable to determine if slot can be reserved"):
return await market.contract.canReserveSlot(requestId, slotIndex)

method subscribeRequests*(
@ -284,7 +381,7 @@ method subscribeRequests*(

callback(event.requestId, event.ask, event.expiry)

convertEthersError:
convertEthersError("Failed to subscribe to StorageRequested events"):
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

@ -298,21 +395,21 @@ method subscribeSlotFilled*(

callback(event.requestId, event.slotIndex)

convertEthersError:
convertEthersError("Failed to subscribe to SlotFilled events"):
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

method subscribeSlotFilled*(
market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256,
slotIndex: uint64,
callback: OnSlotFilled,
): Future[MarketSubscription] {.async.} =
proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) =
proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: uint64) =
if eventRequestId == requestId and eventSlotIndex == slotIndex:
callback(requestId, slotIndex)

convertEthersError:
convertEthersError("Failed to subscribe to SlotFilled events"):
return await market.subscribeSlotFilled(onSlotFilled)

method subscribeSlotFreed*(
@ -325,7 +422,7 @@ method subscribeSlotFreed*(

callback(event.requestId, event.slotIndex)

convertEthersError:
convertEthersError("Failed to subscribe to SlotFreed events"):
let subscription = await market.contract.subscribe(SlotFreed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

@ -340,7 +437,7 @@ method subscribeSlotReservationsFull*(

callback(event.requestId, event.slotIndex)

convertEthersError:
convertEthersError("Failed to subscribe to SlotReservationsFull events"):
let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

@ -354,7 +451,7 @@ method subscribeFulfillment(

callback(event.requestId)

convertEthersError:
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

@ -369,7 +466,7 @@ method subscribeFulfillment(
if event.requestId == requestId:
callback(event.requestId)

convertEthersError:
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

@ -383,7 +480,7 @@ method subscribeRequestCancelled*(

callback(event.requestId)

convertEthersError:
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

@ -398,7 +495,7 @@ method subscribeRequestCancelled*(
if event.requestId == requestId:
callback(event.requestId)

convertEthersError:
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

@ -412,7 +509,7 @@ method subscribeRequestFailed*(

callback(event.requestId)

convertEthersError:
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

@ -427,7 +524,7 @@ method subscribeRequestFailed*(
if event.requestId == requestId:
callback(event.requestId)

convertEthersError:
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

@ -441,7 +538,7 @@ method subscribeProofSubmission*(

callback(event.id)

convertEthersError:
convertEthersError("Failed to subscribe to ProofSubmitted events"):
let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)

@ -451,13 +548,13 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past SlotFilled events from block"):
return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)

method queryPastSlotFilledEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past SlotFilled events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)

return await market.queryPastSlotFilledEvents(fromBlock)
@ -465,21 +562,58 @@ method queryPastSlotFilledEvents*(
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past SlotFilled events from time"):
let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))

method queryPastStorageRequestedEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past StorageRequested events from block"):
return
await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)

method queryPastStorageRequestedEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past StorageRequested events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)

return await market.queryPastStorageRequestedEvents(fromBlock)

method slotCollateral*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
let slotid = slotId(requestId, slotIndex)

try:
let slotState = await market.slotState(slotid)

without request =? await market.getRequest(requestId):
return failure newException(
MarketError, "Failure calculating the slotCollateral, cannot get the request"
)

return market.slotCollateral(request.ask.collateralPerSlot, slotState)
except MarketError as error:
error "Error when trying to calculate the slotCollateral", error = error.msg
return failure error

method slotCollateral*(
market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.raises: [].} =
if slotState == SlotState.Repair:
without repairRewardPercentage =?
market.configuration .? collateral .? repairRewardPercentage:
return failure newException(
MarketError,
"Failure calculating the slotCollateral, cannot get the reward percentage",
)

return success (
collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(
100.u256
)
)

return success(collateralPerSlot)
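
A worked example of the Repair branch above, with illustrative values: for collateralPerSlot = 1000 and repairRewardPercentage = 10, the collateral needed to fill a repaired slot is reduced by the share already promised as the repair reward:

let collateralPerSlot = 1000.u256
let repairRewardPercentage = 10'u8
let reduced =
  collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(100.u256)
assert reduced == 900.u256 # 1000 - (1000 * 10) div 100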

@ -42,6 +42,7 @@ type
Marketplace_InsufficientCollateral* = object of SolidityError
Marketplace_InsufficientReward* = object of SolidityError
Marketplace_InvalidCid* = object of SolidityError
Marketplace_DurationExceedsLimit* = object of SolidityError
Proofs_InsufficientBlockHeight* = object of SolidityError
Proofs_InvalidProof* = object of SolidityError
Proofs_ProofAlreadySubmitted* = object of SolidityError
@ -50,8 +51,8 @@ type
Proofs_ProofNotMissing* = object of SolidityError
Proofs_ProofNotRequired* = object of SolidityError
Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
Proofs_InvalidProbability* = object of SolidityError
Periods_InvalidSecondsPerPeriod* = object of SolidityError
SlotReservations_ReservationNotAllowed* = object of SolidityError

proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.}
@ -59,10 +60,6 @@ proc currentCollateral*(
marketplace: Marketplace, id: SlotId
): UInt256 {.contract, view.}

proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.}
proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.}
proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.}

proc requestStorage*(
marketplace: Marketplace, request: StorageRequest
): Confirmable {.
@ -70,15 +67,14 @@ proc requestStorage*(
errors: [
Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
Marketplace_InvalidMaxSlotLoss,
Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration,
Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral,
Marketplace_InsufficientReward, Marketplace_InvalidCid,
]
.}

proc fillSlot*(
marketplace: Marketplace,
requestId: RequestId,
slotIndex: UInt256,
proof: Groth16Proof,
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64, proof: Groth16Proof
): Confirmable {.
contract,
errors: [
@ -154,9 +150,6 @@ proc requestExpiry*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}

proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.}

proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
@ -175,7 +168,7 @@ proc submitProof*(
.}

proc markProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: UInt256
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
@ -186,9 +179,9 @@ proc markProofAsMissing*(
.}

proc reserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): Confirmable {.contract.}

proc canReserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): bool {.contract, view.}

@ -14,7 +14,7 @@ proc raiseProviderError(message: string) {.raises: [ProviderError].} =

proc blockNumberAndTimestamp*(
provider: Provider, blockTag: BlockTag
): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} =
): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError, CancelledError]).} =
without latestBlock =? await provider.getBlock(blockTag):
raiseProviderError("Could not get latest block")

@ -25,7 +25,7 @@ proc blockNumberAndTimestamp*(

proc binarySearchFindClosestBlock(
provider: Provider, epochTime: int, low: UInt256, high: UInt256
): Future[UInt256] {.async: (raises: [ProviderError]).} =
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low))
let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high))
if abs(lowTimestamp.truncate(int) - epochTime) <
@ -39,7 +39,7 @@ proc binarySearchBlockNumberForEpoch(
epochTime: UInt256,
latestBlockNumber: UInt256,
earliestBlockNumber: UInt256,
): Future[UInt256] {.async: (raises: [ProviderError]).} =
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
var low = earliestBlockNumber
var high = latestBlockNumber

@ -65,7 +65,7 @@ proc binarySearchBlockNumberForEpoch(

proc blockNumberForEpoch*(
provider: Provider, epochTime: SecondsSince1970
): Future[UInt256] {.async: (raises: [ProviderError]).} =
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
let epochTimeUInt256 = epochTime.u256
let (latestBlockNumber, latestBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.latest)
@ -118,6 +118,6 @@ proc blockNumberForEpoch*(

proc pastBlockTag*(
provider: Provider, blocksAgo: int
): Future[BlockTag] {.async: (raises: [ProviderError]).} =
): Future[BlockTag] {.async: (raises: [ProviderError, CancelledError]).} =
let head = await provider.getBlockNumber()
return BlockTag.init(head - blocksAgo.abs.u256)
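
blockNumberForEpoch finds the block whose timestamp is closest to a given epoch by binary-searching block numbers, relying on timestamps increasing monotonically with height. A simplified sketch over plain ints (the real code works on UInt256 and awaits the provider for each midpoint's timestamp):

proc findBlock(timestampOf: proc(n: int): int, epoch, first, last: int): int =
  var (lo, hi) = (first, last)
  while lo < hi:
    let mid = (lo + hi) div 2
    if timestampOf(mid) < epoch:
      lo = mid + 1 # target lies above the midpoint
    else:
      hi = mid # midpoint is at or past the target
  lo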

@ -6,8 +6,11 @@ import pkg/nimcrypto
import pkg/ethers/fields
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/libp2p/[cid, multicodec]
import ../logutils
import ../utils/json
import ../clock
from ../errors import mapFailure

export contractabi

@ -16,25 +19,25 @@ type
client* {.serialize.}: Address
ask* {.serialize.}: StorageAsk
content* {.serialize.}: StorageContent
expiry* {.serialize.}: UInt256
expiry* {.serialize.}: uint64
nonce*: Nonce

StorageAsk* = object
slots* {.serialize.}: uint64
slotSize* {.serialize.}: UInt256
duration* {.serialize.}: UInt256
proofProbability* {.serialize.}: UInt256
pricePerBytePerSecond* {.serialize.}: UInt256
collateralPerByte* {.serialize.}: UInt256
slots* {.serialize.}: uint64
slotSize* {.serialize.}: uint64
duration* {.serialize.}: uint64
maxSlotLoss* {.serialize.}: uint64

StorageContent* = object
cid* {.serialize.}: string
cid* {.serialize.}: Cid
merkleRoot*: array[32, byte]

Slot* = object
request* {.serialize.}: StorageRequest
slotIndex* {.serialize.}: UInt256
slotIndex* {.serialize.}: uint64

SlotId* = distinct array[32, byte]
RequestId* = distinct array[32, byte]
@ -108,18 +111,21 @@ func fromTuple(_: type Slot, tupl: tuple): Slot =

func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
StorageAsk(
slots: tupl[0],
slotSize: tupl[1],
duration: tupl[2],
proofProbability: tupl[3],
pricePerBytePerSecond: tupl[4],
collateralPerByte: tupl[5],
proofProbability: tupl[0],
pricePerBytePerSecond: tupl[1],
collateralPerByte: tupl[2],
slots: tupl[3],
slotSize: tupl[4],
duration: tupl[5],
maxSlotLoss: tupl[6],
)

func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
StorageContent(cid: tupl[0], merkleRoot: tupl[1])

func solidityType*(_: type Cid): string =
solidityType(seq[byte])

func solidityType*(_: type StorageContent): string =
solidityType(StorageContent.fieldTypes)

@ -129,6 +135,10 @@ func solidityType*(_: type StorageAsk): string =
func solidityType*(_: type StorageRequest): string =
solidityType(StorageRequest.fieldTypes)

# Note: it seems to be ok to ignore the vbuffer offset for now
func encode*(encoder: var AbiEncoder, cid: Cid) =
encoder.write(cid.data.buffer)

func encode*(encoder: var AbiEncoder, content: StorageContent) =
encoder.write(content.fieldValues)

@ -141,8 +151,12 @@ func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) =
func encode*(encoder: var AbiEncoder, request: StorageRequest) =
encoder.write(request.fieldValues)

func encode*(encoder: var AbiEncoder, request: Slot) =
encoder.write(request.fieldValues)
func encode*(encoder: var AbiEncoder, slot: Slot) =
encoder.write(slot.fieldValues)

func decode*(decoder: var AbiDecoder, T: type Cid): ?!T =
let data = ?decoder.read(seq[byte])
Cid.init(data).mapFailure

func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T =
let tupl = ?decoder.read(StorageContent.fieldTypes)
@ -164,21 +178,21 @@ func id*(request: StorageRequest): RequestId =
let encoding = AbiEncoder.encode((request,))
RequestId(keccak256.digest(encoding).data)

func slotId*(requestId: RequestId, slotIndex: UInt256): SlotId =
func slotId*(requestId: RequestId, slotIndex: uint64): SlotId =
let encoding = AbiEncoder.encode((requestId, slotIndex))
SlotId(keccak256.digest(encoding).data)

func slotId*(request: StorageRequest, slotIndex: UInt256): SlotId =
func slotId*(request: StorageRequest, slotIndex: uint64): SlotId =
slotId(request.id, slotIndex)

func id*(slot: Slot): SlotId =
slotId(slot.request, slot.slotIndex)

func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 =
ask.pricePerBytePerSecond * ask.slotSize
ask.pricePerBytePerSecond * ask.slotSize.u256

func pricePerSlot*(ask: StorageAsk): UInt256 =
ask.duration * ask.pricePerSlotPerSecond
ask.duration.u256 * ask.pricePerSlotPerSecond

func totalPrice*(ask: StorageAsk): UInt256 =
ask.slots.u256 * ask.pricePerSlot
@ -187,7 +201,7 @@ func totalPrice*(request: StorageRequest): UInt256 =
request.ask.totalPrice

func collateralPerSlot*(ask: StorageAsk): UInt256 =
ask.collateralPerByte * ask.slotSize
ask.collateralPerByte * ask.slotSize.u256

func size*(ask: StorageAsk): UInt256 =
ask.slots.u256 * ask.slotSize
func size*(ask: StorageAsk): uint64 =
ask.slots * ask.slotSize
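
With slotSize and duration now uint64, the price helpers convert to UInt256 before multiplying. The quantities compose linearly; for example, with pricePerBytePerSecond = 2, slotSize = 1024 bytes, duration = 60 seconds and slots = 3 (illustrative numbers only):

let pricePerSlotPerSecond = 2.u256 * 1024.u256 # 2048
let pricePerSlot = 60.u256 * pricePerSlotPerSecond # 122_880
let totalPrice = 3.u256 * pricePerSlot # 368_640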

@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/algorithm
import std/sequtils

@ -54,70 +56,122 @@ proc toNodeId*(host: ca.Address): NodeId =

readUintBE[256](keccak256.digest(host.toArray).data)

proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
proc findPeer*(
d: Discovery, peerId: PeerId
): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
trace "protocol.resolve..."
## Find peer using the given Discovery object
##
let node = await d.protocol.resolve(toNodeId(peerId))

return
if node.isSome():
node.get().record.data.some
else:
PeerRecord.none
try:
let node = await d.protocol.resolve(toNodeId(peerId))

method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
return
if node.isSome():
node.get().record.data.some
else:
PeerRecord.none
except CancelledError as exc:
warn "Error finding peer", peerId = peerId, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding peer", peerId = peerId, exc = exc.msg

return PeerRecord.none

method find*(
d: Discovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
## Find block providers
##
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
warn "Error finding providers for block", cid, error = error.msg

return providers.filterIt(not (it.data.peerId == d.peerId))
try:
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure,
error:
warn "Error finding providers for block", cid, error = error.msg

method provide*(d: Discovery, cid: Cid) {.async, base.} =
return providers.filterIt(not (it.data.peerId == d.peerId))
except CancelledError as exc:
warn "Error finding providers for block", cid, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding providers for block", cid, exc = exc.msg

method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} =
## Provide a block Cid
##
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
try:
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)

if nodes.len <= 0:
warn "Couldn't provide to any nodes!"
if nodes.len <= 0:
warn "Couldn't provide to any nodes!"
except CancelledError as exc:
warn "Error providing block", cid, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error providing block", cid, exc = exc.msg

method find*(
d: Discovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async, base.} =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
## Find host providers
##

trace "Finding providers for host", host = $host
without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
error:
trace "Error finding providers for host", host = $host, exc = error.msg
return
try:
trace "Finding providers for host", host = $host
without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
error:
trace "Error finding providers for host", host = $host, exc = error.msg
return

if providers.len <= 0:
trace "No providers found", host = $host
return
if providers.len <= 0:
trace "No providers found", host = $host
return

providers.sort do(a, b: SignedPeerRecord) -> int:
system.cmp[uint64](a.data.seqNo, b.data.seqNo)
providers.sort do(a, b: SignedPeerRecord) -> int:
system.cmp[uint64](a.data.seqNo, b.data.seqNo)

return providers
return providers
except CancelledError as exc:
warn "Error finding providers for host", host = $host, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding providers for host", host = $host, exc = exc.msg

method provide*(d: Discovery, host: ca.Address) {.async, base.} =
method provide*(
d: Discovery, host: ca.Address
) {.async: (raises: [CancelledError]), base.} =
## Provide hosts
##

trace "Providing host", host = $host
let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
if nodes.len > 0:
trace "Provided to nodes", nodes = nodes.len
try:
trace "Providing host", host = $host
let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
if nodes.len > 0:
trace "Provided to nodes", nodes = nodes.len
except CancelledError as exc:
warn "Error providing host", host = $host, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error providing host", host = $host, exc = exc.msg

method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} =
method removeProvider*(
d: Discovery, peerId: PeerId
): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} =
## Remove provider from providers table
##

trace "Removing provider", peerId
d.protocol.removeProvidersLocal(peerId)
try:
await d.protocol.removeProvidersLocal(peerId)
except CancelledError as exc:
warn "Error removing provider", peerId = peerId, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error removing provider", peerId = peerId, exc = exc.msg
except Exception as exc: # Something in discv5 is raising Exception
warn "Error removing provider", peerId = peerId, exc = exc.msg
raiseAssert("Unexpected Exception in removeProvider")

proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update providers record
@ -125,7 +179,7 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =

d.announceAddrs = @addrs

trace "Updating announce record", addrs = d.announceAddrs
info "Updating announce record", addrs = d.announceAddrs
d.providerRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
.expect("Should construct signed record").some
@ -137,7 +191,7 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update providers record
##

trace "Updating Dht record", addrs = addrs
info "Updating Dht record", addrs = addrs
d.dhtRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, @addrs))
.expect("Should construct signed record").some
@ -145,12 +199,18 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
if not d.protocol.isNil:
d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")

proc start*(d: Discovery) {.async.} =
d.protocol.open()
await d.protocol.start()
proc start*(d: Discovery) {.async: (raises: []).} =
try:
d.protocol.open()
await d.protocol.start()
except CatchableError as exc:
error "Error starting discovery", exc = exc.msg

proc stop*(d: Discovery) {.async.} =
await d.protocol.closeWait()
proc stop*(d: Discovery) {.async: (raises: []).} =
try:
await noCancel d.protocol.closeWait()
except CatchableError as exc:
error "Error stopping discovery", exc = exc.msg

proc new*(
T: type Discovery,

@ -29,14 +29,18 @@ method release*(self: ErasureBackend) {.base, gcsafe.} =
raiseAssert("not implemented!")

method encode*(
self: EncoderBackend, buffers, parity: var openArray[seq[byte]]
self: EncoderBackend,
buffers, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen: int,
): Result[void, cstring] {.base, gcsafe.} =
## encode buffers using a backend
##
raiseAssert("not implemented!")

method decode*(
self: DecoderBackend, buffers, parity, recovered: var openArray[seq[byte]]
self: DecoderBackend,
buffers, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] {.base, gcsafe.} =
## decode buffers using a backend
##

@ -10,7 +10,7 @@
import std/options

import pkg/leopard
import pkg/stew/results
import pkg/results

import ../backend

@ -22,11 +22,13 @@ type
decoder*: Option[LeoDecoder]

method encode*(
self: LeoEncoderBackend, data, parity: var openArray[seq[byte]]
self: LeoEncoderBackend,
data, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen: int,
): Result[void, cstring] =
## Encode data using Leopard backend

if parity.len == 0:
if parityLen == 0:
return ok()

var encoder =
@ -36,10 +38,12 @@ method encode*(
else:
self.encoder.get()

encoder.encode(data, parity)
encoder.encode(data, parity, dataLen, parityLen)

method decode*(
self: LeoDecoderBackend, data, parity, recovered: var openArray[seq[byte]]
self: LeoDecoderBackend,
data, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] =
## Decode data using given Leopard backend

@ -50,7 +54,7 @@ method decode*(
else:
self.decoder.get()

decoder.decode(data, parity, recovered)
decoder.decode(data, parity, recovered, dataLen, parityLen, recoveredLen)

method release*(self: LeoEncoderBackend) =
if self.encoder.isSome:

@ -12,12 +12,14 @@ import pkg/upraises
push:
{.upraises: [].}

import std/sequtils
import std/sugar
import std/[sugar, atomics, sequtils]

import pkg/chronos
import pkg/chronos/threadsync
import pkg/chronicles
import pkg/libp2p/[multicodec, cid, multihash]
import pkg/libp2p/protobuf/minprotobuf
import pkg/taskpools

import ../logutils
import ../manifest
@ -28,6 +30,7 @@ import ../utils
import ../utils/asynciter
import ../indexingstrategy
import ../errors
import ../utils/arrayutils

import pkg/stew/byteutils

@ -68,6 +71,7 @@ type
proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.}

Erasure* = ref object
taskPool: Taskpool
encoderProvider*: EncoderProvider
decoderProvider*: DecoderProvider
store*: BlockStore
@ -87,6 +91,24 @@ type
# provided.
minSize*: NBytes

EncodeTask = object
success: Atomic[bool]
erasure: ptr Erasure
blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
blockSize, blocksLen, parityLen: int
signal: ThreadSignalPtr

DecodeTask = object
success: Atomic[bool]
erasure: ptr Erasure
blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
recovered: ptr UncheckedArray[ptr UncheckedArray[byte]]
blockSize, blocksLen: int
parityLen, recoveredLen: int
signal: ThreadSignalPtr

func indexToPos(steps, idx, step: int): int {.inline.} =
## Convert an index to a position in the encoded
## dataset
@ -269,6 +291,73 @@ proc init*(
strategy: strategy,
)

proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
# Task suitable for running in taskpools - look, no GC!
let encoder =
task[].erasure.encoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
encoder.release()
discard task[].signal.fireSync()

if (
let res =
encoder.encode(task[].blocks, task[].parity, task[].blocksLen, task[].parityLen)
res.isErr
):
warn "Error from leopard encoder backend!", error = $res.error

task[].success.store(false)
else:
task[].success.store(true)

proc asyncEncode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks: ref seq[seq[byte]],
parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")

defer:
threadPtr.close().expect("closing once works")

var data = makeUncheckedArray(blocks)

defer:
dealloc(data)

## Create an encode task with block data
var task = EncodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
blocks: data,
parity: parity,
signal: threadPtr,
)

let t = addr task

doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardEncodeTask(self.taskPool, t)
let threadFut = threadPtr.wait()

if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)

if not t.success.load():
return failure("Leopard encoding failed")

success()
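
asyncEncode moves the CPU-bound Leopard call onto a Taskpool thread and suspends on a ThreadSignalPtr until the worker fires it, so the chronos event loop stays responsive. The bare offload pattern, stripped of the erasure specifics (illustrative names; error handling trimmed):

import pkg/chronos, pkg/chronos/threadsync, pkg/taskpools

type Job = object
  signal: ThreadSignalPtr

proc work(job: ptr Job) {.gcsafe.} =
  # CPU-bound work goes here; no GC-managed captures may cross the thread
  discard job[].signal.fireSync()

proc runOffloaded(tp: Taskpool) {.async.} =
  let signal = ThreadSignalPtr.new().expect("signal created")
  defer:
    signal.close().expect("signal closed")
  var job = Job(signal: signal)
  tp.spawn work(addr job)
  await signal.wait() # resumes once the worker fires the signal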

proc encodeData(
self: Erasure, manifest: Manifest, params: EncodingParams
): Future[?!Manifest] {.async.} =
@ -276,7 +365,6 @@ proc encodeData(
##
## `manifest` - the manifest to encode
##

logScope:
steps = params.steps
rounded_blocks = params.rounded
@ -286,7 +374,6 @@ proc encodeData(

var
cids = seq[Cid].new()
encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM)
emptyBlock = newSeq[byte](manifest.blockSize.int)

cids[].setLen(params.blocksCount)
@ -296,8 +383,7 @@ proc encodeData(
# TODO: Don't allocate a new seq every time, allocate once and zero out
var
data = seq[seq[byte]].new() # number of blocks to encode
parityData =
newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int))
parity = createDoubleArray(params.ecM, manifest.blockSize.int)

data[].setLen(params.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow
@ -311,15 +397,25 @ proc encodeData(
trace "Unable to prepare data", error = err.msg
return failure(err)

trace "Erasure coding data", data = data[].len, parity = parityData.len
trace "Erasure coding data", data = data[].len

if (let res = encoder.encode(data[], parityData); res.isErr):
trace "Unable to encode manifest!", error = $res.error
return failure($res.error)
try:
if err =? (
await self.asyncEncode(
manifest.blockSize.int, params.ecK, params.ecM, data, parity
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
finally:
freeDoubleArray(parity, params.ecM)

var idx = params.rounded + step
for j in 0 ..< params.ecM:
without blk =? bt.Block.new(parityData[j]), error:
var innerPtr: ptr UncheckedArray[byte] = parity[][j]
without blk =? bt.Block.new(innerPtr.toOpenArray(0, manifest.blockSize.int - 1)),
error:
trace "Unable to create parity block", err = error.msg
return failure(error)

@ -356,8 +452,6 @@ proc encodeData(
except CatchableError as exc:
trace "Erasure coding encoding error", exc = exc.msg
return failure(exc)
finally:
encoder.release()

proc encode*(
self: Erasure,
@ -381,6 +475,83 @@ proc encode*(

return success encodedManifest

proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
# Task suitable for running in taskpools - look, no GC!
let decoder =
task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
decoder.release()
discard task[].signal.fireSync()

if (
let res = decoder.decode(
task[].blocks,
task[].parity,
task[].recovered,
task[].blocksLen,
task[].parityLen,
task[].recoveredLen,
)
res.isErr
):
warn "Error from leopard decoder backend!", error = $res.error
task[].success.store(false)
else:
task[].success.store(true)

proc asyncDecode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks, parity: ref seq[seq[byte]],
recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")

defer:
threadPtr.close().expect("closing once works")

var
blockData = makeUncheckedArray(blocks)
parityData = makeUncheckedArray(parity)

defer:
dealloc(blockData)
dealloc(parityData)

## Create a decode task with block data
var task = DecodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
recoveredLen: blocksLen,
blocks: blockData,
parity: parityData,
recovered: recovered,
signal: threadPtr,
)

# Hold the task pointer until the signal is received
let t = addr task
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardDecodeTask(self.taskPool, t)
let threadFut = threadPtr.wait()

if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)

if not t.success.load():
return failure("Leopard decoding failed")

success()

proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into its original
## manifest
@ -388,7 +559,6 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## `encoded` - the encoded (protected) manifest to
## be recovered
##

logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
@ -411,8 +581,7 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
var
data = seq[seq[byte]].new()
parityData = seq[seq[byte]].new()
recovered =
newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int))
recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)

data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M
@ -430,15 +599,26 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
continue

trace "Erasure decoding data"

if (let err = decoder.decode(data[], parityData[], recovered); err.isErr):
trace "Unable to decode data!", err = $err.error
return failure($err.error)
try:
if err =? (
await self.asyncDecode(
encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
finally:
freeDoubleArray(recovered, encoded.ecK)

for i in 0 ..< encoded.ecK:
let idx = i * encoded.steps + step
if data[i].len <= 0 and not cids[idx].isEmpty:
without blk =? bt.Block.new(recovered[i]), error:
var innerPtr: ptr UncheckedArray[byte] = recovered[][i]

without blk =? bt.Block.new(
innerPtr.toOpenArray(0, encoded.blockSize.int - 1)
), error:
trace "Unable to create block!", exc = error.msg
return failure(error)

@ -490,10 +670,13 @@ proc new*(
store: BlockStore,
encoderProvider: EncoderProvider,
decoderProvider: DecoderProvider,
taskPool: Taskpool,
): Erasure =
## Create a new Erasure instance for encoding and decoding manifests
##

Erasure(
store: store, encoderProvider: encoderProvider, decoderProvider: decoderProvider
store: store,
encoderProvider: encoderProvider,
decoderProvider: decoderProvider,
taskPool: taskPool,
)

@ -9,7 +9,7 @@

import std/options

import pkg/stew/results
import pkg/results
import pkg/chronos
import pkg/questionable/results

@ -19,6 +19,8 @@ type
CodexError* = object of CatchableError # base codex error
CodexResult*[T] = Result[T, ref CodexError]

FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]]

template mapFailure*[T, V, E](
exp: Result[T, V], exc: typedesc[E]
): Result[T, ref CatchableError] =
@ -40,35 +42,18 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
else:
T.failure("Option is None")

# allFuturesThrowing was moved to the tests in libp2p
proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
var futs: seq[Future[T]]
for fut in args:
futs &= fut
proc call() {.async.} =
var first: ref CatchableError = nil
futs = await allFinished(futs)
for fut in futs:
if fut.failed:
let err = fut.readError()
if err of Defect:
raise err
else:
if err of CancelledError:
raise err
if isNil(first):
first = err
if not isNil(first):
raise first
proc allFinishedFailed*[T](futs: seq[Future[T]]): Future[FinishedFailed[T]] {.async.} =
## Check if all futures have finished or failed
##
## TODO: wip, not sure if we want this - at the minimum,
## we should probably avoid the async transform

return call()
var res: FinishedFailed[T] = (@[], @[])
await allFutures(futs)
for f in futs:
if f.failed:
res.failure.add f
else:
res.success.add f

proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} =
try:
await allFuturesThrowing(fut)
except CancelledError as exc:
raise exc
except CatchableError as exc:
return failure(exc.msg)

return success()
return res
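
allFinishedFailed replaces the raising allFuturesThrowing with a partition: it waits for every future, then hands back the succeeded and failed ones separately so the caller decides what failure means. Hypothetical usage (fetchBlock stands in for any Future-returning call):

let (succeeded, failed) = await allFinishedFailed(@[fetchBlock(a), fetchBlock(b)])
if failed.len > 0:
  warn "some fetches failed", failures = failed.len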
|
||||
|
||||
@ -152,7 +152,7 @@ proc formatTextLineSeq*(val: seq[string]): string =
|
||||
template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
|
||||
# Provides formatters for logging with Chronicles for the given type and
|
||||
# `LogFormat`.
|
||||
# NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overriddden
|
||||
# NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overridden
# since the base `setProperty` is generic using `auto` and conflicts with
# providing a generic `seq` and `Option` override.
when format == LogFormat.json:

@ -63,7 +63,6 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
  # optional ErasureInfo erasure = 7; # erasure coding info
  # optional filename: ?string = 8; # original filename
  # optional mimetype: ?string = 9; # original mimetype
  # optional uploadedAt: ?int64 = 10; # original uploadedAt
  # }
  # ```
  #
@ -102,9 +101,6 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
  if manifest.mimetype.isSome:
    header.write(9, manifest.mimetype.get())

  if manifest.uploadedAt.isSome:
    header.write(10, manifest.uploadedAt.get().uint64)

  pbNode.write(1, header) # set the treeCid as the data field
  pbNode.finish()

@ -135,7 +131,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
    verifiableStrategy: uint32
    filename: string
    mimetype: string
    uploadedAt: uint64

  # Decode `Header` message
  if pbNode.getField(1, pbHeader).isErr:
@ -169,9 +164,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
  if pbHeader.getField(9, mimetype).isErr:
    return failure("Unable to decode `mimetype` from manifest!")

  if pbHeader.getField(10, uploadedAt).isErr:
    return failure("Unable to decode `uploadedAt` from manifest!")

  let protected = pbErasureInfo.buffer.len > 0
  var verifiable = false
  if protected:
@ -211,7 +203,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =

  var filenameOption = if filename.len == 0: string.none else: filename.some
  var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some
  var uploadedAtOption = if uploadedAt == 0: int64.none else: uploadedAt.int64.some

  let self =
    if protected:
@ -229,7 +220,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
        strategy = StrategyType(protectedStrategy),
        filename = filenameOption,
        mimetype = mimetypeOption,
        uploadedAt = uploadedAtOption,
      )
    else:
      Manifest.new(
@ -241,7 +231,6 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
        codec = codec.MultiCodec,
        filename = filenameOption,
        mimetype = mimetypeOption,
        uploadedAt = uploadedAtOption,
      )

  ?self.verify()

@ -38,7 +38,6 @@ type Manifest* = ref object of RootObj
  version: CidVersion # Cid version
  filename {.serialize.}: ?string # The filename of the content uploaded (optional)
  mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
  uploadedAt {.serialize.}: ?int64 # The UTC creation timestamp in seconds
  case protected {.serialize.}: bool # Protected datasets have erasure coded info
  of true:
    ecK: int # Number of blocks to encode
@ -131,8 +130,6 @@ func filename*(self: Manifest): ?string =
func mimetype*(self: Manifest): ?string =
  self.mimetype

func uploadedAt*(self: Manifest): ?int64 =
  self.uploadedAt
############################################################
# Operations on block list
############################################################
@ -165,14 +162,11 @@ func verify*(self: Manifest): ?!void =

  return success()

func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} =
  self.treeCid.success

func `==`*(a, b: Manifest): bool =
  (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
    (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
    (a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and
    (a.mimetype == b.mimetype) and (a.uploadedAt == b.uploadedAt) and (
    (a.mimetype == b.mimetype) and (
      if a.protected:
        (a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and
          (a.originalDatasetSize == b.originalDatasetSize) and
@ -202,9 +196,6 @@ func `$`*(self: Manifest): string =
  if self.mimetype.isSome:
    result &= ", mimetype: " & $self.mimetype

  if self.uploadedAt.isSome:
    result &= ", uploadedAt: " & $self.uploadedAt

  result &= (
    if self.protected:
      ", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " &
@ -236,7 +227,6 @@ func new*(
    protected = false,
    filename: ?string = string.none,
    mimetype: ?string = string.none,
    uploadedAt: ?int64 = int64.none,
): Manifest =
  T(
    treeCid: treeCid,
@ -248,7 +238,6 @@ func new*(
    protected: protected,
    filename: filename,
    mimetype: mimetype,
    uploadedAt: uploadedAt,
  )

func new*(
@ -278,7 +267,6 @@ func new*(
    protectedStrategy: strategy,
    filename: manifest.filename,
    mimetype: manifest.mimetype,
    uploadedAt: manifest.uploadedAt,
  )

func new*(T: type Manifest, manifest: Manifest): Manifest =
@ -296,7 +284,6 @@ func new*(T: type Manifest, manifest: Manifest): Manifest =
    protected: false,
    filename: manifest.filename,
    mimetype: manifest.mimetype,
    uploadedAt: manifest.uploadedAt,
  )

func new*(
@ -314,7 +301,6 @@ func new*(
    strategy = SteppedStrategy,
    filename: ?string = string.none,
    mimetype: ?string = string.none,
    uploadedAt: ?int64 = int64.none,
): Manifest =
  Manifest(
    treeCid: treeCid,
@ -331,7 +317,6 @@ func new*(
    protectedStrategy: strategy,
    filename: filename,
    mimetype: mimetype,
    uploadedAt: uploadedAt,
  )

func new*(
@ -374,7 +359,6 @@ func new*(
    verifiableStrategy: strategy,
    filename: manifest.filename,
    mimetype: manifest.mimetype,
    uploadedAt: manifest.uploadedAt,
  )

func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =
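The optional metadata fields above are encoded with sentinel values: an absent filename or mimetype is written as an empty string, and they are mapped back to `none` on decode. A minimal round-trip sketch, not part of this commit, assuming a `manifest` built elsewhere and the `?!`/`?` result helpers from `questionable`:

proc metadataRoundTrip(manifest: Manifest): ?!void =
  # encode, then decode, and check the optional metadata survives
  let bytes = ?manifest.encode()
  let decoded = ?Manifest.decode(bytes)
  doAssert decoded.filename == manifest.filename
  doAssert decoded.mimetype == manifest.mimetype
  success()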
105 codex/market.nim

@ -18,15 +18,16 @@ export periods
type
  Market* = ref object of RootObj
  MarketError* = object of CodexError
  SlotStateMismatchError* = object of MarketError
  SlotReservationNotAllowedError* = object of MarketError
  Subscription* = ref object of RootObj
  OnRequest* =
    proc(id: RequestId, ask: StorageAsk, expiry: UInt256) {.gcsafe, upraises: [].}
    proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
  OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
  OnSlotFilled* =
    proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
  OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
  OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
  OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
  OnSlotReservationsFull* =
    proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
    proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
  OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
  OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
  OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
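After this hunk, the marketplace callbacks receive plain `uint64` slot indices and expiries instead of `UInt256`. A hypothetical handler matching the new `OnSlotFilled` shape, shown only to illustrate the signature change:

proc logSlotFilled(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].} =
  # slotIndex is now an ordinary unsigned integer, no truncate() needed
  echo "slot ", slotIndex, " filled for request ", requestId

let onSlotFilled: OnSlotFilled = logSlotFilled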
@ -37,19 +38,19 @@ type
  StorageRequested* = object of MarketplaceEvent
    requestId*: RequestId
    ask*: StorageAsk
    expiry*: UInt256
    expiry*: uint64

  SlotFilled* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId
    slotIndex*: UInt256
    slotIndex*: uint64

  SlotFreed* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId
    slotIndex*: UInt256
    slotIndex*: uint64

  SlotReservationsFull* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId
    slotIndex*: UInt256
    slotIndex*: uint64

  RequestFulfilled* = object of MarketplaceEvent
    requestId* {.indexed.}: RequestId
@ -63,22 +64,42 @@ type
  ProofSubmitted* = object of MarketplaceEvent
    id*: SlotId

method getZkeyHash*(market: Market): Future[?string] {.base, async.} =
method loadConfig*(
    market: Market
): Future[?!void] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method getSigner*(market: Market): Future[Address] {.base, async.} =
method getZkeyHash*(
    market: Market
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method periodicity*(market: Market): Future[Periodicity] {.base, async.} =
method getSigner*(
    market: Market
): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method proofTimeout*(market: Market): Future[UInt256] {.base, async.} =
method periodicity*(
    market: Market
): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} =
method proofTimeout*(
    market: Market
): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method proofDowntime*(market: Market): Future[uint8] {.base, async.} =
method repairRewardPercentage*(
    market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
  raiseAssert("not implemented")

method proofDowntime*(
    market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
@ -89,7 +110,9 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
  let pntr = await market.getPointer(slotId)
  return pntr < downtime

method requestStorage*(market: Market, request: StorageRequest) {.base, async.} =
method requestStorage*(
    market: Market, request: StorageRequest
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
@ -100,7 +123,7 @@ method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =

method getRequest*(
    market: Market, id: RequestId
): Future[?StorageRequest] {.base, async.} =
): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method requestState*(
@ -108,7 +131,9 @@ method requestState*(
): Future[?RequestState] {.base, async.} =
  raiseAssert("not implemented")

method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} =
method slotState*(
    market: Market, slotId: SlotId
): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method getRequestEnd*(
@ -122,13 +147,13 @@ method requestExpiresAt*(
  raiseAssert("not implemented")

method getHost*(
    market: Market, requestId: RequestId, slotIndex: UInt256
): Future[?Address] {.base, async.} =
    market: Market, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method currentCollateral*(
    market: Market, slotId: SlotId
): Future[UInt256] {.base, async.} =
): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} =
  raiseAssert("not implemented")

method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
@ -137,16 +162,20 @@ method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, asy
method fillSlot*(
    market: Market,
    requestId: RequestId,
    slotIndex: UInt256,
    slotIndex: uint64,
    proof: Groth16Proof,
    collateral: UInt256,
) {.base, async.} =
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
method freeSlot*(
    market: Market, slotId: SlotId
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method withdrawFunds*(market: Market, requestId: RequestId) {.base, async.} =
method withdrawFunds*(
    market: Market, requestId: RequestId
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method subscribeRequests*(
@ -165,10 +194,14 @@ method getChallenge*(
): Future[ProofChallenge] {.base, async.} =
  raiseAssert("not implemented")

method submitProof*(market: Market, id: SlotId, proof: Groth16Proof) {.base, async.} =
method submitProof*(
    market: Market, id: SlotId, proof: Groth16Proof
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method markProofAsMissing*(market: Market, id: SlotId, period: Period) {.base, async.} =
method markProofAsMissing*(
    market: Market, id: SlotId, period: Period
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method canProofBeMarkedAsMissing*(
@ -177,12 +210,12 @@ method canProofBeMarkedAsMissing*(
  raiseAssert("not implemented")

method reserveSlot*(
    market: Market, requestId: RequestId, slotIndex: UInt256
) {.base, async.} =
    market: Market, requestId: RequestId, slotIndex: uint64
) {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert("not implemented")

method canReserveSlot*(
    market: Market, requestId: RequestId, slotIndex: UInt256
    market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.base, async.} =
  raiseAssert("not implemented")

@ -202,7 +235,7 @@ method subscribeSlotFilled*(
  raiseAssert("not implemented")

method subscribeSlotFilled*(
    market: Market, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled
    market: Market, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
  raiseAssert("not implemented")

@ -268,3 +301,13 @@ method queryPastStorageRequestedEvents*(
    market: Market, blocksAgo: int
): Future[seq[StorageRequested]] {.base, async.} =
  raiseAssert("not implemented")

method slotCollateral*(
    market: Market, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
  raiseAssert("not implemented")

method slotCollateral*(
    market: Market, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.base, gcsafe, raises: [].} =
  raiseAssert("not implemented")
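Taken together, the hunks above mean a concrete `Market` implementation now overrides methods with explicit `raises` lists and `uint64` slot indices. A sketch of what an override could look like under those assumptions; `MockMarket` and its fields are hypothetical, not part of this commit:

type MockMarket = ref object of Market
  timeout: uint64

method proofTimeout*(
    market: MockMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
  # an override must match the base signature, including the raises set
  return market.timeout

method reserveSlot*(
    market: MockMarket, requestId: RequestId, slotIndex: uint64
) {.async: (raises: [CancelledError, MarketError]).} =
  # SlotReservationNotAllowedError is a MarketError, so raising it here
  # stays within the declared raises list
  if slotIndex >= 256:
    raise newException(SlotReservationNotAllowedError, "slot index out of range")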
@ -27,11 +27,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint

proc encode*(self: CodexTree): seq[byte] =
  var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
  var pb = initProtoBuffer()
  pb.write(1, self.mcodec.uint64)
  pb.write(2, self.leavesCount.uint64)
  for node in self.nodes:
    var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
    var nodesPb = initProtoBuffer()
    nodesPb.write(1, node)
    nodesPb.finish()
    pb.write(3, nodesPb)
@ -40,7 +40,7 @@ proc encode*(self: CodexTree): seq[byte] =
  pb.buffer

proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
  var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
  var pb = initProtoBuffer(data)
  var mcodecCode: uint64
  var leavesCount: uint64
  discard ?pb.getField(1, mcodecCode).mapFailure
@ -63,13 +63,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
  CodexTree.fromNodes(mcodec, nodes, leavesCount.int)

proc encode*(self: CodexProof): seq[byte] =
  var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
  var pb = initProtoBuffer()
  pb.write(1, self.mcodec.uint64)
  pb.write(2, self.index.uint64)
  pb.write(3, self.nleaves.uint64)

  for node in self.path:
    var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
    var nodesPb = initProtoBuffer()
    nodesPb.write(1, node)
    nodesPb.finish()
    pb.write(4, nodesPb)
@ -78,7 +78,7 @@ proc encode*(self: CodexProof): seq[byte] =
  pb.buffer

proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
  var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
  var pb = initProtoBuffer(data)
  var mcodecCode: uint64
  var index: uint64
  var nleaves: uint64
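Since `encode`/`decode` are symmetric and, after this change, no longer take a `maxSize`, a round trip is cheap to check. A small sketch, assuming `mcodec` and `leavesCount` accessors exist as used above:

proc treeRoundTrip(tree: CodexTree): ?!void =
  # encode to protobuf bytes and decode back, verifying the header fields
  let decoded = ?CodexTree.decode(tree.encode())
  doAssert decoded.mcodec == tree.mcodec
  doAssert decoded.leavesCount == tree.leavesCount
  success()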
268 codex/node.nim

@ -15,6 +15,7 @@ import std/strformat
import std/sugar
import times

import pkg/taskpools
import pkg/questionable
import pkg/questionable/results
import pkg/chronos
@ -44,13 +45,14 @@ import ./utils
import ./errors
import ./logutils
import ./utils/asynciter
import ./utils/trackedfutures

export logutils

logScope:
  topics = "codex node"

const FetchBatch = 200
const DefaultFetchBatch = 10

type
  Contracts* =
@ -70,6 +72,8 @@ type
    contracts*: Contracts
    clock*: Clock
    storage*: Contracts
    taskpool: Taskpool
    trackedFutures: TrackedFutures

  CodexNodeRef* = ref CodexNode

@ -149,7 +153,11 @@ proc updateExpiry*(
    let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt(
      self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry)
    )
    await allFuturesThrowing(ensuringFutures)

    let res = await allFinishedFailed(ensuringFutures)
    if res.failure.len > 0:
      trace "Some blocks failed to update expiry", len = res.failure.len
      return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
@ -161,8 +169,9 @@ proc fetchBatched*(
    self: CodexNodeRef,
    cid: Cid,
    iter: Iter[int],
    batchSize = FetchBatch,
    batchSize = DefaultFetchBatch,
    onBatch: BatchProc = nil,
    fetchLocal = true,
): Future[?!void] {.async, gcsafe.} =
  ## Fetch blocks in batches of `batchSize`
  ##
@ -177,30 +186,62 @@ proc fetchBatched*(
    let blocks = collect:
      for i in 0 ..< batchSize:
        if not iter.finished:
          self.networkStore.getBlock(BlockAddress.init(cid, iter.next()))
          let address = BlockAddress.init(cid, iter.next())
          if not (await address in self.networkStore) or fetchLocal:
            self.networkStore.getBlock(address)

    if blocksErr =? (await allFutureResult(blocks)).errorOption:
      return failure(blocksErr)
    let res = await allFinishedFailed(blocks)
    if res.failure.len > 0:
      trace "Some blocks failed to fetch", len = res.failure.len
      return failure("Some blocks failed to fetch (" & $res.failure.len & " )")

    if not onBatch.isNil and
        batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption:
      return failure(batchErr)

    await sleepAsync(1.millis)

  success()

proc fetchBatched*(
    self: CodexNodeRef,
    manifest: Manifest,
    batchSize = FetchBatch,
    batchSize = DefaultFetchBatch,
    onBatch: BatchProc = nil,
    fetchLocal = true,
): Future[?!void] =
  ## Fetch manifest in batches of `batchSize`
  ##

  trace "Fetching blocks in batches of", size = batchSize
  trace "Fetching blocks in batches of",
    size = batchSize, blocksCount = manifest.blocksCount

  let iter = Iter[int].new(0 ..< manifest.blocksCount)
  self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch)
  self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal)
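A hedged sketch of how a caller might drive the manifest overload after this change: batches default to `DefaultFetchBatch` (10), already-local blocks are skipped when `fetchLocal = false`, and failures surface through the returned `?!void`. The `prefetch` helper below is hypothetical:

proc prefetch(self: CodexNodeRef, manifest: Manifest): Future[?!void] {.async.} =
  # fetch only blocks we don't already have locally, 25 at a time
  if err =? (
    await self.fetchBatched(manifest, batchSize = 25, fetchLocal = false)
  ).errorOption:
    error "Prefetch failed", err = err.msg
    return failure(err)
  success()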
proc fetchDatasetAsync*(
    self: CodexNodeRef, manifest: Manifest, fetchLocal = true
): Future[void] {.async: (raises: []).} =
  ## Asynchronously fetch a dataset in the background.
  ## This task will be tracked and cleaned up on node shutdown.
  ##
  try:
    if err =? (
      await self.fetchBatched(
        manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal
      )
    ).errorOption:
      error "Unable to fetch blocks", err = err.msg
  except CancelledError as exc:
    trace "Cancelled fetching blocks", exc = exc.msg
  except CatchableError as exc:
    error "Error fetching blocks", exc = exc.msg

proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) =
  ## Start fetching a dataset in the background.
  ## The task will be tracked and cleaned up on node shutdown.
  ##
  self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))

proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} =
  ## Streams the contents of a single block.
@ -212,16 +253,15 @@ proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async
  without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
    return failure(err)

  proc streamOneBlock(): Future[void] {.async.} =
  proc streamOneBlock(): Future[void] {.async: (raises: []).} =
    try:
      defer:
        await stream.pushEof()
      await stream.pushData(blk.data)
    except CatchableError as exc:
      trace "Unable to send block", cid, exc = exc.msg
      discard
    finally:
      await stream.pushEof()

  asyncSpawn streamOneBlock()
  self.trackedFutures.track(streamOneBlock())
  LPStream(stream).success

proc streamEntireDataset(
@ -231,24 +271,40 @@ proc streamEntireDataset(
  ##
  trace "Retrieving blocks from manifest", manifestCid

  var jobs: seq[Future[void]]
  let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
  if manifest.protected:
    # Retrieve, decode and save to the local store all EC groups
    proc erasureJob(): Future[?!void] {.async.} =
      # Spawn an erasure decoding job
      let erasure =
        Erasure.new(self.networkStore, leoEncoderProvider, leoDecoderProvider)
      without _ =? (await erasure.decode(manifest)), error:
        error "Unable to erasure decode manifest", manifestCid, exc = error.msg
        return failure(error)
    proc erasureJob(): Future[void] {.async: (raises: []).} =
      try:
        # Spawn an erasure decoding job
        let erasure = Erasure.new(
          self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
        )
        without _ =? (await erasure.decode(manifest)), error:
          error "Unable to erasure decode manifest", manifestCid, exc = error.msg
      except CatchableError as exc:
        trace "Error erasure decoding manifest", manifestCid, exc = exc.msg

      return success()
    jobs.add(erasureJob())

    if err =? (await erasureJob()).errorOption:
      return failure(err)
  jobs.add(self.fetchDatasetAsync(manifest))

  # Monitor stream completion and cancel background jobs when done
  proc monitorStream() {.async: (raises: []).} =
    try:
      await stream.join()
    except CatchableError as exc:
      warn "Stream failed", exc = exc.msg
    finally:
      await noCancel allFutures(jobs.mapIt(it.cancelAndWait))

  self.trackedFutures.track(monitorStream())

  # Retrieve all blocks of the dataset sequentially from the local store or network
  trace "Creating store stream for manifest", manifestCid
  LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success

  stream.success

proc retrieve*(
    self: CodexNodeRef, cid: Cid, local: bool = true
@ -267,6 +323,65 @@ proc retrieve*(

  await self.streamEntireDataset(manifest, cid)

proc deleteSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} =
  if err =? (await self.networkStore.delBlock(cid)).errorOption:
    error "Error deleting block", cid, err = err.msg
    return failure(err)

  trace "Deleted block", cid
  return success()

proc deleteEntireDataset(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} =
  # Deletion is a strictly local operation
  var store = self.networkStore.localStore

  if not (await cid in store):
    # As per the contract for delete*, an absent dataset is not an error.
    return success()

  without manifestBlock =? await store.getBlock(cid), err:
    return failure(err)

  without manifest =? Manifest.decode(manifestBlock), err:
    return failure(err)

  let runtimeQuota = initDuration(milliseconds = 100)
  var lastIdle = getTime()
  for i in 0 ..< manifest.blocksCount:
    if (getTime() - lastIdle) >= runtimeQuota:
      await idleAsync()
      lastIdle = getTime()

    if err =? (await store.delBlock(manifest.treeCid, i)).errorOption:
      # The contract for delBlock is fuzzy, but we assume that if the block is
      # simply missing we won't get an error. This is a best effort operation and
      # can simply be retried.
      error "Failed to delete block within dataset", index = i, err = err.msg
      return failure(err)

  if err =? (await store.delBlock(cid)).errorOption:
    error "Error deleting manifest block", err = err.msg

  success()
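The quota/`idleAsync` dance in `deleteEntireDataset` is a general cooperative-yield pattern: bound the time spent per scheduler turn so a long local loop cannot starve other async tasks. Distilled into a standalone sketch under the same assumptions as the code above (`std/times` for wall-clock time, chronos for `idleAsync`; the per-item work is left as a placeholder):

import std/times
import pkg/chronos

proc boundedLoop(items: int) {.async.} =
  let runtimeQuota = initDuration(milliseconds = 100) # max time per turn
  var lastIdle = getTime()
  for i in 0 ..< items:
    if (getTime() - lastIdle) >= runtimeQuota:
      await idleAsync() # hand control back to the dispatcher
      lastIdle = getTime()
    # the per-item unit of work goes here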
proc delete*(
    self: CodexNodeRef, cid: Cid
): Future[?!void] {.async: (raises: [CatchableError]).} =
  ## Deletes a whole dataset, if Cid is a Manifest Cid, or a single block, if Cid a block Cid,
  ## from the underlying block store. This is a strictly local operation.
  ##
  ## Missing blocks in dataset deletes are ignored.
  ##

  without isManifest =? cid.isManifest, err:
    trace "Bad content type for CID:", cid = cid, err = err.msg
    return failure(err)

  if not isManifest:
    return await self.deleteSingleBlock(cid)

  await self.deleteEntireDataset(cid)

proc store*(
    self: CodexNodeRef,
    stream: LPStream,
@ -332,7 +447,6 @@ proc store*(
    codec = dataCodec,
    filename = filename,
    mimetype = mimetype,
    uploadedAt = now().utc.toTime.toUnix.some,
  )

  without manifestBlk =? await self.storeManifest(manifest), err:
@ -369,13 +483,13 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =
proc setupRequest(
    self: CodexNodeRef,
    cid: Cid,
    duration: UInt256,
    duration: uint64,
    proofProbability: UInt256,
    nodes: uint,
    tolerance: uint,
    pricePerBytePerSecond: UInt256,
    collateralPerByte: UInt256,
    expiry: UInt256,
    expiry: uint64,
): Future[?!StorageRequest] {.async.} =
  ## Setup slots for a given dataset
  ##
@ -403,8 +517,9 @@ proc setupRequest(
    return failure error

  # Erasure code the dataset according to provided parameters
  let erasure =
    Erasure.new(self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider)
  let erasure = Erasure.new(
    self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
  )

  without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
    trace "Unable to erasure code dataset"
@ -432,17 +547,14 @@ proc setupRequest(
    request = StorageRequest(
      ask: StorageAsk(
        slots: verifiable.numSlots.uint64,
        slotSize: builder.slotBytes.uint.u256,
        slotSize: builder.slotBytes.uint64,
        duration: duration,
        proofProbability: proofProbability,
        pricePerBytePerSecond: pricePerBytePerSecond,
        collateralPerByte: collateralPerByte,
        maxSlotLoss: tolerance,
      ),
      content: StorageContent(
        cid: $manifestBlk.cid, # TODO: why string?
        merkleRoot: verifyRoot,
      ),
      content: StorageContent(cid: manifestBlk.cid, merkleRoot: verifyRoot),
      expiry: expiry,
    )

@ -452,13 +564,13 @@ proc setupRequest(
proc requestStorage*(
    self: CodexNodeRef,
    cid: Cid,
    duration: UInt256,
    duration: uint64,
    proofProbability: UInt256,
    nodes: uint,
    tolerance: uint,
    pricePerBytePerSecond: UInt256,
    collateralPerByte: UInt256,
    expiry: UInt256,
    expiry: uint64,
): Future[?!PurchaseId] {.async.} =
  ## Initiate a request for storage sequence, this might
  ## be a multistep procedure.
@ -472,7 +584,7 @@ proc requestStorage*(
    pricePerBytePerSecond = pricePerBytePerSecond
    proofProbability = proofProbability
    collateralPerByte = collateralPerByte
    expiry = expiry.truncate(int64)
    expiry = expiry
    now = self.clock.now

  trace "Received a request for storage!"
@ -494,20 +606,26 @@ proc requestStorage*(
  success purchase.id
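With `duration` and `expiry` now plain `uint64` seconds, a call site no longer builds `UInt256` values for them. A hypothetical invocation under those assumptions; all amounts are illustrative only:

proc example(node: CodexNodeRef, cid: Cid) {.async.} =
  let res = await node.requestStorage(
    cid = cid,
    duration = 7'u64 * 24 * 3600, # one week, in seconds
    proofProbability = 1.u256,
    nodes = 3,
    tolerance = 1,
    pricePerBytePerSecond = 1.u256,
    collateralPerByte = 1.u256,
    expiry = 24'u64 * 3600, # window for the request to be filled
  )
  if purchaseId =? res:
    trace "Storage requested", purchaseId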
proc onStore(
  self: CodexNodeRef, request: StorageRequest, slotIdx: UInt256, blocksCb: BlocksCb
    self: CodexNodeRef,
    request: StorageRequest,
    slotIdx: uint64,
    blocksCb: BlocksCb,
    isRepairing: bool = false,
): Future[?!void] {.async.} =
  ## store data in local storage
  ##

  let cid = request.content.cid

  logScope:
    cid = request.content.cid
    cid = $cid
    slotIdx = slotIdx

  trace "Received a request to store a slot"

  without cid =? Cid.init(request.content.cid).mapFailure, err:
    trace "Unable to parse Cid", cid
    return failure(err)
  # TODO: Use the isRepairing to manage the slot download.
  # If isRepairing is true, the slot has to be repaired before
  # being downloaded.

  without manifest =? (await self.fetchManifest(cid)), err:
    trace "Unable to fetch manifest for cid", cid, err = err.msg
@ -518,11 +636,9 @@ proc onStore(
    trace "Unable to create slots builder", err = err.msg
    return failure(err)

  let
    slotIdx = slotIdx.truncate(int)
    expiry = request.expiry.toSecondsSince1970
  let expiry = request.expiry

  if slotIdx > manifest.slotRoots.high:
  if slotIdx > manifest.slotRoots.high.uint64:
    trace "Slot index not in manifest", slotIdx
    return failure(newException(CodexError, "Slot index not in manifest"))

@ -530,9 +646,12 @@ proc onStore(
    trace "Updating expiry for blocks", blocks = blocks.len

    let ensureExpiryFutures =
      blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
    if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption:
      return failure(updateExpiryErr)
      blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970))

    let res = await allFinishedFailed(ensureExpiryFutures)
    if res.failure.len > 0:
      trace "Some blocks failed to update expiry", len = res.failure.len
      return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")

    if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
      trace "Unable to process blocks", err = err.msg
@ -546,7 +665,11 @@ proc onStore(
    trace "Unable to create indexing strategy from protected manifest", err = err.msg
    return failure(err)

  without blksIter =? indexer.getIndicies(slotIdx).catch, err:
  if slotIdx > int.high.uint64:
    error "Cannot cast slot index to int", slotIndex = slotIdx
    return

  without blksIter =? indexer.getIndicies(slotIdx.int).catch, err:
    trace "Unable to get indicies from strategy", err = err.msg
    return failure(err)

@ -556,13 +679,13 @@ proc onStore(
    trace "Unable to fetch blocks", err = err.msg
    return failure(err)

  without slotRoot =? (await builder.buildSlot(slotIdx.Natural)), err:
  without slotRoot =? (await builder.buildSlot(slotIdx.int)), err:
    trace "Unable to build slot", err = err.msg
    return failure(err)

  trace "Slot successfully retrieved and reconstructed"

  if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx.int]:
  if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]:
    trace "Slot root mismatch",
      manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
    return failure(newException(CodexError, "Slot root mismatch"))
@ -578,8 +701,8 @@ proc onProve(
  ##

  let
    cidStr = slot.request.content.cid
    slotIdx = slot.slotIndex.truncate(Natural)
    cidStr = $slot.request.content.cid
    slotIdx = slot.slotIndex

  logScope:
    cid = cidStr
@ -600,7 +723,8 @@ proc onProve(
    return failure(err)

  when defined(verify_circuit):
    without (inputs, proof) =? await prover.prove(slotIdx, manifest, challenge), err:
    without (inputs, proof) =? await prover.prove(slotIdx.int, manifest, challenge),
      err:
      error "Unable to generate proof", err = err.msg
      return failure(err)

@ -614,7 +738,7 @@ proc onProve(

    trace "Proof verified successfully"
  else:
    without (_, proof) =? await prover.prove(slotIdx, manifest, challenge), err:
    without (_, proof) =? await prover.prove(slotIdx.int, manifest, challenge), err:
      error "Unable to generate proof", err = err.msg
      return failure(err)

@ -627,16 +751,11 @@ proc onProve(
  failure "Prover not enabled"

proc onExpiryUpdate(
    self: CodexNodeRef, rootCid: string, expiry: SecondsSince1970
    self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async.} =
  without cid =? Cid.init(rootCid):
    trace "Unable to parse Cid", cid
    let error = newException(CodexError, "Unable to parse Cid")
    return failure(error)
  return await self.updateExpiry(rootCid, expiry)

  return await self.updateExpiry(cid, expiry)

proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: UInt256) =
proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) =
  # TODO: remove data from local storage
  discard

@ -652,16 +771,19 @@ proc start*(self: CodexNodeRef) {.async.} =

  if hostContracts =? self.contracts.host:
    hostContracts.sales.onStore = proc(
      request: StorageRequest, slot: UInt256, onBatch: BatchProc
        request: StorageRequest,
        slot: uint64,
        onBatch: BatchProc,
        isRepairing: bool = false,
    ): Future[?!void] =
      self.onStore(request, slot, onBatch)
      self.onStore(request, slot, onBatch, isRepairing)

    hostContracts.sales.onExpiryUpdate = proc(
      rootCid: string, expiry: SecondsSince1970
        rootCid: Cid, expiry: SecondsSince1970
    ): Future[?!void] =
      self.onExpiryUpdate(rootCid, expiry)

    hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: UInt256) =
    hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) =
      # TODO: remove data from local storage
      self.onClear(request, slotIndex)

@ -703,6 +825,11 @@ proc start*(self: CodexNodeRef) {.async.} =
proc stop*(self: CodexNodeRef) {.async.} =
  trace "Stopping node"

  if not self.taskpool.isNil:
    self.taskpool.shutdown()

  await self.trackedFutures.cancelTracked()

  if not self.engine.isNil:
    await self.engine.stop()

@ -730,6 +857,7 @@ proc new*(
    networkStore: NetworkStore,
    engine: BlockExcEngine,
    discovery: Discovery,
    taskpool: Taskpool,
    prover = Prover.none,
    contracts = Contracts.default,
): CodexNodeRef =
@ -742,5 +870,7 @@ proc new*(
    engine: engine,
    prover: prover,
    discovery: discovery,
    taskPool: taskpool,
    contracts: contracts,
    trackedFutures: TrackedFutures(),
  )
@ -2,10 +2,10 @@ import pkg/stint

type
  Periodicity* = object
    seconds*: UInt256
    seconds*: uint64

  Period* = UInt256
  Timestamp* = UInt256
  Period* = uint64
  Timestamp* = uint64

func periodOf*(periodicity: Periodicity, timestamp: Timestamp): Period =
  timestamp div periodicity.seconds
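`periodOf` is integer division, so period boundaries land exactly on multiples of `seconds`. A quick worked example with the new `uint64` types:

let periodicity = Periodicity(seconds: 600)
doAssert periodicity.periodOf(3661) == 6'u64 # 3661 div 600
doAssert periodicity.periodOf(3600) == 6'u64 # boundary: start of period 6
doAssert periodicity.periodOf(3599) == 5'u64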
@ -14,7 +14,7 @@ export purchase

type
  Purchasing* = ref object
    market: Market
    market*: Market
    clock: Clock
    purchases: Table[PurchaseId, Purchase]
    proofProbability*: UInt256
@ -1,25 +1,35 @@
import pkg/metrics

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./error

declareCounter(codex_purchases_cancelled, "codex purchases cancelled")

logScope:
  topics = "marketplace purchases cancelled"

type PurchaseCancelled* = ref object of ErrorHandlingState
type PurchaseCancelled* = ref object of PurchaseState

method `$`*(state: PurchaseCancelled): string =
  "cancelled"

method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} =
method run*(
    state: PurchaseCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_cancelled.inc()
  let purchase = Purchase(machine)

  warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId
  await purchase.market.withdrawFunds(purchase.requestId)
  try:
    warn "Request cancelled, withdrawing remaining funds",
      requestId = purchase.requestId
    await purchase.market.withdrawFunds(purchase.requestId)

  let error = newException(Timeout, "Purchase cancelled due to timeout")
  purchase.future.fail(error)
    let error = newException(Timeout, "Purchase cancelled due to timeout")
    purchase.future.fail(error)
  except CancelledError as e:
    trace "PurchaseCancelled.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseCancelled.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

@ -14,7 +14,9 @@ type PurchaseErrored* = ref object of PurchaseState
method `$`*(state: PurchaseErrored): string =
  "errored"

method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} =
method run*(
    state: PurchaseErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_error.inc()
  let purchase = Purchase(machine)

@ -1,8 +0,0 @@
import pkg/questionable
import ../statemachine
import ./error

type ErrorHandlingState* = ref object of PurchaseState

method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
  some State(PurchaseErrored(error: error))
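With `errorhandling.nim` deleted, the implicit `onError` hook is gone; every state's `run` now catches its own failures and transitions to `PurchaseErrored` explicitly. The shared shape, sketched once (`SomeState` is a placeholder for any purchase state):

type SomeState = ref object of PurchaseState

method run*(
    state: SomeState, machine: Machine
): Future[?State] {.async: (raises: []).} =
  try:
    # state-specific work goes here
    discard
  except CancelledError as e:
    trace "run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))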
@ -1,6 +1,7 @@
import pkg/metrics
import ../statemachine
import ../../logutils
import ../../utils/exceptions
import ./error

declareCounter(codex_purchases_failed, "codex purchases failed")
@ -10,11 +11,20 @@ type PurchaseFailed* = ref object of PurchaseState
method `$`*(state: PurchaseFailed): string =
  "failed"

method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} =
method run*(
    state: PurchaseFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_failed.inc()
  let purchase = Purchase(machine)
  warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
  await purchase.market.withdrawFunds(purchase.requestId)

  try:
    warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
    await purchase.market.withdrawFunds(purchase.requestId)
  except CancelledError as e:
    trace "PurchaseFailed.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseFailed.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

  let error = newException(PurchaseError, "Purchase failed")
  return some State(PurchaseErrored(error: error))

@ -1,7 +1,9 @@
import pkg/metrics

import ../statemachine
import ../../utils/exceptions
import ../../logutils
import ./error

declareCounter(codex_purchases_finished, "codex purchases finished")

@ -13,10 +15,19 @@ type PurchaseFinished* = ref object of PurchaseState
method `$`*(state: PurchaseFinished): string =
  "finished"

method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} =
method run*(
    state: PurchaseFinished, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_finished.inc()
  let purchase = Purchase(machine)
  info "Purchase finished, withdrawing remaining funds", requestId = purchase.requestId
  await purchase.market.withdrawFunds(purchase.requestId)
  try:
    info "Purchase finished, withdrawing remaining funds",
      requestId = purchase.requestId
    await purchase.market.withdrawFunds(purchase.requestId)

    purchase.future.complete()
  purchase.future.complete()
  except CancelledError as e:
    trace "PurchaseFinished.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseFinished.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

@ -1,18 +1,28 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./submitted
import ./error

declareCounter(codex_purchases_pending, "codex purchases pending")

type PurchasePending* = ref object of ErrorHandlingState
type PurchasePending* = ref object of PurchaseState

method `$`*(state: PurchasePending): string =
  "pending"

method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} =
method run*(
    state: PurchasePending, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_pending.inc()
  let purchase = Purchase(machine)
  let request = !purchase.request
  await purchase.market.requestStorage(request)
  return some State(PurchaseSubmitted())
  try:
    let request = !purchase.request
    await purchase.market.requestStorage(request)
    return some State(PurchaseSubmitted())
  except CancelledError as e:
    trace "PurchasePending.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchasePending.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

@ -1,22 +1,25 @@
import pkg/metrics

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./finished
import ./failed
import ./error

declareCounter(codex_purchases_started, "codex purchases started")

logScope:
  topics = "marketplace purchases started"

type PurchaseStarted* = ref object of ErrorHandlingState
type PurchaseStarted* = ref object of PurchaseState

method `$`*(state: PurchaseStarted): string =
  "started"

method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} =
method run*(
    state: PurchaseStarted, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_started.inc()
  let purchase = Purchase(machine)

@ -28,15 +31,24 @@ method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.}
  proc callback(_: RequestId) =
    failed.complete()

  let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)
  var ended: Future[void]
  try:
    let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)

  # Ensure that we're past the request end by waiting an additional second
  let ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
  let fut = await one(ended, failed)
  await subscription.unsubscribe()
  if fut.id == failed.id:
    # Ensure that we're past the request end by waiting an additional second
    ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
    let fut = await one(ended, failed)
    await subscription.unsubscribe()
    if fut.id == failed.id:
      ended.cancelSoon()
      return some State(PurchaseFailed())
    else:
      failed.cancelSoon()
      return some State(PurchaseFinished())
  except CancelledError as e:
    ended.cancelSoon()
    return some State(PurchaseFailed())
  else:
    failed.cancelSoon()
    return some State(PurchaseFinished())
    trace "PurchaseStarted.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseStarted.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

@ -1,22 +1,25 @@
import pkg/metrics

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./started
import ./cancelled
import ./error

logScope:
  topics = "marketplace purchases submitted"

declareCounter(codex_purchases_submitted, "codex purchases submitted")

type PurchaseSubmitted* = ref object of ErrorHandlingState
type PurchaseSubmitted* = ref object of PurchaseState

method `$`*(state: PurchaseSubmitted): string =
  "submitted"

method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.} =
method run*(
    state: PurchaseSubmitted, machine: Machine
): Future[?State] {.async: (raises: []).} =
  codex_purchases_submitted.inc()
  let purchase = Purchase(machine)
  let request = !purchase.request
@ -44,5 +47,10 @@ method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.
    await wait().withTimeout()
  except Timeout:
    return some State(PurchaseCancelled())
  except CancelledError as e:
    trace "PurchaseSubmitted.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseSubmitted.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))

  return some State(PurchaseStarted())

@ -1,34 +1,44 @@
import pkg/metrics
import ../../utils/exceptions
import ../../logutils
import ../statemachine
import ./errorhandling
import ./submitted
import ./started
import ./cancelled
import ./finished
import ./failed
import ./error

declareCounter(codex_purchases_unknown, "codex purchases unknown")

type PurchaseUnknown* = ref object of ErrorHandlingState
type PurchaseUnknown* = ref object of PurchaseState

method `$`*(state: PurchaseUnknown): string =
  "unknown"

method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} =
  codex_purchases_unknown.inc()
  let purchase = Purchase(machine)
  if (request =? await purchase.market.getRequest(purchase.requestId)) and
      (requestState =? await purchase.market.requestState(purchase.requestId)):
    purchase.request = some request
method run*(
    state: PurchaseUnknown, machine: Machine
): Future[?State] {.async: (raises: []).} =
  try:
    codex_purchases_unknown.inc()
    let purchase = Purchase(machine)
    if (request =? await purchase.market.getRequest(purchase.requestId)) and
        (requestState =? await purchase.market.requestState(purchase.requestId)):
      purchase.request = some request

    case requestState
    of RequestState.New:
      return some State(PurchaseSubmitted())
    of RequestState.Started:
      return some State(PurchaseStarted())
    of RequestState.Cancelled:
      return some State(PurchaseCancelled())
    of RequestState.Finished:
      return some State(PurchaseFinished())
    of RequestState.Failed:
      return some State(PurchaseFailed())
      case requestState
      of RequestState.New:
        return some State(PurchaseSubmitted())
      of RequestState.Started:
        return some State(PurchaseStarted())
      of RequestState.Cancelled:
        return some State(PurchaseCancelled())
      of RequestState.Finished:
        return some State(PurchaseFinished())
      of RequestState.Failed:
        return some State(PurchaseFailed())
  except CancelledError as e:
    trace "PurchaseUnknown.run was cancelled", error = e.msgDetail
  except CatchableError as e:
    error "Error during PurchaseUnknown.run", error = e.msgDetail
    return some State(PurchaseErrored(error: e))
@ -13,8 +13,8 @@ push:
{.upraises: [].}

import std/sequtils
import mimetypes
import os
import std/mimetypes
import std/os

import pkg/questionable
import pkg/questionable/results
@ -65,9 +65,15 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} =

  return %RestContentList.init(content)

proc isPending(resp: HttpResponseRef): bool =
  ## Checks that an HttpResponseRef object is still pending; i.e.,
  ## that no body has yet been sent. This helps us guard against calling
  ## sendBody(resp: HttpResponseRef, ...) twice, which is illegal.
  return resp.getResponseState() == HttpResponseState.Empty
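`isPending` exists so error paths can answer "may I still send a body?" before calling `sendBody`. A sketch of the guard as a reusable helper; `replyOnce` is hypothetical and assumes chronos's `HttpResponseRef`/`HttpCode`:

proc replyOnce(resp: HttpResponseRef, status: HttpCode, msg: string) {.async.} =
  # only safe to send a body if none has been sent yet
  if resp.isPending():
    resp.status = status
    await resp.sendBody(msg)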
proc retrieveCid(
    node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef
): Future[RestApiResponse] {.async.} =
): Future[void] {.async: (raises: [CancelledError, HttpWriteError]).} =
  ## Download a file from the node in a streaming
  ## manner
  ##
@ -79,16 +85,21 @@ proc retrieveCid(
    without stream =? (await node.retrieve(cid, local)), error:
      if error of BlockNotFoundError:
        resp.status = Http404
        return await resp.sendBody("")
        await resp.sendBody(
          "The requested CID could not be retrieved (" & error.msg & ")."
        )
        return
      else:
        resp.status = Http500
        return await resp.sendBody(error.msg)
        await resp.sendBody(error.msg)
        return

    # It is ok to fetch again the manifest because it will hit the cache
    without manifest =? (await node.fetchManifest(cid)), err:
      error "Failed to fetch manifest", err = err.msg
      resp.status = Http404
      return await resp.sendBody(err.msg)
      await resp.sendBody(err.msg)
      return

    if manifest.mimetype.isSome:
      resp.setHeader("Content-Type", manifest.mimetype.get())
@ -103,7 +114,14 @@ proc retrieveCid(
    else:
      resp.setHeader("Content-Disposition", "attachment")

    await resp.prepareChunked()
    # For erasure-coded datasets, we need to return the _original_ length; i.e.,
    # the length of the non-erasure-coded dataset, as that's what we will be
    # returning to the client.
    let contentLength =
      if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize
    resp.setHeader("Content-Length", $(contentLength.int))

    await resp.prepare(HttpResponseStreamType.Plain)

    while not stream.atEof:
      var
@ -116,13 +134,16 @@ proc retrieveCid(

      bytes += buff.len

      await resp.sendChunk(addr buff[0], buff.len)
      await resp.send(addr buff[0], buff.len)
    await resp.finish()
    codex_api_downloads.inc()
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    warn "Excepting streaming blocks", exc = exc.msg
    warn "Error streaming blocks", exc = exc.msg
    resp.status = Http500
    return await resp.sendBody("")
    if resp.isPending():
      await resp.sendBody(exc.msg)
  finally:
    info "Sent bytes", cid = cid, bytes
    if not stream.isNil:
@ -238,6 +259,15 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
    let json = await formatManifestBlocks(node)
    return RestApiResponse.response($json, contentType = "application/json")

  router.api(MethodOptions, "/api/codex/v1/data/{cid}") do(
    cid: Cid, resp: HttpResponseRef
  ) -> RestApiResponse:
    if corsOrigin =? allowedOrigin:
      resp.setCorsHeaders("GET,DELETE", corsOrigin)

    resp.status = Http204
    await resp.sendBody("")

  router.api(MethodGet, "/api/codex/v1/data/{cid}") do(
    cid: Cid, resp: HttpResponseRef
  ) -> RestApiResponse:
@ -254,6 +284,27 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute

    await node.retrieveCid(cid.get(), local = true, resp = resp)

  router.api(MethodDelete, "/api/codex/v1/data/{cid}") do(
    cid: Cid, resp: HttpResponseRef
  ) -> RestApiResponse:
    ## Deletes either a single block or an entire dataset
    ## from the local node. Does nothing and returns 200
    ## if the dataset is not locally available.
    ##
    var headers = buildCorsHeaders("DELETE", allowedOrigin)

    if cid.isErr:
      return RestApiResponse.error(Http400, $cid.error(), headers = headers)

    if err =? (await node.delete(cid.get())).errorOption:
      return RestApiResponse.error(Http500, err.msg, headers = headers)

    if corsOrigin =? allowedOrigin:
      resp.setCorsHeaders("DELETE", corsOrigin)

    resp.status = Http204
    await resp.sendBody("")

  router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do(
    cid: Cid, resp: HttpResponseRef
  ) -> RestApiResponse:
@ -269,15 +320,8 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
      error "Failed to fetch manifest", err = err.msg
      return RestApiResponse.error(Http404, err.msg, headers = headers)

    proc fetchDatasetAsync(): Future[void] {.async.} =
      try:
        if err =? (await node.fetchBatched(manifest)).errorOption:
          error "Unable to fetch dataset", cid = cid.get(), err = err.msg
      except CatchableError as exc:
        error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg
        discard

    asyncSpawn fetchDatasetAsync()
    # Start fetching the dataset in the background
    node.fetchDatasetAsyncTask(manifest)

    let json = %formatManifest(cid.get(), manifest)
    return RestApiResponse.response($json, contentType = "application/json")
@ -298,6 +342,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
      resp.setCorsHeaders("GET", corsOrigin)
      resp.setHeader("Access-Control-Headers", "X-Requested-With")

    resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition")
    await node.retrieveCid(cid.get(), local = false, resp = resp)

  router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do(
@ -430,19 +475,28 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =

      if restAv.totalSize == 0:
        return RestApiResponse.error(
          Http400, "Total size must be larger then zero", headers = headers
          Http422, "Total size must be larger then zero", headers = headers
        )

      if not reservations.hasAvailable(restAv.totalSize.truncate(uint)):
      if not reservations.hasAvailable(restAv.totalSize):
        return
          RestApiResponse.error(Http422, "Not enough storage quota", headers = headers)

      without availability =? (
        await reservations.createAvailability(
          restAv.totalSize, restAv.duration, restAv.minPricePerBytePerSecond,
          restAv.totalSize,
          restAv.duration,
          restAv.minPricePerBytePerSecond,
          restAv.totalCollateral,
          enabled = restAv.enabled |? true,
          until = restAv.until |? 0,
        )
      ), error:
        if error of CancelledError:
          raise error
        if error of UntilOutOfBoundsError:
          return RestApiResponse.error(Http422, error.msg)

        return RestApiResponse.error(Http500, error.msg, headers = headers)

      return RestApiResponse.response(
@ -479,6 +533,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
    ## tokens) to be matched against the request's pricePerBytePerSecond
    ## totalCollateral - total collateral (in amount of
    ## tokens) that can be distributed among matching requests

    try:
      without contracts =? node.contracts.host:
        return RestApiResponse.error(Http503, "Persistence is not enabled")
@ -503,17 +558,23 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
        return RestApiResponse.error(Http500, error.msg)

      if isSome restAv.freeSize:
        return RestApiResponse.error(Http400, "Updating freeSize is not allowed")
        return RestApiResponse.error(Http422, "Updating freeSize is not allowed")

      if size =? restAv.totalSize:
        if size == 0:
          return RestApiResponse.error(Http422, "Total size must be larger then zero")

        # we don't allow lowering the totalSize bellow currently utilized size
        if size < (availability.totalSize - availability.freeSize):
          return RestApiResponse.error(
            Http400,
            Http422,
            "New totalSize must be larger then current totalSize - freeSize, which is currently: " &
              $(availability.totalSize - availability.freeSize),
          )

        if not reservations.hasAvailable(size):
          return RestApiResponse.error(Http422, "Not enough storage quota")

        availability.freeSize += size - availability.totalSize
        availability.totalSize = size

@ -526,10 +587,21 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
      if totalCollateral =? restAv.totalCollateral:
        availability.totalCollateral = totalCollateral

      if err =? (await reservations.update(availability)).errorOption:
        return RestApiResponse.error(Http500, err.msg)
      if until =? restAv.until:
        availability.until = until

      return RestApiResponse.response(Http200)
      if enabled =? restAv.enabled:
        availability.enabled = enabled

      if err =? (await reservations.update(availability)).errorOption:
        if err of CancelledError:
          raise err
        if err of UntilOutOfBoundsError:
          return RestApiResponse.error(Http422, err.msg)
        else:
          return RestApiResponse.error(Http500, err.msg)

      return RestApiResponse.response(Http204)
    except CatchableError as exc:
      trace "Excepting processing request", exc = exc.msg
      return RestApiResponse.error(Http500)
@ -607,18 +679,52 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
      without params =? StorageRequestParams.fromJson(body), error:
        return RestApiResponse.error(Http400, error.msg, headers = headers)

      let expiry = params.expiry

      if expiry <= 0 or expiry >= params.duration:
        return RestApiResponse.error(
          Http422,
          "Expiry must be greater than zero and less than the request's duration",
          headers = headers,
        )

      if params.proofProbability <= 0:
        return RestApiResponse.error(
          Http422, "Proof probability must be greater than zero", headers = headers
        )

      if params.collateralPerByte <= 0:
        return RestApiResponse.error(
          Http422, "Collateral per byte must be greater than zero", headers = headers
        )

      if params.pricePerBytePerSecond <= 0:
        return RestApiResponse.error(
          Http422,
          "Price per byte per second must be greater than zero",
          headers = headers,
        )

      let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit
      if params.duration > requestDurationLimit:
        return RestApiResponse.error(
          Http422,
          "Duration exceeds limit of " & $requestDurationLimit & " seconds",
          headers = headers,
        )

      let nodes = params.nodes |? 3
      let tolerance = params.tolerance |? 1

      if tolerance == 0:
        return RestApiResponse.error(
          Http400, "Tolerance needs to be bigger then zero", headers = headers
          Http422, "Tolerance needs to be bigger then zero", headers = headers
        )

      # prevent underflow
      if tolerance > nodes:
        return RestApiResponse.error(
|
||||
Http400,
|
||||
Http422,
|
||||
"Invalid parameters: `tolerance` cannot be greater than `nodes`",
|
||||
headers = headers,
|
||||
)
|
||||
@ -629,21 +735,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
# ensure leopard constrainst of 1 < K ≥ M
|
||||
if ecK <= 1 or ecK < ecM:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
Http422,
|
||||
"Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`",
|
||||
headers = headers,
|
||||
)
|
||||
|
||||
without expiry =? params.expiry:
|
||||
return RestApiResponse.error(Http400, "Expiry required", headers = headers)
|
||||
|
||||
if expiry <= 0 or expiry >= params.duration:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
"Expiry needs value bigger then zero and smaller then the request's duration",
|
||||
headers = headers,
|
||||
)
|
||||
|
||||
without purchaseId =?
|
||||
await node.requestStorage(
|
||||
cid, params.duration, params.proofProbability, nodes, tolerance,
|
||||
@ -651,7 +747,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
), error:
|
||||
if error of InsufficientBlocksError:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
Http422,
|
||||
"Dataset too small for erasure parameters, need at least " &
|
||||
$(ref InsufficientBlocksError)(error).minSize.int & " bytes",
|
||||
headers = headers,
|
||||
|
||||
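The handlers above lean heavily on pkg/questionable: `=?` unpacks an Option or Result into a fresh binding, and `without ... =? expr, error:` is its early-return form. A minimal standalone sketch of the pattern (the `half` and `describe` procs are hypothetical, not from this diff):

import pkg/questionable
import pkg/questionable/results

proc half(n: int): ?!int =
  # succeeds for even input, fails otherwise
  if n mod 2 == 0:
    success(n div 2)
  else:
    failure("odd input")

proc describe(n: int): string =
  # bind `value` on success; fall into the error branch on failure
  without value =? half(n), error:
    return "error: " & error.msg
  "half is " & $value

echo describe(10) # half is 5
echo describe(7)  # error: odd input
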
@ -14,7 +14,7 @@ import pkg/chronos
import pkg/libp2p
import pkg/stew/base10
import pkg/stew/byteutils
import pkg/stew/results
import pkg/results
import pkg/stint

import ../sales

@ -13,11 +13,11 @@ export json

type
  StorageRequestParams* = object
    duration* {.serialize.}: UInt256
    duration* {.serialize.}: uint64
    proofProbability* {.serialize.}: UInt256
    pricePerBytePerSecond* {.serialize.}: UInt256
    collateralPerByte* {.serialize.}: UInt256
    expiry* {.serialize.}: ?UInt256
    expiry* {.serialize.}: uint64
    nodes* {.serialize.}: ?uint
    tolerance* {.serialize.}: ?uint

@ -28,16 +28,18 @@ type
    error* {.serialize.}: ?string

  RestAvailability* = object
    totalSize* {.serialize.}: UInt256
    duration* {.serialize.}: UInt256
    totalSize* {.serialize.}: uint64
    duration* {.serialize.}: uint64
    minPricePerBytePerSecond* {.serialize.}: UInt256
    totalCollateral* {.serialize.}: UInt256
    freeSize* {.serialize.}: ?UInt256
    freeSize* {.serialize.}: ?uint64
    enabled* {.serialize.}: ?bool
    until* {.serialize.}: ?SecondsSince1970

  RestSalesAgent* = object
    state* {.serialize.}: string
    requestId* {.serialize.}: RequestId
    slotIndex* {.serialize.}: UInt256
    slotIndex* {.serialize.}: uint64
    request* {.serialize.}: ?StorageRequest
    reservation* {.serialize.}: ?Reservation


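The optional fields above (`?uint64`, `?bool`, `?SecondsSince1970`) are consumed with questionable's `|?` "or else" operator, as in `restAv.enabled |? true` and `restAv.until |? 0` in the availability handler earlier. A small illustrative sketch (the values are made up):

import std/options
import pkg/questionable

let enabled = none(bool)
let until = some(1_700_000_000'i64)

# `|?` unwraps an Option, substituting the default when it is none
echo enabled |? true # true
echo until |? 0'i64  # 1700000000
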
@ -55,6 +55,15 @@ proc sample*[T](

    break

proc sample*[T](
    rng: Rng, sample: openArray[T], limit: int
): seq[T] {.raises: [Defect, RngSampleError].} =
  if limit > sample.len:
    raise newException(RngSampleError, "Limit cannot be larger than sample!")

  for _ in 0 ..< min(sample.len, limit):
    result.add(rng.sample(sample, result))

proc shuffle*[T](rng: Rng, a: var openArray[T]) =
  for i in countdown(a.high, 1):
    let j = rng.rand(i)

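The new `sample` overload draws `limit` distinct elements by re-sampling until an unseen element turns up, raising once `limit` exceeds the input size. The same idea, sketched standalone with std/random in place of the project's Rng (the helper name is illustrative):

import std/random

proc sampleUnique[T](r: var Rand, xs: openArray[T], limit: int): seq[T] =
  # draw `limit` distinct elements; mirrors the guard in the overload above
  doAssert limit <= xs.len, "Limit cannot be larger than sample!"
  while result.len < limit:
    let candidate = xs[r.rand(xs.high)]
    if candidate notin result:
      result.add candidate

var r = initRand(42)
echo sampleUnique(r, [1, 2, 3, 4, 5], 3) # three distinct picks
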
162 codex/sales.nim
@ -113,7 +113,6 @@ proc remove(sales: Sales, agent: SalesAgent) {.async.} =
proc cleanUp(
  sales: Sales,
  agent: SalesAgent,
  returnBytes: bool,
  reprocessSlot: bool,
  returnedCollateral: ?UInt256,
  processing: Future[void],
@ -132,7 +131,7 @@ proc cleanUp(
  # if reservation for the SalesAgent was not created, then it means
  # that the cleanUp was called before the sales process really started, so
  # there are not really any bytes to be returned
  if returnBytes and request =? data.request and reservation =? data.reservation:
  if request =? data.request and reservation =? data.reservation:
    if returnErr =? (
      await sales.context.reservations.returnBytesToAvailability(
        reservation.availabilityId, reservation.id, request.ask.slotSize
@ -150,20 +149,35 @@ proc cleanUp(
    ).errorOption:
      error "failure deleting reservation", error = deleteErr.msg

  if data.slotIndex > uint16.high.uint64:
    error "Cannot cast slot index to uint16", slotIndex = data.slotIndex
    return

  # Re-add items back into the queue to prevent small availabilities from
  # draining the queue. Seen items will be ordered last.
  if reprocessSlot and request =? data.request:
    let queue = sales.context.slotQueue
    var seenItem = SlotQueueItem.init(
      data.requestId,
      data.slotIndex.truncate(uint16),
      data.ask,
      request.expiry,
      seen = true,
    )
    trace "pushing ignored item to queue, marked as seen"
    if err =? queue.push(seenItem).errorOption:
      error "failed to re-add slot to queue", errorType = $(type err), error = err.msg
    try:
      without collateral =?
        await sales.context.market.slotCollateral(data.requestId, data.slotIndex), err:
        error "Failed to re-add item back to the slot queue: unable to calculate collateral",
          error = err.msg
        return

      let queue = sales.context.slotQueue
      var seenItem = SlotQueueItem.init(
        data.requestId,
        data.slotIndex.uint16,
        data.ask,
        request.expiry,
        seen = true,
        collateral = collateral,
      )
      trace "pushing ignored item to queue, marked as seen"
      if err =? queue.push(seenItem).errorOption:
        error "failed to re-add slot to queue", errorType = $(type err), error = err.msg
    except MarketError as e:
      error "Failed to re-add item back to the slot queue.", error = e.msg
      return

  await sales.remove(agent)

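The new guard above rejects slot indices that cannot survive the uint64→uint16 narrowing before `.uint16` is applied; the same check recurs in the subscription handlers further down. The check in isolation (hypothetical helper name):

proc toSlotIndex16(slotIndex: uint64): uint16 =
  # uint16.high.uint64 == 65535; anything larger cannot be represented
  doAssert slotIndex <= uint16.high.uint64, "slot index does not fit in uint16"
  slotIndex.uint16

echo toSlotIndex16(42'u64) # 42
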
@ -172,7 +186,7 @@ proc cleanUp(
  processing.complete()

proc filled(
  sales: Sales, request: StorageRequest, slotIndex: UInt256, processing: Future[void]
  sales: Sales, request: StorageRequest, slotIndex: uint64, processing: Future[void]
) =
  if onSale =? sales.context.onSale:
    onSale(request, slotIndex)
@ -184,16 +198,15 @@ proc filled(
proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
  debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex

  let agent = newSalesAgent(
    sales.context, item.requestId, item.slotIndex.u256, none StorageRequest
  )
  let agent =
    newSalesAgent(sales.context, item.requestId, item.slotIndex, none StorageRequest)

  agent.onCleanUp = proc(
    returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
    reprocessSlot = false, returnedCollateral = UInt256.none
  ) {.async.} =
    await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done)
    await sales.cleanUp(agent, reprocessSlot, returnedCollateral, done)

  agent.onFilled = some proc(request: StorageRequest, slotIndex: UInt256) =
  agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) =
    sales.filled(request, slotIndex, done)

  agent.start(SalePreparing())
@ -257,12 +270,12 @@ proc load*(sales: Sales) {.async.} =
      newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)

    agent.onCleanUp = proc(
      returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
      reprocessSlot = false, returnedCollateral = UInt256.none
    ) {.async.} =
      # since workers are not being dispatched, this future has not been created
      # by a worker. Create a dummy one here so we can call sales.cleanUp
      let done: Future[void] = nil
      await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done)
      await sales.cleanUp(agent, reprocessSlot, returnedCollateral, done)

    # There is no need to assign agent.onFilled as slots loaded from `mySlots`
    # are inherently already filled and so assigning agent.onFilled would be
@ -271,7 +284,9 @@ proc load*(sales: Sales) {.async.} =
    agent.start(SaleUnknown())
    sales.agents.add agent

proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
proc OnAvailabilitySaved(
  sales: Sales, availability: Availability
) {.async: (raises: []).} =
  ## When availabilities are modified or added, the queue should be unpaused if
  ## it was paused and any slots in the queue should have their `seen` flag
  ## cleared.
@ -283,8 +298,8 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
  queue.unpause()

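`OnAvailabilitySaved` is now annotated `{.async: (raises: []).}`, chronos v4's checked form: nothing, not even CancelledError, may escape, so callers never need a defensive try/except around the callback. A minimal sketch of such a callback (assuming chronos v4; the proc itself is hypothetical):

import pkg/chronos

proc onSaved(name: string) {.async: (raises: []).} =
  try:
    await sleepAsync(1.millis)
    echo "availability saved: ", name
  except CancelledError:
    discard # swallowed: nothing may escape a raises: [] proc

waitFor onSaved("example")
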
proc onStorageRequested(
  sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: UInt256
) =
  sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64
) {.raises: [].} =
  logScope:
    topics = "marketplace sales onStorageRequested"
    requestId
@ -295,7 +310,14 @@ proc onStorageRequested(

  trace "storage requested, adding slots to queue"

  without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err:
  let market = sales.context.market

  without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free),
  err:
    error "Request failure, unable to calculate collateral", error = err.msg
    return

  without items =? SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err:
    if err of SlotsOutOfRangeError:
      warn "Too many slots, cannot add to queue"
    else:
@ -312,7 +334,7 @@ proc onStorageRequested(
  else:
    warn "Error adding request to SlotQueue", error = err.msg

proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) =
proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
  logScope:
    topics = "marketplace sales onSlotFreed"
    requestId
@ -325,35 +347,54 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) =
    let market = context.market
    let queue = context.slotQueue

    # first attempt to populate request using existing slot metadata in queue
    without var found =? queue.populateItem(requestId, slotIndex.truncate(uint16)):
      trace "no existing request metadata, getting request info from contract"
      # if there's no existing slot for that request, retrieve the request
      # from the contract.
      try:
        without request =? await market.getRequest(requestId):
          error "unknown request in contract"
          return
    try:
      without request =? (await market.getRequest(requestId)), err:
        error "unknown request in contract", error = err.msgDetail
        return

        found = SlotQueueItem.init(request, slotIndex.truncate(uint16))
      except CancelledError:
        discard # do not propagate as addSlotToQueue was asyncSpawned
      except CatchableError as e:
        error "failed to get request from contract and add slots to queue",
          error = e.msgDetail
      # Take the repairing state into consideration to calculate the collateral.
      # This is particularly needed because it will affect the priority in the queue
      # and we want to give the user the ability to tweak the parameters.
      # Adding the repairing state directly in the queue priority calculation
      # would not allow this flexibility.
      without collateral =?
        market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
        error "Failed to add freed slot to queue: unable to calculate collateral",
          error = err.msg
        return

    if err =? queue.push(found).errorOption:
      error "failed to push slot items to queue", error = err.msgDetail
      if slotIndex > uint16.high.uint64:
        error "Cannot cast slot index to uint16, value = ", slotIndex
        return

      without slotQueueItem =?
        SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
      err:
        warn "Too many slots, cannot add to queue", error = err.msgDetail
        return

      if err =? queue.push(slotQueueItem).errorOption:
        if err of SlotQueueItemExistsError:
          error "Failed to push item to queue because it already exists",
            error = err.msgDetail
        elif err of QueueNotRunningError:
          warn "Failed to push item to queue because queue is not running",
            error = err.msgDetail
    except CancelledError as e:
      trace "sales.addSlotToQueue was cancelled"

  # We could get rid of this by adding the storage ask in the SlotFreed event,
  # so we would not need to call getRequest to get the collateralPerSlot.
  let fut = addSlotToQueue()
  sales.trackedFutures.track(fut)
  asyncSpawn fut

proc subscribeRequested(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

  proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: UInt256) =
  proc onStorageRequested(
    requestId: RequestId, ask: StorageAsk, expiry: uint64
  ) {.raises: [].} =
    sales.onStorageRequested(requestId, ask, expiry)

  try:
@ -426,9 +467,13 @@ proc subscribeSlotFilled(sales: Sales) {.async.} =
  let market = context.market
  let queue = context.slotQueue

  proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) =
  proc onSlotFilled(requestId: RequestId, slotIndex: uint64) =
    if slotIndex > uint16.high.uint64:
      error "Cannot cast slot index to uint16, value = ", slotIndex
      return

    trace "slot filled, removing from slot queue", requestId, slotIndex
    queue.delete(requestId, slotIndex.truncate(uint16))
    queue.delete(requestId, slotIndex.uint16)

    for agent in sales.agents:
      agent.onSlotFilled(requestId, slotIndex)
@ -445,7 +490,7 @@ proc subscribeSlotFreed(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

  proc onSlotFreed(requestId: RequestId, slotIndex: UInt256) =
  proc onSlotFreed(requestId: RequestId, slotIndex: uint64) =
    sales.onSlotFreed(requestId, slotIndex)

  try:
@ -461,9 +506,13 @@ proc subscribeSlotReservationsFull(sales: Sales) {.async.} =
  let market = context.market
  let queue = context.slotQueue

  proc onSlotReservationsFull(requestId: RequestId, slotIndex: UInt256) =
  proc onSlotReservationsFull(requestId: RequestId, slotIndex: uint64) =
    if slotIndex > uint16.high.uint64:
      error "Cannot cast slot index to uint16, value = ", slotIndex
      return

    trace "reservations for slot full, removing from slot queue", requestId, slotIndex
    queue.delete(requestId, slotIndex.truncate(uint16))
    queue.delete(requestId, slotIndex.uint16)

  try:
    let sub = await market.subscribeSlotReservationsFull(onSlotReservationsFull)
@ -477,16 +526,19 @@ proc startSlotQueue(sales: Sales) =
  let slotQueue = sales.context.slotQueue
  let reservations = sales.context.reservations

  slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
  slotQueue.onProcessSlot = proc(
    item: SlotQueueItem, done: Future[void]
  ) {.async: (raises: []).} =
    trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
    sales.processSlot(item, done)

  slotQueue.start()

  proc onAvailabilityAdded(availability: Availability) {.async.} =
    await sales.onAvailabilityAdded(availability)
  proc OnAvailabilitySaved(availability: Availability) {.async: (raises: []).} =
    if availability.enabled:
      await sales.OnAvailabilitySaved(availability)

  reservations.onAvailabilityAdded = onAvailabilityAdded
  reservations.OnAvailabilitySaved = OnAvailabilitySaved

proc subscribe(sales: Sales) {.async.} =
  await sales.subscribeRequested()

@ -35,6 +35,7 @@ import std/sequtils
import std/sugar
import std/typetraits
import std/sequtils
import std/times
import pkg/chronos
import pkg/datastore
import pkg/nimcrypto
@ -64,30 +65,41 @@ type
  SomeStorableId = AvailabilityId | ReservationId
  Availability* = ref object
    id* {.serialize.}: AvailabilityId
    totalSize* {.serialize.}: UInt256
    freeSize* {.serialize.}: UInt256
    duration* {.serialize.}: UInt256
    totalSize* {.serialize.}: uint64
    freeSize* {.serialize.}: uint64
    duration* {.serialize.}: uint64
    minPricePerBytePerSecond* {.serialize.}: UInt256
    totalCollateral {.serialize.}: UInt256
    totalRemainingCollateral* {.serialize.}: UInt256
    # If set to false, the availability will not accept new slots.
    # If enabled, it will not impact any existing slots that are already being hosted.
    enabled* {.serialize.}: bool
    # Specifies the latest timestamp after which the availability will no longer host any slots.
    # If set to 0, there will be no restrictions.
    until* {.serialize.}: SecondsSince1970

  Reservation* = ref object
    id* {.serialize.}: ReservationId
    availabilityId* {.serialize.}: AvailabilityId
    size* {.serialize.}: UInt256
    size* {.serialize.}: uint64
    requestId* {.serialize.}: RequestId
    slotIndex* {.serialize.}: UInt256
    slotIndex* {.serialize.}: uint64
    validUntil* {.serialize.}: SecondsSince1970

  Reservations* = ref object of RootObj
    availabilityLock: AsyncLock
      # Lock for protecting assertions of availability's sizes when searching for matching availability
    repo: RepoStore
    onAvailabilityAdded: ?OnAvailabilityAdded
    OnAvailabilitySaved: ?OnAvailabilitySaved

  GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
  IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
  OnAvailabilityAdded* =
    proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
  GetNext* = proc(): Future[?seq[byte]] {.
    upraises: [], gcsafe, async: (raises: [CancelledError]), closure
  .}
  IterDispose* =
    proc(): Future[?!void] {.gcsafe, async: (raises: [CancelledError]), closure.}
  OnAvailabilitySaved* = proc(availability: Availability): Future[void] {.
    upraises: [], gcsafe, async: (raises: [])
  .}
  StorableIter* = ref object
    finished*: bool
    next*: GetNext
@ -102,13 +114,20 @@ type
  SerializationError* = object of ReservationsError
  UpdateFailedError* = object of ReservationsError
  BytesOutOfBoundsError* = object of ReservationsError
  UntilOutOfBoundsError* = object of ReservationsError

const
  SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module
  ReservationsKey = (SalesKey / "reservations").tryGet

proc hash*(x: AvailabilityId): Hash {.borrow.}
proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.}
proc all*(
  self: Reservations, T: type SomeStorableObject
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}

proc all*(
  self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}

template withLock(lock, body) =
  try:
@ -123,11 +142,13 @@ proc new*(T: type Reservations, repo: RepoStore): Reservations =

proc init*(
    _: type Availability,
    totalSize: UInt256,
    freeSize: UInt256,
    duration: UInt256,
    totalSize: uint64,
    freeSize: uint64,
    duration: uint64,
    minPricePerBytePerSecond: UInt256,
    totalCollateral: UInt256,
    enabled: bool,
    until: SecondsSince1970,
): Availability =
  var id: array[32, byte]
  doAssert randomBytes(id) == 32
@ -139,6 +160,8 @@ proc init*(
    minPricePerBytePerSecond: minPricePerBytePerSecond,
    totalCollateral: totalCollateral,
    totalRemainingCollateral: totalCollateral,
    enabled: enabled,
    until: until,
  )

func totalCollateral*(self: Availability): UInt256 {.inline.} =
@ -151,9 +174,10 @@ proc `totalCollateral=`*(self: Availability, value: UInt256) {.inline.} =
proc init*(
    _: type Reservation,
    availabilityId: AvailabilityId,
    size: UInt256,
    size: uint64,
    requestId: RequestId,
    slotIndex: UInt256,
    slotIndex: uint64,
    validUntil: SecondsSince1970,
): Reservation =
  var id: array[32, byte]
  doAssert randomBytes(id) == 32
@ -163,6 +187,7 @@ proc init*(
    size: size,
    requestId: requestId,
    slotIndex: slotIndex,
    validUntil: validUntil,
  )

func toArray(id: SomeStorableId): array[32, byte] =
@ -189,10 +214,10 @@ logutils.formatIt(LogFormat.textLines, SomeStorableId):
logutils.formatIt(LogFormat.json, SomeStorableId):
  it.to0xHexLog

proc `onAvailabilityAdded=`*(
  self: Reservations, onAvailabilityAdded: OnAvailabilityAdded
proc `OnAvailabilitySaved=`*(
  self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved
) =
  self.onAvailabilityAdded = some onAvailabilityAdded
  self.OnAvailabilitySaved = some OnAvailabilitySaved

func key*(id: AvailabilityId): ?!Key =
  ## sales / reservations / <availabilityId>
@ -206,7 +231,7 @@ func key*(availability: Availability): ?!Key =
  return availability.id.key

func maxCollateralPerByte*(availability: Availability): UInt256 =
  return availability.totalRemainingCollateral div availability.freeSize
  return availability.totalRemainingCollateral div availability.freeSize.stuint(256)

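With `freeSize` now a plain uint64, the division above must widen it back to 256 bits first, since stint's `div` expects operands of matching width. A sketch of the conversion (the values are made up):

import pkg/stint

let totalRemainingCollateral = 1_000_000.u256
let freeSize = 4096'u64

# widen the uint64 divisor to UInt256 before dividing
echo totalRemainingCollateral div freeSize.stuint(256) # 244
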
func key*(reservation: Reservation): ?!Key =
  return key(reservation.id, reservation.availabilityId)
@ -217,11 +242,19 @@ func available*(self: Reservations): uint =
func hasAvailable*(self: Reservations, bytes: uint): bool =
  self.repo.available(bytes.NBytes)

proc exists*(self: Reservations, key: Key): Future[bool] {.async.} =
proc exists*(
  self: Reservations, key: Key
): Future[bool] {.async: (raises: [CancelledError]).} =
  let exists = await self.repo.metaDs.ds.contains(key)
  return exists

proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} =
iterator items(self: StorableIter): Future[?seq[byte]] =
  while not self.finished:
    yield self.next()

proc getImpl(
  self: Reservations, key: Key
): Future[?!seq[byte]] {.async: (raises: [CancelledError]).} =
  if not await self.exists(key):
    let err =
      newException(NotExistsError, "object with key " & $key & " does not exist")
@ -234,7 +267,7 @@ proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} =

proc get*(
  self: Reservations, key: Key, T: type SomeStorableObject
): Future[?!T] {.async.} =
): Future[?!T] {.async: (raises: [CancelledError]).} =
  without serialized =? await self.getImpl(key), error:
    return failure(error)

@ -243,7 +276,9 @@ proc get*(

  return success obj

proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.async.} =
proc updateImpl(
  self: Reservations, obj: SomeStorableObject
): Future[?!void] {.async: (raises: [CancelledError]).} =
  trace "updating " & $(obj.type), id = obj.id

  without key =? obj.key, error:
@ -256,10 +291,15 @@ proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.a

proc updateAvailability(
  self: Reservations, obj: Availability
): Future[?!void] {.async.} =
): Future[?!void] {.async: (raises: [CancelledError]).} =
  logScope:
    availabilityId = obj.id

  if obj.until < 0:
    let error =
      newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
    return failure(error)

  without key =? obj.key, error:
    return failure(error)

@ -268,66 +308,65 @@ proc updateAvailability(
      trace "Creating new Availability"
      let res = await self.updateImpl(obj)
      # inform subscribers that Availability has been added
      if onAvailabilityAdded =? self.onAvailabilityAdded:
        # when chronos v4 is implemented, and OnAvailabilityAdded is annotated
        # with async:(raises:[]), we can remove this try/catch as we know, with
        # certainty, that nothing will be raised
        try:
          await onAvailabilityAdded(obj)
        except CancelledError as e:
          raise e
        except CatchableError as e:
          # we don't have any insight into types of exceptions that
          # `onAvailabilityAdded` can raise because it is caller-defined
          warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
      if OnAvailabilitySaved =? self.OnAvailabilitySaved:
        await OnAvailabilitySaved(obj)
      return res
    else:
      return failure(err)

  if obj.until > 0:
    without allReservations =? await self.all(Reservation, obj.id), error:
      error.msg = "Error updating reservation: " & error.msg
      return failure(error)

    let requestEnds = allReservations.mapIt(it.validUntil)

    if requestEnds.len > 0 and requestEnds.max > obj.until:
      let error = newException(
        UntilOutOfBoundsError,
        "Until parameter must be greater than or equal to the longest currently hosted slot",
      )
      return failure(error)

  # Sizing of the availability changed, we need to adjust the repo reservation accordingly
  if oldAvailability.totalSize != obj.totalSize:
    trace "totalSize changed, updating repo reservation"
    if oldAvailability.totalSize < obj.totalSize: # storage added
      if reserveErr =? (
        await self.repo.reserve(
          (obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes
        )
        await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).NBytes)
      ).errorOption:
        return failure(reserveErr.toErr(ReserveFailedError))
    elif oldAvailability.totalSize > obj.totalSize: # storage removed
      if reserveErr =? (
        await self.repo.release(
          (oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes
        )
        await self.repo.release((oldAvailability.totalSize - obj.totalSize).NBytes)
      ).errorOption:
        return failure(reserveErr.toErr(ReleaseFailedError))

  let res = await self.updateImpl(obj)

  if oldAvailability.freeSize < obj.freeSize: # availability added
  if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
      oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
      oldAvailability.totalCollateral < obj.totalCollateral: # availability updated
    # inform subscribers that Availability has been modified (with increased
    # size)
    if onAvailabilityAdded =? self.onAvailabilityAdded:
      # when chronos v4 is implemented, and OnAvailabilityAdded is annotated
      # with async:(raises:[]), we can remove this try/catch as we know, with
      # certainty, that nothing will be raised
      try:
        await onAvailabilityAdded(obj)
      except CancelledError as e:
        raise e
      except CatchableError as e:
        # we don't have any insight into types of exceptions that
        # `onAvailabilityAdded` can raise because it is caller-defined
        warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg

    if OnAvailabilitySaved =? self.OnAvailabilitySaved:
      await OnAvailabilitySaved(obj)
  return res

proc update*(self: Reservations, obj: Reservation): Future[?!void] {.async.} =
proc update*(
  self: Reservations, obj: Reservation
): Future[?!void] {.async: (raises: [CancelledError]).} =
  return await self.updateImpl(obj)

proc update*(self: Reservations, obj: Availability): Future[?!void] {.async.} =
  withLock(self.availabilityLock):
    return await self.updateAvailability(obj)
proc update*(
  self: Reservations, obj: Availability
): Future[?!void] {.async: (raises: [CancelledError]).} =
  try:
    withLock(self.availabilityLock):
      return await self.updateAvailability(obj)
  except AsyncLockError as e:
    error "Lock error when trying to update the availability", err = e.msg
    return failure(e)

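The `withLock` template used here wraps a chronos AsyncLock, and the new `update` additionally catches AsyncLockError. The underlying acquire/try/finally/release discipline, sketched directly against chronos without the project's template:

import pkg/chronos

proc criticalSection(lock: AsyncLock) {.async.} =
  await lock.acquire()
  try:
    echo "holding the availability lock"
  finally:
    try:
      lock.release()
    except AsyncLockError:
      discard # release() raises if the lock was not held

let lock = newAsyncLock()
waitFor criticalSection(lock)
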
proc delete(self: Reservations, key: Key): Future[?!void] {.async.} =
  trace "deleting object", key
@ -361,7 +400,7 @@ proc deleteReservation*(
    else:
      return failure(error)

  if reservation.size > 0.u256:
  if reservation.size > 0.uint64:
    trace "returning remaining reservation bytes to availability",
      size = reservation.size

@ -389,17 +428,25 @@ proc deleteReservation*(

proc createAvailability*(
  self: Reservations,
  size: UInt256,
  duration: UInt256,
  size: uint64,
  duration: uint64,
  minPricePerBytePerSecond: UInt256,
  totalCollateral: UInt256,
  enabled: bool,
  until: SecondsSince1970,
): Future[?!Availability] {.async.} =
  trace "creating availability",
    size, duration, minPricePerBytePerSecond, totalCollateral
    size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until

  let availability =
    Availability.init(size, size, duration, minPricePerBytePerSecond, totalCollateral)
  let bytes = availability.freeSize.truncate(uint)
  if until < 0:
    let error =
      newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
    return failure(error)

  let availability = Availability.init(
    size, size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
  )
  let bytes = availability.freeSize

  if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
    return failure(reserveErr.toErr(ReserveFailedError))
@ -418,10 +465,11 @@ proc createAvailability*(
method createReservation*(
  self: Reservations,
  availabilityId: AvailabilityId,
  slotSize: UInt256,
  slotSize: uint64,
  requestId: RequestId,
  slotIndex: UInt256,
  slotIndex: uint64,
  collateralPerByte: UInt256,
  validUntil: SecondsSince1970,
): Future[?!Reservation] {.async, base.} =
  withLock(self.availabilityLock):
    without availabilityKey =? availabilityId.key, error:
@ -438,9 +486,11 @@ method createReservation*(
      )
      return failure(error)

    trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex
    trace "Creating reservation",
      availabilityId, slotSize, requestId, slotIndex, validUntil = validUntil

    let reservation = Reservation.init(availabilityId, slotSize, requestId, slotIndex)
    let reservation =
      Reservation.init(availabilityId, slotSize, requestId, slotIndex, validUntil)

    if createResErr =? (await self.update(reservation)).errorOption:
      return failure(createResErr)
@ -450,7 +500,7 @@ method createReservation*(
    availability.freeSize -= slotSize

    # adjust the remaining totalRemainingCollateral
    availability.totalRemainingCollateral -= slotSize * collateralPerByte
    availability.totalRemainingCollateral -= slotSize.u256 * collateralPerByte

    # update availability with reduced size
    trace "Updating availability with reduced size"
@ -475,7 +525,7 @@ proc returnBytesToAvailability*(
  self: Reservations,
  availabilityId: AvailabilityId,
  reservationId: ReservationId,
  bytes: UInt256,
  bytes: uint64,
): Future[?!void] {.async.} =
  logScope:
    reservationId
@ -502,8 +552,7 @@ proc returnBytesToAvailability*(

    # First let's see if we can re-reserve the bytes, if the Repo's quota
    # is depleted then we will fail-fast as there is nothing to be done atm.
    if reserveErr =?
      (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
    if reserveErr =? (await self.repo.reserve(bytesToBeReturned.NBytes)).errorOption:
      return failure(reserveErr.toErr(ReserveFailedError))

    without availabilityKey =? availabilityId.key, error:
@ -517,8 +566,7 @@ proc returnBytesToAvailability*(
    # Update availability with returned size
    if updateErr =? (await self.updateAvailability(availability)).errorOption:
      trace "Rolling back returning bytes"
      if rollbackErr =?
        (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
      if rollbackErr =? (await self.repo.release(bytesToBeReturned.NBytes)).errorOption:
        rollbackErr.parent = updateErr
        return failure(rollbackErr)

@ -531,7 +579,7 @@ proc release*(
  reservationId: ReservationId,
  availabilityId: AvailabilityId,
  bytes: uint,
): Future[?!void] {.async.} =
): Future[?!void] {.async: (raises: [CancelledError]).} =
  logScope:
    topics = "release"
    bytes
@ -546,7 +594,7 @@ proc release*(
  without var reservation =? (await self.get(key, Reservation)), error:
    return failure(error)

  if reservation.size < bytes.u256:
  if reservation.size < bytes:
    let error = newException(
      BytesOutOfBoundsError,
      "trying to release an amount of bytes that is greater than the total size of the Reservation",
@ -556,7 +604,7 @@ proc release*(
  if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption:
    return failure(releaseErr.toErr(ReleaseFailedError))

  reservation.size -= bytes.u256
  reservation.size -= bytes

  # persist partially used Reservation with updated size
  if err =? (await self.update(reservation)).errorOption:
@ -569,13 +617,9 @@ proc release*(

  return success()

iterator items(self: StorableIter): Future[?seq[byte]] =
  while not self.finished:
    yield self.next()

proc storables(
  self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!StorableIter] {.async.} =
): Future[?!StorableIter] {.async: (raises: [CancelledError]).} =
  var iter = StorableIter()
  let query = Query.init(queryKey)
  when T is Availability:
@ -593,7 +637,7 @@ proc storables(
    return failure(error)

  # /sales/reservations
  proc next(): Future[?seq[byte]] {.async.} =
  proc next(): Future[?seq[byte]] {.async: (raises: [CancelledError]).} =
    await idleAsync()
    iter.finished = results.finished
    if not results.finished and res =? (await results.next()) and res.data.len > 0 and
@ -602,7 +646,7 @@ proc storables(

    return none seq[byte]

  proc dispose(): Future[?!void] {.async.} =
  proc dispose(): Future[?!void] {.async: (raises: [CancelledError]).} =
    return await results.dispose()

  iter.next = next
@ -611,39 +655,49 @@ proc storables(

proc allImpl(
  self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!seq[T]] {.async.} =
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
  var ret: seq[T] = @[]

  without storables =? (await self.storables(T, queryKey)), error:
    return failure(error)

  for storable in storables.items:
    without bytes =? (await storable):
      continue
    try:
      without bytes =? (await storable):
        continue

    without obj =? T.fromJson(bytes), error:
      error "json deserialization error",
        json = string.fromBytes(bytes), error = error.msg
      continue
      without obj =? T.fromJson(bytes), error:
        error "json deserialization error",
          json = string.fromBytes(bytes), error = error.msg
        continue

    ret.add obj
      ret.add obj
    except CancelledError as err:
      raise err
    except CatchableError as err:
      error "Error when retrieving storable", error = err.msg
      continue

  return success(ret)

proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} =
proc all*(
  self: Reservations, T: type SomeStorableObject
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
  return await self.allImpl(T)

proc all*(
  self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async.} =
  without key =? (ReservationsKey / $availabilityId):
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
  without key =? key(availabilityId):
    return failure("no key")

  return await self.allImpl(T, key)

proc findAvailability*(
  self: Reservations,
  size, duration, pricePerBytePerSecond, collateralPerByte: UInt256,
  size, duration: uint64,
  pricePerBytePerSecond, collateralPerByte: UInt256,
  validUntil: SecondsSince1970,
): Future[?Availability] {.async.} =
  without storables =? (await self.storables(Availability)), e:
    error "failed to get all storables", error = e.msg
@ -651,11 +705,14 @@ proc findAvailability*(

  for item in storables.items:
    if bytes =? (await item) and availability =? Availability.fromJson(bytes):
      if size <= availability.freeSize and duration <= availability.duration and
      if availability.enabled and size <= availability.freeSize and
          duration <= availability.duration and
          collateralPerByte <= availability.maxCollateralPerByte and
          pricePerBytePerSecond >= availability.minPricePerBytePerSecond:
          pricePerBytePerSecond >= availability.minPricePerBytePerSecond and
          (availability.until == 0 or availability.until >= validUntil):
        trace "availability matched",
          id = availability.id,
          enabled = availability.enabled,
          size,
          availFreeSize = availability.freeSize,
          duration,
@ -663,7 +720,8 @@ proc findAvailability*(
          pricePerBytePerSecond,
          availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
          collateralPerByte,
          availMaxCollateralPerByte = availability.maxCollateralPerByte
          availMaxCollateralPerByte = availability.maxCollateralPerByte,
          until = availability.until

        # TODO: As soon as we're on ARC-ORC, we can use destructors
        # to automatically dispose our iterators when they fall out of scope.
@ -675,6 +733,7 @@ proc findAvailability*(

      trace "availability did not match",
        id = availability.id,
        enabled = availability.enabled,
        size,
        availFreeSize = availability.freeSize,
        duration,
@ -682,4 +741,5 @@ proc findAvailability*(
        pricePerBytePerSecond,
        availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
        collateralPerByte,
        availMaxCollateralPerByte = availability.maxCollateralPerByte
        availMaxCollateralPerByte = availability.maxCollateralPerByte,
        until = availability.until

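The matching rules in `findAvailability` reduce to a pure predicate over six conditions, with `until == 0` meaning "no deadline". A standalone restatement with hypothetical lightweight types, mirroring the chained conditions above:

import pkg/stint

type Avail = object
  enabled: bool
  freeSize, duration: uint64
  minPricePerBytePerSecond, maxCollateralPerByte: UInt256
  until: int64

proc matches(a: Avail, size, duration: uint64,
    price, collateralPerByte: UInt256, validUntil: int64): bool =
  a.enabled and size <= a.freeSize and duration <= a.duration and
    collateralPerByte <= a.maxCollateralPerByte and
    price >= a.minPricePerBytePerSecond and
    (a.until == 0 or a.until >= validUntil)

let a = Avail(enabled: true, freeSize: 100, duration: 60,
  minPricePerBytePerSecond: 1.u256, maxCollateralPerByte: 5.u256, until: 0)
echo a.matches(50'u64, 30'u64, 2.u256, 3.u256, 1_700_000_000'i64) # true
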
@ -6,6 +6,7 @@ import pkg/upraises
import ../contracts/requests
import ../errors
import ../logutils
import ../utils/exceptions
import ./statemachine
import ./salescontext
import ./salesdata
@ -26,9 +27,9 @@ type
    onFilled*: ?OnFilled

  OnCleanUp* = proc(
    returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
    reprocessSlot = false, returnedCollateral = UInt256.none
  ): Future[void] {.gcsafe, upraises: [].}
  OnFilled* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].}
  OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].}

  SalesAgentError = object of CodexError
  AllSlotsFilledError* = object of SalesAgentError
@ -39,7 +40,7 @@ func `==`*(a, b: SalesAgent): bool =
proc newSalesAgent*(
  context: SalesContext,
  requestId: RequestId,
  slotIndex: UInt256,
  slotIndex: uint64,
  request: ?StorageRequest,
): SalesAgent =
  var agent = SalesAgent.new()
@ -68,41 +69,47 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
  let data = agent.data
  let clock = agent.context.clock

  proc onCancelled() {.async.} =
  proc onCancelled() {.async: (raises: []).} =
    without request =? data.request:
      return

    let market = agent.context.market
    let expiry = await market.requestExpiresAt(data.requestId)
    try:
      let market = agent.context.market
      let expiry = await market.requestExpiresAt(data.requestId)

    while true:
      let deadline = max(clock.now, expiry) + 1
      trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline
      await clock.waitUntil(deadline)
      while true:
        let deadline = max(clock.now, expiry) + 1
        trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline
        await clock.waitUntil(deadline)

      without state =? await agent.retrieveRequestState():
        error "Uknown request", requestId = data.requestId
        return
        without state =? await agent.retrieveRequestState():
          error "Unknown request", requestId = data.requestId
          return

      case state
      of New:
        discard
      of RequestState.Cancelled:
        agent.schedule(cancelledEvent(request))
        break
      of RequestState.Started, RequestState.Finished, RequestState.Failed:
        break
        case state
        of New:
          discard
        of RequestState.Cancelled:
          agent.schedule(cancelledEvent(request))
          break
        of RequestState.Started, RequestState.Finished, RequestState.Failed:
          break

      debug "The request is not yet canceled, even though it should be. Waiting for some more time.",
        currentState = state, now = clock.now
        debug "The request is not yet canceled, even though it should be. Waiting for some more time.",
          currentState = state, now = clock.now
    except CancelledError:
      trace "Waiting for expiry to lapse was cancelled", requestId = data.requestId
    except CatchableError as e:
      error "Error while waiting for expiry to lapse", error = e.msgDetail

  data.cancelled = onCancelled()

method onFulfilled*(
  agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
  if agent.data.requestId == requestId and not agent.data.cancelled.isNil:
    agent.data.cancelled.cancelSoon()
  let cancelled = agent.data.cancelled
  if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished:
    cancelled.cancelSoon()

method onFailed*(
  agent: SalesAgent, requestId: RequestId
@ -113,7 +120,7 @@ method onFailed*(
    agent.schedule(failedEvent(request))

method onSlotFilled*(
  agent: SalesAgent, requestId: RequestId, slotIndex: UInt256
  agent: SalesAgent, requestId: RequestId, slotIndex: uint64
) {.base, gcsafe, upraises: [].} =
  if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
    agent.schedule(slotFilledEvent(requestId, slotIndex))

@ -1,6 +1,7 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import pkg/libp2p/cid

import ../market
import ../clock
@ -25,13 +26,13 @@ type

  BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
  OnStore* = proc(
    request: StorageRequest, slot: UInt256, blocksCb: BlocksCb
    request: StorageRequest, slot: uint64, blocksCb: BlocksCb, isRepairing: bool
  ): Future[?!void] {.gcsafe, upraises: [].}
  OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
    gcsafe, upraises: []
  .}
  OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.
  OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {.
    gcsafe, upraises: []
  .}
  OnClear* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].}
  OnSale* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].}
  OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].}
  OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].}

@ -7,6 +7,6 @@ type SalesData* = ref object
  requestId*: RequestId
  ask*: StorageAsk
  request*: ?StorageRequest
  slotIndex*: UInt256
  slotIndex*: uint64
  cancelled*: Future[void]
  reservation*: ?Reservation

@ -3,8 +3,8 @@ import std/tables
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ../errors
import ../clock
import ../logutils
import ../rng
import ../utils
@ -16,8 +16,9 @@ logScope:
  topics = "marketplace slotqueue"

type
  OnProcessSlot* =
    proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].}
  OnProcessSlot* = proc(item: SlotQueueItem, done: Future[void]): Future[void] {.
    gcsafe, async: (raises: [])
  .}

  # Non-ref obj copies value when assigned, preventing accidental modification
  # of values which could cause an incorrect order (eg
@ -25,16 +26,16 @@ type
  # but the heap invariant would no longer be honoured. When non-ref, the
  # compiler can ensure that statement will fail).
  SlotQueueWorker = object
    doneProcessing*: Future[void]
    doneProcessing*: Future[void].Raising([])

  SlotQueueItem* = object
    requestId: RequestId
    slotIndex: uint16
    slotSize: UInt256
    duration: UInt256
    slotSize: uint64
    duration: uint64
    pricePerBytePerSecond: UInt256
    collateralPerByte: UInt256
    expiry: UInt256
    collateral: UInt256 # Collateral computed
    expiry: uint64
    seen: bool

  # don't need to -1 to prevent overflow when adding 1 (to always allow push)
@ -75,9 +76,6 @@ proc profitability(item: SlotQueueItem): UInt256 =
    slotSize: item.slotSize,
  ).pricePerSlot

proc collateralPerSlot(item: SlotQueueItem): UInt256 =
  StorageAsk(collateralPerByte: item.collateralPerByte, slotSize: item.slotSize).collateralPerSlot

proc `<`*(a, b: SlotQueueItem): bool =
  # for A to have a higher priority than B (in a min queue), A must be less than
  # B.
@ -94,8 +92,8 @@ proc `<`*(a, b: SlotQueueItem): bool =
  scoreA.addIf(a.profitability > b.profitability, 3)
  scoreB.addIf(a.profitability < b.profitability, 3)

  scoreA.addIf(a.collateralPerSlot < b.collateralPerSlot, 2)
  scoreB.addIf(a.collateralPerSlot > b.collateralPerSlot, 2)
  scoreA.addIf(a.collateral < b.collateral, 2)
  scoreB.addIf(a.collateral > b.collateral, 2)

  scoreA.addIf(a.expiry > b.expiry, 1)
  scoreB.addIf(a.expiry < b.expiry, 1)
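The `<` above implements priority as a weighted score: profitability (weight 3) outranks required collateral (weight 2, lower is better), which outranks expiry (weight 1, later is better). A self-contained sketch of the same scoring (addIf, Item, and the final comparison are re-declared here to keep it runnable; the real proc's return expression is not shown in this hunk):

proc addIf(score: var int, condition: bool, weight: int) =
  if condition:
    score += weight

type Item = object
  profitability, collateral, expiry: int

proc `<`(a, b: Item): bool =
  var scoreA, scoreB = 0
  scoreA.addIf(a.profitability > b.profitability, 3)
  scoreB.addIf(a.profitability < b.profitability, 3)
  scoreA.addIf(a.collateral < b.collateral, 2) # cheaper collateral wins
  scoreB.addIf(a.collateral > b.collateral, 2)
  scoreA.addIf(a.expiry > b.expiry, 1) # later expiry wins
  scoreB.addIf(a.expiry < b.expiry, 1)
  scoreA > scoreB

# higher profitability beats cheaper collateral: 3 points vs 2
echo Item(profitability: 10, collateral: 2, expiry: 5) <
  Item(profitability: 5, collateral: 1, expiry: 5) # true
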
@ -128,14 +126,25 @@ proc new*(
  # `newAsyncQueue` procedure

proc init(_: type SlotQueueWorker): SlotQueueWorker =
  SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing"))
  let workerFut = Future[void].Raising([]).init(
    "slotqueue.worker.processing", {FutureFlag.OwnCancelSchedule}
  )

  workerFut.cancelCallback = proc(data: pointer) {.raises: [].} =
    # this is equivalent to try: ... except CatchableError: ...
    if not workerFut.finished:
      workerFut.complete()
    trace "Cancelling `SlotQueue` worker processing future"

  SlotQueueWorker(doneProcessing: workerFut)

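The replacement worker future above relies on chronos v4's `Raising([])` futures plus the OwnCancelSchedule flag: cancellation requests are routed to `cancelCallback`, which completes the future instead of leaving it forever pending. Reduced to its essentials (a sketch, assuming chronos v4):

import pkg/chronos

let workerFut = Future[void].Raising([]).init(
  "example.worker.processing", {FutureFlag.OwnCancelSchedule}
)

# with OwnCancelSchedule, chronos defers cancellation to this callback,
# which resolves the future rather than cancelling it
workerFut.cancelCallback = proc(data: pointer) {.raises: [].} =
  if not workerFut.finished:
    workerFut.complete()

echo workerFut.finished # false until completed or a cancel request arrives
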
proc init*(
  _: type SlotQueueItem,
  requestId: RequestId,
  slotIndex: uint16,
  ask: StorageAsk,
  expiry: UInt256,
  expiry: uint64,
  collateral: UInt256,
  seen = false,
): SlotQueueItem =
  SlotQueueItem(
@ -144,25 +153,32 @@ proc init*(
    slotSize: ask.slotSize,
    duration: ask.duration,
    pricePerBytePerSecond: ask.pricePerBytePerSecond,
    collateralPerByte: ask.collateralPerByte,
    collateral: collateral,
    expiry: expiry,
    seen: seen,
  )

proc init*(
  _: type SlotQueueItem, request: StorageRequest, slotIndex: uint16
  _: type SlotQueueItem,
  request: StorageRequest,
  slotIndex: uint16,
  collateral: UInt256,
): SlotQueueItem =
  SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry)
  SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral)

proc init*(
  _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: UInt256
): seq[SlotQueueItem] =
  _: type SlotQueueItem,
  requestId: RequestId,
  ask: StorageAsk,
  expiry: uint64,
  collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
  if not ask.slots.inRange:
    raise newException(SlotsOutOfRangeError, "Too many slots")

  var i = 0'u16
  proc initSlotQueueItem(): SlotQueueItem =
    let item = SlotQueueItem.init(requestId, i, ask, expiry)
    let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral)
    inc i
    return item

@ -170,8 +186,10 @@ proc init*(
  Rng.instance.shuffle(items)
  return items

proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] =
  return SlotQueueItem.init(request.id, request.ask, request.expiry)
proc init*(
  _: type SlotQueueItem, request: StorageRequest, collateral: UInt256
): seq[SlotQueueItem] =
  return SlotQueueItem.init(request.id, request.ask, request.expiry, collateral)

proc inRange*(val: SomeUnsignedInt): bool =
  val.uint16 in SlotQueueSize.low .. SlotQueueSize.high
@ -182,10 +200,10 @@ proc requestId*(self: SlotQueueItem): RequestId =
proc slotIndex*(self: SlotQueueItem): uint16 =
  self.slotIndex

proc slotSize*(self: SlotQueueItem): UInt256 =
proc slotSize*(self: SlotQueueItem): uint64 =
  self.slotSize

proc duration*(self: SlotQueueItem): UInt256 =
proc duration*(self: SlotQueueItem): uint64 =
  self.duration

proc pricePerBytePerSecond*(self: SlotQueueItem): UInt256 =
@ -233,25 +251,7 @@ proc unpause*(self: SlotQueue) =
  # set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
  self.unpaused.fire()

proc populateItem*(
  self: SlotQueue, requestId: RequestId, slotIndex: uint16
): ?SlotQueueItem =
  trace "populate item, items in queue", len = self.queue.len
  for item in self.queue.items:
    trace "populate item search", itemRequestId = item.requestId, requestId
    if item.requestId == requestId:
      return some SlotQueueItem(
        requestId: requestId,
        slotIndex: slotIndex,
        slotSize: item.slotSize,
        duration: item.duration,
        pricePerBytePerSecond: item.pricePerBytePerSecond,
        collateralPerByte: item.collateralPerByte,
        expiry: item.expiry,
      )
  return none SlotQueueItem

proc push*(self: SlotQueue, item: SlotQueueItem): ?!void =
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} =
  logScope:
    requestId = item.requestId
    slotIndex = item.slotIndex
@ -429,7 +429,6 @@ proc run(self: SlotQueue) {.async: (raises: []).} =

      let fut = self.dispatch(worker, item)
      self.trackedFutures.track(fut)
      asyncSpawn fut

      await sleepAsync(1.millis) # poll
    except CancelledError:
@ -457,7 +456,6 @@ proc start*(self: SlotQueue) =

  let fut = self.run()
  self.trackedFutures.track(fut)
  asyncSpawn fut

proc stop*(self: SlotQueue) {.async.} =
  if not self.running:

@ -25,7 +25,7 @@ method onFailed*(
|
||||
discard
|
||||
|
||||
method onSlotFilled*(
|
||||
state: SaleState, requestId: RequestId, slotIndex: UInt256
|
||||
state: SaleState, requestId: RequestId, slotIndex: uint64
|
||||
): ?State {.base, upraises: [].} =
|
||||
discard
|
||||
|
||||
@ -37,6 +37,6 @@ proc failedEvent*(request: StorageRequest): Event =
|
||||
return proc(state: State): ?State =
|
||||
SaleState(state).onFailed(request)
|
||||
|
||||
proc slotFilledEvent*(requestId: RequestId, slotIndex: UInt256): Event =
|
||||
proc slotFilledEvent*(requestId: RequestId, slotIndex: uint64): Event =
|
||||
return proc(state: State): ?State =
|
||||
SaleState(state).onSlotFilled(requestId, slotIndex)
|
||||
|
||||
@ -1,17 +1,28 @@
import ../../logutils
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errorhandling
import ./errored

logScope:
topics = "marketplace sales cancelled"

type SaleCancelled* = ref object of ErrorHandlingState
type SaleCancelled* = ref object of SaleState

method `$`*(state: SaleCancelled): string =
"SaleCancelled"

method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} =
proc slotIsFilledByMe(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async: (raises: [CancelledError, MarketError]).} =
let host = await market.getHost(requestId, slotIndex)
let me = await market.getSigner()

return host == me.some

method run*(
state: SaleCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let market = agent.context.market
@ -19,21 +30,33 @@ method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} =
without request =? data.request:
raiseAssert "no sale request"

let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Collecting collateral and partial payout",
requestId = data.requestId, slotIndex = data.slotIndex
let currentCollateral = await market.currentCollateral(slot.id)
await market.freeSlot(slot.id)
try:
var returnedCollateral = UInt256.none

if onClear =? agent.context.onClear and request =? data.request:
onClear(request, data.slotIndex)
if await slotIsFilledByMe(market, data.requestId, data.slotIndex):
debug "Collecting collateral and partial payout",
requestId = data.requestId, slotIndex = data.slotIndex

if onCleanUp =? agent.onCleanUp:
await onCleanUp(
returnBytes = true,
reprocessSlot = false,
returnedCollateral = some currentCollateral,
)
let slot = Slot(request: request, slotIndex: data.slotIndex)
let currentCollateral = await market.currentCollateral(slot.id)

warn "Sale cancelled due to timeout",
requestId = data.requestId, slotIndex = data.slotIndex
try:
await market.freeSlot(slot.id)
except SlotStateMismatchError as e:
warn "Failed to free slot because slot is already free", error = e.msg

returnedCollateral = currentCollateral.some

if onClear =? agent.context.onClear and request =? data.request:
onClear(request, data.slotIndex)

if onCleanUp =? agent.onCleanUp:
await onCleanUp(reprocessSlot = false, returnedCollateral = returnedCollateral)

warn "Sale cancelled due to timeout",
requestId = data.requestId, slotIndex = data.slotIndex
except CancelledError as e:
trace "SaleCancelled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleCancelled.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -4,16 +4,16 @@ import pkg/questionable/results
import ../../blocktype as bt
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errorhandling
import ./cancelled
import ./failed
import ./filled
import ./initialproving
import ./errored

type SaleDownloading* = ref object of ErrorHandlingState
type SaleDownloading* = ref object of SaleState

logScope:
topics = "marketplace sales downloading"
@ -28,11 +28,13 @@ method onFailed*(state: SaleDownloading, request: StorageRequest): ?State =
return some State(SaleFailed())

method onSlotFilled*(
state: SaleDownloading, requestId: RequestId, slotIndex: UInt256
state: SaleDownloading, requestId: RequestId, slotIndex: uint64
): ?State =
return some State(SaleFilled())

method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleDownloading, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
@ -64,9 +66,18 @@ method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.}
trace "Releasing batch of bytes written to disk", bytes
return await reservations.release(reservation.id, reservation.availabilityId, bytes)

trace "Starting download"
if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption:
return some State(SaleErrored(error: err, reprocessSlot: false))
try:
let slotId = slotId(request.id, data.slotIndex)
let isRepairing = (await context.market.slotState(slotId)) == SlotState.Repair

trace "Download complete"
return some State(SaleInitialProving())
trace "Starting download"
if err =? (await onStore(request, data.slotIndex, onBlocks, isRepairing)).errorOption:
return some State(SaleErrored(error: err, reprocessSlot: false))

trace "Download complete"
return some State(SaleInitialProving())
except CancelledError as e:
trace "SaleDownloading.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleDownloading.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -17,10 +17,9 @@ type SaleErrored* = ref object of SaleState
method `$`*(state: SaleErrored): string =
"SaleErrored"

method onError*(state: SaleState, err: ref CatchableError): ?State {.upraises: [].} =
error "error during SaleErrored run", error = err.msg

method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
@ -30,8 +29,13 @@ method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} =
requestId = data.requestId,
slotIndex = data.slotIndex

if onClear =? context.onClear and request =? data.request:
onClear(request, data.slotIndex)
try:
if onClear =? context.onClear and request =? data.request:
onClear(request, data.slotIndex)

if onCleanUp =? agent.onCleanUp:
await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(reprocessSlot = state.reprocessSlot)
except CancelledError as e:
trace "SaleErrored.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleErrored.run", error = e.msgDetail

@ -1,8 +0,0 @@
import pkg/questionable
import ../statemachine
import ./errored

type ErrorHandlingState* = ref object of SaleState

method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
some State(SaleErrored(error: error))
@ -1,30 +1,40 @@
import ../../logutils
import ../../utils/exceptions
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errorhandling
import ./errored

logScope:
topics = "marketplace sales failed"

type
SaleFailed* = ref object of ErrorHandlingState
SaleFailed* = ref object of SaleState
SaleFailedError* = object of SaleError

method `$`*(state: SaleFailed): string =
"SaleFailed"

method run*(state: SaleFailed, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market

without request =? data.request:
raiseAssert "no sale request"

let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Removing slot from mySlots",
requestId = data.requestId, slotIndex = data.slotIndex
await market.freeSlot(slot.id)
try:
let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Removing slot from mySlots",
requestId = data.requestId, slotIndex = data.slotIndex

let error = newException(SaleFailedError, "Sale failed")
return some State(SaleErrored(error: error))
await market.freeSlot(slot.id)

let error = newException(SaleFailedError, "Sale failed")
return some State(SaleErrored(error: error))
except CancelledError as e:
trace "SaleFailed.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleFailed.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -3,9 +3,9 @@ import pkg/questionable/results

import ../../conf
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errorhandling
import ./errored
import ./cancelled
import ./failed
@ -18,7 +18,7 @@ logScope:
topics = "marketplace sales filled"

type
SaleFilled* = ref object of ErrorHandlingState
SaleFilled* = ref object of SaleState
HostMismatchError* = object of CatchableError

method onCancelled*(state: SaleFilled, request: StorageRequest): ?State =
@ -30,40 +30,48 @@ method onFailed*(state: SaleFilled, request: StorageRequest): ?State =
method `$`*(state: SaleFilled): string =
"SaleFilled"

method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleFilled, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context

let market = context.market
let host = await market.getHost(data.requestId, data.slotIndex)
let me = await market.getSigner()

if host == me.some:
info "Slot successfully filled",
requestId = data.requestId, slotIndex = data.slotIndex
try:
let host = await market.getHost(data.requestId, data.slotIndex)
let me = await market.getSigner()

without request =? data.request:
raiseAssert "no sale request"
if host == me.some:
info "Slot successfully filled",
requestId = data.requestId, slotIndex = data.slotIndex

if onFilled =? agent.onFilled:
onFilled(request, data.slotIndex)
without request =? data.request:
raiseAssert "no sale request"

without onExpiryUpdate =? context.onExpiryUpdate:
raiseAssert "onExpiryUpdate callback not set"
if onFilled =? agent.onFilled:
onFilled(request, data.slotIndex)

let requestEnd = await market.getRequestEnd(data.requestId)
if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption:
return some State(SaleErrored(error: err))
without onExpiryUpdate =? context.onExpiryUpdate:
raiseAssert "onExpiryUpdate callback not set"

when codex_enable_proof_failures:
if context.simulateProofFailures > 0:
info "Proving with failure rate", rate = context.simulateProofFailures
return some State(
SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures)
)
let requestEnd = await market.getRequestEnd(data.requestId)
if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption:
return some State(SaleErrored(error: err))

return some State(SaleProving())
else:
let error = newException(HostMismatchError, "Slot filled by other host")
return some State(SaleErrored(error: error))
when codex_enable_proof_failures:
if context.simulateProofFailures > 0:
info "Proving with failure rate", rate = context.simulateProofFailures
return some State(
SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures)
)

return some State(SaleProving())
else:
let error = newException(HostMismatchError, "Slot filled by other host")
return some State(SaleErrored(error: error))
except CancelledError as e:
trace "SaleFilled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleFilled.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -1,9 +1,9 @@
import pkg/stint
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errorhandling
import ./filled
import ./cancelled
import ./failed
@ -13,7 +13,7 @@ import ./errored
logScope:
topics = "marketplace sales filling"

type SaleFilling* = ref object of ErrorHandlingState
type SaleFilling* = ref object of SaleState
proof*: Groth16Proof

method `$`*(state: SaleFilling): string =
@ -25,9 +25,12 @@ method onCancelled*(state: SaleFilling, request: StorageRequest): ?State =
method onFailed*(state: SaleFilling, request: StorageRequest): ?State =
return some State(SaleFailed())

method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleFilling, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market

without (request =? data.request):
raiseAssert "Request not set"

@ -35,28 +38,26 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} =
requestId = data.requestId
slotIndex = data.slotIndex

let slotState = await market.slotState(slotId(data.requestId, data.slotIndex))
let requestedCollateral = request.ask.collateralPerSlot
var collateral: UInt256

if slotState == SlotState.Repair:
# When repairing the node gets "discount" on the collateral that it needs to
let repairRewardPercentage = (await market.repairRewardPercentage).u256
collateral =
requestedCollateral -
((requestedCollateral * repairRewardPercentage)).div(100.u256)
else:
collateral = requestedCollateral
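Editor's note: a worked example of the repair discount computed by the branch removed above, using hypothetical numbers (repairRewardPercentage assumed to be 10):

# 10% repair reward on a requested collateral of 1000:
# 1000 - (1000 * 10) div 100 = 900
let requested = 1000.u256
let discounted = requested - (requested * 10.u256).div(100.u256) # 900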

debug "Filling slot"
try:
await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
except MarketError as e:
if e.msg.contains "Slot is not free":
debug "Slot is already filled, ignoring slot"
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the ErrorHandlingState
without collateral =? await market.slotCollateral(data.requestId, data.slotIndex),
err:
error "Failure attempting to fill slot: unable to calculate collateral",
error = err.msg
return some State(SaleErrored(error: err))

return some State(SaleFilled())
debug "Filling slot"
try:
await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
except SlotStateMismatchError as e:
debug "Slot is already filled, ignoring slot"
return some State(SaleIgnored(reprocessSlot: false))
except MarketError as e:
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the SaleState

return some State(SaleFilled())
except CancelledError as e:
trace "SaleFilling.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleFilling.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -1,16 +1,17 @@
import pkg/chronos

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errorhandling
import ./cancelled
import ./failed
import ./errored

logScope:
topics = "marketplace sales finished"

type SaleFinished* = ref object of ErrorHandlingState
type SaleFinished* = ref object of SaleState
returnedCollateral*: ?UInt256

method `$`*(state: SaleFinished): string =
@ -22,7 +23,9 @@ method onCancelled*(state: SaleFinished, request: StorageRequest): ?State =
method onFailed*(state: SaleFinished, request: StorageRequest): ?State =
return some State(SaleFailed())

method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleFinished, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data

@ -32,5 +35,14 @@ method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} =
info "Slot finished and paid out",
requestId = data.requestId, slotIndex = data.slotIndex

if onCleanUp =? agent.onCleanUp:
await onCleanUp(returnedCollateral = state.returnedCollateral)
try:
if onClear =? agent.context.onClear:
onClear(request, data.slotIndex)

if onCleanUp =? agent.onCleanUp:
await onCleanUp(returnedCollateral = state.returnedCollateral)
except CancelledError as e:
trace "SaleFinished.run onCleanUp was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleFinished.run in onCleanUp callback", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -1,9 +1,10 @@
import pkg/chronos

import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errorhandling
import ./errored

logScope:
topics = "marketplace sales ignored"
@ -11,17 +12,22 @@ logScope:
# Ignored slots could mean there was no availability or that the slot could
# not be reserved.

type SaleIgnored* = ref object of ErrorHandlingState
type SaleIgnored* = ref object of SaleState
reprocessSlot*: bool # re-add slot to queue with `seen` flag
returnBytes*: bool # return unreleased bytes from Reservation to Availability

method `$`*(state: SaleIgnored): string =
"SaleIgnored"

method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleIgnored, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)

if onCleanUp =? agent.onCleanUp:
await onCleanUp(
reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes
)
try:
if onCleanUp =? agent.onCleanUp:
await onCleanUp(reprocessSlot = state.reprocessSlot)
except CancelledError as e:
trace "SaleIgnored.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleIgnored.run in onCleanUp", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -1,9 +1,9 @@
import pkg/questionable/results
import ../../clock
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errorhandling
import ./filling
import ./cancelled
import ./errored
@ -12,7 +12,7 @@ import ./failed
logScope:
topics = "marketplace sales initial-proving"

type SaleInitialProving* = ref object of ErrorHandlingState
type SaleInitialProving* = ref object of SaleState

method `$`*(state: SaleInitialProving): string =
"SaleInitialProving"
@ -25,9 +25,9 @@ method onFailed*(state: SaleInitialProving, request: StorageRequest): ?State =

proc waitUntilNextPeriod(clock: Clock, periodicity: Periodicity) {.async.} =
trace "Waiting until next period"
let period = periodicity.periodOf(clock.now().u256)
let periodEnd = periodicity.periodEnd(period).truncate(int64)
await clock.waitUntil(periodEnd + 1)
let period = periodicity.periodOf(clock.now().Timestamp)
let periodEnd = periodicity.periodEnd(period)
await clock.waitUntil((periodEnd + 1).toSecondsSince1970)
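Editor's note: the timing intent of waitUntilNextPeriod, sketched with plain integers under the assumption that periodOf(t) = t div duration and periodEnd(p) = (p + 1) * duration - 1 (assumed semantics, for illustration only):

let duration = 10 # assumed period length in seconds
let now = 123
let period = now div duration # 12
let periodEnd = (period + 1) * duration - 1 # 129
# waitUntil(periodEnd + 1) resumes at 130, safely inside the next period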

proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.async.} =
let periodicity = await market.periodicity()
@ -36,7 +36,9 @@ proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.asyn
while (await market.getPointer(slotId)) > (256 - downtime):
await clock.waitUntilNextPeriod(periodicity)

method run*(state: SaleInitialProving, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleInitialProving, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let context = SalesAgent(machine).context
let market = context.market
@ -48,16 +50,22 @@ method run*(state: SaleInitialProving, machine: Machine): Future[?State] {.async
without onProve =? context.onProve:
raiseAssert "onProve callback not set"

debug "Waiting for a proof challenge that is valid for the entire period"
let slot = Slot(request: request, slotIndex: data.slotIndex)
await waitForStableChallenge(market, clock, slot.id)
try:
debug "Waiting for a proof challenge that is valid for the entire period"
let slot = Slot(request: request, slotIndex: data.slotIndex)
await waitForStableChallenge(market, clock, slot.id)

debug "Generating initial proof", requestId = data.requestId
let challenge = await context.market.getChallenge(slot.id)
without proof =? (await onProve(slot, challenge)), err:
error "Failed to generate initial proof", error = err.msg
return some State(SaleErrored(error: err))
debug "Generating initial proof", requestId = data.requestId
let challenge = await context.market.getChallenge(slot.id)
without proof =? (await onProve(slot, challenge)), err:
error "Failed to generate initial proof", error = err.msg
return some State(SaleErrored(error: err))

debug "Finished proof calculation", requestId = data.requestId
debug "Finished proof calculation", requestId = data.requestId

return some State(SaleFilling(proof: proof))
return some State(SaleFilling(proof: proof))
except CancelledError as e:
trace "SaleInitialProving.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleInitialProving.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -1,16 +1,17 @@
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errorhandling
import ./cancelled
import ./failed
import ./finished
import ./errored

logScope:
topics = "marketplace sales payout"

type SalePayout* = ref object of ErrorHandlingState
type SalePayout* = ref object of SaleState

method `$`*(state: SalePayout): string =
"SalePayout"
@ -21,17 +22,25 @@ method onCancelled*(state: SalePayout, request: StorageRequest): ?State =
method onFailed*(state: SalePayout, request: StorageRequest): ?State =
return some State(SaleFailed())

method run*(state: SalePayout, machine: Machine): Future[?State] {.async.} =
method run*(
state: SalePayout, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market

without request =? data.request:
raiseAssert "no sale request"

let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Collecting finished slot's reward",
requestId = data.requestId, slotIndex = data.slotIndex
let currentCollateral = await market.currentCollateral(slot.id)
await market.freeSlot(slot.id)
try:
let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Collecting finished slot's reward",
requestId = data.requestId, slotIndex = data.slotIndex
let currentCollateral = await market.currentCollateral(slot.id)
await market.freeSlot(slot.id)

return some State(SaleFinished(returnedCollateral: some currentCollateral))
return some State(SaleFinished(returnedCollateral: some currentCollateral))
except CancelledError as e:
trace "SalePayout.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SalePayout.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -4,9 +4,9 @@ import pkg/metrics

import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errorhandling
import ./cancelled
import ./failed
import ./filled
@ -18,7 +18,7 @@ declareCounter(
codex_reservations_availability_mismatch, "codex reservations availability_mismatch"
)

type SalePreparing* = ref object of ErrorHandlingState
type SalePreparing* = ref object of SaleState

logScope:
topics = "marketplace sales preparing"
@ -33,66 +33,76 @@ method onFailed*(state: SalePreparing, request: StorageRequest): ?State =
return some State(SaleFailed())

method onSlotFilled*(
state: SalePreparing, requestId: RequestId, slotIndex: UInt256
state: SalePreparing, requestId: RequestId, slotIndex: uint64
): ?State =
return some State(SaleFilled())

method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} =
method run*(
state: SalePreparing, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
let market = context.market
let reservations = context.reservations

await agent.retrieveRequest()
await agent.subscribe()
try:
await agent.retrieveRequest()
await agent.subscribe()

without request =? data.request:
raiseAssert "no sale request"
without request =? data.request:
raiseAssert "no sale request"

let slotId = slotId(data.requestId, data.slotIndex)
let state = await market.slotState(slotId)
if state != SlotState.Free and state != SlotState.Repair:
return some State(SaleIgnored(reprocessSlot: false, returnBytes: false))
let slotId = slotId(data.requestId, data.slotIndex)
let state = await market.slotState(slotId)
if state != SlotState.Free and state != SlotState.Repair:
return some State(SaleIgnored(reprocessSlot: false))

# TODO: Once implemented, check to ensure the host is allowed to fill the slot,
# due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal)
# TODO: Once implemented, check to ensure the host is allowed to fill the slot,
# due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal)

logScope:
slotIndex = data.slotIndex
slotSize = request.ask.slotSize
duration = request.ask.duration
pricePerBytePerSecond = request.ask.pricePerBytePerSecond
collateralPerByte = request.ask.collateralPerByte
logScope:
slotIndex = data.slotIndex
slotSize = request.ask.slotSize
duration = request.ask.duration
pricePerBytePerSecond = request.ask.pricePerBytePerSecond
collateralPerByte = request.ask.collateralPerByte

without availability =?
await reservations.findAvailability(
request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond,
request.ask.collateralPerByte,
):
debug "No availability found for request, ignoring"
let requestEnd = await market.getRequestEnd(data.requestId)

return some State(SaleIgnored(reprocessSlot: true))
without availability =?
await reservations.findAvailability(
request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond,
request.ask.collateralPerByte, requestEnd,
):
debug "No availability found for request, ignoring"

info "Availability found for request, creating reservation"

without reservation =?
await reservations.createReservation(
availability.id, request.ask.slotSize, request.id, data.slotIndex,
request.ask.collateralPerByte,
), error:
trace "Creation of reservation failed"
# Race condition:
# reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it.
# Should createReservation fail because there's no space, we proceed to SaleIgnored.
if error of BytesOutOfBoundsError:
# Let's monitor how often this happens and if it is often we can make it more intelligent to handle it
codex_reservations_availability_mismatch.inc()
return some State(SaleIgnored(reprocessSlot: true))

return some State(SaleErrored(error: error))
info "Availability found for request, creating reservation"

trace "Reservation created succesfully"
without reservation =?
await reservations.createReservation(
availability.id, request.ask.slotSize, request.id, data.slotIndex,
request.ask.collateralPerByte, requestEnd,
), error:
trace "Creation of reservation failed"
# Race condition:
# reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it.
# Should createReservation fail because there's no space, we proceed to SaleIgnored.
if error of BytesOutOfBoundsError:
# Let's monitor how often this happens and if it is often we can make it more intelligent to handle it
codex_reservations_availability_mismatch.inc()
return some State(SaleIgnored(reprocessSlot: true))

data.reservation = some reservation
return some State(SaleSlotReserving())
return some State(SaleErrored(error: error))

trace "Reservation created successfully"

data.reservation = some reservation
return some State(SaleSlotReserving())
except CancelledError as e:
trace "SalePreparing.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SalePreparing.run", error = e.msgDetail
return some State(SaleErrored(error: e))
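Editor's note: condensed, the error handling above distinguishes the known capacity race from genuine failures. A sketch of that decision (it mirrors the code above rather than adding to it):

# BytesOutOfBoundsError means the availability shrank between
# findAvailability and createReservation: record the mismatch and re-queue
# the slot as "seen" instead of failing the sale; anything else is fatal.
if error of BytesOutOfBoundsError:
  codex_reservations_availability_mismatch.inc()
  return some State(SaleIgnored(reprocessSlot: true))
return some State(SaleErrored(error: error))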

@ -6,7 +6,6 @@ import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ../salescontext
import ./errorhandling
import ./cancelled
import ./failed
import ./errored
@ -18,7 +17,7 @@ logScope:
type
SlotFreedError* = object of CatchableError
SlotNotFilledError* = object of CatchableError
SaleProving* = ref object of ErrorHandlingState
SaleProving* = ref object of SaleState
loop: Future[void]

method prove*(
@ -47,7 +46,7 @@ proc proveLoop(
market: Market,
clock: Clock,
request: StorageRequest,
slotIndex: UInt256,
slotIndex: uint64,
onProve: OnProve,
) {.async.} =
let slot = Slot(request: request, slotIndex: slotIndex)
@ -61,12 +60,12 @@ proc proveLoop(

proc getCurrentPeriod(): Future[Period] {.async.} =
let periodicity = await market.periodicity()
return periodicity.periodOf(clock.now().u256)
return periodicity.periodOf(clock.now().Timestamp)

proc waitUntilPeriod(period: Period) {.async.} =
let periodicity = await market.periodicity()
# Ensure that we're past the period boundary by waiting an additional second
await clock.waitUntil(periodicity.periodStart(period).truncate(int64) + 1)
await clock.waitUntil((periodicity.periodStart(period) + 1).toSecondsSince1970)

while true:
let currentPeriod = await getCurrentPeriod()
@ -113,7 +112,9 @@ method onFailed*(state: SaleProving, request: StorageRequest): ?State =
# state change
return some State(SaleFailed())

method run*(state: SaleProving, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleProving, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let context = SalesAgent(machine).context

@ -129,27 +130,37 @@ method run*(state: SaleProving, machine: Machine): Future[?State] {.async.} =
without clock =? context.clock:
raiseAssert("clock not set")

debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex
try:
let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve)
state.loop = loop
await loop
except CancelledError:
discard
debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex
try:
let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve)
state.loop = loop
await loop
except CancelledError as e:
trace "proving loop cancelled"
discard
except CatchableError as e:
error "Proving failed",
msg = e.msg, typ = $(type e), stack = e.getStackTrace(), error = e.msgDetail
return some State(SaleErrored(error: e))
finally:
# Cleanup of the proving loop
debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex

if not state.loop.isNil:
if not state.loop.finished:
try:
await state.loop.cancelAndWait()
except CancelledError:
discard
except CatchableError as e:
error "Error during cancellation of proving loop", msg = e.msg

state.loop = nil

return some State(SalePayout())
except CancelledError as e:
trace "SaleProving.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Proving failed", msg = e.msg
error "Error during SaleProving.run", error = e.msgDetail
return some State(SaleErrored(error: e))
finally:
# Cleanup of the proving loop
debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex

if not state.loop.isNil:
if not state.loop.finished:
try:
await state.loop.cancelAndWait()
except CatchableError as e:
error "Error during cancellation of proving loop", msg = e.msg

state.loop = nil

return some State(SalePayout())

@ -4,12 +4,14 @@ when codex_enable_proof_failures:
import pkg/stint
import pkg/ethers

import ../../contracts/marketplace
import ../../contracts/requests
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salescontext
import ./proving
import ./errored

logScope:
topics = "marketplace sales simulated-proving"
@ -18,7 +20,7 @@ when codex_enable_proof_failures:
failEveryNProofs*: int
proofCount: int

proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) =
proc onSubmitProofError(error: ref CatchableError, period: Period, slotId: SlotId) =
error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail

method prove*(
@ -29,22 +31,27 @@ when codex_enable_proof_failures:
market: Market,
currentPeriod: Period,
) {.async.} =
trace "Processing proving in simulated mode"
state.proofCount += 1
if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0:
state.proofCount = 0
try:
trace "Processing proving in simulated mode"
state.proofCount += 1
if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0:
state.proofCount = 0

try:
warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id
await market.submitProof(slot.id, Groth16Proof.default)
except MarketError as e:
if not e.msg.contains("Invalid proof"):
try:
warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id
await market.submitProof(slot.id, Groth16Proof.default)
except Proofs_InvalidProof as e:
discard # expected
except CancelledError as error:
raise error
except CatchableError as e:
onSubmitProofError(e, currentPeriod, slot.id)
except CancelledError as error:
raise error
except CatchableError as e:
onSubmitProofError(e, currentPeriod, slot.id)
else:
await procCall SaleProving(state).prove(
slot, challenge, onProve, market, currentPeriod
)
else:
await procCall SaleProving(state).prove(
slot, challenge, onProve, market, currentPeriod
)
except CancelledError as e:
trace "Submitting INVALID proof cancelled", error = e.msgDetail
raise e
except CatchableError as e:
error "Submitting INVALID proof failed", error = e.msgDetail

@ -3,16 +3,16 @@ import pkg/metrics

import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errorhandling
import ./cancelled
import ./failed
import ./ignored
import ./downloading
import ./errored

type SaleSlotReserving* = ref object of ErrorHandlingState
type SaleSlotReserving* = ref object of SaleState

logScope:
topics = "marketplace sales reserving"
@ -26,7 +26,9 @@ method onCancelled*(state: SaleSlotReserving, request: StorageRequest): ?State =
method onFailed*(state: SaleSlotReserving, request: StorageRequest): ?State =
return some State(SaleFailed())

method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleSlotReserving, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
@ -36,23 +38,28 @@ method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async.
requestId = data.requestId
slotIndex = data.slotIndex

let canReserve = await market.canReserveSlot(data.requestId, data.slotIndex)
if canReserve:
try:
trace "Reserving slot"
await market.reserveSlot(data.requestId, data.slotIndex)
except MarketError as e:
if e.msg.contains "SlotReservations_ReservationNotAllowed":
try:
let canReserve = await market.canReserveSlot(data.requestId, data.slotIndex)
if canReserve:
try:
trace "Reserving slot"
await market.reserveSlot(data.requestId, data.slotIndex)
except SlotReservationNotAllowedError as e:
debug "Slot cannot be reserved, ignoring", error = e.msg
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State(SaleIgnored(reprocessSlot: false))
except MarketError as e:
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the ErrorHandlingState
# other CatchableErrors are handled "automatically" by the SaleState

trace "Slot successfully reserved"
return some State(SaleDownloading())
else:
# do not re-add this slot to the queue, and return bytes from Reservation to
# the Availability
debug "Slot cannot be reserved, ignoring"
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
trace "Slot successfully reserved"
return some State(SaleDownloading())
else:
# do not re-add this slot to the queue, and return bytes from Reservation to
# the Availability
debug "Slot cannot be reserved, ignoring"
return some State(SaleIgnored(reprocessSlot: false))
except CancelledError as e:
trace "SaleSlotReserving.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleSlotReserving.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -1,4 +1,5 @@
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./filled
@ -26,34 +27,42 @@ method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State =
method onFailed*(state: SaleUnknown, request: StorageRequest): ?State =
return some State(SaleFailed())

method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} =
method run*(
state: SaleUnknown, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let market = agent.context.market

await agent.retrieveRequest()
await agent.subscribe()
try:
await agent.retrieveRequest()
await agent.subscribe()

let slotId = slotId(data.requestId, data.slotIndex)
let slotState = await market.slotState(slotId)
let slotId = slotId(data.requestId, data.slotIndex)
let slotState = await market.slotState(slotId)

case slotState
of SlotState.Free:
let error =
newException(UnexpectedSlotError, "Slot state on chain should not be 'free'")
return some State(SaleErrored(error: error))
of SlotState.Filled:
return some State(SaleFilled())
of SlotState.Finished:
return some State(SalePayout())
of SlotState.Paid:
return some State(SaleFinished())
of SlotState.Failed:
return some State(SaleFailed())
of SlotState.Cancelled:
return some State(SaleCancelled())
of SlotState.Repair:
let error = newException(
SlotFreedError, "Slot was forcibly freed and host was removed from its hosting"
)
return some State(SaleErrored(error: error))
case slotState
of SlotState.Free:
let error =
newException(UnexpectedSlotError, "Slot state on chain should not be 'free'")
return some State(SaleErrored(error: error))
of SlotState.Filled:
return some State(SaleFilled())
of SlotState.Finished:
return some State(SalePayout())
of SlotState.Paid:
return some State(SaleFinished())
of SlotState.Failed:
return some State(SaleFailed())
of SlotState.Cancelled:
return some State(SaleCancelled())
of SlotState.Repair:
let error = newException(
SlotFreedError, "Slot was forcibly freed and host was removed from its hosting"
)
return some State(SaleErrored(error: error))
except CancelledError as e:
trace "SaleUnknown.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleUnknown.run", error = e.msgDetail
return some State(SaleErrored(error: e))

@ -315,13 +315,15 @@ proc new*[T, H](
cellSize = cellSize

if (manifest.blocksCount mod manifest.numSlots) != 0:
trace "Number of blocks must be divisable by number of slots."
return failure("Number of blocks must be divisable by number of slots.")
const msg = "Number of blocks must be divisible by number of slots."
trace msg
return failure(msg)

let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize
if (manifest.blockSize mod cellSize) != 0.NBytes:
trace "Block size must be divisable by cell size."
return failure("Block size must be divisable by cell size.")
const msg = "Block size must be divisible by cell size."
trace msg
return failure(msg)

let
numSlotBlocks = manifest.numSlotBlocks

@ -38,7 +38,9 @@ type
AnyProof* = CircomProof

AnySampler* = Poseidon2Sampler
# add any other generic type here, eg. Poseidon2Sampler | ReinforceConcreteSampler
AnyBuilder* = Poseidon2Builder
# add any other generic type here, eg. Poseidon2Builder | ReinforceConcreteBuilder

AnyProofInputs* = ProofInputs[Poseidon2Hash]
Prover* = ref object of RootObj

@ -22,8 +22,8 @@ import ../logutils
import ../systemclock

const
DefaultBlockMaintenanceInterval* = 10.minutes
DefaultNumberOfBlocksToMaintainPerInterval* = 1000
DefaultBlockInterval* = 10.minutes
DefaultNumBlocksPerInterval* = 1000

type BlockMaintainer* = ref object of RootObj
repoStore: RepoStore

@ -137,6 +137,14 @@ method hasBlock*(self: NetworkStore, cid: Cid): Future[?!bool] {.async.} =
trace "Checking network store for block existence", cid
return await self.localStore.hasBlock(cid)

method hasBlock*(
self: NetworkStore, tree: Cid, index: Natural
): Future[?!bool] {.async.} =
## Check if the block exists in the blockstore
##
trace "Checking network store for block existence", tree, index
return await self.localStore.hasBlock(tree, index)

method close*(self: NetworkStore): Future[void] {.async.} =
## Close the underlying local blockstore
##

@ -57,6 +57,17 @@ proc putLeafMetadata*(
(md.some, res),
)

proc delLeafMetadata*(
self: RepoStore, treeCid: Cid, index: Natural
): Future[?!void] {.async.} =
without key =? createBlockCidAndProofMetadataKey(treeCid, index), err:
return failure(err)

if err =? (await self.metaDs.delete(key)).errorOption:
return failure(err)

success()

proc getLeafMetadata*(
self: RepoStore, treeCid: Cid, index: Natural
): Future[?!LeafMetadata] {.async.} =
@ -94,7 +105,7 @@ proc updateQuotaUsage*(
minusUsed: NBytes = 0.NBytes,
plusReserved: NBytes = 0.NBytes,
minusReserved: NBytes = 0.NBytes,
): Future[?!void] {.async.} =
): Future[?!void] {.async: (raises: [CancelledError]).} =
await self.metaDs.modify(
QuotaUsedKey,
proc(maybeCurrUsage: ?QuotaUsage): Future[?QuotaUsage] {.async.} =
@ -205,9 +216,6 @@ proc storeBlock*(
proc tryDeleteBlock*(
self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.low
): Future[?!DeleteResult] {.async.} =
if cid.isEmpty:
return success(DeleteResult(kind: InUse))

without metaKey =? createBlockExpirationMetadataKey(cid), err:
return failure(err)

@ -213,13 +213,13 @@ method putBlock*(

return success()

method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} =
## Delete a block from the blockstore when block refCount is 0 or block is expired
##

proc delBlockInternal(self: RepoStore, cid: Cid): Future[?!DeleteResultKind] {.async.} =
logScope:
cid = cid

if cid.isEmpty:
return success(Deleted)

trace "Attempting to delete a block"

without res =? await self.tryDeleteBlock(cid, self.clock.now()), err:
@ -232,12 +232,28 @@ method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} =

if err =? (await self.updateQuotaUsage(minusUsed = res.released)).errorOption:
return failure(err)
elif res.kind == InUse:
trace "Block in use, refCount > 0 and not expired"
else:
trace "Block not found in store"

return success()
success(res.kind)

method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} =
## Delete a block from the blockstore when block refCount is 0 or block is expired
##

logScope:
cid = cid

without outcome =? await self.delBlockInternal(cid), err:
return failure(err)

case outcome
of InUse:
failure("Directly deleting a block that is part of a dataset is not allowed.")
of NotFound:
trace "Block not found, ignoring"
success()
of Deleted:
trace "Block already deleted"
success()
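Editor's note: a hypothetical call site illustrating the new delBlock contract (`repo` and `leafCid` are assumed names): directly deleting a dataset-owned block now returns a failure instead of silently succeeding.

# Sketch: attempting to delete a block whose refCount is still > 0.
if err =? (await repo.delBlock(leafCid)).errorOption:
  # expected when the block still belongs to a dataset
  trace "delBlock refused", error = err.msg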

method delBlock*(
self: RepoStore, treeCid: Cid, index: Natural
@ -248,12 +264,19 @@ method delBlock*(
else:
return failure(err)

if err =? (await self.delLeafMetadata(treeCid, index)).errorOption:
error "Failed to delete leaf metadata, block will remain on disk.", err = err.msg
return failure(err)

if err =?
(await self.updateBlockMetadata(leafMd.blkCid, minusRefCount = 1)).errorOption:
if not (err of BlockNotFoundError):
return failure(err)

await self.delBlock(leafMd.blkCid) # safe delete, only if refCount == 0
without _ =? await self.delBlockInternal(leafMd.blkCid), err:
return failure(err)

success()

method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} =
## Check if the block exists in the blockstore
@ -322,6 +345,18 @@ proc createBlockExpirationQuery(maxNumber: int, offset: int): ?!Query =
let queryKey = ?createBlockExpirationMetadataQueryKey()
success Query.init(queryKey, offset = offset, limit = maxNumber)

proc blockRefCount*(self: RepoStore, cid: Cid): Future[?!Natural] {.async.} =
## Returns the reference count for a block. If the count is zero,
## the block is eligible for garbage collection.
##
without key =? createBlockExpirationMetadataKey(cid), err:
return failure(err)

without md =? await get[BlockMetadata](self.metaDs, key), err:
return failure(err)

return success(md.refCount)
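Editor's note: a sketch of how blockRefCount could drive a garbage-collection decision (hypothetical caller inside a proc returning ?!void; `repo` and `cid` assumed):

without refCount =? await repo.blockRefCount(cid), err:
  return failure(err)
if refCount == 0:
  # zero references: the block is eligible for garbage collection
  if err =? (await repo.delBlock(cid)).errorOption:
    return failure(err)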
|
||||
|
||||
method getBlockExpirations*(
|
||||
self: RepoStore, maxNumber: int, offset: int
|
||||
): Future[?!AsyncIter[BlockExpiration]] {.async, base.} =
|
||||
@ -372,7 +407,9 @@ method close*(self: RepoStore): Future[void] {.async.} =
|
||||
# RepoStore procs
|
||||
###########################################################
|
||||
|
||||
proc reserve*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} =
|
||||
proc reserve*(
|
||||
self: RepoStore, bytes: NBytes
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
## Reserve bytes
|
||||
##
|
||||
|
||||
@ -380,7 +417,9 @@ proc reserve*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} =
|
||||
|
||||
await self.updateQuotaUsage(plusReserved = bytes)
|
||||
|
||||
proc release*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} =
|
||||
proc release*(
|
||||
self: RepoStore, bytes: NBytes
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
## Release bytes
|
||||
##
|
||||
|
||||
|
||||
@ -21,8 +21,8 @@ import ../../systemclock
|
||||
import ../../units
|
||||
|
||||
const
|
||||
DefaultBlockTtl* = 24.hours
|
||||
DefaultQuotaBytes* = 8.GiBs
|
||||
DefaultBlockTtl* = 30.days
|
||||
DefaultQuotaBytes* = 20.GiBs
|
||||
|
||||
type
|
||||
QuotaNotEnoughError* = object of CodexError
|
||||
|
||||
@ -57,6 +57,8 @@ template withExceptions(body: untyped) =
|
||||
raise newLPStreamEOFError()
|
||||
except AsyncStreamError as exc:
|
||||
raise newException(LPStreamError, exc.msg)
|
||||
except CatchableError as exc:
|
||||
raise newException(Defect, "Unexpected error in AsyncStreamWrapper", exc)
|
||||
|
||||
method readOnce*(
|
||||
self: AsyncStreamWrapper, pbytes: pointer, nbytes: int
|
||||
@ -74,11 +76,13 @@ method readOnce*(
|
||||
|
||||
proc completeWrite(
|
||||
self: AsyncStreamWrapper, fut: Future[void], msgLen: int
|
||||
): Future[void] {.async.} =
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
withExceptions:
|
||||
await fut
|
||||
|
||||
method write*(self: AsyncStreamWrapper, msg: seq[byte]): Future[void] =
|
||||
method write*(
|
||||
self: AsyncStreamWrapper, msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
|
||||
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this
|
||||
# drives up memory usage
|
||||
|
||||
|
||||
@ -67,13 +67,9 @@ method atEof*(self: StoreStream): bool =
  self.offset >= self.size

type LPStreamReadError* = object of LPStreamError
  par*: ref CatchableError

proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError =
  var w = newException(LPStreamReadError, "Read stream failed")
  w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
  w.par = p
  result = w
  newException(LPStreamReadError, "Read stream failed", p)

method readOnce*(
    self: StoreStream, pbytes: pointer, nbytes: int
@ -110,7 +106,7 @@ method readOnce*(
    raise newLPStreamReadError(error)

  trace "Reading bytes from store stream",
    manifestCid = self.manifest.cid.get(),
    manifestCid = self.manifest.treeCid,
    numBlocks = self.manifest.blocksCount,
    blockNum,
    blkCid = blk.cid,
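The one-line rewrite relies on newException's optional third argument, which records the causing exception in the standard parent field instead of a hand-rolled par field plus message concatenation. A standalone illustration (assumed error type, not this module's):

type ReadError = object of CatchableError

proc demo() =
  try:
    raise newException(IOError, "disk unplugged")
  except IOError as cause:
    # The original exception rides along as `parent`.
    let wrapped = newException(ReadError, "Read stream failed", cause)
    echo wrapped.msg, " <- ", wrapped.parent.msg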
38 codex/utils/arrayutils.nim Normal file
@ -0,0 +1,38 @@
import std/sequtils

proc createDoubleArray*(
    outerLen, innerLen: int
): ptr UncheckedArray[ptr UncheckedArray[byte]] =
  # Allocate outer array
  result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](allocShared0(
    sizeof(ptr UncheckedArray[byte]) * outerLen
  ))

  # Allocate each inner array
  for i in 0 ..< outerLen:
    result[i] = cast[ptr UncheckedArray[byte]](allocShared0(sizeof(byte) * innerLen))

proc freeDoubleArray*(
    arr: ptr UncheckedArray[ptr UncheckedArray[byte]], outerLen: int
) =
  # Free each inner array
  for i in 0 ..< outerLen:
    if not arr[i].isNil:
      deallocShared(arr[i])

  # Free outer array
  if not arr.isNil:
    deallocShared(arr)

proc makeUncheckedArray*(
    data: ref seq[seq[byte]]
): ptr UncheckedArray[ptr UncheckedArray[byte]] =
  result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](alloc0(
    sizeof(ptr UncheckedArray[byte]) * data[].len
  ))

  for i, blk in data[]:
    if blk.len > 0:
      result[i] = cast[ptr UncheckedArray[byte]](addr blk[0])
    else:
      result[i] = nil
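A usage sketch for the new helpers (hypothetical sizes; since this is manually managed shared memory, pairing every createDoubleArray with freeDoubleArray is the caller's responsibility):

import codex/utils/arrayutils

proc demo() =
  let rows = 4
  let rowLen = 32
  # rows x rowLen zero-initialised byte matrix in shared memory
  let matrix = createDoubleArray(rows, rowLen)
  try:
    matrix[0][0] = 0xFF
  finally:
    freeDoubleArray(matrix, rows)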
@ -9,7 +9,7 @@

import std/sequtils
import pkg/chronos
import pkg/stew/results
import pkg/results

# Based on chronos AsyncHeapQueue and std/heapqueue
@ -1,10 +0,0 @@
import pkg/chronos

proc asyncSpawn*(future: Future[void], ignore: type CatchableError) =
  proc ignoringError() {.async.} =
    try:
      await future
    except ignore:
      discard

  asyncSpawn ignoringError()
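This deleted helper existed to swallow a chosen error class before spawning. With futures now declared raises: [] throughout (see the state-machine and TrackedFutures changes below), plain asyncSpawn accepts them directly and the wrapper is dead code. The equivalent shape today, as a sketch (not code from this commit):

import pkg/chronos

proc task() {.async: (raises: []).} =
  try:
    await sleepAsync(10.milliseconds)
  except CancelledError:
    discard # nothing may escape a raises: [] future

asyncSpawn task()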
@ -2,6 +2,7 @@ import pkg/questionable
import pkg/chronos
import ../logutils
import ./trackedfutures
import ./exceptions

{.push raises: [].}

@ -46,24 +47,14 @@ proc schedule*(machine: Machine, event: Event) =
  except AsyncQueueFullError:
    raiseAssert "unlimited queue is full?!"

method run*(state: State, machine: Machine): Future[?State] {.base, async.} =
method run*(
    state: State, machine: Machine
): Future[?State] {.base, async: (raises: []).} =
  discard

method onError*(state: State, error: ref CatchableError): ?State {.base.} =
  raise (ref Defect)(msg: "error in state machine: " & error.msg, parent: error)

proc onError(machine: Machine, error: ref CatchableError): Event =
  return proc(state: State): ?State =
    state.onError(error)

proc run(machine: Machine, state: State) {.async: (raises: []).} =
  try:
    if next =? await state.run(machine):
      machine.schedule(Event.transition(state, next))
  except CancelledError:
    discard # do not propagate
  except CatchableError as e:
    machine.schedule(machine.onError(e))
  if next =? await state.run(machine):
    machine.schedule(Event.transition(state, next))

proc scheduler(machine: Machine) {.async: (raises: []).} =
  var running: Future[void].Raising([])
@ -83,7 +74,6 @@ proc scheduler(machine: Machine) {.async: (raises: []).} =
      debug "enter state", state = fromState & " => " & $machine.state
      running = machine.run(machine.state)
      machine.trackedFutures.track(running)
      asyncSpawn running
    except CancelledError:
      break # do not propagate bc it is asyncSpawned

@ -97,7 +87,6 @@ proc start*(machine: Machine, initialState: State) =
  machine.started = true
  let fut = machine.scheduler()
  machine.trackedFutures.track(fut)
  asyncSpawn fut
  machine.schedule(Event.transition(machine.state, initialState))

proc stop*(machine: Machine) {.async.} =
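Because run is now declared {.async: (raises: []).}, each state must contain its own failures, which is what lets the scheduler's catch-all and the onError machinery go. A minimal conforming state, as a sketch with hypothetical types:

import pkg/questionable
import pkg/chronos

type
  DemoMachine = ref object of RootObj
  DemoState = ref object of RootObj

method run(state: DemoState, machine: DemoMachine): Future[?DemoState] {.base, async: (raises: []).} =
  try:
    await sleepAsync(10.milliseconds) # stand-in for real work
  except CancelledError:
    discard # handled here; nothing may escape
  return DemoState.none # no transition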
@ -1,7 +1,7 @@
{.push raises: [].}

import
  std/[tables, hashes], stew/results, stew/shims/net as stewNet, chronos, chronicles
  std/[tables, hashes], pkg/results, pkg/stew/shims/net as stewNet, chronos, chronicles

import pkg/libp2p
@ -50,7 +50,6 @@ method start*(
  timer.callback = callback
  timer.interval = interval
  timer.loopFuture = timerLoop(timer)
  asyncSpawn timer.loopFuture

method stop*(timer: Timer) {.async, base.} =
  if timer.loopFuture != nil and not timer.loopFuture.finished:
@ -5,9 +5,11 @@ import ../logutils

{.push raises: [].}

type TrackedFutures* = ref object
  futures: Table[uint, FutureBase]
  cancelling: bool
type
  TrackedFuture = Future[void].Raising([])
  TrackedFutures* = ref object
    futures: Table[uint, TrackedFuture]
    cancelling: bool

logScope:
  topics = "trackable futures"
@ -15,15 +17,18 @@ logScope:
proc len*(self: TrackedFutures): int =
  self.futures.len

proc removeFuture(self: TrackedFutures, future: FutureBase) =
proc removeFuture(self: TrackedFutures, future: TrackedFuture) =
  if not self.cancelling and not future.isNil:
    self.futures.del(future.id)

proc track*[T](self: TrackedFutures, fut: Future[T]) =
proc track*(self: TrackedFutures, fut: TrackedFuture) =
  if self.cancelling:
    return

  self.futures[fut.id] = FutureBase(fut)
  if fut.finished:
    return

  self.futures[fut.id] = fut

  proc cb(udata: pointer) =
    self.removeFuture(fut)
@ -33,13 +38,8 @@ proc track*[T](self: TrackedFutures, fut: Future[T]) =
proc cancelTracked*(self: TrackedFutures) {.async: (raises: []).} =
  self.cancelling = true

  trace "cancelling tracked futures"

  var cancellations: seq[FutureBase]
  for future in self.futures.values:
    if not future.isNil and not future.finished:
      cancellations.add future.cancelAndWait()

  trace "cancelling tracked futures", len = self.futures.len
  let cancellations = self.futures.values.toSeq.mapIt(it.cancelAndWait())
  await noCancel allFutures cancellations

  self.futures.clear()
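Combined with the asyncSpawn removals above, tracking is now the single way long-lived futures are owned: track registers a completion callback that self-removes, and cancelTracked cancel-and-waits whatever is left. A lifecycle sketch (hypothetical worker; assumes default construction of TrackedFutures):

import pkg/chronos

proc worker() {.async: (raises: []).} =
  try:
    await sleepAsync(1.seconds)
  except CancelledError:
    discard

proc demo() {.async.} =
  let tracked = TrackedFutures()
  tracked.track(worker())       # future starts eagerly; removes itself on completion
  await tracked.cancelTracked() # cancel and wait for anything still live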
@ -2,6 +2,7 @@ import std/sets
import std/sequtils
import pkg/chronos
import pkg/questionable/results
import pkg/stew/endians2

import ./validationconfig
import ./market
@ -19,11 +20,9 @@ type Validation* = ref object
  subscriptions: seq[Subscription]
  running: Future[void]
  periodicity: Periodicity
  proofTimeout: UInt256
  proofTimeout: uint64
  config: ValidationConfig

const MaxStorageRequestDuration = 30.days

logScope:
  topics = "codex validator"

@ -35,18 +34,19 @@ proc new*(
proc slots*(validation: Validation): seq[SlotId] =
  validation.slots.toSeq

proc getCurrentPeriod(validation: Validation): UInt256 =
  return validation.periodicity.periodOf(validation.clock.now().u256)
proc getCurrentPeriod(validation: Validation): Period =
  return validation.periodicity.periodOf(validation.clock.now().Timestamp)

proc waitUntilNextPeriod(validation: Validation) {.async.} =
  let period = validation.getCurrentPeriod()
  let periodEnd = validation.periodicity.periodEnd(period)
  trace "Waiting until next period", currentPeriod = period
  await validation.clock.waitUntil(periodEnd.truncate(int64) + 1)
  await validation.clock.waitUntil((periodEnd + 1).toSecondsSince1970)

func groupIndexForSlotId*(slotId: SlotId, validationGroups: ValidationGroups): uint16 =
  let slotIdUInt256 = UInt256.fromBytesBE(slotId.toArray)
  (slotIdUInt256 mod validationGroups.u256).truncate(uint16)
  let a = slotId.toArray
  let slotIdInt64 = uint64.fromBytesBE(a)
  (slotIdInt64 mod uint64(validationGroups)).uint16

func maxSlotsConstraintRespected(validation: Validation): bool =
  validation.config.maxSlots == 0 or validation.slots.len < validation.config.maxSlots
@ -57,7 +57,7 @@ func shouldValidateSlot(validation: Validation, slotId: SlotId): bool =
  groupIndexForSlotId(slotId, validationGroups) == validation.config.groupIndex

proc subscribeSlotFilled(validation: Validation) {.async.} =
  proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) =
  proc onSlotFilled(requestId: RequestId, slotIndex: uint64) =
    if not validation.maxSlotsConstraintRespected:
      return
    let slotId = slotId(requestId, slotIndex)
@ -115,14 +115,13 @@ proc run(validation: Validation) {.async: (raises: []).} =
  except CatchableError as e:
    error "Validation failed", msg = e.msg

proc epochForDurationBackFromNow(
    validation: Validation, duration: Duration
): SecondsSince1970 =
  return validation.clock.now - duration.secs
proc findEpoch(validation: Validation, secondsAgo: uint64): SecondsSince1970 =
  return validation.clock.now - secondsAgo.int64

proc restoreHistoricalState(validation: Validation) {.async.} =
  trace "Restoring historical state..."
  let startTimeEpoch = validation.epochForDurationBackFromNow(MaxStorageRequestDuration)
  let requestDurationLimit = await validation.market.requestDurationLimit
  let startTimeEpoch = validation.findEpoch(secondsAgo = requestDurationLimit)
  let slotFilledEvents =
    await validation.market.queryPastSlotFilledEvents(fromTime = startTimeEpoch)
  for event in slotFilledEvents:
@ -143,7 +142,6 @@ proc start*(validation: Validation) {.async.} =
  await validation.subscribeSlotFilled()
  await validation.restoreHistoricalState()
  validation.running = validation.run()
  asyncSpawn validation.running

proc stop*(validation: Validation) {.async.} =
  if not validation.running.isNil and not validation.running.finished:
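Note the new group-index computation folds only the leading 8 bytes of the 32-byte slot id (big-endian) before taking the modulus, rather than the full UInt256. A standalone check of the arithmetic (hypothetical values):

import pkg/stew/endians2

var slotId: array[32, byte]
slotId[7] = 0x2A # big-endian 42 in the first 8 bytes

let groups = 5'u64
let head = uint64.fromBytesBE(slotId) # decodes the first sizeof(uint64) bytes
assert head == 42'u64
assert (head mod groups).uint16 == 2'u16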
79 config.nims
@ -1,21 +1,24 @@

include "build.nims"

import std/os
const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)]

when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and
    # BEWARE
    # In Nim 1.6, config files are evaluated with a working directory
    # matching where the Nim command was invocated. This means that we
    # must do all file existence checks with full absolute paths:
    system.fileExists(currentDir & "nimbus-build-system.paths"):
    # BEWARE
    # In Nim 1.6, config files are evaluated with a working directory
    # matching where the Nim command was invocated. This means that we
    # must do all file existence checks with full absolute paths:
    system.fileExists(currentDir & "nimbus-build-system.paths"):
  include "nimbus-build-system.paths"

when defined(release):
  switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName"))
  switch(
    "nimcache", joinPath(currentSourcePath.parentDir, "nimcache/release/$projectName")
  )
else:
  switch("nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName"))
  switch(
    "nimcache", joinPath(currentSourcePath.parentDir, "nimcache/debug/$projectName")
  )

when defined(limitStackUsage):
  # This limits stack usage of each individual function to 1MB - the option is
@ -34,7 +37,8 @@ when defined(windows):
  # increase stack size
  switch("passL", "-Wl,--stack,8388608")
  # https://github.com/nim-lang/Nim/issues/4057
  --tlsEmulation:off
  --tlsEmulation:
    off
  if defined(i386):
    # set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM
    switch("passL", "-Wl,--large-address-aware")
@ -63,30 +67,47 @@ else:
  # ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes)
  switch("passC", "-mno-avx512vl")

--tlsEmulation:off
--threads:on
--opt:speed
--excessiveStackTrace:on
--tlsEmulation:
  off
--threads:
  on
--opt:
  speed
--excessiveStackTrace:
  on
# enable metric collection
--define:metrics
--define:
  metrics
# for heap-usage-by-instance-type metrics and object base-type strings
--define:nimTypeNames
--styleCheck:usages
--styleCheck:error
--maxLoopIterationsVM:1000000000
--fieldChecks:on
--warningAsError:"ProveField:on"
--define:
  nimTypeNames
--styleCheck:
  usages
--styleCheck:
  error
--maxLoopIterationsVM:
  1000000000
--fieldChecks:
  on
--warningAsError:
  "ProveField:on"

when (NimMajor, NimMinor) >= (1, 4):
  --warning:"ObservableStores:off"
  --warning:"LockLevel:off"
  --hint:"XCannotRaiseY:off"
  --warning:
    "ObservableStores:off"
  --warning:
    "LockLevel:off"
  --hint:
    "XCannotRaiseY:off"
when (NimMajor, NimMinor) >= (1, 6):
  --warning:"DotLikeOps:off"
  --warning:
    "DotLikeOps:off"
when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11):
  --warning:"BareExcept:off"
  --warning:
    "BareExcept:off"
when (NimMajor, NimMinor) >= (2, 0):
  --mm:refc
  --mm:
    refc

switch("define", "withoutPCRE")

@ -94,10 +115,12 @@ switch("define", "withoutPCRE")
# "--debugger:native" build. It can be increased with `ulimit -n 1024`.
if not defined(macosx):
  # add debugging symbols and original files and line numbers
  --debugger:native
  --debugger:
    native
  if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace):
    # light-weight stack traces using libbacktrace and libunwind
    --define:nimStackTraceOverride
    --define:
      nimStackTraceOverride
  switch("import", "libbacktrace")

# `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'"
@ -56,7 +56,7 @@ in pkgs.gcc13Stdenv.mkDerivation rec {
    fakeCargo
  ];

  # Disable CPU optmizations that make binary not portable.
  # Disable CPU optimizations that make binary not portable.
  NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}";
  # Avoid Nim cache permission errors.
  XDG_CACHE_HOME = "/tmp";