mirror of https://github.com/logos-storage/logos-storage-nim.git
synced 2026-01-04 06:23:06 +00:00

commit 3852415f39
Merge branch 'master' into feat/sampling-primitives
@@ -89,7 +89,7 @@ runs:
- name: Install gcc 14 on Linux
  # We don't want to install gcc 14 for coverage (Ubuntu 20.04)
  if : ${{ inputs.os == 'linux' && !inputs.coverage }}
  if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
  shell: ${{ inputs.shell }} {0}
  run: |
    # Add GCC-14 to alternatives

@@ -202,7 +202,7 @@ runs:
- name: Restore Nim toolchain binaries from cache
  id: nim-cache
  uses: actions/cache@v4
  if : ${{ !inputs.coverage }}
  if : ${{ inputs.coverage != 'true' }}
  with:
    path: NimBinaries
    key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
.github/workflows/nim-matrix.yml (vendored, 8 lines changed)
@@ -20,10 +20,10 @@ jobs:
uses: fabiocaccamo/create-matrix-action@v5
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}

build:
needs: matrix
.gitmodules (vendored, 10 lines changed)
@@ -221,3 +221,13 @@
[submodule "vendor/nph"]
path = vendor/nph
url = https://github.com/arnetheduck/nph.git
[submodule "vendor/nim-quic"]
path = vendor/nim-quic
url = https://github.com/vacp2p/nim-quic.git
ignore = untracked
branch = master
[submodule "vendor/nim-ngtcp2"]
path = vendor/nim-ngtcp2
url = https://github.com/vacp2p/nim-ngtcp2.git
ignore = untracked
branch = master
@@ -59,8 +59,8 @@ Feel free to dive in, contributions are welcomed! Open an issue or submit PRs.

### Linting and formatting

`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is requrired to adhere to its styling.
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
If you are setting up fresh setup, in order to get `nph` run `make build-nph`.
In order to format files run `make nph/<file/folder you want to format>`.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior commiting them.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them.
If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension you can enable "Format On Save" (eq. the `nim.formatOnSave` property) that will format the files using `nph`.
@@ -41,80 +41,86 @@ type Advertiser* = ref object of RootObj
advertiserRunning*: bool # Indicates if discovery is running
concurrentAdvReqs: int # Concurrent advertise requests

advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle
advertiseLocalStoreLoop*: Future[void].Raising([]) # Advertise loop task handle
advertiseQueue*: AsyncQueue[Cid] # Advertise queue
trackedFutures*: TrackedFutures # Advertise tasks futures

advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests

proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} =
proc addCidToQueue(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
if cid notin b.advertiseQueue:
await b.advertiseQueue.put(cid)

trace "Advertising", cid

proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} =
proc advertiseBlock(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
without isM =? cid.isManifest, err:
warn "Unable to determine if cid is manifest"
return

if isM:
without blk =? await b.localStore.getBlock(cid), err:
error "Error retrieving manifest block", cid, err = err.msg
return
try:
if isM:
without blk =? await b.localStore.getBlock(cid), err:
error "Error retrieving manifest block", cid, err = err.msg
return

without manifest =? Manifest.decode(blk), err:
error "Unable to decode as manifest", err = err.msg
return
without manifest =? Manifest.decode(blk), err:
error "Unable to decode as manifest", err = err.msg
return

# announce manifest cid and tree cid
await b.addCidToQueue(cid)
await b.addCidToQueue(manifest.treeCid)
# announce manifest cid and tree cid
await b.addCidToQueue(cid)
await b.addCidToQueue(manifest.treeCid)
except CancelledError as exc:
trace "Cancelled advertise block", cid
raise exc
except CatchableError as e:
error "failed to advertise block", cid, error = e.msgDetail

proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning:
try:
if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
trace "Advertiser begins iterating blocks..."
for c in cids:
if cid =? await c:
await b.advertiseBlock(cid)
trace "Advertiser iterating blocks finished."
try:
while b.advertiserRunning:
try:
if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
trace "Advertiser begins iterating blocks..."
for c in cids:
if cid =? await c:
await b.advertiseBlock(cid)
trace "Advertiser iterating blocks finished."
except CatchableError as e:
error "Error in advertise local store loop", error = e.msgDetail
raiseAssert("Unexpected exception in advertiseLocalStoreLoop")

await sleepAsync(b.advertiseLocalStoreLoopSleep)
except CancelledError:
break # do not propagate as advertiseLocalStoreLoop was asyncSpawned
except CatchableError as e:
error "failed to advertise blocks in local store", error = e.msgDetail
except CancelledError:
warn "Cancelled advertise local store loop"

info "Exiting advertise task loop"

proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning:
try:
try:
while b.advertiserRunning:
let cid = await b.advertiseQueue.get()

if cid in b.inFlightAdvReqs:
continue

try:
let request = b.discovery.provide(cid)
let request = b.discovery.provide(cid)
b.inFlightAdvReqs[cid] = request
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)

b.inFlightAdvReqs[cid] = request
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
await request
finally:
defer:
b.inFlightAdvReqs.del(cid)
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
except CancelledError:
trace "Advertise task cancelled"
return
except CatchableError as exc:
warn "Exception in advertise task runner", exc = exc.msg

await request
except CancelledError:
warn "Cancelled advertise task runner"

info "Exiting advertise task runner"

proc start*(b: Advertiser) {.async.} =
proc start*(b: Advertiser) {.async: (raises: []).} =
## Start the advertiser
##

@@ -134,13 +140,11 @@ proc start*(b: Advertiser) {.async.} =
for i in 0 ..< b.concurrentAdvReqs:
let fut = b.processQueueLoop()
b.trackedFutures.track(fut)
asyncSpawn fut

b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b)
b.trackedFutures.track(b.advertiseLocalStoreLoop)
asyncSpawn b.advertiseLocalStoreLoop

proc stop*(b: Advertiser) {.async.} =
proc stop*(b: Advertiser) {.async: (raises: []).} =
## Stop the advertiser
##
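Note: the advertiser hunks above move these procs from untyped {.async.} to chronos' typed raise lists, handling all non-cancellation errors locally. A minimal, self-contained sketch of that pattern (illustrative only; the proc names here are invented and not part of the diff):

import pkg/chronos

proc advertiseOne(msg: string) {.async: (raises: [CancelledError]).} =
  # Only CancelledError may escape; any other failure is handled here.
  try:
    await sleepAsync(1.millis) # stand-in for the real network call
    echo "advertised ", msg
  except CancelledError as exc:
    raise exc                  # cancellation must still propagate
  except CatchableError as exc:
    echo "failed to advertise: ", exc.msg

proc advertiseLoop() {.async: (raises: []).} =
  # Loop meant to be spawned and tracked: it swallows cancellation itself.
  try:
    while true:
      await advertiseOne("example")
      await sleepAsync(10.millis)
  except CancelledError:
    discard # do not propagate; the owner cancels the loop on stop

when isMainModule:
  let loop = advertiseLoop()
  waitFor sleepAsync(30.millis)
  loop.cancelSoon()
  waitFor sleepAsync(5.millis)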
@@ -48,7 +48,7 @@ type DiscoveryEngine* = ref object of RootObj
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
discEngineRunning*: bool # Indicates if discovery is running
concurrentDiscReqs: int # Concurrent discovery requests
discoveryLoop*: Future[void] # Discovery loop task handle
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block

@@ -57,30 +57,21 @@ type DiscoveryEngine* = ref object of RootObj
# Inflight discovery requests

proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
try:
try:
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
await b.discoveryQueue.put(cid)
except CancelledError:
trace "Discovery loop cancelled"
return
except CatchableError as exc:
warn "Exception in discovery loop", exc = exc.msg

try:
logScope:
sleep = b.discoveryLoopSleep
wanted = b.pendingBlocks.len
await sleepAsync(b.discoveryLoopSleep)
except CancelledError:
discard # do not propagate as discoveryQueueLoop was asyncSpawned
except CancelledError:
trace "Discovery loop cancelled"

proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
## Run discovery tasks
##

while b.discEngineRunning:
try:
try:
while b.discEngineRunning:
let cid = await b.discoveryQueue.get()

if cid in b.inFlightDiscReqs:

@@ -90,35 +81,28 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
let haves = b.peers.peersHave(cid)

if haves.len < b.minPeersPerBlock:
try:
let request = b.discovery.find(cid).wait(DefaultDiscoveryTimeout)
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)

b.inFlightDiscReqs[cid] = request
defer:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
let peers = await request

if (await request.withTimeout(DefaultDiscoveryTimeout)) and
peers =? (await request).catch:
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))

for i, f in dialed:
if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId)
finally:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
except CancelledError:
trace "Discovery task cancelled"
return
except CatchableError as exc:
warn "Exception in discovery task runner", exc = exc.msg
except Exception as e:
# Raised by b.discovery.removeProvider somehow...
# This should not be catchable, and we should never get here. Therefore,
# raise a Defect.
raiseAssert "Exception when removing provider"
except CancelledError:
trace "Discovery task cancelled"
return

info "Exiting discovery task runner"

proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) =
for cid in cids:
if cid notin b.discoveryQueue:
try:

@@ -126,11 +110,11 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
except CatchableError as exc:
warn "Exception queueing discovery request", exc = exc.msg

proc start*(b: DiscoveryEngine) {.async.} =
proc start*(b: DiscoveryEngine) {.async: (raises: []).} =
## Start the discengine task
##

trace "Discovery engine start"
trace "Discovery engine starting"

if b.discEngineRunning:
warn "Starting discovery engine twice"

@@ -140,12 +124,13 @@ proc start*(b: DiscoveryEngine) {.async.} =
for i in 0 ..< b.concurrentDiscReqs:
let fut = b.discoveryTaskLoop()
b.trackedFutures.track(fut)
asyncSpawn fut

b.discoveryLoop = b.discoveryQueueLoop()
b.trackedFutures.track(b.discoveryLoop)

proc stop*(b: DiscoveryEngine) {.async.} =
trace "Discovery engine started"

proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
## Stop the discovery engine
##
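The discovery task loop above drops `.wait(DefaultDiscoveryTimeout)` (which raises AsyncTimeoutError) in favour of `withTimeout` plus a Result-returning `catch`, so the loop can keep an empty raise list. A rough sketch of the same idiom (assumed names; `findProviders` is invented, not the engine's actual API):

import pkg/chronos
import pkg/questionable
import pkg/questionable/results

proc findProviders(): Future[seq[string]] {.async: (raises: [CancelledError]).} =
  await sleepAsync(5.millis) # stand-in for a DHT lookup
  return @["peerA", "peerB"]

proc discoverOnce() {.async: (raises: [CancelledError]).} =
  let request = findProviders()
  # Wait up to a deadline, then read the result without letting errors escape.
  if (await request.withTimeout(100.millis)) and peers =? (await request).catch:
    echo "found ", peers.len, " providers"
  else:
    echo "lookup timed out or failed"

waitFor discoverOnce()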
@@ -93,12 +93,15 @@ type
price*: UInt256

# attach task scheduler to engine
proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} =
self.taskQueue.pushOrUpdateNoWait(task).isOk()
proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, raises: [].} =
if self.taskQueue.pushOrUpdateNoWait(task).isOk():
trace "Task scheduled for peer", peer = task.id
else:
warn "Unable to schedule task for peer", peer = task.id

proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).}

proc start*(self: BlockExcEngine) {.async.} =
proc start*(self: BlockExcEngine) {.async: (raises: []).} =
## Start the blockexc task
##

@@ -115,7 +118,7 @@ proc start*(self: BlockExcEngine) {.async.} =
let fut = self.blockexcTaskRunner()
self.trackedFutures.track(fut)

proc stop*(self: BlockExcEngine) {.async.} =
proc stop*(self: BlockExcEngine) {.async: (raises: []).} =
## Stop the blockexc blockexc
##

@@ -135,7 +138,7 @@ proc stop*(self: BlockExcEngine) {.async.} =

proc sendWantHave(
self: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx]
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
for p in peers:
let toAsk = addresses.filterIt(it notin p.peerHave)
trace "Sending wantHave request", toAsk, peer = p.id

@@ -144,7 +147,7 @@ proc sendWantHave(

proc sendWantBlock(
self: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
trace "Sending wantBlock request to", addresses, peer = blockPeer.id
await self.network.request.sendWantList(
blockPeer.id, addresses, wantType = WantType.WantBlock

@@ -229,7 +232,7 @@ proc requestBlock*(

proc blockPresenceHandler*(
self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
) {.async.} =
) {.async: (raises: []).} =
trace "Received block presence from peer", peer, blocks = blocks.mapIt($it)
let
peerCtx = self.peers.get(peer)

@@ -249,20 +252,23 @@ proc blockPresenceHandler*(
if dontWantCids.len > 0:
peerCtx.cleanPresence(dontWantCids)

let ourWantCids = ourWantList.filter do(address: BlockAddress) -> bool:
if address in peerHave and not self.pendingBlocks.retriesExhausted(address) and
not self.pendingBlocks.isInFlight(address):
self.pendingBlocks.setInFlight(address, true)
self.pendingBlocks.decRetries(address)
true
else:
false
let ourWantCids = ourWantList.filterIt(
it in peerHave and not self.pendingBlocks.retriesExhausted(it) and
not self.pendingBlocks.isInFlight(it)
)

for address in ourWantCids:
self.pendingBlocks.setInFlight(address, true)
self.pendingBlocks.decRetries(address)

if ourWantCids.len > 0:
trace "Peer has blocks in our wantList", peer, wants = ourWantCids
await self.sendWantBlock(ourWantCids, peerCtx)
if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption:
warn "Failed to send wantBlock to peer", peer, err = err.msg

proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
proc scheduleTasks(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
let cids = blocksDelivery.mapIt(it.blk.cid)

# schedule any new peers to provide blocks to

@@ -271,15 +277,21 @@ proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.a
# schedule a peer if it wants at least one cid
# and we have it in our local store
if c in p.peerWantsCids:
if await (c in self.localStore):
if self.scheduleTask(p):
trace "Task scheduled for peer", peer = p.id
else:
warn "Unable to schedule task for peer", peer = p.id
try:
if await (c in self.localStore):
# TODO: the try/except should go away once blockstore tracks exceptions
self.scheduleTask(p)
break
except CancelledError as exc:
warn "Checking local store canceled", cid = c, err = exc.msg
return
except CatchableError as exc:
error "Error checking local store for cid", cid = c, err = exc.msg
raiseAssert "Unexpected error checking local store for cid"

break # do next peer

proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
proc cancelBlocks(
self: BlockExcEngine, addrs: seq[BlockAddress]
) {.async: (raises: [CancelledError]).} =
## Tells neighboring peers that we're no longer interested in a block.
##
@@ -289,35 +301,43 @@ proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
trace "Sending block request cancellations to peers",
addrs, peers = self.peers.peerIds

proc mapPeers(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} =
let blocks = addrs.filter do(a: BlockAddress) -> bool:
a in peerCtx.blocks
proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} =
await self.network.request.sendWantCancellations(
peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx)
)

if blocks.len > 0:
trace "Sending block request cancellations to peer", peer = peerCtx.id, blocks
await self.network.request.sendWantCancellations(
peer = peerCtx.id, addresses = blocks
return peerCtx

try:
let (succeededFuts, failedFuts) = await allFinishedFailed(
toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map(
processPeer
)
)

(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx):
peerCtx.cleanPresence(addrs)
peerCtx

let failed = (await allFinished(map(toSeq(self.peers.peers.values), mapPeers))).filterIt(
it.failed
)

if failed.len > 0:
warn "Failed to send block request cancellations to peers", peers = failed.len
else:
trace "Block request cancellations sent to peers", peers = self.peers.len
if failedFuts.len > 0:
warn "Failed to send block request cancellations to peers", peers = failedFuts.len
else:
trace "Block request cancellations sent to peers", peers = self.peers.len
except CancelledError as exc:
warn "Error sending block request cancellations", error = exc.msg
raise exc
except CatchableError as exc:
warn "Error sending block request cancellations", error = exc.msg

proc resolveBlocks*(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: [CancelledError]).} =
self.pendingBlocks.resolve(blocksDelivery)
await self.scheduleTasks(blocksDelivery)
await self.cancelBlocks(blocksDelivery.mapIt(it.address))

proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} =
proc resolveBlocks*(
self: BlockExcEngine, blocks: seq[Block]
) {.async: (raises: [CancelledError]).} =
await self.resolveBlocks(
blocks.mapIt(
BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid))

@@ -326,7 +346,7 @@ proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} =

proc payForBlocks(
self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: [CancelledError]).} =
let
sendPayment = self.network.request.sendPayment
price = peer.price(blocksDelivery.mapIt(it.address))

@@ -367,7 +387,7 @@ proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =

proc blocksDeliveryHandler*(
self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: []).} =
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))

var validatedBlocksDelivery: seq[BlockDelivery]

@@ -376,41 +396,47 @@ proc blocksDeliveryHandler*(
peer = peer
address = bd.address

if err =? self.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg
continue

if err =? (await self.localStore.putBlock(bd.blk)).errorOption:
error "Unable to store block", err = err.msg
continue

if bd.address.leaf:
without proof =? bd.proof:
error "Proof expected for a leaf block delivery"
try:
if err =? self.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg
continue
if err =? (
await self.localStore.putCidAndProof(
bd.address.treeCid, bd.address.index, bd.blk.cid, proof
)
).errorOption:
error "Unable to store proof and cid for a block"

if err =? (await self.localStore.putBlock(bd.blk)).errorOption:
error "Unable to store block", err = err.msg
continue

if bd.address.leaf:
without proof =? bd.proof:
warn "Proof expected for a leaf block delivery"
continue
if err =? (
await self.localStore.putCidAndProof(
bd.address.treeCid, bd.address.index, bd.blk.cid, proof
)
).errorOption:
warn "Unable to store proof and cid for a block"
continue
except CatchableError as exc:
warn "Error handling block delivery", error = exc.msg
continue

validatedBlocksDelivery.add(bd)

await self.resolveBlocks(validatedBlocksDelivery)
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)

let peerCtx = self.peers.get(peer)

if peerCtx != nil:
await self.payForBlocks(peerCtx, blocksDelivery)
## shouldn't we remove them from the want-list instead of this:
peerCtx.cleanPresence(blocksDelivery.mapIt(it.address))
if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
warn "Error paying for blocks", err = err.msg
return

if err =? catch(await self.resolveBlocks(validatedBlocksDelivery)).errorOption:
warn "Error resolving blocks", err = err.msg
return

proc wantListHandler*(
self: BlockExcEngine, peer: PeerId, wantList: WantList
) {.async.} =
) {.async: (raises: []).} =
trace "Received want list from peer", peer, wantList = wantList.entries.len

let peerCtx = self.peers.get(peer)
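Several call sites above change a bare `await` into `if err =? catch(await ...).errorOption:` so that handlers annotated with `raises: []` can log a failure instead of letting it escape. A small sketch of the idiom under the same assumptions (the `deliver` proc below is hypothetical):

import pkg/chronos
import pkg/questionable
import pkg/questionable/results

proc deliver(ok: bool) {.async: (raises: [CancelledError, ValueError]).} =
  await sleepAsync(1.millis)
  if not ok:
    raise newException(ValueError, "send failed")

proc handler() {.async: (raises: []).} =
  # catch(...) turns any raised error into a Result; errorOption yields it, if any.
  if err =? catch(await deliver(false)).errorOption:
    echo "delivery failed: ", err.msg
  else:
    echo "delivered"

waitFor handler()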
@@ -422,68 +448,81 @@ proc wantListHandler*(
presence: seq[BlockPresence]
schedulePeer = false

for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)
try:
for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)

logScope:
peer = peerCtx.id
address = e.address
wantType = $e.wantType
logScope:
peer = peerCtx.id
address = e.address
wantType = $e.wantType

if idx < 0: # Adding new entry to peer wants
let
have = await e.address in self.localStore
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if idx < 0: # Adding new entry to peer wants
let
have =
try:
await e.address in self.localStore
except CatchableError as exc:
# TODO: should not be necessary once we have proper exception tracking on the BlockStore interface
false
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)

if e.cancel:
trace "Received cancelation for untracked block, skipping", address = e.address
continue
if e.cancel:
trace "Received cancelation for untracked block, skipping",
address = e.address
continue

trace "Processing want list entry", wantList = $e
case e.wantType
of WantType.WantHave:
if have:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
if e.sendDontHave:
trace "Processing want list entry", wantList = $e
case e.wantType
of WantType.WantHave:
if have:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.DontHave, price: price
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
if e.sendDontHave:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.DontHave, price: price
)
)

codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock:
peerCtx.peerWants.add(e)
schedulePeer = true
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
trace "Canceled block request", address = e.address, len = peerCtx.peerWants.len
else:
if e.wantType == WantType.WantBlock:
codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock:
peerCtx.peerWants.add(e)
schedulePeer = true
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
trace "Updated block request", address = e.address, len = peerCtx.peerWants.len
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
trace "Canceled block request",
address = e.address, len = peerCtx.peerWants.len
else:
if e.wantType == WantType.WantBlock:
schedulePeer = true
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
trace "Updated block request",
address = e.address, len = peerCtx.peerWants.len

if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
await self.network.request.sendPresence(peer, presence)
if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
await self.network.request.sendPresence(peer, presence)

if schedulePeer and not self.scheduleTask(peerCtx):
warn "Unable to schedule task for peer", peer
if schedulePeer:
self.scheduleTask(peerCtx)
except CancelledError as exc: #TODO: replace with CancelledError
warn "Error processing want list", error = exc.msg

proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.async.} =
proc accountHandler*(
self: BlockExcEngine, peer: PeerId, account: Account
) {.async: (raises: []).} =
let context = self.peers.get(peer)
if context.isNil:
return
@@ -492,7 +531,7 @@ proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.asy

proc paymentHandler*(
self: BlockExcEngine, peer: PeerId, payment: SignedState
) {.async.} =
) {.async: (raises: []).} =
trace "Handling payments", peer

without context =? self.peers.get(peer).option and account =? context.account:

@@ -505,7 +544,9 @@ proc paymentHandler*(
else:
context.paymentChannel = self.wallet.acceptChannel(payment).option

proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} =
proc setupPeer*(
self: BlockExcEngine, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Perform initial setup, such as want
## list exchange
##

@@ -524,9 +565,10 @@ proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} =
await self.network.request.sendWantList(peer, cids, full = true)

if address =? self.pricing .? address:
trace "Sending account to peer", peer
await self.network.request.sendAccount(peer, Account(address: address))

proc dropPeer*(self: BlockExcEngine, peer: PeerId) =
proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} =
## Cleanup disconnected peer
##

@@ -535,7 +577,9 @@ proc dropPeer*(self: BlockExcEngine, peer: PeerId) =
# drop the peer from the peers table
self.peers.remove(peer)

proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
proc taskHandler*(
self: BlockExcEngine, task: BlockExcPeerCtx
) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} =
# Send to the peer blocks he wants to get,
# if they present in our local store

@@ -572,8 +616,11 @@ proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.}

let
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
blocksDelivery =
blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get)
blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt:
if bd =? it.value:
bd
else:
raiseAssert "Unexpected error in local lookup"

# All the wants that failed local lookup must be set to not-in-flight again.
let

@@ -595,15 +642,12 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
##

trace "Starting blockexc task runner"
while self.blockexcRunning:
try:
try:
while self.blockexcRunning:
let peerCtx = await self.taskQueue.pop()

await self.taskHandler(peerCtx)
except CancelledError:
break # do not propagate as blockexcTaskRunner was asyncSpawned
except CatchableError as e:
error "error running block exchange task", error = e.msgDetail
except CatchableError as exc:
error "error running block exchange task", error = exc.msg

info "Exiting blockexc task runner"

@@ -634,7 +678,9 @@ proc new*(
advertiser: advertiser,
)

proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
await self.setupPeer(peerId)
else:

@@ -644,23 +690,29 @@ proc new*(
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)

proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} =
proc blockWantListHandler(
peer: PeerId, wantList: WantList
): Future[void] {.async: (raises: []).} =
self.wantListHandler(peer, wantList)

proc blockPresenceHandler(
peer: PeerId, presence: seq[BlockPresence]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raises: []).} =
self.blockPresenceHandler(peer, presence)

proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raises: []).} =
self.blocksDeliveryHandler(peer, blocksDelivery)

proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} =
proc accountHandler(
peer: PeerId, account: Account
): Future[void] {.async: (raises: []).} =
self.accountHandler(peer, account)

proc paymentHandler(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
proc paymentHandler(
peer: PeerId, payment: SignedState
): Future[void] {.async: (raises: []).} =
self.paymentHandler(peer, payment)

network.handlers = BlockExcHandlers(
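Throughout the engine the startup pattern is now to hand every spawned loop to `trackedFutures.track(fut)` so that `stop` can cancel everything it started. A much-simplified illustration of such a tracker (this is not the repository's actual `TrackedFutures` implementation, only a sketch of the idea):

import pkg/chronos

type SimpleTracker = ref object
  futs: seq[Future[void]]

proc track(t: SimpleTracker, fut: Future[void]) =
  t.futs.add fut

proc cancelTracked(t: SimpleTracker) {.async: (raises: []).} =
  # Cancel every tracked future and wait for it to finish.
  try:
    for fut in t.futs:
      if not fut.finished:
        await fut.cancelAndWait()
  except CancelledError:
    discard
  t.futs.setLen(0)

proc worker() {.async: (raises: []).} =
  try:
    while true:
      await sleepAsync(10.millis)
  except CancelledError:
    discard

when isMainModule:
  let tracker = SimpleTracker()
  for i in 0 ..< 3:
    tracker.track(worker())
  waitFor sleepAsync(25.millis)
  waitFor tracker.cancelTracked()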
@@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/math
import pkg/nitro
import pkg/questionable/results

@@ -15,9 +17,6 @@ import ../peers
export nitro
export results

push:
{.upraises: [].}

const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals
@@ -35,13 +35,15 @@ const
DefaultMaxInflight* = 100

type
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
WantListHandler* =
proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).}
BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).}
BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).}
PaymentHandler* =
proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).}

BlockExcHandlers* = object
onWantList*: WantListHandler

@@ -58,15 +60,20 @@ type
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] {.gcsafe.}
WantCancellationSender* =
proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
BlocksDeliverySender* =
proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
PresenceSender* =
proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
) {.async: (raises: [CancelledError]).}
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
async: (raises: [CancelledError])
.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
async: (raises: [CancelledError])
.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
async: (raises: [CancelledError])
.}
AccountSender* =
proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
PaymentSender* =
proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}

BlockExcRequest* = object
sendWantList*: WantListSender
@@ -98,7 +105,9 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =

return b.peerId == peer

proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
proc send*(
b: BlockExcNetwork, id: PeerId, msg: pb.Message
) {.async: (raises: [CancelledError]).} =
## Send message to peer
##

@@ -106,8 +115,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
trace "Unable to send, peer not found", peerId = id
return

let peer = b.peers[id]
try:
let peer = b.peers[id]

await b.inflightSema.acquire()
await peer.send(msg)
except CancelledError as error:

@@ -117,7 +127,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
finally:
b.inflightSema.release()

proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} =
proc handleWantList(
b: BlockExcNetwork, peer: NetworkPeer, list: WantList
) {.async: (raises: []).} =
## Handle incoming want list
##

@@ -133,7 +145,7 @@ proc sendWantList*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send a want message to peer
##

@@ -154,14 +166,14 @@ proc sendWantList*(

proc sendWantCancellations*(
b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
## Informs a remote peer that we're no longer interested in a set of blocks
##
await b.sendWantList(id = id, addresses = addresses, cancel = true)

proc handleBlocksDelivery(
b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: []).} =
## Handle incoming blocks
##

@@ -170,7 +182,7 @@ proc handleBlocksDelivery(

proc sendBlocksDelivery*(
b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send blocks to remote
##

@@ -178,7 +190,7 @@ proc sendBlocksDelivery*(

proc handleBlockPresence(
b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
) {.async.} =
) {.async: (raises: []).} =
## Handle block presence
##

@@ -187,7 +199,7 @@ proc handleBlockPresence(

proc sendBlockPresence*(
b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence]
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send presence to remote
##

@@ -195,20 +207,24 @@ proc sendBlockPresence*(

proc handleAccount(
network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async.} =
) {.async: (raises: []).} =
## Handle account info
##

if not network.handlers.onAccount.isNil:
await network.handlers.onAccount(peer.id, account)

proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] =
proc sendAccount*(
b: BlockExcNetwork, id: PeerId, account: Account
) {.async: (raw: true, raises: [CancelledError]).} =
## Send account info to remote
##

b.send(id, Message(account: AccountMessage.init(account)))

proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] =
proc sendPayment*(
b: BlockExcNetwork, id: PeerId, payment: SignedState
) {.async: (raw: true, raises: [CancelledError]).} =
## Send payment to remote
##

@@ -216,7 +232,7 @@ proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[

proc handlePayment(
network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async.} =
) {.async: (raises: []).} =
## Handle payment
##

@@ -225,7 +241,7 @@ proc handlePayment(

proc rpcHandler(
b: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: [CatchableError]).} =
) {.async: (raises: []).} =
## handle rpc messages
##
if msg.wantList.entries.len > 0:

@@ -250,7 +266,9 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
if peer in b.peers:
return b.peers.getOrDefault(peer, nil)

var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} =
var getConn: ConnProvider = proc(): Future[Connection] {.
async: (raises: [CancelledError])
.} =
try:
trace "Getting new connection stream", peer
return await b.switch.dial(peer, Codec)

@@ -262,9 +280,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
if not isNil(b.getConn):
getConn = b.getConn

let rpcHandler = proc(
p: NetworkPeer, msg: Message
) {.async: (raises: [CatchableError]).} =
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await b.rpcHandler(p, msg)

# create new pubsub peer

@@ -307,7 +323,9 @@ method init*(self: BlockExcNetwork) =
## Perform protocol initialization
##

proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
self.setupPeer(peerId)
else:

@@ -316,7 +334,9 @@ method init*(self: BlockExcNetwork) =
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)

proc handler(conn: Connection, proto: string) {.async.} =
proc handler(
conn: Connection, proto: string
): Future[void] {.async: (raises: [CancelledError]).} =
let peerId = conn.peerId
let blockexcPeer = self.getOrCreatePeer(peerId)
await blockexcPeer.readLoop(conn) # attach read loop

@@ -353,26 +373,32 @@ proc new*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)

proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantCancellations(id, addresses)

proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlocksDelivery(id, blocksDelivery)

proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
proc sendPresence(
id: PeerId, presence: seq[BlockPresence]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlockPresence(id, presence)

proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
proc sendAccount(
id: PeerId, account: Account
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendAccount(id, account)

proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
proc sendPayment(
id: PeerId, payment: SignedState
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendPayment(id, payment)

self.request = BlockExcRequest(
@@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/upraises
push:
{.upraises: [].}
{.push raises: [].}

import pkg/chronos
import pkg/libp2p

@@ -18,6 +16,7 @@ import ../protobuf/blockexc
import ../protobuf/message
import ../../errors
import ../../logutils
import ../../utils/trackedfutures

logScope:
topics = "codex blockexcnetworkpeer"

@@ -25,11 +24,10 @@ logScope:
const DefaultYieldInterval = 50.millis

type
ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.}
ConnProvider* =
proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).}

RPCHandler* = proc(
peer: NetworkPeer, msg: Message
): Future[void].Raising(CatchableError) {.gcsafe.}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).}

NetworkPeer* = ref object of RootObj
id*: PeerId

@@ -37,55 +35,60 @@ type
sendConn: Connection
getConn: ConnProvider
yieldInterval*: Duration = DefaultYieldInterval
trackedFutures: TrackedFutures

proc connected*(b: NetworkPeer): bool =
not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof)
proc connected*(self: NetworkPeer): bool =
not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)

proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
if isNil(conn):
trace "No connection to read from", peer = b.id
trace "No connection to read from", peer = self.id
return

trace "Attaching read loop", peer = b.id, connId = conn.oid
trace "Attaching read loop", peer = self.id, connId = conn.oid
try:
var nextYield = Moment.now() + b.yieldInterval
var nextYield = Moment.now() + self.yieldInterval
while not conn.atEof or not conn.closed:
if Moment.now() > nextYield:
nextYield = Moment.now() + b.yieldInterval
nextYield = Moment.now() + self.yieldInterval
trace "Yielding in read loop",
peer = b.id, nextYield = nextYield, interval = b.yieldInterval
peer = self.id, nextYield = nextYield, interval = self.yieldInterval
await sleepAsync(10.millis)

let
data = await conn.readLp(MaxMessageSize.int)
msg = Message.protobufDecode(data).mapFailure().tryGet()
trace "Received message", peer = b.id, connId = conn.oid
await b.handler(b, msg)
trace "Received message", peer = self.id, connId = conn.oid
await self.handler(self, msg)
except CancelledError:
trace "Read loop cancelled"
except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg
finally:
trace "Detaching read loop", peer = b.id, connId = conn.oid
trace "Detaching read loop", peer = self.id, connId = conn.oid
await conn.close()

proc connect*(b: NetworkPeer): Future[Connection] {.async.} =
if b.connected:
trace "Already connected", peer = b.id, connId = b.sendConn.oid
return b.sendConn
proc connect*(
self: NetworkPeer
): Future[Connection] {.async: (raises: [CancelledError]).} =
if self.connected:
trace "Already connected", peer = self.id, connId = self.sendConn.oid
return self.sendConn

b.sendConn = await b.getConn()
asyncSpawn b.readLoop(b.sendConn)
return b.sendConn
self.sendConn = await self.getConn()
self.trackedFutures.track(self.readLoop(self.sendConn))
return self.sendConn

proc send*(b: NetworkPeer, msg: Message) {.async.} =
let conn = await b.connect()
proc send*(
self: NetworkPeer, msg: Message
) {.async: (raises: [CancelledError, LPStreamError]).} =
let conn = await self.connect()

if isNil(conn):
warn "Unable to get send connection for peer message not sent", peer = b.id
warn "Unable to get send connection for peer message not sent", peer = self.id
return

trace "Sending message", peer = b.id, connId = conn.oid
trace "Sending message", peer = self.id, connId = conn.oid
await conn.writeLp(protobufEncode(msg))

func new*(

@@ -96,4 +99,9 @@ func new*(
): NetworkPeer =
doAssert(not isNil(connProvider), "should supply connection provider")

NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler)
NetworkPeer(
id: peer,
getConn: connProvider,
handler: rpcHandler,
trackedFutures: TrackedFutures(),
)
@@ -7,16 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/sequtils
import std/tables
import std/algorithm
import std/sequtils

import pkg/upraises

push:
{.upraises: [].}

import pkg/chronos
import pkg/libp2p
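Several module headers in this commit drop the `upraises` shim in favour of Nim's own effect pragma. In short, the change looks like this (a generic illustration, not a file from the repository):

# Before: the module relied on the upraises shim.
#   import pkg/upraises
#   push: {.upraises: [].}
# After: plain Nim exception tracking at the top of the module.
{.push raises: [].}

import pkg/chronos

proc safeAdd(a, b: int): int =
  # Any proc declared below the push must not raise, or must declare what it raises.
  a + b

{.pop.}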
@@ -97,7 +97,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
pb.write(field, ipb)

proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
var ipb = initProtoBuffer(maxSize = MaxBlockSize)
var ipb = initProtoBuffer()
ipb.write(1, value.blk.cid.data.buffer)
ipb.write(2, value.blk.data)
ipb.write(3, value.address)

@@ -128,7 +128,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
pb.write(field, ipb)

proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer(maxSize = MaxMessageSize)
var ipb = initProtoBuffer()
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v)

@@ -254,16 +254,14 @@ proc decode*(
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
var
value = Message()
pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
pb = initProtoBuffer(msg)
ipb: ProtoBuffer
sublist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist):
for item in sublist:
value.payload.add(
?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))
)
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
if ?pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
@@ -1,8 +1,9 @@
{.push raises: [].}

import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import pkg/upraises
import ./blockexc

export AccountMessage

@@ -11,9 +12,6 @@ export StateChannelUpdate
export stint
export nitro

push:
{.upraises: [].}

type Account* = object
address*: EthAddress
@@ -1,8 +1,9 @@
{.push raises: [].}

import libp2p
import pkg/stint
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./blockexc

import ../../blocktype

@@ -11,9 +12,6 @@ export questionable
export stint
export BlockPresenceType

upraises.push:
{.upraises: [].}

type
PresenceMessage* = blockexc.BlockPresence
Presence* = object
@@ -28,8 +28,11 @@ const DefaultChunkSize* = DefaultBlockSize

type
# default reader type
ChunkerError* = object of CatchableError
ChunkBuffer* = ptr UncheckedArray[byte]
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].}
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
gcsafe, async: (raises: [ChunkerError, CancelledError])
.}

# Reader that splits input data into fixed-size chunks
Chunker* = ref object

@@ -74,7 +77,7 @@ proc new*(

proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
var res = 0
try:
while res < len:

@@ -85,7 +88,7 @@ proc new*(
raise error
except LPStreamError as error:
error "LPStream error", err = error.msg
raise error
raise newException(ChunkerError, "LPStream error", error)
except CatchableError as exc:
error "CatchableError exception", exc = exc.msg
raise newException(Defect, exc.msg)

@@ -102,7 +105,7 @@ proc new*(

proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
var total = 0
try:
while total < len:
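The chunker's `Reader` callback now advertises `{.async: (raises: [ChunkerError, CancelledError]).}`, so stream failures surface as `ChunkerError` instead of a `Defect`. A toy reader with the same shape (illustrative only; it copies from an in-memory buffer rather than an LPStream, and redeclares the types locally):

import pkg/chronos

type
  ChunkerError = object of CatchableError
  ChunkBuffer = ptr UncheckedArray[byte]
  Reader = proc(data: ChunkBuffer, len: int): Future[int] {.
    gcsafe, async: (raises: [ChunkerError, CancelledError])
  .}

proc makeMemoryReader(source: seq[byte]): Reader =
  var offset = 0
  return proc(data: ChunkBuffer, len: int): Future[int] {.
      gcsafe, async: (raises: [ChunkerError, CancelledError])
  .} =
    # Copy up to `len` bytes from the captured source buffer into `data`.
    let count = min(len, source.len - offset)
    for i in 0 ..< count:
      data[i] = source[offset + i]
    offset += count
    return count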
@@ -134,6 +134,10 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
if config.simulateProofFailures > 0:
warn "Proof failure simulation is not enabled for this build! Configuration ignored"

if error =? (await market.loadConfig()).errorOption:
fatal "Cannot load market configuration", error = error.msg
quit QuitFailure

let purchasing = Purchasing.new(market, clock)
let sales = Sales.new(market, clock, repo, proofFailures)
client = some ClientInteractions.new(clock, purchasing)

@@ -173,14 +177,20 @@ proc start*(s: CodexServer) {.async.} =
proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node"

await allFuturesThrowing(
s.restServer.stop(),
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
let res = await noCancel allFinishedFailed(
@[
s.restServer.stop(),
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
]
)

if res.failure.len > 0:
error "Failed to stop codex node", failures = res.failure.len
raiseAssert "Failed to stop codex node"

proc new*(
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
): CodexServer =
@ -5,6 +5,7 @@ import pkg/chronos
|
||||
import pkg/stint
|
||||
import ../clock
|
||||
import ../conf
|
||||
import ../utils/trackedfutures
|
||||
|
||||
export clock
|
||||
|
||||
@ -18,9 +19,12 @@ type OnChainClock* = ref object of Clock
|
||||
blockNumber: UInt256
|
||||
started: bool
|
||||
newBlock: AsyncEvent
|
||||
trackedFutures: TrackedFutures
|
||||
|
||||
proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
|
||||
OnChainClock(provider: provider, newBlock: newAsyncEvent())
|
||||
OnChainClock(
|
||||
provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
|
||||
)
|
||||
|
||||
proc update(clock: OnChainClock, blck: Block) =
|
||||
if number =? blck.number and number > clock.blockNumber:
|
||||
@ -32,15 +36,12 @@ proc update(clock: OnChainClock, blck: Block) =
|
||||
blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
|
||||
clock.newBlock.fire()
|
||||
|
||||
proc update(clock: OnChainClock) {.async.} =
|
||||
proc update(clock: OnChainClock) {.async: (raises: []).} =
|
||||
try:
|
||||
if latest =? (await clock.provider.getBlock(BlockTag.latest)):
|
||||
clock.update(latest)
|
||||
except CancelledError as error:
|
||||
raise error
|
||||
except CatchableError as error:
|
||||
debug "error updating clock: ", error = error.msg
|
||||
discard
|
||||
|
||||
method start*(clock: OnChainClock) {.async.} =
|
||||
if clock.started:
|
||||
@ -52,7 +53,7 @@ method start*(clock: OnChainClock) {.async.} =
|
||||
return
|
||||
|
||||
# ignore block parameter; hardhat may call this with pending blocks
|
||||
asyncSpawn clock.update()
|
||||
clock.trackedFutures.track(clock.update())
|
||||
|
||||
await clock.update()
|
||||
|
||||
@ -64,6 +65,7 @@ method stop*(clock: OnChainClock) {.async.} =
|
||||
return
|
||||
|
||||
await clock.subscription.unsubscribe()
|
||||
await clock.trackedFutures.cancelTracked()
|
||||
clock.started = false
|
||||
|
||||
method now*(clock: OnChainClock): SecondsSince1970 =
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
import std/strformat
|
||||
import std/strutils
|
||||
import pkg/ethers
|
||||
import pkg/upraises
|
||||
@ -49,130 +50,173 @@ func new*(
|
||||
proc raiseMarketError(message: string) {.raises: [MarketError].} =
|
||||
raise newException(MarketError, message)
|
||||
|
||||
template convertEthersError(body) =
|
||||
func prefixWith(suffix, prefix: string, separator = ": "): string =
|
||||
if prefix.len > 0:
|
||||
return &"{prefix}{separator}{suffix}"
|
||||
else:
|
||||
return suffix
|
||||
|
||||
template convertEthersError(msg: string = "", body) =
|
||||
try:
|
||||
body
|
||||
except EthersError as error:
|
||||
raiseMarketError(error.msgDetail)
|
||||
raiseMarketError(error.msgDetail.prefixWith(msg))
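For orientation, a minimal standalone sketch (not part of this change) of how the new message prefixing composes; the error text below is hypothetical:

import std/strformat

# Mirrors the `prefixWith` helper introduced above.
func prefixWith(suffix, prefix: string, separator = ": "): string =
  if prefix.len > 0: &"{prefix}{separator}{suffix}" else: suffix

# A hypothetical EthersError detail wrapped via convertEthersError("Failed to approve funds"):
assert "transaction reverted".prefixWith("Failed to approve funds") ==
  "Failed to approve funds: transaction reverted"
# With no prefix supplied, the original detail passes through unchanged:
assert "transaction reverted".prefixWith("") == "transaction reverted"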
|
||||
|
||||
proc config(market: OnChainMarket): Future[MarketplaceConfig] {.async.} =
|
||||
proc config(
|
||||
market: OnChainMarket
|
||||
): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
without resolvedConfig =? market.configuration:
|
||||
let fetchedConfig = await market.contract.configuration()
|
||||
market.configuration = some fetchedConfig
|
||||
return fetchedConfig
|
||||
if err =? (await market.loadConfig()).errorOption:
|
||||
raiseMarketError(err.msg)
|
||||
|
||||
without config =? market.configuration:
|
||||
raiseMarketError("Failed to access to config from the Marketplace contract")
|
||||
|
||||
return config
|
||||
|
||||
return resolvedConfig
|
||||
|
||||
proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
|
||||
debug "Approving tokens", amount
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to approve funds"):
|
||||
let tokenAddress = await market.contract.token()
|
||||
let token = Erc20Token.new(tokenAddress, market.signer)
|
||||
discard await token.increaseAllowance(market.contract.address(), amount).confirm(1)
|
||||
|
||||
method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} =
|
||||
method loadConfig*(
|
||||
market: OnChainMarket
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
without config =? market.configuration:
|
||||
let fetchedConfig = await market.contract.configuration()
|
||||
|
||||
market.configuration = some fetchedConfig
|
||||
|
||||
return success()
|
||||
except EthersError as err:
|
||||
return failure newException(
|
||||
MarketError,
|
||||
"Failed to fetch the config from the Marketplace contract: " & err.msg,
|
||||
)
|
||||
|
||||
method getZkeyHash*(
|
||||
market: OnChainMarket
|
||||
): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
let config = await market.config()
|
||||
return some config.proofs.zkeyHash
|
||||
|
||||
method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get signer address"):
|
||||
return await market.signer.getAddress()
|
||||
|
||||
method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} =
|
||||
convertEthersError:
|
||||
method periodicity*(
|
||||
market: OnChainMarket
|
||||
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get Marketplace config"):
|
||||
let config = await market.config()
|
||||
let period = config.proofs.period
|
||||
return Periodicity(seconds: period)
|
||||
|
||||
method proofTimeout*(market: OnChainMarket): Future[uint64] {.async.} =
|
||||
convertEthersError:
|
||||
method proofTimeout*(
|
||||
market: OnChainMarket
|
||||
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get Marketplace config"):
|
||||
let config = await market.config()
|
||||
return config.proofs.timeout
|
||||
|
||||
method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} =
|
||||
convertEthersError:
|
||||
method repairRewardPercentage*(
|
||||
market: OnChainMarket
|
||||
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get Marketplace config"):
|
||||
let config = await market.config()
|
||||
return config.collateral.repairRewardPercentage
|
||||
|
||||
method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get Marketplace config"):
|
||||
let config = await market.config()
|
||||
return config.requestDurationLimit
|
||||
|
||||
method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} =
|
||||
convertEthersError:
|
||||
method proofDowntime*(
|
||||
market: OnChainMarket
|
||||
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get Marketplace config"):
|
||||
let config = await market.config()
|
||||
return config.proofs.downtime
|
||||
|
||||
method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get slot pointer"):
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.getPointer(slotId, overrides)
|
||||
|
||||
method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get my requests"):
|
||||
return await market.contract.myRequests
|
||||
|
||||
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get my slots"):
|
||||
let slots = await market.contract.mySlots()
|
||||
debug "Fetched my slots", numSlots = len(slots)
|
||||
|
||||
return slots
|
||||
|
||||
method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to request storage"):
|
||||
debug "Requesting storage"
|
||||
await market.approveFunds(request.totalPrice())
|
||||
discard await market.contract.requestStorage(request).confirm(1)
|
||||
|
||||
method getRequest*(
|
||||
market: OnChainMarket, id: RequestId
|
||||
): Future[?StorageRequest] {.async.} =
|
||||
let key = $id
|
||||
): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
let key = $id
|
||||
|
||||
if market.requestCache.contains(key):
|
||||
return some market.requestCache[key]
|
||||
if key in market.requestCache:
|
||||
return some market.requestCache[key]
|
||||
|
||||
convertEthersError:
|
||||
try:
|
||||
let request = await market.contract.getRequest(id)
|
||||
market.requestCache[key] = request
|
||||
return some request
|
||||
except Marketplace_UnknownRequest:
|
||||
return none StorageRequest
|
||||
let request = await market.contract.getRequest(id)
|
||||
market.requestCache[key] = request
|
||||
return some request
|
||||
except Marketplace_UnknownRequest, KeyError:
|
||||
warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
|
||||
return none StorageRequest
|
||||
except EthersError as e:
|
||||
error "Cannot retrieve the request", error = e.msg
|
||||
return none StorageRequest
|
||||
|
||||
method requestState*(
|
||||
market: OnChainMarket, requestId: RequestId
|
||||
): Future[?RequestState] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get request state"):
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return some await market.contract.requestState(requestId, overrides)
|
||||
except Marketplace_UnknownRequest:
|
||||
return none RequestState
|
||||
|
||||
method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} =
|
||||
convertEthersError:
|
||||
method slotState*(
|
||||
market: OnChainMarket, slotId: SlotId
|
||||
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.slotState(slotId, overrides)
|
||||
|
||||
method getRequestEnd*(
|
||||
market: OnChainMarket, id: RequestId
|
||||
): Future[SecondsSince1970] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get request end"):
|
||||
return await market.contract.requestEnd(id)
|
||||
|
||||
method requestExpiresAt*(
|
||||
market: OnChainMarket, id: RequestId
|
||||
): Future[SecondsSince1970] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get request expiry"):
|
||||
return await market.contract.requestExpiry(id)
|
||||
|
||||
method getHost(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
): Future[?Address] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get slot's host"):
|
||||
let slotId = slotId(requestId, slotIndex)
|
||||
let address = await market.contract.getHost(slotId)
|
||||
if address != Address.default:
|
||||
@ -183,11 +227,11 @@ method getHost(
|
||||
method currentCollateral*(
|
||||
market: OnChainMarket, slotId: SlotId
|
||||
): Future[UInt256] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get slot's current collateral"):
|
||||
return await market.contract.currentCollateral(slotId)
|
||||
|
||||
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get active slot"):
|
||||
try:
|
||||
return some await market.contract.getActiveSlot(slotId)
|
||||
except Marketplace_SlotIsFree:
|
||||
@ -200,18 +244,24 @@ method fillSlot(
|
||||
proof: Groth16Proof,
|
||||
collateral: UInt256,
|
||||
) {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to fill slot"):
|
||||
logScope:
|
||||
requestId
|
||||
slotIndex
|
||||
|
||||
await market.approveFunds(collateral)
|
||||
trace "calling fillSlot on contract"
|
||||
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
|
||||
trace "fillSlot transaction completed"
|
||||
try:
|
||||
await market.approveFunds(collateral)
|
||||
trace "calling fillSlot on contract"
|
||||
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
|
||||
trace "fillSlot transaction completed"
|
||||
except Marketplace_SlotNotFree as parent:
|
||||
raise newException(
|
||||
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
|
||||
parent,
|
||||
)
|
||||
|
||||
method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to free slot"):
|
||||
var freeSlot: Future[Confirmable]
|
||||
if rewardRecipient =? market.rewardRecipient:
|
||||
# If --reward-recipient specified, use it as the reward recipient, and use
|
||||
@ -230,11 +280,11 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
|
||||
discard await freeSlot.confirm(1)
|
||||
|
||||
method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to withdraw funds"):
|
||||
discard await market.contract.withdrawFunds(requestId).confirm(1)
|
||||
|
||||
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get proof requirement"):
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.isProofRequired(id, overrides)
|
||||
@ -242,7 +292,7 @@ method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async
|
||||
return false
|
||||
|
||||
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get future proof requirement"):
|
||||
try:
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.willProofBeRequired(id, overrides)
|
||||
@ -252,18 +302,18 @@ method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.a
|
||||
method getChallenge*(
|
||||
market: OnChainMarket, id: SlotId
|
||||
): Future[ProofChallenge] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get proof challenge"):
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.getChallenge(id, overrides)
|
||||
|
||||
method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to submit proof"):
|
||||
discard await market.contract.submitProof(id, proof).confirm(1)
|
||||
|
||||
method markProofAsMissing*(
|
||||
market: OnChainMarket, id: SlotId, period: Period
|
||||
) {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to mark proof as missing"):
|
||||
discard await market.contract.markProofAsMissing(id, period).confirm(1)
|
||||
|
||||
method canProofBeMarkedAsMissing*(
|
||||
@ -282,20 +332,26 @@ method canProofBeMarkedAsMissing*(
|
||||
method reserveSlot*(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
) {.async.} =
|
||||
convertEthersError:
|
||||
discard await market.contract
|
||||
.reserveSlot(
|
||||
requestId,
|
||||
slotIndex,
|
||||
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it
|
||||
TransactionOverrides(gasLimit: some 100000.u256),
|
||||
)
|
||||
.confirm(1)
|
||||
convertEthersError("Failed to reserve slot"):
|
||||
try:
|
||||
discard await market.contract
|
||||
.reserveSlot(
|
||||
requestId,
|
||||
slotIndex,
|
||||
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it
|
||||
TransactionOverrides(gasLimit: some 100000.u256),
|
||||
)
|
||||
.confirm(1)
|
||||
except SlotReservations_ReservationNotAllowed:
|
||||
raise newException(
|
||||
SlotReservationNotAllowedError,
|
||||
"Failed to reserve slot because reservation is not allowed",
|
||||
)
|
||||
|
||||
method canReserveSlot*(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
): Future[bool] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Unable to determine if slot can be reserved"):
|
||||
return await market.contract.canReserveSlot(requestId, slotIndex)
|
||||
|
||||
method subscribeRequests*(
|
||||
@ -308,7 +364,7 @@ method subscribeRequests*(
|
||||
|
||||
callback(event.requestId, event.ask, event.expiry)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to StorageRequested events"):
|
||||
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -322,7 +378,7 @@ method subscribeSlotFilled*(
|
||||
|
||||
callback(event.requestId, event.slotIndex)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to SlotFilled events"):
|
||||
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -336,7 +392,7 @@ method subscribeSlotFilled*(
|
||||
if eventRequestId == requestId and eventSlotIndex == slotIndex:
|
||||
callback(requestId, slotIndex)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to SlotFilled events"):
|
||||
return await market.subscribeSlotFilled(onSlotFilled)
|
||||
|
||||
method subscribeSlotFreed*(
|
||||
@ -349,7 +405,7 @@ method subscribeSlotFreed*(
|
||||
|
||||
callback(event.requestId, event.slotIndex)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to SlotFreed events"):
|
||||
let subscription = await market.contract.subscribe(SlotFreed, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -364,7 +420,7 @@ method subscribeSlotReservationsFull*(
|
||||
|
||||
callback(event.requestId, event.slotIndex)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to SlotReservationsFull events"):
|
||||
let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -378,7 +434,7 @@ method subscribeFulfillment(
|
||||
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to RequestFulfilled events"):
|
||||
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -393,7 +449,7 @@ method subscribeFulfillment(
|
||||
if event.requestId == requestId:
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to RequestFulfilled events"):
|
||||
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -407,7 +463,7 @@ method subscribeRequestCancelled*(
|
||||
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to RequestCancelled events"):
|
||||
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -422,7 +478,7 @@ method subscribeRequestCancelled*(
|
||||
if event.requestId == requestId:
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to RequestCancelled events"):
|
||||
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -436,7 +492,7 @@ method subscribeRequestFailed*(
|
||||
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to RequestFailed events"):
|
||||
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -451,7 +507,7 @@ method subscribeRequestFailed*(
|
||||
if event.requestId == requestId:
|
||||
callback(event.requestId)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to RequestFailed events"):
|
||||
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -465,7 +521,7 @@ method subscribeProofSubmission*(
|
||||
|
||||
callback(event.id)
|
||||
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to subscribe to ProofSubmitted events"):
|
||||
let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
|
||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||
|
||||
@ -475,13 +531,13 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
|
||||
method queryPastSlotFilledEvents*(
|
||||
market: OnChainMarket, fromBlock: BlockTag
|
||||
): Future[seq[SlotFilled]] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get past SlotFilled events from block"):
|
||||
return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)
|
||||
|
||||
method queryPastSlotFilledEvents*(
|
||||
market: OnChainMarket, blocksAgo: int
|
||||
): Future[seq[SlotFilled]] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get past SlotFilled events"):
|
||||
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
|
||||
|
||||
return await market.queryPastSlotFilledEvents(fromBlock)
|
||||
@ -489,21 +545,58 @@ method queryPastSlotFilledEvents*(
|
||||
method queryPastSlotFilledEvents*(
|
||||
market: OnChainMarket, fromTime: SecondsSince1970
|
||||
): Future[seq[SlotFilled]] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get past SlotFilled events from time"):
|
||||
let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
|
||||
return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
|
||||
|
||||
method queryPastStorageRequestedEvents*(
|
||||
market: OnChainMarket, fromBlock: BlockTag
|
||||
): Future[seq[StorageRequested]] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get past StorageRequested events from block"):
|
||||
return
|
||||
await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)
|
||||
|
||||
method queryPastStorageRequestedEvents*(
|
||||
market: OnChainMarket, blocksAgo: int
|
||||
): Future[seq[StorageRequested]] {.async.} =
|
||||
convertEthersError:
|
||||
convertEthersError("Failed to get past StorageRequested events"):
|
||||
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
|
||||
|
||||
return await market.queryPastStorageRequestedEvents(fromBlock)
|
||||
|
||||
method slotCollateral*(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
|
||||
let slotid = slotId(requestId, slotIndex)
|
||||
|
||||
try:
|
||||
let slotState = await market.slotState(slotid)
|
||||
|
||||
without request =? await market.getRequest(requestId):
|
||||
return failure newException(
|
||||
MarketError, "Failure calculating the slotCollateral, cannot get the request"
|
||||
)
|
||||
|
||||
return market.slotCollateral(request.ask.collateralPerSlot, slotState)
|
||||
except MarketError as error:
|
||||
error "Error when trying to calculate the slotCollateral", error = error.msg
|
||||
return failure error
|
||||
|
||||
method slotCollateral*(
|
||||
market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
|
||||
): ?!UInt256 {.raises: [].} =
|
||||
if slotState == SlotState.Repair:
|
||||
without repairRewardPercentage =?
|
||||
market.configuration .? collateral .? repairRewardPercentage:
|
||||
return failure newException(
|
||||
MarketError,
|
||||
"Failure calculating the slotCollateral, cannot get the reward percentage",
|
||||
)
|
||||
|
||||
return success (
|
||||
collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(
|
||||
100.u256
|
||||
)
|
||||
)
|
||||
|
||||
return success(collateralPerSlot)
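As a quick numeric illustration (hypothetical values, not taken from this change), the repair discount above works out as follows:

import pkg/stint

let
  collateralPerSlot = 1000.u256
  repairRewardPercentage = 10.u256 # hypothetical value from the marketplace config

# A slot in the Repair state only needs the collateral minus the repair reward share:
let repairCollateral =
  collateralPerSlot - (collateralPerSlot * repairRewardPercentage).div(100.u256)
assert repairCollateral == 900.u256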
|
||||
|
||||
@ -53,6 +53,7 @@ type
|
||||
Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
|
||||
Proofs_InvalidProbability* = object of SolidityError
|
||||
Periods_InvalidSecondsPerPeriod* = object of SolidityError
|
||||
SlotReservations_ReservationNotAllowed* = object of SolidityError
|
||||
|
||||
proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
|
||||
proc token*(marketplace: Marketplace): Address {.contract, view.}
|
||||
|
||||
@ -7,6 +7,8 @@
|
||||
## This file may not be copied, modified, or distributed except according to
|
||||
## those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/algorithm
|
||||
import std/sequtils
|
||||
|
||||
@ -54,70 +56,122 @@ proc toNodeId*(host: ca.Address): NodeId =
|
||||
|
||||
readUintBE[256](keccak256.digest(host.toArray).data)
|
||||
|
||||
proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
|
||||
proc findPeer*(
|
||||
d: Discovery, peerId: PeerId
|
||||
): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
|
||||
trace "protocol.resolve..."
|
||||
## Find peer using the given Discovery object
|
||||
##
|
||||
let node = await d.protocol.resolve(toNodeId(peerId))
|
||||
|
||||
return
|
||||
if node.isSome():
|
||||
node.get().record.data.some
|
||||
else:
|
||||
PeerRecord.none
|
||||
try:
|
||||
let node = await d.protocol.resolve(toNodeId(peerId))
|
||||
|
||||
method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
|
||||
return
|
||||
if node.isSome():
|
||||
node.get().record.data.some
|
||||
else:
|
||||
PeerRecord.none
|
||||
except CancelledError as exc:
|
||||
warn "Error finding peer", peerId = peerId, exc = exc.msg
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
warn "Error finding peer", peerId = peerId, exc = exc.msg
|
||||
|
||||
return PeerRecord.none
|
||||
|
||||
method find*(
|
||||
d: Discovery, cid: Cid
|
||||
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
|
||||
## Find block providers
|
||||
##
|
||||
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
|
||||
warn "Error finding providers for block", cid, error = error.msg
|
||||
|
||||
return providers.filterIt(not (it.data.peerId == d.peerId))
|
||||
try:
|
||||
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure,
|
||||
error:
|
||||
warn "Error finding providers for block", cid, error = error.msg
|
||||
|
||||
method provide*(d: Discovery, cid: Cid) {.async, base.} =
|
||||
return providers.filterIt(not (it.data.peerId == d.peerId))
|
||||
except CancelledError as exc:
|
||||
warn "Error finding providers for block", cid, exc = exc.msg
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
warn "Error finding providers for block", cid, exc = exc.msg
|
||||
|
||||
method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} =
|
||||
## Provide a block Cid
|
||||
##
|
||||
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
|
||||
try:
|
||||
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
|
||||
|
||||
if nodes.len <= 0:
|
||||
warn "Couldn't provide to any nodes!"
|
||||
if nodes.len <= 0:
|
||||
warn "Couldn't provide to any nodes!"
|
||||
except CancelledError as exc:
|
||||
warn "Error providing block", cid, exc = exc.msg
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
warn "Error providing block", cid, exc = exc.msg
|
||||
|
||||
method find*(
|
||||
d: Discovery, host: ca.Address
|
||||
): Future[seq[SignedPeerRecord]] {.async, base.} =
|
||||
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
|
||||
## Find host providers
|
||||
##
|
||||
|
||||
trace "Finding providers for host", host = $host
|
||||
without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
|
||||
error:
|
||||
trace "Error finding providers for host", host = $host, exc = error.msg
|
||||
return
|
||||
try:
|
||||
trace "Finding providers for host", host = $host
|
||||
without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
|
||||
error:
|
||||
trace "Error finding providers for host", host = $host, exc = error.msg
|
||||
return
|
||||
|
||||
if providers.len <= 0:
|
||||
trace "No providers found", host = $host
|
||||
return
|
||||
if providers.len <= 0:
|
||||
trace "No providers found", host = $host
|
||||
return
|
||||
|
||||
providers.sort do(a, b: SignedPeerRecord) -> int:
|
||||
system.cmp[uint64](a.data.seqNo, b.data.seqNo)
|
||||
providers.sort do(a, b: SignedPeerRecord) -> int:
|
||||
system.cmp[uint64](a.data.seqNo, b.data.seqNo)
|
||||
|
||||
return providers
|
||||
return providers
|
||||
except CancelledError as exc:
|
||||
warn "Error finding providers for host", host = $host, exc = exc.msg
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
warn "Error finding providers for host", host = $host, exc = exc.msg
|
||||
|
||||
method provide*(d: Discovery, host: ca.Address) {.async, base.} =
|
||||
method provide*(
|
||||
d: Discovery, host: ca.Address
|
||||
) {.async: (raises: [CancelledError]), base.} =
|
||||
## Provide hosts
|
||||
##
|
||||
|
||||
trace "Providing host", host = $host
|
||||
let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
|
||||
if nodes.len > 0:
|
||||
trace "Provided to nodes", nodes = nodes.len
|
||||
try:
|
||||
trace "Providing host", host = $host
|
||||
let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
|
||||
if nodes.len > 0:
|
||||
trace "Provided to nodes", nodes = nodes.len
|
||||
except CancelledError as exc:
|
||||
warn "Error providing host", host = $host, exc = exc.msg
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
warn "Error providing host", host = $host, exc = exc.msg
|
||||
|
||||
method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} =
|
||||
method removeProvider*(
|
||||
d: Discovery, peerId: PeerId
|
||||
): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} =
|
||||
## Remove provider from providers table
|
||||
##
|
||||
|
||||
trace "Removing provider", peerId
|
||||
d.protocol.removeProvidersLocal(peerId)
|
||||
try:
|
||||
await d.protocol.removeProvidersLocal(peerId)
|
||||
except CancelledError as exc:
|
||||
warn "Error removing provider", peerId = peerId, exc = exc.msg
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
warn "Error removing provider", peerId = peerId, exc = exc.msg
|
||||
except Exception as exc: # Something in discv5 is raising Exception
|
||||
warn "Error removing provider", peerId = peerId, exc = exc.msg
|
||||
raiseAssert("Unexpected Exception in removeProvider")
|
||||
|
||||
proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
|
||||
## Update providers record
|
||||
@ -125,7 +179,7 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
|
||||
|
||||
d.announceAddrs = @addrs
|
||||
|
||||
trace "Updating announce record", addrs = d.announceAddrs
|
||||
info "Updating announce record", addrs = d.announceAddrs
|
||||
d.providerRecord = SignedPeerRecord
|
||||
.init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
|
||||
.expect("Should construct signed record").some
|
||||
@ -137,7 +191,7 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
|
||||
## Update providers record
|
||||
##
|
||||
|
||||
trace "Updating Dht record", addrs = addrs
|
||||
info "Updating Dht record", addrs = addrs
|
||||
d.dhtRecord = SignedPeerRecord
|
||||
.init(d.key, PeerRecord.init(d.peerId, @addrs))
|
||||
.expect("Should construct signed record").some
|
||||
@ -145,12 +199,18 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
|
||||
if not d.protocol.isNil:
|
||||
d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")
|
||||
|
||||
proc start*(d: Discovery) {.async.} =
|
||||
d.protocol.open()
|
||||
await d.protocol.start()
|
||||
proc start*(d: Discovery) {.async: (raises: []).} =
|
||||
try:
|
||||
d.protocol.open()
|
||||
await d.protocol.start()
|
||||
except CatchableError as exc:
|
||||
error "Error starting discovery", exc = exc.msg
|
||||
|
||||
proc stop*(d: Discovery) {.async.} =
|
||||
await d.protocol.closeWait()
|
||||
proc stop*(d: Discovery) {.async: (raises: []).} =
|
||||
try:
|
||||
await noCancel d.protocol.closeWait()
|
||||
except CatchableError as exc:
|
||||
error "Error stopping discovery", exc = exc.msg
|
||||
|
||||
proc new*(
|
||||
T: type Discovery,
|
||||
|
||||
@ -310,10 +310,10 @@ proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
|
||||
else:
|
||||
task[].success.store(true)
|
||||
|
||||
proc encodeAsync*(
|
||||
proc asyncEncode*(
|
||||
self: Erasure,
|
||||
blockSize, blocksLen, parityLen: int,
|
||||
data: ref seq[seq[byte]],
|
||||
blocks: ref seq[seq[byte]],
|
||||
parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
without threadPtr =? ThreadSignalPtr.new():
|
||||
@ -322,21 +322,18 @@ proc encodeAsync*(
|
||||
defer:
|
||||
threadPtr.close().expect("closing once works")
|
||||
|
||||
var blockData = createDoubleArray(blocksLen, blockSize)
|
||||
|
||||
for i in 0 ..< data[].len:
|
||||
copyMem(blockData[i], addr data[i][0], blockSize)
|
||||
var data = makeUncheckedArray(blocks)
|
||||
|
||||
defer:
|
||||
freeDoubleArray(blockData, blocksLen)
|
||||
dealloc(data)
|
||||
|
||||
## Create an encode task with block data
## Create an encode task with block data
|
||||
var task = EncodeTask(
|
||||
erasure: addr self,
|
||||
blockSize: blockSize,
|
||||
blocksLen: blocksLen,
|
||||
parityLen: parityLen,
|
||||
blocks: blockData,
|
||||
blocks: data,
|
||||
parity: parity,
|
||||
signal: threadPtr,
|
||||
)
|
||||
@ -348,18 +345,13 @@ proc encodeAsync*(
|
||||
self.taskPool.spawn leopardEncodeTask(self.taskPool, t)
|
||||
let threadFut = threadPtr.wait()
|
||||
|
||||
try:
|
||||
await threadFut.join()
|
||||
except CatchableError as exc:
|
||||
try:
|
||||
await threadFut
|
||||
except AsyncError as asyncExc:
|
||||
return failure(asyncExc.msg)
|
||||
finally:
|
||||
if exc of CancelledError:
|
||||
raise (ref CancelledError) exc
|
||||
else:
|
||||
return failure(exc.msg)
|
||||
if joinErr =? catch(await threadFut.join()).errorOption:
|
||||
if err =? catch(await noCancel threadFut).errorOption:
|
||||
return failure(err)
|
||||
if joinErr of CancelledError:
|
||||
raise (ref CancelledError) joinErr
|
||||
else:
|
||||
return failure(joinErr)
|
||||
|
||||
if not t.success.load():
|
||||
return failure("Leopard encoding failed")
|
||||
@ -409,7 +401,7 @@ proc encodeData(
|
||||
|
||||
try:
|
||||
if err =? (
|
||||
await self.encodeAsync(
|
||||
await self.asyncEncode(
|
||||
manifest.blockSize.int, params.ecK, params.ecM, data, parity
|
||||
)
|
||||
).errorOption:
|
||||
@ -489,6 +481,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
|
||||
task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
|
||||
defer:
|
||||
decoder.release()
|
||||
discard task[].signal.fireSync()
|
||||
|
||||
if (
|
||||
let res = decoder.decode(
|
||||
@ -506,9 +499,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
|
||||
else:
|
||||
task[].success.store(true)
|
||||
|
||||
discard task[].signal.fireSync()
|
||||
|
||||
proc decodeAsync*(
|
||||
proc asyncDecode*(
|
||||
self: Erasure,
|
||||
blockSize, blocksLen, parityLen: int,
|
||||
blocks, parity: ref seq[seq[byte]],
|
||||
@ -521,33 +512,21 @@ proc decodeAsync*(
|
||||
threadPtr.close().expect("closing once works")
|
||||
|
||||
var
|
||||
blocksData = createDoubleArray(blocksLen, blockSize)
|
||||
parityData = createDoubleArray(parityLen, blockSize)
|
||||
|
||||
for i in 0 ..< blocks[].len:
|
||||
if blocks[i].len > 0:
|
||||
copyMem(blocksData[i], addr blocks[i][0], blockSize)
|
||||
else:
|
||||
blocksData[i] = nil
|
||||
|
||||
for i in 0 ..< parity[].len:
|
||||
if parity[i].len > 0:
|
||||
copyMem(parityData[i], addr parity[i][0], blockSize)
|
||||
else:
|
||||
parityData[i] = nil
|
||||
blockData = makeUncheckedArray(blocks)
|
||||
parityData = makeUncheckedArray(parity)
|
||||
|
||||
defer:
|
||||
freeDoubleArray(blocksData, blocksLen)
|
||||
freeDoubleArray(parityData, parityLen)
|
||||
dealloc(blockData)
|
||||
dealloc(parityData)
|
||||
|
||||
## Create a decode task with block data
## Create a decode task with block data
|
||||
var task = DecodeTask(
|
||||
erasure: addr self,
|
||||
blockSize: blockSize,
|
||||
blocksLen: blocksLen,
|
||||
parityLen: parityLen,
|
||||
recoveredLen: blocksLen,
|
||||
blocks: blocksData,
|
||||
blocks: blockData,
|
||||
parity: parityData,
|
||||
recovered: recovered,
|
||||
signal: threadPtr,
|
||||
@ -560,18 +539,13 @@ proc decodeAsync*(
|
||||
self.taskPool.spawn leopardDecodeTask(self.taskPool, t)
|
||||
let threadFut = threadPtr.wait()
|
||||
|
||||
try:
|
||||
await threadFut.join()
|
||||
except CatchableError as exc:
|
||||
try:
|
||||
await threadFut
|
||||
except AsyncError as asyncExc:
|
||||
return failure(asyncExc.msg)
|
||||
finally:
|
||||
if exc of CancelledError:
|
||||
raise (ref CancelledError) exc
|
||||
else:
|
||||
return failure(exc.msg)
|
||||
if joinErr =? catch(await threadFut.join()).errorOption:
|
||||
if err =? catch(await noCancel threadFut).errorOption:
|
||||
return failure(err)
|
||||
if joinErr of CancelledError:
|
||||
raise (ref CancelledError) joinErr
|
||||
else:
|
||||
return failure(joinErr)
|
||||
|
||||
if not t.success.load():
|
||||
return failure("Leopard encoding failed")
|
||||
@ -627,7 +601,7 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
|
||||
trace "Erasure decoding data"
|
||||
try:
|
||||
if err =? (
|
||||
await self.decodeAsync(
|
||||
await self.asyncDecode(
|
||||
encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
|
||||
)
|
||||
).errorOption:
|
||||
|
||||
@ -19,6 +19,8 @@ type
|
||||
CodexError* = object of CatchableError # base codex error
|
||||
CodexResult*[T] = Result[T, ref CodexError]
|
||||
|
||||
FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]]
|
||||
|
||||
template mapFailure*[T, V, E](
|
||||
exp: Result[T, V], exc: typedesc[E]
|
||||
): Result[T, ref CatchableError] =
|
||||
@ -40,35 +42,18 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
|
||||
else:
|
||||
T.failure("Option is None")
|
||||
|
||||
# allFuturesThrowing was moved to the tests in libp2p
|
||||
proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
|
||||
var futs: seq[Future[T]]
|
||||
for fut in args:
|
||||
futs &= fut
|
||||
proc call() {.async.} =
|
||||
var first: ref CatchableError = nil
|
||||
futs = await allFinished(futs)
|
||||
for fut in futs:
|
||||
if fut.failed:
|
||||
let err = fut.readError()
|
||||
if err of Defect:
|
||||
raise err
|
||||
else:
|
||||
if err of CancelledError:
|
||||
raise err
|
||||
if isNil(first):
|
||||
first = err
|
||||
if not isNil(first):
|
||||
raise first
|
||||
proc allFinishedFailed*[T](futs: seq[Future[T]]): Future[FinishedFailed[T]] {.async.} =
|
||||
## Check if all futures have finished or failed
|
||||
##
|
||||
## TODO: wip, not sure if we want this - at the minimum,
|
||||
## we should probably avoid the async transform
|
||||
|
||||
return call()
|
||||
var res: FinishedFailed[T] = (@[], @[])
|
||||
await allFutures(futs)
|
||||
for f in futs:
|
||||
if f.failed:
|
||||
res.failure.add f
|
||||
else:
|
||||
res.success.add f
|
||||
|
||||
proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} =
|
||||
try:
|
||||
await allFuturesThrowing(fut)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
return failure(exc.msg)
|
||||
|
||||
return success()
|
||||
return res
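A hedged usage sketch (not from this diff) of how a caller can consume allFinishedFailed in place of the removed allFuturesThrowing helper:

import pkg/chronos
import pkg/chronicles

# Assumes `allFinishedFailed` from the utils module above is in scope.
proc stopAll(futs: seq[Future[void]]) {.async.} =
  let res = await allFinishedFailed(futs)
  if res.failure.len > 0:
    # Failures are collected rather than re-raised; the caller decides how to react.
    error "Some shutdown steps failed", failures = res.failure.len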
|
||||
|
||||
@ -18,6 +18,8 @@ export periods
|
||||
type
|
||||
Market* = ref object of RootObj
|
||||
MarketError* = object of CodexError
|
||||
SlotStateMismatchError* = object of MarketError
|
||||
SlotReservationNotAllowedError* = object of MarketError
|
||||
Subscription* = ref object of RootObj
|
||||
OnRequest* =
|
||||
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
|
||||
@ -62,25 +64,40 @@ type
|
||||
ProofSubmitted* = object of MarketplaceEvent
|
||||
id*: SlotId
|
||||
|
||||
method getZkeyHash*(market: Market): Future[?string] {.base, async.} =
|
||||
method loadConfig*(
|
||||
market: Market
|
||||
): Future[?!void] {.base, async: (raises: [CancelledError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getZkeyHash*(
|
||||
market: Market
|
||||
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getSigner*(market: Market): Future[Address] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method periodicity*(market: Market): Future[Periodicity] {.base, async.} =
|
||||
method periodicity*(
|
||||
market: Market
|
||||
): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method proofTimeout*(market: Market): Future[uint64] {.base, async.} =
|
||||
method proofTimeout*(
|
||||
market: Market
|
||||
): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} =
|
||||
method repairRewardPercentage*(
|
||||
market: Market
|
||||
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method proofDowntime*(market: Market): Future[uint8] {.base, async.} =
|
||||
method proofDowntime*(
|
||||
market: Market
|
||||
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
|
||||
@ -102,7 +119,7 @@ method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
|
||||
|
||||
method getRequest*(
|
||||
market: Market, id: RequestId
|
||||
): Future[?StorageRequest] {.base, async.} =
|
||||
): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method requestState*(
|
||||
@ -110,7 +127,9 @@ method requestState*(
|
||||
): Future[?RequestState] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} =
|
||||
method slotState*(
|
||||
market: Market, slotId: SlotId
|
||||
): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getRequestEnd*(
|
||||
@ -270,3 +289,13 @@ method queryPastStorageRequestedEvents*(
|
||||
market: Market, blocksAgo: int
|
||||
): Future[seq[StorageRequested]] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method slotCollateral*(
|
||||
market: Market, requestId: RequestId, slotIndex: uint64
|
||||
): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method slotCollateral*(
|
||||
market: Market, collateralPerSlot: UInt256, slotState: SlotState
|
||||
): ?!UInt256 {.base, gcsafe, raises: [].} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
@ -27,11 +27,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint
|
||||
const MaxMerkleProofSize = 1.MiBs.uint
|
||||
|
||||
proc encode*(self: CodexTree): seq[byte] =
|
||||
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(1, self.mcodec.uint64)
|
||||
pb.write(2, self.leavesCount.uint64)
|
||||
for node in self.nodes:
|
||||
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
|
||||
var nodesPb = initProtoBuffer()
|
||||
nodesPb.write(1, node)
|
||||
nodesPb.finish()
|
||||
pb.write(3, nodesPb)
|
||||
@ -40,7 +40,7 @@ proc encode*(self: CodexTree): seq[byte] =
|
||||
pb.buffer
|
||||
|
||||
proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
|
||||
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
|
||||
var pb = initProtoBuffer(data)
|
||||
var mcodecCode: uint64
|
||||
var leavesCount: uint64
|
||||
discard ?pb.getField(1, mcodecCode).mapFailure
|
||||
@ -63,13 +63,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
|
||||
CodexTree.fromNodes(mcodec, nodes, leavesCount.int)
|
||||
|
||||
proc encode*(self: CodexProof): seq[byte] =
|
||||
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(1, self.mcodec.uint64)
|
||||
pb.write(2, self.index.uint64)
|
||||
pb.write(3, self.nleaves.uint64)
|
||||
|
||||
for node in self.path:
|
||||
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
|
||||
var nodesPb = initProtoBuffer()
|
||||
nodesPb.write(1, node)
|
||||
nodesPb.finish()
|
||||
pb.write(4, nodesPb)
|
||||
@ -78,7 +78,7 @@ proc encode*(self: CodexProof): seq[byte] =
|
||||
pb.buffer
|
||||
|
||||
proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
|
||||
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
|
||||
var pb = initProtoBuffer(data)
|
||||
var mcodecCode: uint64
|
||||
var index: uint64
|
||||
var nleaves: uint64
|
||||
|
||||
108 codex/node.nim
108 codex/node.nim
@ -153,7 +153,11 @@ proc updateExpiry*(
|
||||
let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt(
|
||||
self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry)
|
||||
)
|
||||
await allFuturesThrowing(ensuringFutures)
|
||||
|
||||
let res = await allFinishedFailed(ensuringFutures)
|
||||
if res.failure.len > 0:
|
||||
trace "Some blocks failed to update expiry", len = res.failure.len
|
||||
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
@ -186,8 +190,10 @@ proc fetchBatched*(
|
||||
if not (await address in self.networkStore) or fetchLocal:
|
||||
self.networkStore.getBlock(address)
|
||||
|
||||
if blocksErr =? (await allFutureResult(blocks)).errorOption:
|
||||
return failure(blocksErr)
|
||||
let res = await allFinishedFailed(blocks)
|
||||
if res.failure.len > 0:
|
||||
trace "Some blocks failed to fetch", len = res.failure.len
|
||||
return failure("Some blocks failed to fetch (" & $res.failure.len & " )")
|
||||
|
||||
if not onBatch.isNil and
|
||||
batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption:
|
||||
@ -213,6 +219,30 @@ proc fetchBatched*(
|
||||
let iter = Iter[int].new(0 ..< manifest.blocksCount)
|
||||
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal)
|
||||
|
||||
proc fetchDatasetAsync*(
|
||||
self: CodexNodeRef, manifest: Manifest, fetchLocal = true
|
||||
): Future[void] {.async: (raises: []).} =
|
||||
## Asynchronously fetch a dataset in the background.
|
||||
## This task will be tracked and cleaned up on node shutdown.
|
||||
##
|
||||
try:
|
||||
if err =? (
|
||||
await self.fetchBatched(
|
||||
manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal
|
||||
)
|
||||
).errorOption:
|
||||
error "Unable to fetch blocks", err = err.msg
|
||||
except CancelledError as exc:
|
||||
trace "Cancelled fetching blocks", exc = exc.msg
|
||||
except CatchableError as exc:
|
||||
error "Error fetching blocks", exc = exc.msg
|
||||
|
||||
proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) =
|
||||
## Start fetching a dataset in the background.
|
||||
## The task will be tracked and cleaned up on node shutdown.
|
||||
##
|
||||
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
|
||||
|
||||
proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} =
|
||||
## Streams the contents of a single block.
|
||||
##
|
||||
@ -223,36 +253,27 @@ proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async
|
||||
without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
|
||||
return failure(err)
|
||||
|
||||
proc streamOneBlock(): Future[void] {.async.} =
|
||||
proc streamOneBlock(): Future[void] {.async: (raises: []).} =
|
||||
try:
|
||||
defer:
|
||||
await stream.pushEof()
|
||||
await stream.pushData(blk.data)
|
||||
except CatchableError as exc:
|
||||
trace "Unable to send block", cid, exc = exc.msg
|
||||
discard
|
||||
finally:
|
||||
await stream.pushEof()
|
||||
|
||||
self.trackedFutures.track(streamOneBlock())
|
||||
LPStream(stream).success
|
||||
|
||||
proc streamEntireDataset(
|
||||
self: CodexNodeRef,
|
||||
manifest: Manifest,
|
||||
manifestCid: Cid,
|
||||
prefetchBatch = DefaultFetchBatch,
|
||||
self: CodexNodeRef, manifest: Manifest, manifestCid: Cid
|
||||
): Future[?!LPStream] {.async.} =
|
||||
## Streams the contents of the entire dataset described by the manifest.
|
||||
## Background jobs (erasure decoding and prefetching) will be cancelled when
|
||||
## the stream is closed.
|
||||
##
|
||||
trace "Retrieving blocks from manifest", manifestCid
|
||||
|
||||
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
|
||||
var jobs: seq[Future[void]]
|
||||
|
||||
if manifest.protected:
|
||||
# Retrieve, decode and save all EC groups to the local store
|
||||
proc erasureJob(): Future[void] {.async.} =
|
||||
proc erasureJob(): Future[void] {.async: (raises: []).} =
|
||||
try:
|
||||
# Spawn an erasure decoding job
|
||||
let erasure = Erasure.new(
|
||||
@ -260,36 +281,17 @@ proc streamEntireDataset(
|
||||
)
|
||||
without _ =? (await erasure.decode(manifest)), error:
|
||||
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
|
||||
except CancelledError:
|
||||
trace "Erasure job cancelled", manifestCid
|
||||
except CatchableError as exc:
|
||||
trace "Error erasure decoding manifest", manifestCid, exc = exc.msg
|
||||
|
||||
jobs.add(erasureJob())
|
||||
self.trackedFutures.track(erasureJob())
|
||||
|
||||
proc prefetch(): Future[void] {.async.} =
|
||||
try:
|
||||
if err =?
|
||||
(await self.fetchBatched(manifest, prefetchBatch, fetchLocal = false)).errorOption:
|
||||
error "Unable to fetch blocks", err = err.msg
|
||||
except CancelledError:
|
||||
trace "Prefetch job cancelled"
|
||||
except CatchableError as exc:
|
||||
error "Error fetching blocks", exc = exc.msg
|
||||
|
||||
jobs.add(prefetch())
|
||||
|
||||
# Monitor stream completion and cancel background jobs when done
|
||||
proc monitorStream() {.async.} =
|
||||
try:
|
||||
await stream.join()
|
||||
finally:
|
||||
await allFutures(jobs.mapIt(it.cancelAndWait))
|
||||
|
||||
self.trackedFutures.track(monitorStream())
|
||||
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
|
||||
# prefetch task should not fetch from local store
|
||||
|
||||
# Retrieve all blocks of the dataset sequentially from the local store or network
|
||||
trace "Creating store stream for manifest", manifestCid
|
||||
stream.success
|
||||
LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success
|
||||
|
||||
proc retrieve*(
|
||||
self: CodexNodeRef, cid: Cid, local: bool = true
|
||||
@ -591,7 +593,11 @@ proc requestStorage*(
|
||||
success purchase.id
|
||||
|
||||
proc onStore(
|
||||
self: CodexNodeRef, request: StorageRequest, slotIdx: uint64, blocksCb: BlocksCb
|
||||
self: CodexNodeRef,
|
||||
request: StorageRequest,
|
||||
slotIdx: uint64,
|
||||
blocksCb: BlocksCb,
|
||||
isRepairing: bool = false,
|
||||
): Future[?!void] {.async.} =
|
||||
## store data in local storage
|
||||
##
|
||||
@ -604,6 +610,10 @@ proc onStore(
|
||||
|
||||
trace "Received a request to store a slot"
|
||||
|
||||
# TODO: Use isRepairing to manage the slot download.
|
||||
# If isRepairing is true, the slot has to be repaired before
|
||||
# being downloaded.
|
||||
|
||||
without manifest =? (await self.fetchManifest(cid)), err:
|
||||
trace "Unable to fetch manifest for cid", cid, err = err.msg
|
||||
return failure(err)
|
||||
@ -624,8 +634,11 @@ proc onStore(
|
||||
|
||||
let ensureExpiryFutures =
|
||||
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970))
|
||||
if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption:
|
||||
return failure(updateExpiryErr)
|
||||
|
||||
let res = await allFinishedFailed(ensureExpiryFutures)
|
||||
if res.failure.len > 0:
|
||||
trace "Some blocks failed to update expiry", len = res.failure.len
|
||||
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
|
||||
|
||||
if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
|
||||
trace "Unable to process blocks", err = err.msg
|
||||
@ -745,9 +758,12 @@ proc start*(self: CodexNodeRef) {.async.} =
|
||||
|
||||
if hostContracts =? self.contracts.host:
|
||||
hostContracts.sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc
|
||||
request: StorageRequest,
|
||||
slot: uint64,
|
||||
onBatch: BatchProc,
|
||||
isRepairing: bool = false,
|
||||
): Future[?!void] =
|
||||
self.onStore(request, slot, onBatch)
|
||||
self.onStore(request, slot, onBatch, isRepairing)
|
||||
|
||||
hostContracts.sales.onExpiryUpdate = proc(
|
||||
rootCid: Cid, expiry: SecondsSince1970
|
||||
|
||||
@ -65,9 +65,15 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} =

return %RestContentList.init(content)

proc isPending(resp: HttpResponseRef): bool =
## Checks that an HttpResponseRef object is still pending; i.e.,
## that no body has yet been sent. This helps us guard against calling
## sendBody(resp: HttpResponseRef, ...) twice, which is illegal.
return resp.getResponseState() == HttpResponseState.Empty

proc retrieveCid(
node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef
): Future[RestApiResponse] {.async.} =
): Future[void] {.async: (raises: [CancelledError, HttpWriteError]).} =
## Download a file from the node in a streaming
## manner
##
@ -79,16 +85,21 @@ proc retrieveCid(
without stream =? (await node.retrieve(cid, local)), error:
if error of BlockNotFoundError:
resp.status = Http404
return await resp.sendBody("")
await resp.sendBody(
"The requested CID could not be retrieved (" & error.msg & ")."
)
return
else:
resp.status = Http500
return await resp.sendBody(error.msg)
await resp.sendBody(error.msg)
return

# It is ok to fetch again the manifest because it will hit the cache
without manifest =? (await node.fetchManifest(cid)), err:
error "Failed to fetch manifest", err = err.msg
resp.status = Http404
return await resp.sendBody(err.msg)
await resp.sendBody(err.msg)
return

if manifest.mimetype.isSome:
resp.setHeader("Content-Type", manifest.mimetype.get())
@ -103,7 +114,14 @@ proc retrieveCid(
else:
resp.setHeader("Content-Disposition", "attachment")

await resp.prepareChunked()
# For erasure-coded datasets, we need to return the _original_ length; i.e.,
# the length of the non-erasure-coded dataset, as that's what we will be
# returning to the client.
let contentLength =
if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize
resp.setHeader("Content-Length", $(contentLength.int))

await resp.prepare(HttpResponseStreamType.Plain)

while not stream.atEof:
var
@ -116,13 +134,16 @@ proc retrieveCid(

bytes += buff.len

await resp.sendChunk(addr buff[0], buff.len)
await resp.send(addr buff[0], buff.len)
await resp.finish()
codex_api_downloads.inc()
except CancelledError as exc:
raise exc
except CatchableError as exc:
warn "Error streaming blocks", exc = exc.msg
resp.status = Http500
return await resp.sendBody("")
if resp.isPending():
await resp.sendBody(exc.msg)
finally:
info "Sent bytes", cid = cid, bytes
if not stream.isNil:
@ -299,15 +320,8 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
error "Failed to fetch manifest", err = err.msg
return RestApiResponse.error(Http404, err.msg, headers = headers)

proc fetchDatasetAsync(): Future[void] {.async.} =
try:
if err =? (await node.fetchBatched(manifest)).errorOption:
error "Unable to fetch dataset", cid = cid.get(), err = err.msg
except CatchableError as exc:
error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg
discard

asyncSpawn fetchDatasetAsync()
# Start fetching the dataset in the background
node.fetchDatasetAsyncTask(manifest)

let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json")
@ -328,6 +342,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.setCorsHeaders("GET", corsOrigin)
resp.setHeader("Access-Control-Headers", "X-Requested-With")

resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition")
await node.retrieveCid(cid.get(), local = false, resp = resp)

router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do(

111
codex/sales.nim
111
codex/sales.nim
@ -157,13 +157,28 @@ proc cleanUp(
# Re-add items back into the queue to prevent small availabilities from
# draining the queue. Seen items will be ordered last.
if reprocessSlot and request =? data.request:
let queue = sales.context.slotQueue
var seenItem = SlotQueueItem.init(
data.requestId, data.slotIndex.uint16, data.ask, request.expiry, seen = true
)
trace "pushing ignored item to queue, marked as seen"
if err =? queue.push(seenItem).errorOption:
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
try:
without collateral =?
await sales.context.market.slotCollateral(data.requestId, data.slotIndex), err:
error "Failed to re-add item back to the slot queue: unable to calculate collateral",
error = err.msg
return

let queue = sales.context.slotQueue
var seenItem = SlotQueueItem.init(
data.requestId,
data.slotIndex.uint16,
data.ask,
request.expiry,
seen = true,
collateral = collateral,
)
trace "pushing ignored item to queue, marked as seen"
if err =? queue.push(seenItem).errorOption:
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
except MarketError as e:
error "Failed to re-add item back to the slot queue.", error = e.msg
return

await sales.remove(agent)

@ -270,7 +285,7 @@ proc load*(sales: Sales) {.async.} =
agent.start(SaleUnknown())
sales.agents.add agent

proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
proc OnAvailabilitySaved(sales: Sales, availability: Availability) {.async.} =
## When availabilities are modified or added, the queue should be unpaused if
## it was paused and any slots in the queue should have their `seen` flag
## cleared.
@ -283,7 +298,7 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =

proc onStorageRequested(
sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64
) =
) {.raises: [].} =
logScope:
topics = "marketplace sales onStorageRequested"
requestId
@ -294,7 +309,14 @@ proc onStorageRequested(

trace "storage requested, adding slots to queue"

without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err:
let market = sales.context.market

without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free),
err:
error "Request failure, unable to calculate collateral", error = err.msg
return

without items =? SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err:
if err of SlotsOutOfRangeError:
warn "Too many slots, cannot add to queue"
else:
@ -324,39 +346,54 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
let market = context.market
let queue = context.slotQueue

if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return
try:
without request =? (await market.getRequest(requestId)), err:
error "unknown request in contract", error = err.msgDetail
return

# first attempt to populate request using existing metadata in queue
without var found =? queue.populateItem(requestId, slotIndex.uint16):
trace "no existing request metadata, getting request info from contract"
# if there's no existing slot for that request, retrieve the request
# from the contract.
try:
without request =? await market.getRequest(requestId):
error "unknown request in contract"
return
# Take the repairing state into consideration to calculate the collateral.
# This is particularly needed because it will affect the priority in the queue
# and we want to give the user the ability to tweak the parameters.
# Adding the repairing state directly in the queue priority calculation
# would not allow this flexibility.
without collateral =?
market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
error "Failed to add freed slot to queue: unable to calculate collateral",
error = err.msg
return

found = SlotQueueItem.init(request, slotIndex.uint16)
except CancelledError:
discard # do not propagate as addSlotToQueue was asyncSpawned
except CatchableError as e:
error "failed to get request from contract and add slots to queue",
error = e.msgDetail
if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return

if err =? queue.push(found).errorOption:
error "failed to push slot items to queue", error = err.msgDetail
without slotQueueItem =?
SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
err:
warn "Too many slots, cannot add to queue", error = err.msgDetail
return

if err =? queue.push(slotQueueItem).errorOption:
if err of SlotQueueItemExistsError:
error "Failed to push item to queue becaue it already exists",
error = err.msgDetail
elif err of QueueNotRunningError:
warn "Failed to push item to queue becaue queue is not running",
error = err.msgDetail
except CatchableError as e:
warn "Failed to add slot to queue", error = e.msg

# We could get rid of this by adding the storage ask in the SlotFreed event,
# so we would not need to call getRequest to get the collateralPerSlot.
let fut = addSlotToQueue()
sales.trackedFutures.track(fut)
asyncSpawn fut

proc subscribeRequested(sales: Sales) {.async.} =
let context = sales.context
let market = context.market

proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: uint64) =
proc onStorageRequested(
requestId: RequestId, ask: StorageAsk, expiry: uint64
) {.raises: [].} =
sales.onStorageRequested(requestId, ask, expiry)

try:
@ -488,16 +525,18 @@ proc startSlotQueue(sales: Sales) =
let slotQueue = sales.context.slotQueue
let reservations = sales.context.reservations

slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
slotQueue.onProcessSlot = proc(
item: SlotQueueItem, done: Future[void]
) {.async: (raises: []).} =
trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
sales.processSlot(item, done)

slotQueue.start()

proc onAvailabilityAdded(availability: Availability) {.async.} =
await sales.onAvailabilityAdded(availability)
proc OnAvailabilitySaved(availability: Availability) {.async.} =
await sales.OnAvailabilitySaved(availability)

reservations.onAvailabilityAdded = onAvailabilityAdded
reservations.OnAvailabilitySaved = OnAvailabilitySaved

proc subscribe(sales: Sales) {.async.} =
await sales.subscribeRequested()

@ -82,11 +82,11 @@ type
availabilityLock: AsyncLock
# Lock for protecting assertions of availability's sizes when searching for matching availability
repo: RepoStore
onAvailabilityAdded: ?OnAvailabilityAdded
OnAvailabilitySaved: ?OnAvailabilitySaved

GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
OnAvailabilityAdded* =
OnAvailabilitySaved* =
proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
StorableIter* = ref object
finished*: bool
@ -189,10 +189,10 @@ logutils.formatIt(LogFormat.textLines, SomeStorableId):
logutils.formatIt(LogFormat.json, SomeStorableId):
it.to0xHexLog

proc `onAvailabilityAdded=`*(
self: Reservations, onAvailabilityAdded: OnAvailabilityAdded
proc `OnAvailabilitySaved=`*(
self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved
) =
self.onAvailabilityAdded = some onAvailabilityAdded
self.OnAvailabilitySaved = some OnAvailabilitySaved

func key*(id: AvailabilityId): ?!Key =
## sales / reservations / <availabilityId>
@ -268,18 +268,18 @@ proc updateAvailability(
trace "Creating new Availability"
let res = await self.updateImpl(obj)
# inform subscribers that Availability has been added
if onAvailabilityAdded =? self.onAvailabilityAdded:
# when chronos v4 is implemented, and OnAvailabilityAdded is annotated
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
# when chronos v4 is implemented, and OnAvailabilitySaved is annotated
# with async:(raises:[]), we can remove this try/catch as we know, with
# certainty, that nothing will be raised
try:
await onAvailabilityAdded(obj)
await OnAvailabilitySaved(obj)
except CancelledError as e:
raise e
except CatchableError as e:
# we don't have any insight into types of exceptions that
# `onAvailabilityAdded` can raise because it is caller-defined
warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
# `OnAvailabilitySaved` can raise because it is caller-defined
warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg
return res
else:
return failure(err)
@ -300,21 +300,23 @@ proc updateAvailability(

let res = await self.updateImpl(obj)

if oldAvailability.freeSize < obj.freeSize: # availability added
if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
oldAvailability.totalCollateral < obj.totalCollateral: # availability updated
# inform subscribers that Availability has been modified (with increased
# size)
if onAvailabilityAdded =? self.onAvailabilityAdded:
# when chronos v4 is implemented, and OnAvailabilityAdded is annotated
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
# when chronos v4 is implemented, and OnAvailabilitySaved is annotated
# with async:(raises:[]), we can remove this try/catch as we know, with
# certainty, that nothing will be raised
try:
await onAvailabilityAdded(obj)
await OnAvailabilitySaved(obj)
except CancelledError as e:
raise e
except CatchableError as e:
# we don't have any insight into types of exceptions that
# `onAvailabilityAdded` can raise because it is caller-defined
warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
# `OnAvailabilitySaved` can raise because it is caller-defined
warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg

return res


@ -103,7 +103,6 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
error "Error while waiting for expiry to lapse", error = e.msgDetail

data.cancelled = onCancelled()
asyncSpawn data.cancelled

method onFulfilled*(
agent: SalesAgent, requestId: RequestId

@ -26,7 +26,7 @@ type

BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
OnStore* = proc(
request: StorageRequest, slot: uint64, blocksCb: BlocksCb
request: StorageRequest, slot: uint64, blocksCb: BlocksCb, isRepairing: bool
): Future[?!void] {.gcsafe, upraises: [].}
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
gcsafe, upraises: []

@ -3,7 +3,6 @@ import std/tables
|
||||
import pkg/chronos
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/upraises
|
||||
import ../errors
|
||||
import ../clock
|
||||
import ../logutils
|
||||
@ -17,8 +16,9 @@ logScope:
|
||||
topics = "marketplace slotqueue"
|
||||
|
||||
type
|
||||
OnProcessSlot* =
|
||||
proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].}
|
||||
OnProcessSlot* = proc(item: SlotQueueItem, done: Future[void]): Future[void] {.
|
||||
gcsafe, async: (raises: [])
|
||||
.}
|
||||
|
||||
# Non-ref obj copies value when assigned, preventing accidental modification
|
||||
# of values which could cause an incorrect order (eg
|
||||
@ -26,7 +26,7 @@ type
|
||||
# but the heap invariant would no longer be honoured. When non-ref, the
|
||||
# compiler can ensure that statement will fail).
|
||||
SlotQueueWorker = object
|
||||
doneProcessing*: Future[void]
|
||||
doneProcessing*: Future[void].Raising([])
|
||||
|
||||
SlotQueueItem* = object
|
||||
requestId: RequestId
|
||||
@ -34,7 +34,7 @@ type
|
||||
slotSize: uint64
|
||||
duration: uint64
|
||||
pricePerBytePerSecond: UInt256
|
||||
collateralPerByte: UInt256
|
||||
collateral: UInt256 # Collateral computed
|
||||
expiry: uint64
|
||||
seen: bool
|
||||
|
||||
@ -76,9 +76,6 @@ proc profitability(item: SlotQueueItem): UInt256 =
|
||||
slotSize: item.slotSize,
|
||||
).pricePerSlot
|
||||
|
||||
proc collateralPerSlot(item: SlotQueueItem): UInt256 =
|
||||
StorageAsk(collateralPerByte: item.collateralPerByte, slotSize: item.slotSize).collateralPerSlot
|
||||
|
||||
proc `<`*(a, b: SlotQueueItem): bool =
|
||||
# for A to have a higher priority than B (in a min queue), A must be less than
|
||||
# B.
|
||||
@ -95,8 +92,8 @@ proc `<`*(a, b: SlotQueueItem): bool =
|
||||
scoreA.addIf(a.profitability > b.profitability, 3)
|
||||
scoreB.addIf(a.profitability < b.profitability, 3)
|
||||
|
||||
scoreA.addIf(a.collateralPerSlot < b.collateralPerSlot, 2)
|
||||
scoreB.addIf(a.collateralPerSlot > b.collateralPerSlot, 2)
|
||||
scoreA.addIf(a.collateral < b.collateral, 2)
|
||||
scoreB.addIf(a.collateral > b.collateral, 2)
|
||||
|
||||
scoreA.addIf(a.expiry > b.expiry, 1)
|
||||
scoreB.addIf(a.expiry < b.expiry, 1)
|
||||
@ -129,7 +126,17 @@ proc new*(
|
||||
# `newAsyncQueue` procedure
|
||||
|
||||
proc init(_: type SlotQueueWorker): SlotQueueWorker =
|
||||
SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing"))
|
||||
let workerFut = Future[void].Raising([]).init(
|
||||
"slotqueue.worker.processing", {FutureFlag.OwnCancelSchedule}
|
||||
)
|
||||
|
||||
workerFut.cancelCallback = proc(data: pointer) {.raises: [].} =
|
||||
# this is equivalent to try: ... except CatchableError: ...
|
||||
if not workerFut.finished:
|
||||
workerFut.complete()
|
||||
trace "Cancelling `SlotQueue` worker processing future"
|
||||
|
||||
SlotQueueWorker(doneProcessing: workerFut)
|
||||
|
||||
proc init*(
|
||||
_: type SlotQueueItem,
|
||||
@ -137,6 +144,7 @@ proc init*(
|
||||
slotIndex: uint16,
|
||||
ask: StorageAsk,
|
||||
expiry: uint64,
|
||||
collateral: UInt256,
|
||||
seen = false,
|
||||
): SlotQueueItem =
|
||||
SlotQueueItem(
|
||||
@ -145,25 +153,32 @@ proc init*(
|
||||
slotSize: ask.slotSize,
|
||||
duration: ask.duration,
|
||||
pricePerBytePerSecond: ask.pricePerBytePerSecond,
|
||||
collateralPerByte: ask.collateralPerByte,
|
||||
collateral: collateral,
|
||||
expiry: expiry,
|
||||
seen: seen,
|
||||
)
|
||||
|
||||
proc init*(
|
||||
_: type SlotQueueItem, request: StorageRequest, slotIndex: uint16
|
||||
_: type SlotQueueItem,
|
||||
request: StorageRequest,
|
||||
slotIndex: uint16,
|
||||
collateral: UInt256,
|
||||
): SlotQueueItem =
|
||||
SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry)
|
||||
SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral)
|
||||
|
||||
proc init*(
|
||||
_: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: uint64
|
||||
): seq[SlotQueueItem] =
|
||||
_: type SlotQueueItem,
|
||||
requestId: RequestId,
|
||||
ask: StorageAsk,
|
||||
expiry: uint64,
|
||||
collateral: UInt256,
|
||||
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
|
||||
if not ask.slots.inRange:
|
||||
raise newException(SlotsOutOfRangeError, "Too many slots")
|
||||
|
||||
var i = 0'u16
|
||||
proc initSlotQueueItem(): SlotQueueItem =
|
||||
let item = SlotQueueItem.init(requestId, i, ask, expiry)
|
||||
let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral)
|
||||
inc i
|
||||
return item
|
||||
|
||||
@ -171,8 +186,10 @@ proc init*(
|
||||
Rng.instance.shuffle(items)
|
||||
return items
|
||||
|
||||
proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] =
|
||||
return SlotQueueItem.init(request.id, request.ask, request.expiry)
|
||||
proc init*(
|
||||
_: type SlotQueueItem, request: StorageRequest, collateral: UInt256
|
||||
): seq[SlotQueueItem] =
|
||||
return SlotQueueItem.init(request.id, request.ask, request.expiry, collateral)
|
||||
|
||||
proc inRange*(val: SomeUnsignedInt): bool =
|
||||
val.uint16 in SlotQueueSize.low .. SlotQueueSize.high
|
||||
@ -234,25 +251,7 @@ proc unpause*(self: SlotQueue) =
|
||||
# set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
|
||||
self.unpaused.fire()
|
||||
|
||||
proc populateItem*(
|
||||
self: SlotQueue, requestId: RequestId, slotIndex: uint16
|
||||
): ?SlotQueueItem =
|
||||
trace "populate item, items in queue", len = self.queue.len
|
||||
for item in self.queue.items:
|
||||
trace "populate item search", itemRequestId = item.requestId, requestId
|
||||
if item.requestId == requestId:
|
||||
return some SlotQueueItem(
|
||||
requestId: requestId,
|
||||
slotIndex: slotIndex,
|
||||
slotSize: item.slotSize,
|
||||
duration: item.duration,
|
||||
pricePerBytePerSecond: item.pricePerBytePerSecond,
|
||||
collateralPerByte: item.collateralPerByte,
|
||||
expiry: item.expiry,
|
||||
)
|
||||
return none SlotQueueItem
|
||||
|
||||
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void =
|
||||
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} =
|
||||
logScope:
|
||||
requestId = item.requestId
|
||||
slotIndex = item.slotIndex
|
||||
@ -430,7 +429,6 @@ proc run(self: SlotQueue) {.async: (raises: []).} =
|
||||
|
||||
let fut = self.dispatch(worker, item)
|
||||
self.trackedFutures.track(fut)
|
||||
asyncSpawn fut
|
||||
|
||||
await sleepAsync(1.millis) # poll
|
||||
except CancelledError:
|
||||
@ -458,7 +456,6 @@ proc start*(self: SlotQueue) =
|
||||
|
||||
let fut = self.run()
|
||||
self.trackedFutures.track(fut)
|
||||
asyncSpawn fut
|
||||
|
||||
proc stop*(self: SlotQueue) {.async.} =
|
||||
if not self.running:
|
||||
|
||||
@ -67,8 +67,11 @@ method run*(
|
||||
return await reservations.release(reservation.id, reservation.availabilityId, bytes)
|
||||
|
||||
try:
|
||||
let slotId = slotId(request.id, data.slotIndex)
|
||||
let isRepairing = (await context.market.slotState(slotId)) == SlotState.Repair
|
||||
|
||||
trace "Starting download"
|
||||
if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption:
|
||||
if err =? (await onStore(request, data.slotIndex, onBlocks, isRepairing)).errorOption:
|
||||
return some State(SaleErrored(error: err, reprocessSlot: false))
|
||||
|
||||
trace "Download complete"
|
||||
|
||||
@ -30,6 +30,7 @@ method run*(
|
||||
): Future[?State] {.async: (raises: []).} =
|
||||
let data = SalesAgent(machine).data
|
||||
let market = SalesAgent(machine).context.market
|
||||
|
||||
without (request =? data.request):
|
||||
raiseAssert "Request not set"
|
||||
|
||||
@ -38,28 +39,20 @@ method run*(
|
||||
slotIndex = data.slotIndex
|
||||
|
||||
try:
|
||||
let slotState = await market.slotState(slotId(data.requestId, data.slotIndex))
|
||||
let requestedCollateral = request.ask.collateralPerSlot
|
||||
var collateral: UInt256
|
||||
|
||||
if slotState == SlotState.Repair:
|
||||
# When repairing the node gets "discount" on the collateral that it needs to
|
||||
let repairRewardPercentage = (await market.repairRewardPercentage).u256
|
||||
collateral =
|
||||
requestedCollateral -
|
||||
((requestedCollateral * repairRewardPercentage)).div(100.u256)
|
||||
else:
|
||||
collateral = requestedCollateral
|
||||
without collateral =? await market.slotCollateral(data.requestId, data.slotIndex),
|
||||
err:
|
||||
error "Failure attempting to fill slot: unable to calculate collateral",
|
||||
error = err.msg
|
||||
return some State(SaleErrored(error: err))
|
||||
|
||||
debug "Filling slot"
|
||||
try:
|
||||
await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
|
||||
except SlotStateMismatchError as e:
|
||||
debug "Slot is already filled, ignoring slot"
|
||||
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
|
||||
except MarketError as e:
|
||||
if e.msg.contains "Slot is not free":
|
||||
debug "Slot is already filled, ignoring slot"
|
||||
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
|
||||
else:
|
||||
return some State(SaleErrored(error: e))
|
||||
return some State(SaleErrored(error: e))
|
||||
# other CatchableErrors are handled "automatically" by the SaleState
|
||||
|
||||
return some State(SaleFilled())
|
||||
|
||||
@ -44,12 +44,11 @@ method run*(
|
||||
try:
|
||||
trace "Reserving slot"
|
||||
await market.reserveSlot(data.requestId, data.slotIndex)
|
||||
except SlotReservationNotAllowedError as e:
|
||||
debug "Slot cannot be reserved, ignoring", error = e.msg
|
||||
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
|
||||
except MarketError as e:
|
||||
if e.msg.contains "SlotReservations_ReservationNotAllowed":
|
||||
debug "Slot cannot be reserved, ignoring", error = e.msg
|
||||
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
|
||||
else:
|
||||
return some State(SaleErrored(error: e))
|
||||
return some State(SaleErrored(error: e))
|
||||
# other CatchableErrors are handled "automatically" by the SaleState
|
||||
|
||||
trace "Slot successfully reserved"
|
||||
|
||||
@ -315,13 +315,15 @@ proc new*[T, H](
|
||||
cellSize = cellSize
|
||||
|
||||
if (manifest.blocksCount mod manifest.numSlots) != 0:
|
||||
trace "Number of blocks must be divisable by number of slots."
|
||||
return failure("Number of blocks must be divisable by number of slots.")
|
||||
const msg = "Number of blocks must be divisible by number of slots."
|
||||
trace msg
|
||||
return failure(msg)
|
||||
|
||||
let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize
|
||||
if (manifest.blockSize mod cellSize) != 0.NBytes:
|
||||
trace "Block size must be divisable by cell size."
|
||||
return failure("Block size must be divisable by cell size.")
|
||||
const msg = "Block size must be divisible by cell size."
|
||||
trace msg
|
||||
return failure(msg)
|
||||
|
||||
let
|
||||
numSlotBlocks = manifest.numSlotBlocks
|
||||
|
||||
@ -38,7 +38,9 @@ type
|
||||
AnyProof* = CircomProof
|
||||
|
||||
AnySampler* = Poseidon2Sampler
|
||||
# add any other generic type here, eg. Poseidon2Sampler | ReinforceConcreteSampler
|
||||
AnyBuilder* = Poseidon2Builder
|
||||
# add any other generic type here, eg. Poseidon2Builder | ReinforceConcreteBuilder
|
||||
|
||||
AnyProofInputs* = ProofInputs[Poseidon2Hash]
|
||||
Prover* = ref object of RootObj
|
||||
|
||||
@ -57,6 +57,8 @@ template withExceptions(body: untyped) =
|
||||
raise newLPStreamEOFError()
|
||||
except AsyncStreamError as exc:
|
||||
raise newException(LPStreamError, exc.msg)
|
||||
except CatchableError as exc:
|
||||
raise newException(Defect, "Unexpected error in AsyncStreamWrapper", exc)
|
||||
|
||||
method readOnce*(
|
||||
self: AsyncStreamWrapper, pbytes: pointer, nbytes: int
|
||||
@ -74,11 +76,13 @@ method readOnce*(
|
||||
|
||||
proc completeWrite(
|
||||
self: AsyncStreamWrapper, fut: Future[void], msgLen: int
|
||||
): Future[void] {.async.} =
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
withExceptions:
|
||||
await fut
|
||||
|
||||
method write*(self: AsyncStreamWrapper, msg: seq[byte]): Future[void] =
|
||||
method write*(
|
||||
self: AsyncStreamWrapper, msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
|
||||
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this
|
||||
# drives up memory usage
|
||||
|
||||
|
||||
@ -67,13 +67,9 @@ method atEof*(self: StoreStream): bool =
|
||||
self.offset >= self.size
|
||||
|
||||
type LPStreamReadError* = object of LPStreamError
|
||||
par*: ref CatchableError
|
||||
|
||||
proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError =
|
||||
var w = newException(LPStreamReadError, "Read stream failed")
|
||||
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
|
||||
w.par = p
|
||||
result = w
|
||||
newException(LPStreamReadError, "Read stream failed", p)
|
||||
|
||||
method readOnce*(
|
||||
self: StoreStream, pbytes: pointer, nbytes: int
|
||||
|
||||
@ -23,3 +23,16 @@ proc freeDoubleArray*(
|
||||
# Free outer array
|
||||
if not arr.isNil:
|
||||
deallocShared(arr)
|
||||
|
||||
proc makeUncheckedArray*(
|
||||
data: ref seq[seq[byte]]
|
||||
): ptr UncheckedArray[ptr UncheckedArray[byte]] =
|
||||
result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](alloc0(
|
||||
sizeof(ptr UncheckedArray[byte]) * data[].len
|
||||
))
|
||||
|
||||
for i, blk in data[]:
|
||||
if blk.len > 0:
|
||||
result[i] = cast[ptr UncheckedArray[byte]](addr blk[0])
|
||||
else:
|
||||
result[i] = nil
|
||||
|
||||
@ -74,7 +74,6 @@ proc scheduler(machine: Machine) {.async: (raises: []).} =
|
||||
debug "enter state", state = fromState & " => " & $machine.state
|
||||
running = machine.run(machine.state)
|
||||
machine.trackedFutures.track(running)
|
||||
asyncSpawn running
|
||||
except CancelledError:
|
||||
break # do not propagate bc it is asyncSpawned
|
||||
|
||||
@ -88,7 +87,6 @@ proc start*(machine: Machine, initialState: State) =
|
||||
machine.started = true
|
||||
let fut = machine.scheduler()
|
||||
machine.trackedFutures.track(fut)
|
||||
asyncSpawn fut
|
||||
machine.schedule(Event.transition(machine.state, initialState))
|
||||
|
||||
proc stop*(machine: Machine) {.async.} =
|
||||
|
||||
@ -50,7 +50,6 @@ method start*(
|
||||
timer.callback = callback
|
||||
timer.interval = interval
|
||||
timer.loopFuture = timerLoop(timer)
|
||||
asyncSpawn timer.loopFuture
|
||||
|
||||
method stop*(timer: Timer) {.async, base.} =
|
||||
if timer.loopFuture != nil and not timer.loopFuture.finished:
|
||||
|
||||
@ -5,9 +5,11 @@ import ../logutils
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
type TrackedFutures* = ref object
|
||||
futures: Table[uint, FutureBase]
|
||||
cancelling: bool
|
||||
type
|
||||
TrackedFuture = Future[void].Raising([])
|
||||
TrackedFutures* = ref object
|
||||
futures: Table[uint, TrackedFuture]
|
||||
cancelling: bool
|
||||
|
||||
logScope:
|
||||
topics = "trackable futures"
|
||||
@ -15,15 +17,18 @@ logScope:
|
||||
proc len*(self: TrackedFutures): int =
|
||||
self.futures.len
|
||||
|
||||
proc removeFuture(self: TrackedFutures, future: FutureBase) =
|
||||
proc removeFuture(self: TrackedFutures, future: TrackedFuture) =
|
||||
if not self.cancelling and not future.isNil:
|
||||
self.futures.del(future.id)
|
||||
|
||||
proc track*[T](self: TrackedFutures, fut: Future[T]) =
|
||||
proc track*(self: TrackedFutures, fut: TrackedFuture) =
|
||||
if self.cancelling:
|
||||
return
|
||||
|
||||
self.futures[fut.id] = FutureBase(fut)
|
||||
if fut.finished:
|
||||
return
|
||||
|
||||
self.futures[fut.id] = fut
|
||||
|
||||
proc cb(udata: pointer) =
|
||||
self.removeFuture(fut)
|
||||
@ -33,13 +38,8 @@ proc track*[T](self: TrackedFutures, fut: Future[T]) =
|
||||
proc cancelTracked*(self: TrackedFutures) {.async: (raises: []).} =
|
||||
self.cancelling = true
|
||||
|
||||
trace "cancelling tracked futures"
|
||||
|
||||
var cancellations: seq[FutureBase]
|
||||
for future in self.futures.values:
|
||||
if not future.isNil and not future.finished:
|
||||
cancellations.add future.cancelAndWait()
|
||||
|
||||
trace "cancelling tracked futures", len = self.futures.len
|
||||
let cancellations = self.futures.values.toSeq.mapIt(it.cancelAndWait())
|
||||
await noCancel allFutures cancellations
|
||||
|
||||
self.futures.clear()
|
||||
|
||||
@ -142,7 +142,6 @@ proc start*(validation: Validation) {.async.} =
|
||||
await validation.subscribeSlotFilled()
|
||||
await validation.restoreHistoricalState()
|
||||
validation.running = validation.run()
|
||||
asyncSpawn validation.running
|
||||
|
||||
proc stop*(validation: Validation) {.async.} =
|
||||
if not validation.running.isNil and not validation.running.finished:
|
||||
|
||||
@ -56,7 +56,7 @@ in pkgs.gcc13Stdenv.mkDerivation rec {
|
||||
fakeCargo
|
||||
];
|
||||
|
||||
# Disable CPU optmizations that make binary not portable.
|
||||
# Disable CPU optimizations that make binary not portable.
|
||||
NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}";
|
||||
# Avoid Nim cache permission errors.
|
||||
XDG_CACHE_HOME = "/tmp";
|
||||
|
||||
@ -1,3 +1,3 @@
|
||||
import pkg/asynctest/chronos/unittest
|
||||
import pkg/asynctest/chronos/unittest2
|
||||
|
||||
export unittest
|
||||
export unittest2
|
||||
|
||||
@ -84,12 +84,12 @@ asyncchecksuite "Block Advertising and Discovery":
|
||||
|
||||
blockDiscovery.publishBlockProvideHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[void] {.async, gcsafe.} =
|
||||
): Future[void] {.async: (raises: [CancelledError]).} =
|
||||
return
|
||||
|
||||
blockDiscovery.findBlockProvidersHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[seq[SignedPeerRecord]] {.async.} =
|
||||
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
|
||||
await engine.resolveBlocks(blocks.filterIt(it.cid == cid))
|
||||
|
||||
await allFuturesThrowing(allFinished(pendingBlocks))
|
||||
@ -97,17 +97,17 @@ asyncchecksuite "Block Advertising and Discovery":
|
||||
await engine.stop()
|
||||
|
||||
test "Should advertise trees":
|
||||
let
|
||||
cids = @[manifest.treeCid]
|
||||
advertised = initTable.collect:
|
||||
for cid in cids:
|
||||
{cid: newFuture[void]()}
|
||||
let cids = @[manifest.treeCid]
|
||||
var advertised = initTable.collect:
|
||||
for cid in cids:
|
||||
{cid: newFuture[void]()}
|
||||
|
||||
blockDiscovery.publishBlockProvideHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
) {.async.} =
|
||||
if cid in advertised and not advertised[cid].finished():
|
||||
advertised[cid].complete()
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
advertised.withValue(cid, fut):
|
||||
if not fut[].finished:
|
||||
fut[].complete()
|
||||
|
||||
await engine.start()
|
||||
await allFuturesThrowing(allFinished(toSeq(advertised.values)))
|
||||
@ -118,7 +118,7 @@ asyncchecksuite "Block Advertising and Discovery":
|
||||
|
||||
blockDiscovery.publishBlockProvideHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
) {.async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
check:
|
||||
cid notin blockCids
|
||||
|
||||
@ -138,7 +138,7 @@ asyncchecksuite "Block Advertising and Discovery":
|
||||
|
||||
blockDiscovery.findBlockProvidersHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[seq[SignedPeerRecord]] =
|
||||
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
|
||||
check false
|
||||
|
||||
await engine.start()
|
||||
@ -221,17 +221,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
|
||||
|
||||
MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[void] {.async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
advertised[cid] = switch[1].peerInfo.signedPeerRecord
|
||||
|
||||
MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[void] {.async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
advertised[cid] = switch[2].peerInfo.signedPeerRecord
|
||||
|
||||
MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[void] {.async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
advertised[cid] = switch[3].peerInfo.signedPeerRecord
|
||||
|
||||
discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid)
|
||||
@ -266,23 +266,21 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
|
||||
|
||||
MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[seq[SignedPeerRecord]] {.async.} =
|
||||
if cid in advertised:
|
||||
result.add(advertised[cid])
|
||||
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
|
||||
advertised.withValue(cid, val):
|
||||
result.add(val[])
|
||||
|
||||
let futs = collect(newSeq):
|
||||
for m in mBlocks[0 .. 2]:
|
||||
blockexc[0].engine.requestBlock(m.cid)
|
||||
|
||||
await allFuturesThrowing(
|
||||
switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start())
|
||||
)
|
||||
.wait(10.seconds)
|
||||
await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds)
|
||||
await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds)
|
||||
|
||||
await allFutures(futs).wait(10.seconds)
|
||||
|
||||
await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop()))
|
||||
.wait(10.seconds)
|
||||
await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds)
|
||||
await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds)
|
||||
|
||||
test "E2E - Should advertise and discover blocks with peers already connected":
|
||||
# Distribute the blocks amongst 1..3
|
||||
@ -292,17 +290,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
|
||||
|
||||
MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[void] {.async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
advertised[cid] = switch[1].peerInfo.signedPeerRecord
|
||||
|
||||
MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[void] {.async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
advertised[cid] = switch[2].peerInfo.signedPeerRecord
|
||||
|
||||
MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[void] {.async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
advertised[cid] = switch[3].peerInfo.signedPeerRecord
|
||||
|
||||
discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid)
|
||||
@ -337,18 +335,16 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
|
||||
|
||||
MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[seq[SignedPeerRecord]] {.async.} =
|
||||
if cid in advertised:
|
||||
return @[advertised[cid]]
|
||||
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
|
||||
advertised.withValue(cid, val):
|
||||
return @[val[]]
|
||||
|
||||
let futs = mBlocks[0 .. 2].mapIt(blockexc[0].engine.requestBlock(it.cid))
|
||||
|
||||
await allFuturesThrowing(
|
||||
switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start())
|
||||
)
|
||||
.wait(10.seconds)
|
||||
await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds)
|
||||
await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds)
|
||||
|
||||
await allFutures(futs).wait(10.seconds)
|
||||
|
||||
await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop()))
|
||||
.wait(10.seconds)
|
||||
await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds)
|
||||
await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds)
|
||||
|
||||
@ -68,7 +68,7 @@ asyncchecksuite "Test Discovery Engine":
|
||||
|
||||
blockDiscovery.findBlockProvidersHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[seq[SignedPeerRecord]] {.async, gcsafe.} =
|
||||
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
|
||||
pendingBlocks.resolve(
|
||||
blocks.filterIt(it.cid == cid).mapIt(
|
||||
BlockDelivery(blk: it, address: it.address)
|
||||
@ -94,7 +94,7 @@ asyncchecksuite "Test Discovery Engine":
|
||||
|
||||
blockDiscovery.findBlockProvidersHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[seq[SignedPeerRecord]] {.async, gcsafe.} =
|
||||
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
|
||||
check cid == blocks[0].cid
|
||||
if not want.finished:
|
||||
want.complete()
|
||||
@ -122,7 +122,7 @@ asyncchecksuite "Test Discovery Engine":
|
||||
var pendingCids = newSeq[Cid]()
|
||||
blockDiscovery.findBlockProvidersHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[seq[SignedPeerRecord]] {.async, gcsafe.} =
|
||||
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
|
||||
check cid in pendingCids
|
||||
pendingCids.keepItIf(it != cid)
|
||||
check peerStore.len < minPeers
|
||||
@ -159,12 +159,12 @@ asyncchecksuite "Test Discovery Engine":
|
||||
discoveryLoopSleep = 100.millis,
|
||||
concurrentDiscReqs = 2,
|
||||
)
|
||||
reqs = newFuture[void]()
|
||||
reqs = Future[void].Raising([CancelledError]).init()
|
||||
count = 0
|
||||
|
||||
blockDiscovery.findBlockProvidersHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
): Future[seq[SignedPeerRecord]] {.gcsafe, async.} =
|
||||
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
|
||||
check cid == blocks[0].cid
|
||||
if count > 0:
|
||||
check false
|
||||
|
||||
@ -34,7 +34,7 @@ asyncchecksuite "Advertiser":
|
||||
advertised = newSeq[Cid]()
|
||||
blockDiscovery.publishBlockProvideHandler = proc(
|
||||
d: MockDiscovery, cid: Cid
|
||||
) {.async, gcsafe.} =
|
||||
) {.async: (raises: [CancelledError]), gcsafe.} =
|
||||
advertised.add(cid)
|
||||
|
||||
advertiser = Advertiser.new(localStore, blockDiscovery)
|
||||
|
||||
@ -22,7 +22,7 @@ import ../../examples
|
||||
|
||||
const NopSendWantCancellationsProc = proc(
|
||||
id: PeerId, addresses: seq[BlockAddress]
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
discard
|
||||
|
||||
asyncchecksuite "NetworkStore engine basic":
|
||||
@ -66,20 +66,17 @@ asyncchecksuite "NetworkStore engine basic":
|
||||
wantType: WantType = WantType.WantHave,
|
||||
full: bool = false,
|
||||
sendDontHave: bool = false,
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted
|
||||
done.complete()
|
||||
|
||||
let
|
||||
network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
|
||||
|
||||
localStore = CacheStore.new(blocks.mapIt(it))
|
||||
discovery = DiscoveryEngine.new(
|
||||
localStore, peerStore, network, blockDiscovery, pendingBlocks
|
||||
)
|
||||
|
||||
advertiser = Advertiser.new(localStore, blockDiscovery)
|
||||
|
||||
engine = BlockExcEngine.new(
|
||||
localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks
|
||||
)
|
||||
@ -93,7 +90,9 @@ asyncchecksuite "NetworkStore engine basic":
|
||||
test "Should send account to new peers":
|
||||
let pricing = Pricing.example
|
||||
|
||||
proc sendAccount(peer: PeerId, account: Account) {.gcsafe, async.} =
|
||||
proc sendAccount(
|
||||
peer: PeerId, account: Account
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
check account.address == pricing.address
|
||||
done.complete()
|
||||
|
||||
@ -186,7 +185,9 @@ asyncchecksuite "NetworkStore engine handlers":
|
||||
done = newFuture[void]()
|
||||
wantList = makeWantList(blocks.mapIt(it.cid))
|
||||
|
||||
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
|
||||
proc sendPresence(
|
||||
peerId: PeerId, presence: seq[BlockPresence]
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
check presence.mapIt(it.address) == wantList.entries.mapIt(it.address)
|
||||
done.complete()
|
||||
|
||||
@ -203,7 +204,9 @@ asyncchecksuite "NetworkStore engine handlers":
|
||||
done = newFuture[void]()
|
||||
wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true)
|
||||
|
||||
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
|
||||
proc sendPresence(
|
||||
peerId: PeerId, presence: seq[BlockPresence]
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
check presence.mapIt(it.address) == wantList.entries.mapIt(it.address)
|
||||
for p in presence:
|
||||
check:
|
||||
@ -222,7 +225,9 @@ asyncchecksuite "NetworkStore engine handlers":
|
||||
done = newFuture[void]()
|
||||
wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true)
|
||||
|
||||
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
|
||||
proc sendPresence(
|
||||
peerId: PeerId, presence: seq[BlockPresence]
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
for p in presence:
|
||||
if p.address.cidOrTreeCid != blocks[0].cid and
|
||||
p.address.cidOrTreeCid != blocks[1].cid:
|
||||
@ -266,19 +271,21 @@ asyncchecksuite "NetworkStore engine handlers":
|
||||
|
||||
peerContext.account = account.some
|
||||
peerContext.blocks = blocks.mapIt(
|
||||
(it.address, Presence(address: it.address, price: rand(uint16).u256))
|
||||
(it.address, Presence(address: it.address, price: rand(uint16).u256, have: true))
|
||||
).toTable
|
||||
|
||||
engine.network = BlockExcNetwork(
|
||||
request: BlockExcRequest(
|
||||
sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} =
|
||||
sendPayment: proc(
|
||||
receiver: PeerId, payment: SignedState
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
let
|
||||
amount = blocks.mapIt(peerContext.blocks[it.address].price).foldl(a + b)
|
||||
|
||||
amount =
|
||||
blocks.mapIt(peerContext.blocks[it.address].catch.get.price).foldl(a + b)
|
||||
balances = !payment.state.outcome.balances(Asset)
|
||||
|
||||
check receiver == peerId
|
||||
check balances[account.address.toDestination] == amount
|
||||
check balances[account.address.toDestination].catch.get == amount
|
||||
done.complete(),
|
||||
|
||||
# Install NOP for want list cancellations so they don't cause a crash
|
||||
@ -286,10 +293,12 @@ asyncchecksuite "NetworkStore engine handlers":
|
||||
)
|
||||
)
|
||||
|
||||
let requestedBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.address))
|
||||
await engine.blocksDeliveryHandler(
|
||||
peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address))
|
||||
)
|
||||
await done.wait(100.millis)
|
||||
await allFuturesThrowing(requestedBlocks).wait(100.millis)
|
||||
|
||||
test "Should handle block presence":
|
||||
var handles:
|
||||
@ -303,7 +312,7 @@ asyncchecksuite "NetworkStore engine handlers":
|
||||
wantType: WantType = WantType.WantHave,
|
||||
full: bool = false,
|
||||
sendDontHave: bool = false,
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
engine.pendingBlocks.resolve(
|
||||
blocks.filterIt(it.address in addresses).mapIt(
|
||||
BlockDelivery(blk: it, address: it.address)
|
||||
@ -340,9 +349,9 @@ asyncchecksuite "NetworkStore engine handlers":
|
||||
|
||||
proc sendWantCancellations(
|
||||
id: PeerId, addresses: seq[BlockAddress]
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
for address in addresses:
|
||||
cancellations[address].complete()
|
||||
cancellations[address].catch.expect("address should exist").complete()
|
||||
|
||||
engine.network = BlockExcNetwork(
|
||||
request: BlockExcRequest(sendWantCancellations: sendWantCancellations)
|
||||
@ -416,7 +425,7 @@ asyncchecksuite "Block Download":
|
||||
wantType: WantType = WantType.WantHave,
|
||||
full: bool = false,
|
||||
sendDontHave: bool = false,
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
check wantType == WantHave
|
||||
check not engine.pendingBlocks.isInFlight(address)
|
||||
check engine.pendingBlocks.retries(address) == retries
|
||||
@ -433,7 +442,7 @@ asyncchecksuite "Block Download":
|
||||
discard (await pending).tryGet()
|
||||
|
||||
test "Should retry block request":
|
||||
let
|
||||
var
|
||||
address = BlockAddress.init(blocks[0].cid)
|
||||
steps = newAsyncEvent()
|
||||
|
||||
@ -445,7 +454,7 @@ asyncchecksuite "Block Download":
|
||||
wantType: WantType = WantType.WantHave,
|
||||
full: bool = false,
|
||||
sendDontHave: bool = false,
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
case wantType
|
||||
of WantHave:
|
||||
check engine.pendingBlocks.isInFlight(address) == false
|
||||
@ -467,7 +476,7 @@ asyncchecksuite "Block Download":
|
||||
let pending = engine.requestBlock(address)
|
||||
await steps.wait()
|
||||
|
||||
# add blocks presence
|
||||
# add blocks precense
|
||||
peerCtx.blocks = blocks.mapIt(
|
||||
(it.address, Presence(address: it.address, have: true, price: UInt256.example))
|
||||
).toTable
|
||||
@ -493,7 +502,7 @@ asyncchecksuite "Block Download":
|
||||
wantType: WantType = WantType.WantHave,
|
||||
full: bool = false,
|
||||
sendDontHave: bool = false,
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
done.complete()
|
||||
|
||||
engine.pendingBlocks.blockRetries = 10
|
||||
@ -573,7 +582,7 @@ asyncchecksuite "Task Handler":
|
||||
test "Should send want-blocks in priority order":
|
||||
proc sendBlocksDelivery(
|
||||
id: PeerId, blocksDelivery: seq[BlockDelivery]
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
check blocksDelivery.len == 2
|
||||
check:
|
||||
blocksDelivery[1].address == blocks[0].address
|
||||
@ -610,7 +619,7 @@ asyncchecksuite "Task Handler":
|
||||
test "Should set in-flight for outgoing blocks":
|
||||
proc sendBlocksDelivery(
|
||||
id: PeerId, blocksDelivery: seq[BlockDelivery]
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
check peersCtx[0].peerWants[0].inFlight
|
||||
|
||||
for blk in blocks:
|
||||
@ -649,7 +658,9 @@ asyncchecksuite "Task Handler":
|
||||
let missing = @[Block.new("missing".toBytes).tryGet()]
|
||||
let price = (!engine.pricing).price
|
||||
|
||||
proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
|
||||
proc sendPresence(
|
||||
id: PeerId, presence: seq[BlockPresence]
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
check presence.mapIt(!Presence.init(it)) ==
|
||||
@[
|
||||
Presence(address: present[0].address, have: true, price: price),
|
||||
|
||||
@ -1,10 +1,10 @@
|
||||
import std/unittest
|
||||
import pkg/unittest2
|
||||
|
||||
import pkg/codex/stores
|
||||
import ../../examples
|
||||
import ../../helpers
|
||||
|
||||
checksuite "engine payments":
|
||||
suite "Engine payments":
|
||||
let address = EthAddress.example
|
||||
let amount = 42.u256
|
||||
|
||||
|
||||
@ -6,7 +6,7 @@ import ../../../asynctest
|
||||
import ../../examples
|
||||
import ../../helpers
|
||||
|
||||
checksuite "account protobuf messages":
|
||||
suite "account protobuf messages":
|
||||
let account = Account(address: EthAddress.example)
|
||||
let message = AccountMessage.init(account)
|
||||
|
||||
@ -21,7 +21,7 @@ checksuite "account protobuf messages":
|
||||
incorrect.address.del(0)
|
||||
check Account.init(incorrect).isNone
|
||||
|
||||
checksuite "channel update messages":
|
||||
suite "channel update messages":
|
||||
let state = SignedState.example
|
||||
let update = StateChannelUpdate.init(state)
|
||||
|
||||
|
||||
@ -6,7 +6,7 @@ import ../../../asynctest
|
||||
import ../../examples
|
||||
import ../../helpers
|
||||
|
||||
checksuite "block presence protobuf messages":
|
||||
suite "block presence protobuf messages":
|
||||
let
|
||||
cid = Cid.example
|
||||
address = BlockAddress(leaf: false, cid: cid)
|
||||
|
||||
@ -26,7 +26,7 @@ asyncchecksuite "Network - Handlers":
|
||||
blocks: seq[bt.Block]
|
||||
done: Future[void]
|
||||
|
||||
proc getConn(): Future[Connection] {.async.} =
|
||||
proc getConn(): Future[Connection] {.async: (raises: [CancelledError]).} =
|
||||
return Connection(buffer)
|
||||
|
||||
setup:
|
||||
@ -45,7 +45,7 @@ asyncchecksuite "Network - Handlers":
|
||||
discard await networkPeer.connect()
|
||||
|
||||
test "Want List handler":
|
||||
proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} =
|
||||
proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
|
||||
# check that we got the correct amount of entries
|
||||
check wantList.entries.len == 4
|
||||
|
||||
@ -72,7 +72,7 @@ asyncchecksuite "Network - Handlers":
|
||||
test "Blocks Handler":
|
||||
proc blocksDeliveryHandler(
|
||||
peer: PeerId, blocksDelivery: seq[BlockDelivery]
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: []).} =
|
||||
check blocks == blocksDelivery.mapIt(it.blk)
|
||||
done.complete()
|
||||
|
||||
@ -85,7 +85,9 @@ asyncchecksuite "Network - Handlers":
|
||||
await done.wait(500.millis)
|
||||
|
||||
test "Presence Handler":
|
||||
proc presenceHandler(peer: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
|
||||
proc presenceHandler(
|
||||
peer: PeerId, presence: seq[BlockPresence]
|
||||
) {.async: (raises: []).} =
|
||||
for b in blocks:
|
||||
check:
|
||||
b.address in presence
|
||||
@ -105,7 +107,7 @@ asyncchecksuite "Network - Handlers":
|
||||
test "Handles account messages":
|
||||
let account = Account(address: EthAddress.example)
|
||||
|
||||
proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} =
|
||||
proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} =
|
||||
check received == account
|
||||
done.complete()
|
||||
|
||||
@ -119,7 +121,7 @@ asyncchecksuite "Network - Handlers":
|
||||
test "Handles payment messages":
|
||||
let payment = SignedState.example
|
||||
|
||||
proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} =
|
||||
proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} =
|
||||
check received == payment
|
||||
done.complete()
|
||||
|
||||
@ -165,7 +167,7 @@ asyncchecksuite "Network - Senders":
|
||||
await allFuturesThrowing(switch1.stop(), switch2.stop())
|
||||
|
||||
test "Send want list":
|
||||
proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} =
|
||||
proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
|
||||
# check that we got the correct amount of entries
|
||||
check wantList.entries.len == 4
|
||||
|
||||
@ -195,7 +197,7 @@ asyncchecksuite "Network - Senders":
|
||||
test "send blocks":
|
||||
proc blocksDeliveryHandler(
|
||||
peer: PeerId, blocksDelivery: seq[BlockDelivery]
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: []).} =
|
||||
check blocks == blocksDelivery.mapIt(it.blk)
|
||||
done.complete()
|
||||
|
||||
@ -207,7 +209,9 @@ asyncchecksuite "Network - Senders":
|
||||
await done.wait(500.millis)
|
||||
|
||||
test "send presence":
|
||||
proc presenceHandler(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async.} =
|
||||
proc presenceHandler(
|
||||
peer: PeerId, precense: seq[BlockPresence]
|
||||
) {.async: (raises: []).} =
|
||||
for b in blocks:
|
||||
check:
|
||||
b.address in precense
|
||||
@ -226,7 +230,7 @@ asyncchecksuite "Network - Senders":
|
||||
test "send account":
|
||||
let account = Account(address: EthAddress.example)
|
||||
|
||||
proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} =
|
||||
proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} =
|
||||
check received == account
|
||||
done.complete()
|
||||
|
||||
@ -238,7 +242,7 @@ asyncchecksuite "Network - Senders":
|
||||
test "send payment":
|
||||
let payment = SignedState.example
|
||||
|
||||
proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} =
|
||||
proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} =
|
||||
check received == payment
|
||||
done.complete()
|
||||
|
||||
@ -276,7 +280,7 @@ asyncchecksuite "Network - Test Limits":
|
||||
let account = Account(address: EthAddress.example)
|
||||
network2.handlers.onAccount = proc(
|
||||
peer: PeerId, received: Account
|
||||
) {.gcsafe, async.} =
|
||||
) {.async: (raises: []).} =
|
||||
check false
|
||||
|
||||
let fut = network1.send(
|
||||
|
||||
@ -1,7 +1,7 @@
import std/sugar
import std/sequtils
import std/unittest

import pkg/unittest2
import pkg/libp2p

import pkg/codex/blockexchange/peers
@ -11,7 +11,7 @@ import pkg/codex/blockexchange/protobuf/presence
import ../helpers
import ../examples

checksuite "Peer Context Store":
suite "Peer Context Store":
var
store: PeerCtxStore
peerCtx: BlockExcPeerCtx
@ -31,7 +31,7 @@ checksuite "Peer Context Store":
test "Should get peer":
check store.get(peerCtx.id) == peerCtx

checksuite "Peer Context Store Peer Selection":
suite "Peer Context Store Peer Selection":
var
store: PeerCtxStore
peerCtxs: seq[BlockExcPeerCtx]

@ -10,7 +10,7 @@ import pkg/codex/blockexchange
import ../helpers
import ../../asynctest

checksuite "Pending Blocks":
suite "Pending Blocks":
test "Should add want handle":
let
pendingBlocks = PendingBlocksManager.new()

@ -21,7 +21,7 @@ proc new*(
var consumed = 0
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.async, gcsafe, raises: [Defect].} =
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
if consumed >= dataset.len:
return 0


@ -14,29 +14,42 @@ import pkg/codex/discovery
import pkg/contractabi/address as ca

type MockDiscovery* = ref object of Discovery
findBlockProvidersHandler*:
proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.gcsafe.}
publishBlockProvideHandler*: proc(d: MockDiscovery, cid: Cid): Future[void] {.gcsafe.}
findHostProvidersHandler*:
proc(d: MockDiscovery, host: ca.Address): Future[seq[SignedPeerRecord]] {.gcsafe.}
publishHostProvideHandler*:
proc(d: MockDiscovery, host: ca.Address): Future[void] {.gcsafe.}
findBlockProvidersHandler*: proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).}

publishBlockProvideHandler*:
proc(d: MockDiscovery, cid: Cid): Future[void] {.async: (raises: [CancelledError]).}

findHostProvidersHandler*: proc(
d: MockDiscovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).}

publishHostProvideHandler*: proc(d: MockDiscovery, host: ca.Address): Future[void] {.
async: (raises: [CancelledError])
.}

proc new*(T: type MockDiscovery): MockDiscovery =
MockDiscovery()

proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
proc findPeer*(
d: Discovery, peerId: PeerId
): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
## mock find a peer - always return none
##
##
return none(PeerRecord)

method find*(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} =
method find*(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
if isNil(d.findBlockProvidersHandler):
return

return await d.findBlockProvidersHandler(d, cid)

method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
method provide*(
d: MockDiscovery, cid: Cid
): Future[void] {.async: (raises: [CancelledError]).} =
if isNil(d.publishBlockProvideHandler):
return

@ -44,13 +57,15 @@ method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} =

method find*(
d: MockDiscovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async.} =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
if isNil(d.findHostProvidersHandler):
return

return await d.findHostProvidersHandler(d, host)

method provide*(d: MockDiscovery, host: ca.Address): Future[void] {.async.} =
method provide*(
d: MockDiscovery, host: ca.Address
): Future[void] {.async: (raises: [CancelledError]).} =
if isNil(d.publishHostProvideHandler):
return


@ -46,7 +46,8 @@ type
subscriptions: Subscriptions
config*: MarketplaceConfig
canReserveSlot*: bool
reserveSlotThrowError*: ?(ref MarketError)
errorOnReserveSlot*: ?(ref MarketError)
errorOnFillSlot*: ?(ref CatchableError)
clock: ?Clock

Fulfillment* = object
@ -138,22 +139,35 @@ proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket =
signer: Address.example, config: config, canReserveSlot: true, clock: clock
)

method loadConfig*(
|
||||
market: MockMarket
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
discard
|
||||
|
||||
method getSigner*(market: MockMarket): Future[Address] {.async.} =
|
||||
return market.signer
|
||||
|
||||
method periodicity*(mock: MockMarket): Future[Periodicity] {.async.} =
|
||||
method periodicity*(
|
||||
mock: MockMarket
|
||||
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
return Periodicity(seconds: mock.config.proofs.period)
|
||||
|
||||
method proofTimeout*(market: MockMarket): Future[uint64] {.async.} =
|
||||
method proofTimeout*(
|
||||
market: MockMarket
|
||||
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
return market.config.proofs.timeout
|
||||
|
||||
method requestDurationLimit*(market: MockMarket): Future[uint64] {.async.} =
|
||||
return market.config.requestDurationLimit
|
||||
|
||||
method proofDowntime*(market: MockMarket): Future[uint8] {.async.} =
|
||||
method proofDowntime*(
|
||||
market: MockMarket
|
||||
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
return market.config.proofs.downtime
|
||||
|
||||
method repairRewardPercentage*(market: MockMarket): Future[uint8] {.async.} =
|
||||
method repairRewardPercentage*(
|
||||
market: MockMarket
|
||||
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
return market.config.collateral.repairRewardPercentage
|
||||
|
||||
method getPointer*(market: MockMarket, slotId: SlotId): Future[uint8] {.async.} =
|
||||
@ -173,7 +187,7 @@ method mySlots*(market: MockMarket): Future[seq[SlotId]] {.async.} =
|
||||
|
||||
method getRequest*(
|
||||
market: MockMarket, id: RequestId
|
||||
): Future[?StorageRequest] {.async.} =
|
||||
): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
|
||||
for request in market.requested:
|
||||
if request.id == id:
|
||||
return some request
|
||||
@ -191,10 +205,16 @@ method requestState*(
|
||||
): Future[?RequestState] {.async.} =
|
||||
return market.requestState .? [requestId]
|
||||
|
||||
method slotState*(market: MockMarket, slotId: SlotId): Future[SlotState] {.async.} =
|
||||
if not market.slotState.hasKey(slotId):
|
||||
method slotState*(
|
||||
market: MockMarket, slotId: SlotId
|
||||
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
if slotId notin market.slotState:
|
||||
return SlotState.Free
|
||||
return market.slotState[slotId]
|
||||
|
||||
try:
|
||||
return market.slotState[slotId]
|
||||
except KeyError as e:
|
||||
raiseAssert "SlotId not found in known slots (MockMarket.slotState)"
|
||||
|
||||
method getRequestEnd*(
|
||||
market: MockMarket, id: RequestId
|
||||
@ -270,6 +290,9 @@ proc fillSlot*(
|
||||
host: Address,
|
||||
collateral = 0.u256,
|
||||
) =
|
||||
if error =? market.errorOnFillSlot:
|
||||
raise error
|
||||
|
||||
let slot = MockSlot(
|
||||
requestId: requestId,
|
||||
slotIndex: slotIndex,
|
||||
@ -351,7 +374,7 @@ method canProofBeMarkedAsMissing*(
|
||||
method reserveSlot*(
|
||||
market: MockMarket, requestId: RequestId, slotIndex: uint64
|
||||
) {.async.} =
|
||||
if error =? market.reserveSlotThrowError:
|
||||
if error =? market.errorOnReserveSlot:
|
||||
raise error
|
||||
|
||||
method canReserveSlot*(
|
||||
@ -362,8 +385,19 @@ method canReserveSlot*(
|
||||
func setCanReserveSlot*(market: MockMarket, canReserveSlot: bool) =
|
||||
market.canReserveSlot = canReserveSlot
|
||||
|
||||
func setReserveSlotThrowError*(market: MockMarket, error: ?(ref MarketError)) =
|
||||
market.reserveSlotThrowError = error
|
||||
func setErrorOnReserveSlot*(market: MockMarket, error: ref MarketError) =
|
||||
market.errorOnReserveSlot =
|
||||
if error.isNil:
|
||||
none (ref MarketError)
|
||||
else:
|
||||
some error
|
||||
|
||||
func setErrorOnFillSlot*(market: MockMarket, error: ref CatchableError) =
|
||||
market.errorOnFillSlot =
|
||||
if error.isNil:
|
||||
none (ref CatchableError)
|
||||
else:
|
||||
some error
|
||||
|
||||
method subscribeRequests*(
|
||||
market: MockMarket, callback: OnRequest
|
||||
@ -534,3 +568,33 @@ method unsubscribe*(subscription: ProofSubmittedSubscription) {.async.} =
|
||||
|
||||
method unsubscribe*(subscription: SlotReservationsFullSubscription) {.async.} =
|
||||
subscription.market.subscriptions.onSlotReservationsFull.keepItIf(it != subscription)
|
||||
|
||||
method slotCollateral*(
|
||||
market: MockMarket, requestId: RequestId, slotIndex: uint64
|
||||
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
|
||||
let slotid = slotId(requestId, slotIndex)
|
||||
|
||||
try:
|
||||
let state = await slotState(market, slotid)
|
||||
|
||||
without request =? await market.getRequest(requestId):
|
||||
return failure newException(
|
||||
MarketError, "Failure calculating the slotCollateral, cannot get the request"
|
||||
)
|
||||
|
||||
return market.slotCollateral(request.ask.collateralPerSlot, state)
|
||||
except MarketError as error:
|
||||
error "Error when trying to calculate the slotCollateral", error = error.msg
|
||||
return failure error
|
||||
|
||||
method slotCollateral*(
|
||||
market: MockMarket, collateralPerSlot: UInt256, slotState: SlotState
|
||||
): ?!UInt256 {.raises: [].} =
|
||||
if slotState == SlotState.Repair:
|
||||
let repairRewardPercentage = market.config.collateral.repairRewardPercentage.u256
|
||||
|
||||
return success (
|
||||
collateralPerSlot - (collateralPerSlot * repairRewardPercentage).div(100.u256)
|
||||
)
|
||||
|
||||
return success collateralPerSlot
|
||||
|
||||
@ -7,7 +7,7 @@ type MockSlotQueueItem* = object
|
||||
slotSize*: uint64
|
||||
duration*: uint64
|
||||
pricePerBytePerSecond*: UInt256
|
||||
collateralPerByte*: UInt256
|
||||
collateral*: UInt256
|
||||
expiry*: uint64
|
||||
seen*: bool
|
||||
|
||||
@ -19,8 +19,8 @@ proc toSlotQueueItem*(item: MockSlotQueueItem): SlotQueueItem =
|
||||
slotSize: item.slotSize,
|
||||
duration: item.duration,
|
||||
pricePerBytePerSecond: item.pricePerBytePerSecond,
|
||||
collateralPerByte: item.collateralPerByte,
|
||||
),
|
||||
expiry = item.expiry,
|
||||
seen = item.seen,
|
||||
collateral = item.collateral,
|
||||
)
|
||||
|
||||
@ -26,7 +26,7 @@ proc new*(
|
||||
var consumed = 0
|
||||
proc reader(
|
||||
data: ChunkBuffer, len: int
|
||||
): Future[int] {.async, gcsafe, raises: [Defect].} =
|
||||
): Future[int] {.async: (raises: [ChunkerError, CancelledError]), gcsafe.} =
|
||||
var alpha = toSeq(byte('A') .. byte('z'))
|
||||
|
||||
if consumed >= size:
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
import std/unittest
|
||||
import pkg/unittest2
|
||||
|
||||
import pkg/codex/merkletree
|
||||
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
import std/unittest
|
||||
import pkg/unittest2
|
||||
|
||||
import pkg/questionable/results
|
||||
import pkg/stew/byteutils
|
||||
@ -18,7 +18,7 @@ const data = [
|
||||
"00000000000000000000000000000009".toBytes, "00000000000000000000000000000010".toBytes,
|
||||
]
|
||||
|
||||
checksuite "merkletree - coders":
|
||||
suite "merkletree - coders":
|
||||
test "encoding and decoding a tree yields the same tree":
|
||||
let
|
||||
tree = CodexTree.init(Sha256HashCodec, data).tryGet()
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
import std/unittest
|
||||
import std/sequtils
|
||||
|
||||
import pkg/unittest2
|
||||
import pkg/questionable/results
|
||||
import pkg/stew/byteutils
|
||||
import pkg/libp2p
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
import std/unittest
|
||||
import std/sequtils
|
||||
import std/random
|
||||
|
||||
import pkg/unittest2
|
||||
import pkg/poseidon2
|
||||
import pkg/poseidon2/sponge
|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
import std/unittest
|
||||
import std/sequtils
|
||||
|
||||
import pkg/unittest2
|
||||
import pkg/poseidon2
|
||||
import pkg/poseidon2/io
|
||||
import pkg/questionable/results
|
||||
|
||||
@ -125,7 +125,7 @@ asyncchecksuite "Test Node - Host contracts":
|
||||
fetchedBytes += blk.data.len.uint
|
||||
return success()
|
||||
|
||||
(await onStore(request, 1.uint64, onBlocks)).tryGet()
|
||||
(await onStore(request, 1.uint64, onBlocks, isRepairing = false)).tryGet()
|
||||
check fetchedBytes == 12 * DefaultBlockSize.uint
|
||||
|
||||
let indexer = verifiable.protectedStrategy.init(
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
import std/unittest
|
||||
import pkg/unittest2
|
||||
import pkg/questionable
|
||||
import pkg/codex/contracts/requests
|
||||
import pkg/codex/sales/states/cancelled
|
||||
@ -8,7 +8,7 @@ import pkg/codex/sales/states/filled
|
||||
import ../../examples
|
||||
import ../../helpers
|
||||
|
||||
checksuite "sales state 'downloading'":
|
||||
suite "sales state 'downloading'":
|
||||
let request = StorageRequest.example
|
||||
let slotIndex = request.ask.slots div 2
|
||||
var state: SaleDownloading
|
||||
|
||||
@ -14,7 +14,7 @@ import ../../helpers/mockmarket
|
||||
import ../../examples
|
||||
import ../../helpers
|
||||
|
||||
checksuite "sales state 'filled'":
|
||||
suite "sales state 'filled'":
|
||||
let request = StorageRequest.example
|
||||
let slotIndex = request.ask.slots div 2
|
||||
|
||||
|
||||
@ -1,18 +1,31 @@
|
||||
import std/unittest
|
||||
import pkg/questionable
|
||||
import pkg/codex/contracts/requests
|
||||
import pkg/codex/sales/states/filling
|
||||
import pkg/codex/sales/states/cancelled
|
||||
import pkg/codex/sales/states/failed
|
||||
import pkg/codex/sales/states/ignored
|
||||
import pkg/codex/sales/states/errored
|
||||
import pkg/codex/sales/salesagent
|
||||
import pkg/codex/sales/salescontext
|
||||
import ../../../asynctest
|
||||
import ../../examples
|
||||
import ../../helpers
|
||||
import ../../helpers/mockmarket
|
||||
import ../../helpers/mockclock
|
||||
|
||||
checksuite "sales state 'filling'":
|
||||
suite "sales state 'filling'":
|
||||
let request = StorageRequest.example
|
||||
let slotIndex = request.ask.slots div 2
|
||||
var state: SaleFilling
|
||||
var market: MockMarket
|
||||
var clock: MockClock
|
||||
var agent: SalesAgent
|
||||
|
||||
setup:
|
||||
clock = MockClock.new()
|
||||
market = MockMarket.new()
|
||||
let context = SalesContext(market: market, clock: clock)
|
||||
agent = newSalesAgent(context, request.id, slotIndex, request.some)
|
||||
state = SaleFilling.new()
|
||||
|
||||
test "switches to cancelled state when request expires":
|
||||
@ -22,3 +35,28 @@ checksuite "sales state 'filling'":
|
||||
test "switches to failed state when request fails":
|
||||
let next = state.onFailed(request)
|
||||
check !next of SaleFailed
|
||||
|
||||
test "run switches to ignored when slot is not free":
|
||||
let error = newException(
|
||||
SlotStateMismatchError, "Failed to fill slot because the slot is not free"
|
||||
)
|
||||
market.setErrorOnFillSlot(error)
|
||||
market.requested.add(request)
|
||||
market.slotState[request.slotId(slotIndex)] = SlotState.Filled
|
||||
|
||||
let next = !(await state.run(agent))
|
||||
check next of SaleIgnored
|
||||
check SaleIgnored(next).reprocessSlot == false
|
||||
check SaleIgnored(next).returnBytes
|
||||
|
||||
test "run switches to errored with other error ":
|
||||
let error = newException(MarketError, "some error")
|
||||
market.setErrorOnFillSlot(error)
|
||||
market.requested.add(request)
|
||||
market.slotState[request.slotId(slotIndex)] = SlotState.Filled
|
||||
|
||||
let next = !(await state.run(agent))
|
||||
check next of SaleErrored
|
||||
|
||||
let errored = SaleErrored(next)
|
||||
check errored.error == error
|
||||
|
||||
@ -54,15 +54,16 @@ asyncchecksuite "sales state 'SlotReserving'":
|
||||
|
||||
test "run switches to errored when slot reservation errors":
|
||||
let error = newException(MarketError, "some error")
|
||||
market.setReserveSlotThrowError(some error)
|
||||
market.setErrorOnReserveSlot(error)
|
||||
let next = !(await state.run(agent))
|
||||
check next of SaleErrored
|
||||
let errored = SaleErrored(next)
|
||||
check errored.error == error
|
||||
|
||||
test "catches reservation not allowed error":
|
||||
let error = newException(MarketError, "SlotReservations_ReservationNotAllowed")
|
||||
market.setReserveSlotThrowError(some error)
|
||||
test "run switches to ignored when reservation is not allowed":
|
||||
let error =
|
||||
newException(SlotReservationNotAllowedError, "Reservation is not allowed")
|
||||
market.setErrorOnReserveSlot(error)
|
||||
let next = !(await state.run(agent))
|
||||
check next of SaleIgnored
|
||||
check SaleIgnored(next).reprocessSlot == false
|
||||
|
||||
@ -14,7 +14,7 @@ import ../../helpers/mockmarket
|
||||
import ../../examples
|
||||
import ../../helpers
|
||||
|
||||
checksuite "sales state 'unknown'":
|
||||
suite "sales state 'unknown'":
|
||||
let request = StorageRequest.example
|
||||
let slotIndex = request.ask.slots div 2
|
||||
let slotId = slotId(request.id, slotIndex)
|
||||
|
||||
@ -283,35 +283,95 @@ asyncchecksuite "Reservations module":
|
||||
check updated.isErr
|
||||
check updated.error of NotExistsError
|
||||
|
||||
test "onAvailabilityAdded called when availability is created":
|
||||
test "OnAvailabilitySaved called when availability is created":
|
||||
var added: Availability
|
||||
reservations.onAvailabilityAdded = proc(a: Availability) {.async.} =
|
||||
reservations.onAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
added = a
|
||||
|
||||
let availability = createAvailability()
|
||||
|
||||
check added == availability
|
||||
|
||||
test "onAvailabilityAdded called when availability size is increased":
|
||||
test "OnAvailabilitySaved called when availability size is increased":
|
||||
var availability = createAvailability()
|
||||
var added: Availability
|
||||
reservations.onAvailabilityAdded = proc(a: Availability) {.async.} =
|
||||
reservations.onAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
added = a
|
||||
availability.freeSize += 1
|
||||
discard await reservations.update(availability)
|
||||
|
||||
check added == availability
|
||||
|
||||
test "onAvailabilityAdded is not called when availability size is decreased":
|
||||
test "OnAvailabilitySaved is not called when availability size is decreased":
|
||||
var availability = createAvailability()
|
||||
var called = false
|
||||
reservations.onAvailabilityAdded = proc(a: Availability) {.async.} =
|
||||
reservations.onAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
called = true
|
||||
availability.freeSize -= 1
|
||||
discard await reservations.update(availability)
|
||||
|
||||
check not called
|
||||
|
||||
test "OnAvailabilitySaved called when availability duration is increased":
|
||||
var availability = createAvailability()
|
||||
var added: Availability
|
||||
reservations.onAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
added = a
|
||||
availability.duration += 1
|
||||
discard await reservations.update(availability)
|
||||
|
||||
check added == availability
|
||||
|
||||
test "OnAvailabilitySaved is not called when availability duration is decreased":
|
||||
var availability = createAvailability()
|
||||
var called = false
|
||||
reservations.onAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
called = true
|
||||
availability.duration -= 1
|
||||
discard await reservations.update(availability)
|
||||
|
||||
check not called
|
||||
|
||||
test "OnAvailabilitySaved called when availability minPricePerBytePerSecond is increased":
|
||||
var availability = createAvailability()
|
||||
var added: Availability
|
||||
reservations.onAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
added = a
|
||||
availability.minPricePerBytePerSecond += 1.u256
|
||||
discard await reservations.update(availability)
|
||||
|
||||
check added == availability
|
||||
|
||||
test "OnAvailabilitySaved is not called when availability minPricePerBytePerSecond is decreased":
|
||||
var availability = createAvailability()
|
||||
var called = false
|
||||
reservations.onAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
called = true
|
||||
availability.minPricePerBytePerSecond -= 1.u256
|
||||
discard await reservations.update(availability)
|
||||
|
||||
check not called
|
||||
|
||||
test "OnAvailabilitySaved called when availability totalCollateral is increased":
|
||||
var availability = createAvailability()
|
||||
var added: Availability
|
||||
reservations.onAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
added = a
|
||||
availability.totalCollateral = availability.totalCollateral + 1.u256
|
||||
discard await reservations.update(availability)
|
||||
|
||||
check added == availability
|
||||
|
||||
test "OnAvailabilitySaved is not called when availability totalCollateral is decreased":
|
||||
var availability = createAvailability()
|
||||
var called = false
|
||||
reservations.onAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
called = true
|
||||
availability.totalCollateral = availability.totalCollateral - 1.u256
|
||||
discard await reservations.update(availability)
|
||||
|
||||
check not called
|
||||
|
||||
test "availabilities can be found":
|
||||
let availability = createAvailability()
|
||||
|
||||
|
||||
@ -62,7 +62,7 @@ asyncchecksuite "Sales - start":
|
||||
sales = Sales.new(market, clock, repo)
|
||||
reservations = sales.context.reservations
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
return success()
|
||||
|
||||
@ -181,7 +181,7 @@ asyncchecksuite "Sales":
|
||||
sales = Sales.new(market, clock, repo)
|
||||
reservations = sales.context.reservations
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
return success()
|
||||
|
||||
@ -229,17 +229,24 @@ asyncchecksuite "Sales":
|
||||
availability = a.get # update id
|
||||
|
||||
proc notProcessed(itemsProcessed: seq[SlotQueueItem], request: StorageRequest): bool =
|
||||
let items = SlotQueueItem.init(request)
|
||||
let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
|
||||
for i in 0 ..< items.len:
|
||||
if itemsProcessed.contains(items[i]):
|
||||
return false
|
||||
return true
|
||||
|
||||
proc addRequestToSaturatedQueue(): Future[StorageRequest] {.async.} =
|
||||
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
await sleepAsync(10.millis)
|
||||
itemsProcessed.add item
|
||||
done.complete()
|
||||
queue.onProcessSlot = proc(
|
||||
item: SlotQueueItem, done: Future[void]
|
||||
) {.async: (raises: []).} =
|
||||
try:
|
||||
await sleepAsync(10.millis)
|
||||
itemsProcessed.add item
|
||||
except CancelledError as exc:
|
||||
checkpoint(exc.msg)
|
||||
finally:
|
||||
if not done.finished:
|
||||
done.complete()
|
||||
|
||||
var request1 = StorageRequest.example
|
||||
request1.ask.collateralPerByte = request.ask.collateralPerByte + 1
|
||||
@ -261,12 +268,15 @@ asyncchecksuite "Sales":
|
||||
waitFor run()
|
||||
|
||||
test "processes all request's slots once StorageRequested emitted":
|
||||
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
queue.onProcessSlot = proc(
|
||||
item: SlotQueueItem, done: Future[void]
|
||||
) {.async: (raises: []).} =
|
||||
itemsProcessed.add item
|
||||
done.complete()
|
||||
if not done.finished:
|
||||
done.complete()
|
||||
createAvailability()
|
||||
await market.requestStorage(request)
|
||||
let items = SlotQueueItem.init(request)
|
||||
let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
|
||||
check eventually items.allIt(itemsProcessed.contains(it))
|
||||
|
||||
test "removes slots from slot queue once RequestCancelled emitted":
|
||||
@ -287,30 +297,42 @@ asyncchecksuite "Sales":
|
||||
test "removes slot index from slot queue once SlotFilled emitted":
|
||||
let request1 = await addRequestToSaturatedQueue()
|
||||
market.emitSlotFilled(request1.id, 1.uint64)
|
||||
let expected = SlotQueueItem.init(request1, 1'u16)
|
||||
let expected =
|
||||
SlotQueueItem.init(request1, 1'u16, collateral = request1.ask.collateralPerSlot)
|
||||
check always (not itemsProcessed.contains(expected))
|
||||
|
||||
test "removes slot index from slot queue once SlotReservationsFull emitted":
|
||||
let request1 = await addRequestToSaturatedQueue()
|
||||
market.emitSlotReservationsFull(request1.id, 1.uint64)
|
||||
let expected = SlotQueueItem.init(request1, 1'u16)
|
||||
let expected =
|
||||
SlotQueueItem.init(request1, 1'u16, collateral = request1.ask.collateralPerSlot)
|
||||
check always (not itemsProcessed.contains(expected))
|
||||
|
||||
test "adds slot index to slot queue once SlotFreed emitted":
|
||||
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
queue.onProcessSlot = proc(
|
||||
item: SlotQueueItem, done: Future[void]
|
||||
) {.async: (raises: []).} =
|
||||
itemsProcessed.add item
|
||||
done.complete()
|
||||
if not done.finished:
|
||||
done.complete()
|
||||
|
||||
createAvailability()
|
||||
market.requested.add request # "contract" must be able to return request
|
||||
|
||||
market.emitSlotFreed(request.id, 2.uint64)
|
||||
|
||||
let expected = SlotQueueItem.init(request, 2.uint16)
|
||||
without collateralPerSlot =? await market.slotCollateral(request.id, 2.uint64),
|
||||
error:
|
||||
fail()
|
||||
|
||||
let expected =
|
||||
SlotQueueItem.init(request, 2.uint16, collateral = request.ask.collateralPerSlot)
|
||||
|
||||
check eventually itemsProcessed.contains(expected)
|
||||
|
||||
test "items in queue are readded (and marked seen) once ignored":
|
||||
await market.requestStorage(request)
|
||||
let items = SlotQueueItem.init(request)
|
||||
let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
|
||||
check eventually queue.len > 0
|
||||
# queue starts paused, allow items to be added to the queue
|
||||
check eventually queue.paused
|
||||
@ -331,7 +353,7 @@ asyncchecksuite "Sales":
|
||||
test "queue is paused once availability is insufficient to service slots in queue":
|
||||
createAvailability() # enough to fill a single slot
|
||||
await market.requestStorage(request)
|
||||
let items = SlotQueueItem.init(request)
|
||||
let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
|
||||
check eventually queue.len > 0
|
||||
# queue starts paused, allow items to be added to the queue
|
||||
check eventually queue.paused
|
||||
@ -348,7 +370,7 @@ asyncchecksuite "Sales":
|
||||
|
||||
test "availability size is reduced by request slot size when fully downloaded":
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
let blk = bt.Block.new(@[1.byte]).get
|
||||
await onBatch(blk.repeat(request.ask.slotSize.int))
|
||||
@ -361,7 +383,7 @@ asyncchecksuite "Sales":
|
||||
test "non-downloaded bytes are returned to availability once finished":
|
||||
var slotIndex = 0.uint64
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
slotIndex = slot
|
||||
let blk = bt.Block.new(@[1.byte]).get
|
||||
@ -421,7 +443,7 @@ asyncchecksuite "Sales":
|
||||
var storingRequest: StorageRequest
|
||||
var storingSlot: uint64
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
storingRequest = request
|
||||
storingSlot = slot
|
||||
@ -434,7 +456,7 @@ asyncchecksuite "Sales":
|
||||
test "makes storage available again when data retrieval fails":
|
||||
let error = newException(IOError, "data retrieval failed")
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
return failure(error)
|
||||
createAvailability()
|
||||
@ -503,7 +525,7 @@ asyncchecksuite "Sales":
|
||||
test "makes storage available again when other host fills the slot":
|
||||
let otherHost = Address.example
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
await sleepAsync(chronos.hours(1))
|
||||
return success()
|
||||
@ -519,7 +541,7 @@ asyncchecksuite "Sales":
|
||||
|
||||
let origSize = availability.freeSize
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
await sleepAsync(chronos.hours(1))
|
||||
return success()
|
||||
@ -544,7 +566,7 @@ asyncchecksuite "Sales":
|
||||
|
||||
let origSize = availability.freeSize
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
await sleepAsync(chronos.hours(1))
|
||||
return success()
|
||||
|
||||
@ -50,12 +50,19 @@ suite "Slot queue start/stop":
|
||||
suite "Slot queue workers":
|
||||
var queue: SlotQueue
|
||||
|
||||
proc onProcessSlot(item: SlotQueueItem, doneProcessing: Future[void]) {.async.} =
|
||||
await sleepAsync(1000.millis)
|
||||
proc onProcessSlot(
|
||||
item: SlotQueueItem, doneProcessing: Future[void]
|
||||
) {.async: (raises: []).} =
|
||||
# this is not illustrative of the realistic scenario as the
|
||||
# `doneProcessing` future would be passed to another context before being
|
||||
# completed and therefore is not as simple as making the callback async
|
||||
doneProcessing.complete()
|
||||
try:
|
||||
await sleepAsync(1000.millis)
|
||||
except CatchableError as exc:
|
||||
checkpoint(exc.msg)
|
||||
finally:
|
||||
if not doneProcessing.finished:
|
||||
doneProcessing.complete()
|
||||
|
||||
setup:
|
||||
let request = StorageRequest.example
|
||||
@ -89,9 +96,14 @@ suite "Slot queue workers":
|
||||
check eventually queue.activeWorkers == 3
|
||||
|
||||
test "discards workers once processing completed":
|
||||
proc processSlot(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
await sleepAsync(1.millis)
|
||||
done.complete()
|
||||
proc processSlot(item: SlotQueueItem, done: Future[void]) {.async: (raises: []).} =
|
||||
try:
|
||||
await sleepAsync(1.millis)
|
||||
except CatchableError as exc:
|
||||
checkpoint(exc.msg)
|
||||
finally:
|
||||
if not done.finished:
|
||||
done.complete()
|
||||
|
||||
queue.onProcessSlot = processSlot
|
||||
|
||||
@ -114,11 +126,19 @@ suite "Slot queue":
|
||||
|
||||
proc newSlotQueue(maxSize, maxWorkers: int, processSlotDelay = 1.millis) =
|
||||
queue = SlotQueue.new(maxWorkers, maxSize.uint16)
|
||||
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||
await sleepAsync(processSlotDelay)
|
||||
onProcessSlotCalled = true
|
||||
onProcessSlotCalledWith.add (item.requestId, item.slotIndex)
|
||||
done.complete()
|
||||
queue.onProcessSlot = proc(
|
||||
item: SlotQueueItem, done: Future[void]
|
||||
) {.async: (raises: []).} =
|
||||
try:
|
||||
await sleepAsync(processSlotDelay)
|
||||
except CatchableError as exc:
|
||||
checkpoint(exc.msg)
|
||||
finally:
|
||||
onProcessSlotCalled = true
|
||||
onProcessSlotCalledWith.add (item.requestId, item.slotIndex)
|
||||
if not done.finished:
|
||||
done.complete()
|
||||
|
||||
queue.start()
|
||||
|
||||
setup:
|
||||
@ -159,8 +179,10 @@ suite "Slot queue":
|
||||
requestB.ask.collateralPerByte = 1.u256
|
||||
requestB.expiry = 1000.uint64
|
||||
|
||||
let itemA = SlotQueueItem.init(requestA, 0)
|
||||
let itemB = SlotQueueItem.init(requestB, 0)
|
||||
let itemA =
|
||||
SlotQueueItem.init(requestA, 0, collateral = requestA.ask.collateralPerSlot)
|
||||
let itemB =
|
||||
SlotQueueItem.init(requestB, 0, collateral = requestB.ask.collateralPerSlot)
|
||||
check itemB < itemA # B higher priority than A
|
||||
check itemA > itemB
|
||||
|
||||
@ -172,7 +194,7 @@ suite "Slot queue":
|
||||
slotSize: 1.uint64,
|
||||
duration: 1.uint64,
|
||||
pricePerBytePerSecond: 2.u256, # profitability is higher (good)
|
||||
collateralPerByte: 1.u256,
|
||||
collateral: 1.u256,
|
||||
expiry: 1.uint64,
|
||||
seen: true, # seen (bad), more weight than profitability
|
||||
)
|
||||
@ -182,7 +204,7 @@ suite "Slot queue":
|
||||
slotSize: 1.uint64,
|
||||
duration: 1.uint64,
|
||||
pricePerBytePerSecond: 1.u256, # profitability is lower (bad)
|
||||
collateralPerByte: 1.u256,
|
||||
collateral: 1.u256,
|
||||
expiry: 1.uint64,
|
||||
seen: false, # not seen (good)
|
||||
)
|
||||
@ -197,7 +219,7 @@ suite "Slot queue":
|
||||
slotSize: 1.uint64,
|
||||
duration: 1.uint64,
|
||||
pricePerBytePerSecond: 1.u256, # reward is lower (bad)
|
||||
collateralPerByte: 1.u256, # collateral is lower (good)
|
||||
collateral: 1.u256, # collateral is lower (good)
|
||||
expiry: 1.uint64,
|
||||
seen: false,
|
||||
)
|
||||
@ -208,7 +230,7 @@ suite "Slot queue":
|
||||
duration: 1.uint64,
|
||||
pricePerBytePerSecond: 2.u256,
|
||||
# reward is higher (good), more weight than collateral
|
||||
collateralPerByte: 2.u256, # collateral is higher (bad)
|
||||
collateral: 2.u256, # collateral is higher (bad)
|
||||
expiry: 1.uint64,
|
||||
seen: false,
|
||||
)
|
||||
@ -223,7 +245,7 @@ suite "Slot queue":
|
||||
slotSize: 1.uint64,
|
||||
duration: 1.uint64,
|
||||
pricePerBytePerSecond: 1.u256,
|
||||
collateralPerByte: 2.u256, # collateral is higher (bad)
|
||||
collateral: 2.u256, # collateral is higher (bad)
|
||||
expiry: 2.uint64, # expiry is longer (good)
|
||||
seen: false,
|
||||
)
|
||||
@ -233,7 +255,7 @@ suite "Slot queue":
|
||||
slotSize: 1.uint64,
|
||||
duration: 1.uint64,
|
||||
pricePerBytePerSecond: 1.u256,
|
||||
collateralPerByte: 1.u256, # collateral is lower (good), more weight than expiry
|
||||
collateral: 1.u256, # collateral is lower (good), more weight than expiry
|
||||
expiry: 1.uint64, # expiry is shorter (bad)
|
||||
seen: false,
|
||||
)
|
||||
@ -248,7 +270,7 @@ suite "Slot queue":
|
||||
slotSize: 1.uint64, # slotSize is smaller (good)
|
||||
duration: 1.uint64,
|
||||
pricePerBytePerSecond: 1.u256,
|
||||
collateralPerByte: 1.u256,
|
||||
collateral: 1.u256,
|
||||
expiry: 1.uint64, # expiry is shorter (bad)
|
||||
seen: false,
|
||||
)
|
||||
@ -258,7 +280,7 @@ suite "Slot queue":
|
||||
slotSize: 2.uint64, # slotSize is larger (bad)
|
||||
duration: 1.uint64,
|
||||
pricePerBytePerSecond: 1.u256,
|
||||
collateralPerByte: 1.u256,
|
||||
collateral: 1.u256,
|
||||
expiry: 2.uint64, # expiry is longer (good), more weight than slotSize
|
||||
seen: false,
|
||||
)
|
||||
@ -273,7 +295,7 @@ suite "Slot queue":
|
||||
slotSize: 2.uint64, # slotSize is larger (bad)
|
||||
duration: 1.uint64,
|
||||
pricePerBytePerSecond: 1.u256,
|
||||
collateralPerByte: 1.u256,
|
||||
collateral: 1.u256,
|
||||
expiry: 1.uint64, # expiry is shorter (bad)
|
||||
seen: false,
|
||||
)
|
||||
@ -283,7 +305,7 @@ suite "Slot queue":
|
||||
slotSize: 1.uint64, # slotSize is smaller (good)
|
||||
duration: 1.uint64,
|
||||
pricePerBytePerSecond: 1.u256,
|
||||
collateralPerByte: 1.u256,
|
||||
collateral: 1.u256,
|
||||
expiry: 1.uint64,
|
||||
seen: false,
|
||||
)
|
||||
@ -292,11 +314,16 @@ suite "Slot queue":
|
||||
|
||||
test "expands available all possible slot indices on init":
|
||||
let request = StorageRequest.example
|
||||
let items = SlotQueueItem.init(request)
|
||||
let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
|
||||
check items.len.uint64 == request.ask.slots
|
||||
var checked = 0
|
||||
for slotIndex in 0'u16 ..< request.ask.slots.uint16:
|
||||
check items.anyIt(it == SlotQueueItem.init(request, slotIndex))
|
||||
check items.anyIt(
|
||||
it ==
|
||||
SlotQueueItem.init(
|
||||
request, slotIndex, collateral = request.ask.collateralPerSlot
|
||||
)
|
||||
)
|
||||
inc checked
|
||||
check checked == items.len
|
||||
|
||||
@ -322,34 +349,17 @@ suite "Slot queue":
|
||||
check isOk queue.push(item3)
|
||||
check isOk queue.push(item4)
|
||||
|
||||
test "populates item with exisiting request metadata":
|
||||
newSlotQueue(maxSize = 8, maxWorkers = 1, processSlotDelay = 10.millis)
|
||||
let request0 = StorageRequest.example
|
||||
var request1 = StorageRequest.example
|
||||
request1.ask.collateralPerByte += 1.u256
|
||||
let items0 = SlotQueueItem.init(request0)
|
||||
let items1 = SlotQueueItem.init(request1)
|
||||
check queue.push(items0).isOk
|
||||
check queue.push(items1).isOk
|
||||
let populated = !queue.populateItem(request1.id, 12'u16)
|
||||
check populated.requestId == request1.id
|
||||
check populated.slotIndex == 12'u16
|
||||
check populated.slotSize == request1.ask.slotSize
|
||||
check populated.duration == request1.ask.duration
|
||||
check populated.pricePerBytePerSecond == request1.ask.pricePerBytePerSecond
|
||||
check populated.collateralPerByte == request1.ask.collateralPerByte
|
||||
|
||||
test "does not find exisiting request metadata":
|
||||
newSlotQueue(maxSize = 2, maxWorkers = 2)
|
||||
let item = SlotQueueItem.example
|
||||
check queue.populateItem(item.requestId, 12'u16).isNone
|
||||
|
||||
test "can support uint16.high slots":
|
||||
var request = StorageRequest.example
|
||||
let maxUInt16 = uint16.high
|
||||
let uint64Slots = uint64(maxUInt16)
|
||||
request.ask.slots = uint64Slots
|
||||
let items = SlotQueueItem.init(request.id, request.ask, request.expiry)
|
||||
let items = SlotQueueItem.init(
|
||||
request.id,
|
||||
request.ask,
|
||||
request.expiry,
|
||||
collateral = request.ask.collateralPerSlot,
|
||||
)
|
||||
check items.len.uint16 == maxUInt16
|
||||
|
||||
test "cannot support greater than uint16.high slots":
|
||||
@ -358,7 +368,12 @@ suite "Slot queue":
|
||||
let uint64Slots = uint64(int32Slots)
|
||||
request.ask.slots = uint64Slots
|
||||
expect SlotsOutOfRangeError:
|
||||
discard SlotQueueItem.init(request.id, request.ask, request.expiry)
|
||||
discard SlotQueueItem.init(
|
||||
request.id,
|
||||
request.ask,
|
||||
request.expiry,
|
||||
collateral = request.ask.collateralPerSlot,
|
||||
)
|
||||
|
||||
test "cannot push duplicate items":
|
||||
newSlotQueue(maxSize = 6, maxWorkers = 1, processSlotDelay = 15.millis)
|
||||
@ -399,8 +414,10 @@ suite "Slot queue":
|
||||
let request0 = StorageRequest.example
|
||||
var request1 = StorageRequest.example
|
||||
request1.ask.collateralPerByte += 1.u256
|
||||
let items0 = SlotQueueItem.init(request0)
|
||||
let items1 = SlotQueueItem.init(request1)
|
||||
let items0 =
|
||||
SlotQueueItem.init(request0, collateral = request0.ask.collateralPerSlot)
|
||||
let items1 =
|
||||
SlotQueueItem.init(request1, collateral = request1.ask.collateralPerSlot)
|
||||
check queue.push(items0).isOk
|
||||
check queue.push(items1).isOk
|
||||
let last = items1[items1.high]
|
||||
@ -413,8 +430,10 @@ suite "Slot queue":
|
||||
let request0 = StorageRequest.example
|
||||
var request1 = StorageRequest.example
|
||||
request1.ask.collateralPerByte += 1.u256
|
||||
let items0 = SlotQueueItem.init(request0)
|
||||
let items1 = SlotQueueItem.init(request1)
|
||||
let items0 =
|
||||
SlotQueueItem.init(request0, collateral = request0.ask.collateralPerSlot)
|
||||
let items1 =
|
||||
SlotQueueItem.init(request1, collateral = request1.ask.collateralPerSlot)
|
||||
check queue.push(items0).isOk
|
||||
check queue.push(items1).isOk
|
||||
queue.delete(request1.id)
|
||||
@ -433,42 +452,56 @@ suite "Slot queue":
|
||||
request3.ask.collateralPerByte = request2.ask.collateralPerByte + 1
|
||||
request4.ask.collateralPerByte = request3.ask.collateralPerByte + 1
|
||||
request5.ask.collateralPerByte = request4.ask.collateralPerByte + 1
|
||||
let item0 = SlotQueueItem.init(request0, 0)
|
||||
let item1 = SlotQueueItem.init(request1, 0)
|
||||
let item2 = SlotQueueItem.init(request2, 0)
|
||||
let item3 = SlotQueueItem.init(request3, 0)
|
||||
let item4 = SlotQueueItem.init(request4, 0)
|
||||
let item5 = SlotQueueItem.init(request5, 0)
|
||||
let item0 =
|
||||
SlotQueueItem.init(request0, 0, collateral = request0.ask.collateralPerSlot)
|
||||
let item1 =
|
||||
SlotQueueItem.init(request1, 0, collateral = request1.ask.collateralPerSlot)
|
||||
let item2 =
|
||||
SlotQueueItem.init(request2, 0, collateral = request2.ask.collateralPerSlot)
|
||||
let item3 =
|
||||
SlotQueueItem.init(request3, 0, collateral = request3.ask.collateralPerSlot)
|
||||
let item4 =
|
||||
SlotQueueItem.init(request4, 0, collateral = request4.ask.collateralPerSlot)
|
||||
let item5 =
|
||||
SlotQueueItem.init(request5, 0, collateral = request5.ask.collateralPerSlot)
|
||||
check queue.contains(item5) == false
|
||||
check queue.push(@[item0, item1, item2, item3, item4, item5]).isOk
|
||||
check queue.contains(item5)
|
||||
|
||||
test "sorts items by profitability descending (higher pricePerBytePerSecond == higher priority == goes first in the list)":
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
let item0 =
|
||||
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
|
||||
request.ask.pricePerBytePerSecond += 1.u256
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
let item1 =
|
||||
SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot)
|
||||
check item1 < item0
|
||||
|
||||
test "sorts items by collateral ascending (higher required collateralPerByte = lower priority == comes later in the list)":
|
||||
test "sorts items by collateral ascending (higher required collateral = lower priority == comes later in the list)":
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
request.ask.collateralPerByte += 1.u256
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
let item0 =
|
||||
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
|
||||
let item1 = SlotQueueItem.init(
|
||||
request, 1, collateral = request.ask.collateralPerSlot + 1.u256
|
||||
)
|
||||
check item1 > item0
|
||||
|
||||
test "sorts items by expiry descending (longer expiry = higher priority)":
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
let item0 =
|
||||
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
|
||||
request.expiry += 1
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
let item1 =
|
||||
SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot)
|
||||
check item1 < item0
|
||||
|
||||
test "sorts items by slot size descending (bigger dataset = higher profitability = higher priority)":
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
let item0 =
|
||||
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
|
||||
request.ask.slotSize += 1
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
let item1 =
|
||||
SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot)
|
||||
check item1 < item0
|
||||
|
||||
test "should call callback once an item is added":
|
||||
@ -489,13 +522,17 @@ suite "Slot queue":
|
||||
# sleeping after push allows the slotqueue loop to iterate,
|
||||
# calling the callback for each pushed/updated item
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
let item0 =
|
||||
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
|
||||
request.ask.pricePerBytePerSecond += 1.u256
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
let item1 =
|
||||
SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot)
|
||||
request.ask.pricePerBytePerSecond += 1.u256
|
||||
let item2 = SlotQueueItem.init(request, 2)
|
||||
let item2 =
|
||||
SlotQueueItem.init(request, 2, collateral = request.ask.collateralPerSlot)
|
||||
request.ask.pricePerBytePerSecond += 1.u256
|
||||
let item3 = SlotQueueItem.init(request, 3)
|
||||
let item3 =
|
||||
SlotQueueItem.init(request, 3, collateral = request.ask.collateralPerSlot)
|
||||
|
||||
check queue.push(item0).isOk
|
||||
await sleepAsync(1.millis)
|
||||
@ -520,13 +557,17 @@ suite "Slot queue":
|
||||
# sleeping after push allows the slotqueue loop to iterate,
|
||||
# calling the callback for each pushed/updated item
|
||||
var request = StorageRequest.example
|
||||
let item0 = SlotQueueItem.init(request, 0)
|
||||
let item0 =
|
||||
SlotQueueItem.init(request, 0, collateral = request.ask.collateralPerSlot)
|
||||
request.ask.pricePerBytePerSecond += 1.u256
|
||||
let item1 = SlotQueueItem.init(request, 1)
|
||||
let item1 =
|
||||
SlotQueueItem.init(request, 1, collateral = request.ask.collateralPerSlot)
|
||||
request.ask.pricePerBytePerSecond += 1.u256
|
||||
let item2 = SlotQueueItem.init(request, 2)
|
||||
let item2 =
|
||||
SlotQueueItem.init(request, 2, collateral = request.ask.collateralPerSlot)
|
||||
request.ask.pricePerBytePerSecond += 1.u256
|
||||
let item3 = SlotQueueItem.init(request, 3)
|
||||
let item3 =
|
||||
SlotQueueItem.init(request, 3, collateral = request.ask.collateralPerSlot)
|
||||
|
||||
check queue.push(item0).isOk
|
||||
check queue.push(item1).isOk
|
||||
@ -550,7 +591,7 @@ suite "Slot queue":
|
||||
queue.pause
|
||||
|
||||
let request = StorageRequest.example
|
||||
var items = SlotQueueItem.init(request)
|
||||
var items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
|
||||
check queue.push(items).isOk
|
||||
# check all items processed
|
||||
check eventually queue.len == 0
|
||||
@ -558,8 +599,14 @@ suite "Slot queue":
|
||||
test "pushing seen item does not unpause queue":
|
||||
newSlotQueue(maxSize = 4, maxWorkers = 4)
|
||||
let request = StorageRequest.example
|
||||
let item0 =
|
||||
SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true)
|
||||
let item0 = SlotQueueItem.init(
|
||||
request.id,
|
||||
0'u16,
|
||||
request.ask,
|
||||
request.expiry,
|
||||
request.ask.collateralPerSlot,
|
||||
seen = true,
|
||||
)
|
||||
check queue.paused
|
||||
check queue.push(item0).isOk
|
||||
check queue.paused
|
||||
@ -567,8 +614,14 @@ suite "Slot queue":
|
||||
test "paused queue waits for unpause before continuing processing":
|
||||
newSlotQueue(maxSize = 4, maxWorkers = 4)
|
||||
let request = StorageRequest.example
|
||||
let item =
|
||||
SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = false)
|
||||
let item = SlotQueueItem.init(
|
||||
request.id,
|
||||
1'u16,
|
||||
request.ask,
|
||||
request.expiry,
|
||||
request.ask.collateralPerSlot,
|
||||
seen = false,
|
||||
)
|
||||
check queue.paused
|
||||
# push causes unpause
|
||||
check queue.push(item).isOk
|
||||
@ -579,10 +632,22 @@ suite "Slot queue":
|
||||
test "processing a 'seen' item pauses the queue":
|
||||
newSlotQueue(maxSize = 4, maxWorkers = 4)
|
||||
let request = StorageRequest.example
|
||||
let unseen =
|
||||
SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false)
|
||||
let seen =
|
||||
SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true)
|
||||
let unseen = SlotQueueItem.init(
|
||||
request.id,
|
||||
0'u16,
|
||||
request.ask,
|
||||
request.expiry,
|
||||
request.ask.collateralPerSlot,
|
||||
seen = false,
|
||||
)
|
||||
let seen = SlotQueueItem.init(
|
||||
request.id,
|
||||
1'u16,
|
||||
request.ask,
|
||||
request.expiry,
|
||||
request.ask.collateralPerSlot,
|
||||
seen = true,
|
||||
)
|
||||
# push causes unpause
|
||||
check queue.push(unseen).isSuccess
|
||||
# check all items processed
|
||||
@ -595,10 +660,22 @@ suite "Slot queue":
|
||||
test "processing a 'seen' item does not decrease the number of workers":
|
||||
newSlotQueue(maxSize = 4, maxWorkers = 4)
|
||||
let request = StorageRequest.example
|
||||
let unseen =
|
||||
SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = false)
|
||||
let seen =
|
||||
SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true)
|
||||
let unseen = SlotQueueItem.init(
|
||||
request.id,
|
||||
0'u16,
|
||||
request.ask,
|
||||
request.expiry,
|
||||
request.ask.collateralPerSlot,
|
||||
seen = false,
|
||||
)
|
||||
let seen = SlotQueueItem.init(
|
||||
request.id,
|
||||
1'u16,
|
||||
request.ask,
|
||||
request.expiry,
|
||||
request.ask.collateralPerSlot,
|
||||
seen = true,
|
||||
)
|
||||
# push seen item to ensure that queue is pausing
|
||||
check queue.push(seen).isSuccess
|
||||
# unpause and pause a number of times
|
||||
@ -615,10 +692,22 @@ suite "Slot queue":
|
||||
test "item 'seen' flags can be cleared":
|
||||
newSlotQueue(maxSize = 4, maxWorkers = 1)
|
||||
let request = StorageRequest.example
|
||||
let item0 =
|
||||
SlotQueueItem.init(request.id, 0'u16, request.ask, request.expiry, seen = true)
|
||||
let item1 =
|
||||
SlotQueueItem.init(request.id, 1'u16, request.ask, request.expiry, seen = true)
|
||||
let item0 = SlotQueueItem.init(
|
||||
request.id,
|
||||
0'u16,
|
||||
request.ask,
|
||||
request.expiry,
|
||||
request.ask.collateralPerSlot,
|
||||
seen = true,
|
||||
)
|
||||
let item1 = SlotQueueItem.init(
|
||||
request.id,
|
||||
1'u16,
|
||||
request.ask,
|
||||
request.expiry,
|
||||
request.ask.collateralPerSlot,
|
||||
seen = true,
|
||||
)
|
||||
check queue.push(item0).isOk
|
||||
check queue.push(item1).isOk
|
||||
check queue[0].seen
|
||||
|
||||
@ -133,7 +133,7 @@ suite "Slot builder":

check:
Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg ==
"Number of blocks must be divisable by number of slots."
"Number of blocks must be divisible by number of slots."

test "Block size must be divisable by cell size":
let mismatchManifest = Manifest.new(
@ -151,7 +151,7 @@ suite "Slot builder":

check:
Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg ==
"Block size must be divisable by cell size."
"Block size must be divisible by cell size."

test "Should build correct slot builder":
builder =

@ -1,6 +1,6 @@
|
||||
import std/unittest
|
||||
import std/random
|
||||
|
||||
import pkg/unittest2
|
||||
import pkg/stew/objects
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
@ -11,7 +11,7 @@ import pkg/codex/stores/repostore/coders
|
||||
|
||||
import ../../helpers
|
||||
|
||||
checksuite "Test coders":
|
||||
suite "Test coders":
|
||||
proc rand(T: type NBytes): T =
|
||||
rand(Natural).NBytes
|
||||
|
||||
|
||||
@ -11,7 +11,7 @@ import ./commonstoretests
|
||||
import ../../asynctest
|
||||
import ../helpers
|
||||
|
||||
checksuite "Cache Store":
|
||||
suite "Cache Store":
|
||||
var
|
||||
newBlock, newBlock1, newBlock2, newBlock3: Block
|
||||
store: CacheStore
|
||||
|
||||
@ -36,7 +36,7 @@ proc createManifestCid(): ?!Cid =
|
||||
let cid = ?Cid.init(version, codec, hash).mapFailure
|
||||
return success cid
|
||||
|
||||
checksuite "KeyUtils":
|
||||
suite "KeyUtils":
|
||||
test "makePrefixKey should create block key":
|
||||
let length = 6
|
||||
let cid = Cid.example
|
||||
|
||||
@ -21,7 +21,7 @@ import ../examples
|
||||
|
||||
import codex/stores/maintenance
|
||||
|
||||
checksuite "BlockMaintainer":
|
||||
suite "BlockMaintainer":
|
||||
var mockRepoStore: MockRepoStore
|
||||
var interval: Duration
|
||||
var mockTimer: MockTimer
|
||||
|
||||
@ -24,7 +24,7 @@ import ../helpers/mockclock
|
||||
import ../examples
|
||||
import ./commonstoretests
|
||||
|
||||
checksuite "Test RepoStore start/stop":
|
||||
suite "Test RepoStore start/stop":
|
||||
var
|
||||
repoDs: Datastore
|
||||
metaDs: Datastore
|
||||
|
||||
@ -22,7 +22,7 @@ proc toSortedSeq[T](h: AsyncHeapQueue[T], queueType = QueueType.Min): seq[T] =
|
||||
while tmp.len > 0:
|
||||
result.add(popNoWait(tmp).tryGet())
|
||||
|
||||
checksuite "Synchronous tests":
|
||||
suite "Synchronous tests":
|
||||
test "Test pushNoWait - Min":
|
||||
var heap = newAsyncHeapQueue[int]()
|
||||
let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
|
||||
|
||||
@ -27,7 +27,7 @@ asyncchecksuite "Chunking":
|
||||
let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0]
|
||||
proc reader(
|
||||
data: ChunkBuffer, len: int
|
||||
): Future[int] {.gcsafe, async, raises: [Defect].} =
|
||||
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
|
||||
let read = min(contents.len - offset, len)
|
||||
if read == 0:
|
||||
return 0
|
||||
@ -97,8 +97,13 @@ asyncchecksuite "Chunking":
|
||||
discard (await chunker.getBytes())
|
||||
|
||||
test "stream should forward LPStreamError":
|
||||
expect LPStreamError:
|
||||
try:
|
||||
await raiseStreamException(newException(LPStreamError, "test error"))
|
||||
except ChunkerError as exc:
|
||||
check exc.parent of LPStreamError
|
||||
except CatchableError as exc:
|
||||
checkpoint("Unexpected error: " & exc.msg)
|
||||
fail()
|
||||
|
||||
test "stream should catch LPStreamEOFError":
|
||||
await raiseStreamException(newException(LPStreamEOFError, "test error"))
|
||||
@ -106,7 +111,3 @@ asyncchecksuite "Chunking":
|
||||
test "stream should forward CancelledError":
|
||||
expect CancelledError:
|
||||
await raiseStreamException(newException(CancelledError, "test error"))
|
||||
|
||||
test "stream should forward LPStreamError":
|
||||
expect LPStreamError:
|
||||
await raiseStreamException(newException(LPStreamError, "test error"))
|
||||
|
||||
@ -1,9 +1,9 @@
|
||||
import std/unittest
|
||||
import pkg/unittest2
|
||||
|
||||
import codex/clock
|
||||
import ./helpers
|
||||
|
||||
checksuite "Clock":
|
||||
suite "Clock":
|
||||
proc testConversion(seconds: SecondsSince1970) =
|
||||
let asBytes = seconds.toBytes
|
||||
|
||||
|
||||
@ -228,7 +228,7 @@ suite "Erasure encode/decode":
|
||||
discard (await erasure.decode(encoded)).tryGet()
|
||||
|
||||
test "Should concurrently encode/decode multiple datasets":
|
||||
const iterations = 2
|
||||
const iterations = 5
|
||||
|
||||
let
|
||||
datasetSize = 1.MiBs
|
||||
@ -335,18 +335,18 @@ suite "Erasure encode/decode":
|
||||
for i in 0 ..< parityLen:
|
||||
paritySeq[i] = cast[seq[byte]](parity[i])
|
||||
|
||||
# call encodeAsync to get the parity
|
||||
# call asyncEncode to get the parity
|
||||
let encFut =
|
||||
await erasure.encodeAsync(BlockSize.int, blocksLen, parityLen, data, parity)
|
||||
await erasure.asyncEncode(BlockSize.int, blocksLen, parityLen, data, parity)
|
||||
check encFut.isOk
|
||||
|
||||
let decFut = await erasure.decodeAsync(
|
||||
let decFut = await erasure.asyncDecode(
|
||||
BlockSize.int, blocksLen, parityLen, data, paritySeq, recovered
|
||||
)
|
||||
check decFut.isOk
|
||||
|
||||
# call encodeAsync and cancel the task
|
||||
let encodeFut = erasure.encodeAsync(
|
||||
# call asyncEncode and cancel the task
|
||||
let encodeFut = erasure.asyncEncode(
|
||||
BlockSize.int, blocksLen, parityLen, data, cancelledTaskParity
|
||||
)
|
||||
encodeFut.cancel()
|
||||
@ -359,8 +359,8 @@ suite "Erasure encode/decode":
|
||||
for i in 0 ..< parityLen:
|
||||
check equalMem(parity[i], cancelledTaskParity[i], BlockSize.int)
|
||||
|
||||
# call decodeAsync and cancel the task
|
||||
let decodeFut = erasure.decodeAsync(
|
||||
# call asyncDecode and cancel the task
|
||||
let decodeFut = erasure.asyncDecode(
|
||||
BlockSize.int, blocksLen, parityLen, data, paritySeq, cancelledTaskRecovered
|
||||
)
|
||||
decodeFut.cancel()
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
import std/options
|
||||
import std/strutils
|
||||
import std/unittest
|
||||
|
||||
import pkg/unittest2
|
||||
import pkg/codex/blocktype
|
||||
import pkg/codex/conf
|
||||
import pkg/codex/contracts/requests
|
||||
|
||||
@ -13,7 +13,7 @@ import ../asynctest
|
||||
import ./helpers
|
||||
import ./examples
|
||||
|
||||
checksuite "Manifest":
|
||||
suite "Manifest":
|
||||
let
|
||||
manifest =
|
||||
Manifest.new(treeCid = Cid.example, blockSize = 1.MiBs, datasetSize = 100.MiBs)
|
||||
|
||||
@@ -116,7 +116,7 @@ asyncchecksuite "Purchasing":
await purchase.wait()
check market.withdrawn == @[request.id]

checksuite "Purchasing state machine":
suite "Purchasing state machine":
var purchasing: Purchasing
var market: MockMarket
var clock: MockClock

@@ -1,10 +1,10 @@
import std/times
import std/unittest

import codex/systemclock
import pkg/unittest2
import pkg/codex/systemclock
import ./helpers

checksuite "SystemClock":
suite "SystemClock":
test "Should get now":
let clock = SystemClock.new()


@@ -7,7 +7,7 @@ import pkg/codex/utils/iter
import ../../asynctest
import ../helpers

checksuite "Test Iter":
suite "Test Iter":
test "Should be finished":
let iter = Iter[int].empty()


@@ -1,12 +1,14 @@
import std/unittest
import std/os
import codex/utils/keyutils

import pkg/unittest2
import pkg/codex/utils/keyutils

import ../helpers

when defined(windows):
import stew/windows/acl

checksuite "keyutils":
suite "keyutils":
let path = getTempDir() / "CodexTest"

setup:

@@ -1,8 +1,9 @@
import std/unittest
import codex/utils/options
import pkg/unittest2
import pkg/codex/utils/options

import ../helpers

checksuite "optional casts":
suite "optional casts":
test "casting value to same type works":
check 42 as int == some 42

@@ -31,7 +32,7 @@ checksuite "optional casts":
check 42.some as string == string.none
check int.none as int == int.none

checksuite "Optionalize":
suite "Optionalize":
test "does not except non-object types":
static:
doAssert not compiles(Optionalize(int))

@@ -17,47 +17,71 @@ asyncchecksuite "tracked futures":
check module.trackedFutures.len == 0

test "tracks unfinished futures":
let fut = newFuture[void]("test")
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
module.trackedFutures.track(fut)
check module.trackedFutures.len == 1

test "does not track completed futures":
let fut = newFuture[void]("test")
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.complete()
module.trackedFutures.track(fut)
check eventually module.trackedFutures.len == 0

test "does not track failed futures":
let fut = newFuture[void]("test")
fut.fail((ref CatchableError)(msg: "some error"))
module.trackedFutures.track(fut)
check eventually module.trackedFutures.len == 0
check module.trackedFutures.len == 0

test "does not track cancelled futures":
let fut = newFuture[void]("test")
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.cancelCallback = proc(data: pointer) =
fut.cancelAndSchedule() # manually schedule the cancel

await fut.cancelAndWait()
module.trackedFutures.track(fut)
check eventually module.trackedFutures.len == 0

test "removes tracked future when finished":
let fut = newFuture[void]("test")
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
module.trackedFutures.track(fut)
check module.trackedFutures.len == 1
fut.complete()
check eventually module.trackedFutures.len == 0

test "removes tracked future when cancelled":
let fut = newFuture[void]("test")
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.cancelCallback = proc(data: pointer) =
fut.cancelAndSchedule() # manually schedule the cancel

module.trackedFutures.track(fut)
check module.trackedFutures.len == 1
await fut.cancelAndWait()
check eventually module.trackedFutures.len == 0

test "completed and removes future on cancel":
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.cancelCallback = proc(data: pointer) =
fut.complete()

module.trackedFutures.track(fut)
check module.trackedFutures.len == 1
await fut.cancelAndWait()
check eventually module.trackedFutures.len == 0

test "cancels and removes all tracked futures":
let fut1 = newFuture[void]("test1")
let fut2 = newFuture[void]("test2")
let fut3 = newFuture[void]("test3")
let fut1 = Future[void].Raising([]).init("test1", {FutureFlag.OwnCancelSchedule})
fut1.cancelCallback = proc(data: pointer) =
fut1.cancelAndSchedule() # manually schedule the cancel

let fut2 = Future[void].Raising([]).init("test2", {FutureFlag.OwnCancelSchedule})
fut2.cancelCallback = proc(data: pointer) =
fut2.cancelAndSchedule() # manually schedule the cancel

let fut3 = Future[void].Raising([]).init("test3", {FutureFlag.OwnCancelSchedule})
fut3.cancelCallback = proc(data: pointer) =
fut3.cancelAndSchedule() # manually schedule the cancel

module.trackedFutures.track(fut1)
check module.trackedFutures.len == 1
module.trackedFutures.track(fut2)
check module.trackedFutures.len == 2
module.trackedFutures.track(fut3)
check module.trackedFutures.len == 3
await module.trackedFutures.cancelTracked()
check eventually fut1.cancelled
check eventually fut2.cancelled

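The rewritten tests above rely on the chronos pattern where a future created with FutureFlag.OwnCancelSchedule must schedule its own cancellation from its cancelCallback; without that, cancelAndWait never completes. A minimal sketch of the pattern, assuming a chronos version that exposes these flags and a TrackedFutures instance as in codex/utils (the proc name and the "example" label are illustrative only):

proc trackExample(trackedFutures: TrackedFutures) {.async.} =
  # OwnCancelSchedule means chronos will NOT schedule the cancellation for us
  let fut = Future[void].Raising([]).init("example", {FutureFlag.OwnCancelSchedule})
  fut.cancelCallback = proc(data: pointer) =
    fut.cancelAndSchedule() # manually schedule the cancel, as the tests above do
  trackedFutures.track(fut)   # the tracker holds the future while it is pending
  await fut.cancelAndWait()   # completes only because the callback scheduled it
  # once the future finishes, the tracker drops it and len returns to 0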
@@ -1,4 +1,4 @@
import std/unittest
import pkg/unittest2

import pkg/codex/utils


@@ -598,6 +598,37 @@ ethersuite "On-Chain Market":
check endBalanceHost == (startBalanceHost + request.ask.collateralPerSlot)
check endBalanceReward == (startBalanceReward + expectedPayout)

test "returns the collateral when the slot is not being repaired":
await market.requestStorage(request)
await market.reserveSlot(request.id, 0.uint64)
await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot)

let slotId = request.slotId(0.uint64)
without collateral =? await market.slotCollateral(request.id, 0.uint64), error:
fail()

check collateral == request.ask.collateralPerSlot

test "calculates correctly the collateral when the slot is being repaired":
# Ensure that the config is loaded and repairRewardPercentage is available
discard await market.repairRewardPercentage()

await market.requestStorage(request)
await market.reserveSlot(request.id, 0.uint64)
await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot)
await market.freeSlot(slotId(request.id, 0.uint64))

let slotId = request.slotId(0.uint64)

without collateral =? await market.slotCollateral(request.id, 0.uint64), error:
fail()

# slotCollateral
# repairRewardPercentage = 10
# expected collateral = slotCollateral - slotCollateral * 0.1
check collateral ==
request.ask.collateralPerSlot - (request.ask.collateralPerSlot * 10).div(100.u256)

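As a worked example of the repair discount checked above (hypothetical numbers): with collateralPerSlot = 100 tokens and repairRewardPercentage = 10, the collateral required to fill a slot that is being repaired is 100 - (100 * 10) div 100 = 90 tokens, i.e. the repair reward is deducted from the collateral the new host has to put up.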
test "the request is added in cache after the fist access":
|
||||
await market.requestStorage(request)
|
||||
|
||||
|
||||
@ -72,7 +72,9 @@ proc example*(_: type Slot): Slot =
|
||||
proc example*(_: type SlotQueueItem): SlotQueueItem =
|
||||
let request = StorageRequest.example
|
||||
let slot = Slot.example
|
||||
SlotQueueItem.init(request, slot.slotIndex.uint16)
|
||||
SlotQueueItem.init(
|
||||
request, slot.slotIndex.uint16, collateral = request.ask.collateralPerSlot
|
||||
)
|
||||
|
||||
proc example(_: type G1Point): G1Point =
|
||||
G1Point(x: UInt256.example, y: UInt256.example)
|
||||
|
||||
@@ -2,4 +2,36 @@ import helpers/multisetup
import helpers/trackers
import helpers/templeveldb

import std/sequtils, chronos

export multisetup, trackers, templeveldb

### taken from libp2p errorhelpers.nim
proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] =
# This proc is only meant for use in tests / not suitable for general use.
# - Swallowing errors arbitrarily instead of aggregating them is bad design
# - It raises `CatchableError` instead of the union of the `futs` errors,
# inflating the caller's `raises` list unnecessarily. `macro` could fix it
let futs = @args
(
proc() {.async: (raises: [CatchableError]).} =
await allFutures(futs)
var firstErr: ref CatchableError
for fut in futs:
if fut.failed:
let err = fut.error()
if err of CancelledError:
raise err
if firstErr == nil:
firstErr = err
if firstErr != nil:
raise firstErr
)()

proc allFuturesThrowing*[T](futs: varargs[Future[T]]): Future[void] =
allFuturesThrowing(futs.mapIt(FutureBase(it)))

proc allFuturesThrowing*[T, E]( # https://github.com/nim-lang/Nim/issues/23432
futs: varargs[InternalRaisesFuture[T, E]]
): Future[void] =
allFuturesThrowing(futs.mapIt(FutureBase(it)))

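The allFuturesThrowing helpers above wait for every future to finish, re-raise the first recorded failure, and propagate CancelledError as-is. A usage sketch, assuming only chronos is imported and using sleepAsync purely as a stand-in for real work:

proc waitForBoth() {.async.} =
  let
    a = sleepAsync(10.milliseconds)
    b = sleepAsync(20.milliseconds)
  # completes when both futures finish; raises if either of them failed
  await allFuturesThrowing(a, b)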
@@ -1,5 +1,5 @@
import pkg/codex/streams/storestream
import std/unittest
import pkg/unittest2

# From lip2p/tests/helpers
const trackerNames = [StoreStreamTrackerName]

@@ -4,115 +4,216 @@ import std/strutils
from pkg/libp2p import Cid, `$`, init
import pkg/stint
import pkg/questionable/results
import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient]
import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient, httptable]
import pkg/codex/logutils
import pkg/codex/rest/json
import pkg/codex/purchasing
import pkg/codex/errors
import pkg/codex/sales/reservations

export purchasing
export purchasing, httptable, httpclient

type CodexClient* = ref object
http: HttpClient
baseurl: string
session: HttpSessionRef

type CodexClientError* = object of CatchableError

const HttpClientTimeoutMs = 60 * 1000

proc new*(_: type CodexClient, baseurl: string): CodexClient =
CodexClient(
http: newHttpClient(timeout = HttpClientTimeoutMs),
baseurl: baseurl,
session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}),
)
CodexClient(session: HttpSessionRef.new(), baseurl: baseurl)

proc info*(client: CodexClient): ?!JsonNode =
let url = client.baseurl & "/debug/info"
JsonNode.parse(client.http.getContent(url))
proc close*(self: CodexClient): Future[void] {.async: (raises: []).} =
await self.session.closeWait()

proc setLogLevel*(client: CodexClient, level: string) =
let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level
let headers = newHttpHeaders({"Content-Type": "text/plain"})
let response = client.http.request(url, httpMethod = HttpPost, headers = headers)
assert response.status == "200 OK"
proc request(
self: CodexClient,
httpMethod: httputils.HttpMethod,
url: string,
body: openArray[char] = [],
headers: openArray[HttpHeaderTuple] = [],
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
HttpClientRequestRef
.new(
self.session,
url,
httpMethod,
version = HttpVersion11,
flags = {},
maxResponseHeadersSize = HttpMaxHeadersSize,
headers = headers,
body = body.toOpenArrayByte(0, len(body) - 1),
).get
.send()

proc upload*(client: CodexClient, contents: string): ?!Cid =
let response = client.http.post(client.baseurl & "/data", contents)
assert response.status == "200 OK"
Cid.init(response.body).mapFailure
proc post(
self: CodexClient,
url: string,
body: string = "",
headers: seq[HttpHeaderTuple] = @[],
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodPost, url, headers = headers, body = body)

proc upload*(client: CodexClient, bytes: seq[byte]): ?!Cid =
client.upload(string.fromBytes(bytes))
proc get(
self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodGet, url, headers = headers)

proc download*(client: CodexClient, cid: Cid, local = false): ?!string =
let response = client.http.get(
client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream")
)
proc delete(
self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodDelete, url, headers = headers)

if response.status != "200 OK":
return failure(response.status)
proc patch(
self: CodexClient,
url: string,
body: string = "",
headers: seq[HttpHeaderTuple] = @[],
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodPatch, url, headers = headers, body = body)

success response.body
proc body*(
response: HttpClientResponseRef
): Future[string] {.async: (raises: [CancelledError, HttpError]).} =
return bytesToString (await response.getBodyBytes())

proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string =
let response = client.http.get(client.baseurl & "/data/" & $cid & "/network/manifest")
proc getContent(
client: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
): Future[string] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.get(url, headers)
return await response.body

if response.status != "200 OK":
return failure(response.status)
proc info*(
client: CodexClient
): Future[?!JsonNode] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.get(client.baseurl & "/debug/info")
return JsonNode.parse(await response.body)

success response.body
proc setLogLevel*(
client: CodexClient, level: string
): Future[void] {.async: (raises: [CancelledError, HttpError]).} =
let
url = client.baseurl & "/debug/chronicles/loglevel?level=" & level
headers = @[("Content-Type", "text/plain")]
response = await client.post(url, headers = headers, body = "")
assert response.status == 200

proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string =
let response = client.http.post(client.baseurl & "/data/" & $cid & "/network")
proc uploadRaw*(
client: CodexClient, contents: string, headers: seq[HttpHeaderTuple] = @[]
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return client.post(client.baseurl & "/data", body = contents, headers = headers)

if response.status != "200 OK":
return failure(response.status)
proc upload*(
client: CodexClient, contents: string
): Future[?!Cid] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.uploadRaw(contents)
assert response.status == 200
Cid.init(await response.body).mapFailure

success response.body
proc upload*(
client: CodexClient, bytes: seq[byte]
): Future[?!Cid] {.async: (raw: true).} =
return client.upload(string.fromBytes(bytes))

proc downloadRaw*(
client: CodexClient, cid: string, local = false
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return
client.get(client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"))

proc downloadBytes*(
client: CodexClient, cid: Cid, local = false
): Future[?!seq[byte]] {.async.} =
let uri =
parseUri(client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream"))
): Future[?!seq[byte]] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.downloadRaw($cid, local = local)

let (status, bytes) = await client.session.fetch(uri)
if response.status != 200:
return failure($response.status)

if status != 200:
return failure("fetch failed with status " & $status)
success await response.getBodyBytes()

success bytes
proc download*(
client: CodexClient, cid: Cid, local = false
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
without response =? await client.downloadBytes(cid, local = local), err:
return failure(err)
return success bytesToString(response)

proc delete*(client: CodexClient, cid: Cid): ?!void =
let
url = client.baseurl & "/data/" & $cid
response = client.http.delete(url)
proc downloadNoStream*(
client: CodexClient, cid: Cid
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.post(client.baseurl & "/data/" & $cid & "/network")

if response.status != "204 No Content":
return failure(response.status)
if response.status != 200:
return failure($response.status)

success await response.body

proc downloadManifestOnly*(
client: CodexClient, cid: Cid
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
let response =
await client.get(client.baseurl & "/data/" & $cid & "/network/manifest")

if response.status != 200:
return failure($response.status)

success await response.body

proc deleteRaw*(
client: CodexClient, cid: string
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return client.delete(client.baseurl & "/data/" & cid)

proc delete*(
client: CodexClient, cid: Cid
): Future[?!void] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.deleteRaw($cid)

if response.status != 204:
return failure($response.status)

success()

proc list*(client: CodexClient): ?!RestContentList =
let url = client.baseurl & "/data"
let response = client.http.get(url)
proc listRaw*(
client: CodexClient
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return client.get(client.baseurl & "/data")

if response.status != "200 OK":
return failure(response.status)
proc list*(
client: CodexClient
): Future[?!RestContentList] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.listRaw()

RestContentList.fromJson(response.body)
if response.status != 200:
return failure($response.status)

proc space*(client: CodexClient): ?!RestRepoStore =
RestContentList.fromJson(await response.body)

proc space*(
client: CodexClient
): Future[?!RestRepoStore] {.async: (raises: [CancelledError, HttpError]).} =
let url = client.baseurl & "/space"
let response = client.http.get(url)
let response = await client.get(url)

if response.status != "200 OK":
return failure(response.status)
if response.status != 200:
return failure($response.status)

RestRepoStore.fromJson(response.body)
RestRepoStore.fromJson(await response.body)

proc requestStorageRaw*(
client: CodexClient,
@@ -124,7 +225,9 @@ proc requestStorageRaw*(
expiry: uint64 = 0,
nodes: uint = 3,
tolerance: uint = 1,
): Response =
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
## Call request storage REST endpoint
##
let url = client.baseurl & "/storage/request/" & $cid
@@ -141,7 +244,7 @@ proc requestStorageRaw*(
if expiry != 0:
json["expiry"] = %($expiry)

return client.http.post(url, $json)
return client.post(url, $json)

proc requestStorage*(
client: CodexClient,
@@ -153,43 +256,45 @@ proc requestStorage*(
collateralPerByte: UInt256,
nodes: uint = 3,
tolerance: uint = 1,
): ?!PurchaseId =
): Future[?!PurchaseId] {.async: (raises: [CancelledError, HttpError]).} =
## Call request storage REST endpoint
##
let response = client.requestStorageRaw(
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
nodes, tolerance,
)
if response.status != "200 OK":
doAssert(false, response.body)
PurchaseId.fromHex(response.body).catch
let
response = await client.requestStorageRaw(
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
nodes, tolerance,
)
body = await response.body

proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase =
if response.status != 200:
doAssert(false, body)
PurchaseId.fromHex(body).catch

proc getPurchase*(
client: CodexClient, purchaseId: PurchaseId
): Future[?!RestPurchase] {.async: (raises: [CancelledError, HttpError]).} =
let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex
try:
let body = client.http.getContent(url)
let body = await client.getContent(url)
return RestPurchase.fromJson(body)
except CatchableError as e:
return failure e.msg

proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent =
proc getSalesAgent*(
client: CodexClient, slotId: SlotId
): Future[?!RestSalesAgent] {.async: (raises: [CancelledError, HttpError]).} =
let url = client.baseurl & "/sales/slots/" & slotId.toHex
try:
let body = client.http.getContent(url)
let body = await client.getContent(url)
return RestSalesAgent.fromJson(body)
except CatchableError as e:
return failure e.msg

proc getSlots*(client: CodexClient): ?!seq[Slot] =
let url = client.baseurl & "/sales/slots"
let body = client.http.getContent(url)
seq[Slot].fromJson(body)

proc postAvailability*(
client: CodexClient,
totalSize, duration: uint64,
minPricePerBytePerSecond, totalCollateral: UInt256,
): ?!Availability =
): Future[?!Availability] {.async: (raises: [CancelledError, HttpError]).} =
## Post sales availability endpoint
##
let url = client.baseurl & "/sales/availability"
@@ -200,17 +305,21 @@ proc postAvailability*(
"minPricePerBytePerSecond": minPricePerBytePerSecond,
"totalCollateral": totalCollateral,
}
let response = client.http.post(url, $json)
doAssert response.status == "201 Created",
"expected 201 Created, got " & response.status & ", body: " & response.body
Availability.fromJson(response.body)
let response = await client.post(url, $json)
let body = await response.body

doAssert response.status == 201,
"expected 201 Created, got " & $response.status & ", body: " & body
Availability.fromJson(body)

proc patchAvailabilityRaw*(
client: CodexClient,
availabilityId: AvailabilityId,
totalSize, freeSize, duration: ?uint64 = uint64.none,
minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none,
): Response =
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
## Updates availability
##
let url = client.baseurl & "/sales/availability/" & $availabilityId
@@ -233,68 +342,50 @@ proc patchAvailabilityRaw*(
if totalCollateral =? totalCollateral:
json["totalCollateral"] = %totalCollateral

client.http.patch(url, $json)
client.patch(url, $json)

proc patchAvailability*(
client: CodexClient,
availabilityId: AvailabilityId,
totalSize, duration: ?uint64 = uint64.none,
minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none,
): void =
let response = client.patchAvailabilityRaw(
): Future[void] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.patchAvailabilityRaw(
availabilityId,
totalSize = totalSize,
duration = duration,
minPricePerBytePerSecond = minPricePerBytePerSecond,
totalCollateral = totalCollateral,
)
doAssert response.status == "200 OK", "expected 200 OK, got " & response.status
doAssert response.status == 200, "expected 200 OK, got " & $response.status

proc getAvailabilities*(client: CodexClient): ?!seq[Availability] =
proc getAvailabilities*(
client: CodexClient
): Future[?!seq[Availability]] {.async: (raises: [CancelledError, HttpError]).} =
## Call sales availability REST endpoint
let url = client.baseurl & "/sales/availability"
let body = client.http.getContent(url)
let body = await client.getContent(url)
seq[Availability].fromJson(body)

proc getAvailabilityReservations*(
client: CodexClient, availabilityId: AvailabilityId
): ?!seq[Reservation] =
): Future[?!seq[Reservation]] {.async: (raises: [CancelledError, HttpError]).} =
## Retrieves Availability's Reservations
let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations"
let body = client.http.getContent(url)
let body = await client.getContent(url)
seq[Reservation].fromJson(body)

proc close*(client: CodexClient) =
client.http.close()
proc purchaseStateIs*(
client: CodexClient, id: PurchaseId, state: string
): Future[bool] {.async: (raises: [CancelledError, HttpError]).} =
(await client.getPurchase(id)).option .? state == some state

proc restart*(client: CodexClient) =
client.http.close()
client.http = newHttpClient(timeout = HttpClientTimeoutMs)
proc saleStateIs*(
client: CodexClient, id: SlotId, state: string
): Future[bool] {.async: (raises: [CancelledError, HttpError]).} =
(await client.getSalesAgent(id)).option .? state == some state

proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool =
client.getPurchase(id).option .? state == some state

proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool =
client.getSalesAgent(id).option .? state == some state

proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId =
return client.getPurchase(id).option .? requestId

proc uploadRaw*(
client: CodexClient, contents: string, headers = newHttpHeaders()
): Response =
return client.http.request(
client.baseurl & "/data", body = contents, httpMethod = HttpPost, headers = headers
)

proc listRaw*(client: CodexClient): Response =
return client.http.request(client.baseurl & "/data", httpMethod = HttpGet)

proc downloadRaw*(client: CodexClient, cid: string, local = false): Response =
return client.http.request(
client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"),
httpMethod = HttpGet,
)

proc deleteRaw*(client: CodexClient, cid: string): Response =
return client.http.request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete)
proc requestId*(
client: CodexClient, id: PurchaseId
): Future[?RequestId] {.async: (raises: [CancelledError, HttpError]).} =
return (await client.getPurchase(id)).option .? requestId

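With the switch from std/httpclient to the chronos HTTP client, every CodexClient call now returns a Future and the underlying session has to be closed explicitly. A usage sketch, assuming the client is driven from an async test and that the base URL shown is purely illustrative:

proc printInfo() {.async.} =
  let client = CodexClient.new("http://localhost:8080/api/codex/v1")
  let info = await client.info()   # Future[?!JsonNode] instead of a blocking call
  if info.isOk:
    echo info.get()                # parsed /debug/info payload
  await client.close()             # closes the chronos HTTP session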
@@ -68,7 +68,7 @@ method stop*(node: CodexProcess) {.async.} =

trace "stopping codex client"
if client =? node.client:
client.close()
await client.close()
node.client = none CodexClient

method removeDataDir*(node: CodexProcess) =

@@ -60,13 +60,13 @@ template marketplacesuite*(name: string, body: untyped) =
duration: uint64,
collateralPerByte: UInt256,
minPricePerBytePerSecond: UInt256,
) =
): Future[void] {.async: (raises: [CancelledError, HttpError, ConfigurationError]).} =
let totalCollateral = datasetSize.u256 * collateralPerByte
# post availability to each provider
for i in 0 ..< providers().len:
let provider = providers()[i].client

discard provider.postAvailability(
discard await provider.postAvailability(
totalSize = datasetSize,
duration = duration.uint64,
minPricePerBytePerSecond = minPricePerBytePerSecond,
@@ -83,16 +83,18 @@ template marketplacesuite*(name: string, body: untyped) =
expiry: uint64 = 4.periods,
nodes = providers().len,
tolerance = 0,
): Future[PurchaseId] {.async.} =
let id = client.requestStorage(
cid,
expiry = expiry,
duration = duration,
proofProbability = proofProbability,
collateralPerByte = collateralPerByte,
pricePerBytePerSecond = pricePerBytePerSecond,
nodes = nodes.uint,
tolerance = tolerance.uint,
): Future[PurchaseId] {.async: (raises: [CancelledError, HttpError]).} =
let id = (
await client.requestStorage(
cid,
expiry = expiry,
duration = duration,
proofProbability = proofProbability,
collateralPerByte = collateralPerByte,
pricePerBytePerSecond = pricePerBytePerSecond,
nodes = nodes.uint,
tolerance = tolerance.uint,
)
).get

return id
