Merge branch 'feat/libp2p-upgrade' into feat/store-benchmark

munna0908 2025-03-20 17:41:54 +05:30
commit 94ce22ba06
No known key found for this signature in database
GPG Key ID: 2FFCD637E937D3E6
112 changed files with 1965 additions and 1378 deletions


@ -89,7 +89,7 @@ runs:
- name: Install gcc 14 on Linux
# We don't want to install gcc 14 for coverage (Ubuntu 20.04)
if: ${{ inputs.os == 'linux' && !inputs.coverage }}
if: ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
shell: ${{ inputs.shell }} {0}
run: |
# Add GCC-14 to alternatives
@ -202,7 +202,7 @@ runs:
- name: Restore Nim toolchain binaries from cache
id: nim-cache
uses: actions/cache@v4
if: ${{ !inputs.coverage }}
if: ${{ inputs.coverage != 'true' }}
with:
path: NimBinaries
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}


@ -20,10 +20,10 @@ jobs:
uses: fabiocaccamo/create-matrix-action@v5
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
build:
needs: matrix

.gitmodules

@ -221,3 +221,13 @@
[submodule "vendor/nph"]
path = vendor/nph
url = https://github.com/arnetheduck/nph.git
[submodule "vendor/nim-quic"]
path = vendor/nim-quic
url = https://github.com/vacp2p/nim-quic.git
ignore = untracked
branch = master
[submodule "vendor/nim-ngtcp2"]
path = vendor/nim-ngtcp2
url = https://github.com/vacp2p/nim-ngtcp2.git
ignore = untracked
branch = master


@ -59,8 +59,8 @@ Feel free to dive in, contributions are welcomed! Open an issue or submit PRs.
### Linting and formatting
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is requrired to adhere to its styling.
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
If you are setting up fresh setup, in order to get `nph` run `make build-nph`.
In order to format files run `make nph/<file/folder you want to format>`.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior commiting them.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them.
If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension you can enable "Format On Save" (eq. the `nim.formatOnSave` property) that will format the files using `nph`.


@ -41,80 +41,86 @@ type Advertiser* = ref object of RootObj
advertiserRunning*: bool # Indicates if discovery is running
concurrentAdvReqs: int # Concurrent advertise requests
advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle
advertiseLocalStoreLoop*: Future[void].Raising([]) # Advertise loop task handle
advertiseQueue*: AsyncQueue[Cid] # Advertise queue
trackedFutures*: TrackedFutures # Advertise tasks futures
advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} =
proc addCidToQueue(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
if cid notin b.advertiseQueue:
await b.advertiseQueue.put(cid)
trace "Advertising", cid
proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} =
proc advertiseBlock(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
without isM =? cid.isManifest, err:
warn "Unable to determine if cid is manifest"
return
if isM:
without blk =? await b.localStore.getBlock(cid), err:
error "Error retrieving manifest block", cid, err = err.msg
return
try:
if isM:
without blk =? await b.localStore.getBlock(cid), err:
error "Error retrieving manifest block", cid, err = err.msg
return
without manifest =? Manifest.decode(blk), err:
error "Unable to decode as manifest", err = err.msg
return
without manifest =? Manifest.decode(blk), err:
error "Unable to decode as manifest", err = err.msg
return
# announce manifest cid and tree cid
await b.addCidToQueue(cid)
await b.addCidToQueue(manifest.treeCid)
# announce manifest cid and tree cid
await b.addCidToQueue(cid)
await b.addCidToQueue(manifest.treeCid)
except CancelledError as exc:
trace "Cancelled advertise block", cid
raise exc
except CatchableError as e:
error "failed to advertise block", cid, error = e.msgDetail
proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning:
try:
if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
trace "Advertiser begins iterating blocks..."
for c in cids:
if cid =? await c:
await b.advertiseBlock(cid)
trace "Advertiser iterating blocks finished."
try:
while b.advertiserRunning:
try:
if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
trace "Advertiser begins iterating blocks..."
for c in cids:
if cid =? await c:
await b.advertiseBlock(cid)
trace "Advertiser iterating blocks finished."
except CatchableError as e:
error "Error in advertise local store loop", error = e.msgDetail
raiseAssert("Unexpected exception in advertiseLocalStoreLoop")
await sleepAsync(b.advertiseLocalStoreLoopSleep)
except CancelledError:
break # do not propagate as advertiseLocalStoreLoop was asyncSpawned
except CatchableError as e:
error "failed to advertise blocks in local store", error = e.msgDetail
except CancelledError:
warn "Cancelled advertise local store loop"
info "Exiting advertise task loop"
proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning:
try:
try:
while b.advertiserRunning:
let cid = await b.advertiseQueue.get()
if cid in b.inFlightAdvReqs:
continue
try:
let request = b.discovery.provide(cid)
let request = b.discovery.provide(cid)
b.inFlightAdvReqs[cid] = request
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
b.inFlightAdvReqs[cid] = request
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
await request
finally:
defer:
b.inFlightAdvReqs.del(cid)
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
except CancelledError:
trace "Advertise task cancelled"
return
except CatchableError as exc:
warn "Exception in advertise task runner", exc = exc.msg
await request
except CancelledError:
warn "Cancelled advertise task runner"
info "Exiting advertise task runner"
proc start*(b: Advertiser) {.async.} =
proc start*(b: Advertiser) {.async: (raises: []).} =
## Start the advertiser
##
@ -134,13 +140,11 @@ proc start*(b: Advertiser) {.async.} =
for i in 0 ..< b.concurrentAdvReqs:
let fut = b.processQueueLoop()
b.trackedFutures.track(fut)
asyncSpawn fut
b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b)
b.trackedFutures.track(b.advertiseLocalStoreLoop)
asyncSpawn b.advertiseLocalStoreLoop
proc stop*(b: Advertiser) {.async.} =
proc stop*(b: Advertiser) {.async: (raises: []).} =
## Stop the advertiser
##

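The pattern these advertiser hunks establish recurs throughout the merge: loop procs are annotated {.async: (raises: []).}, handle CancelledError without re-raising, and are kept in TrackedFutures instead of being guarded by asyncSpawn. A minimal sketch of that shape, assuming chronos v4 strict exception tracking (Worker and its fields are illustrative, not codex types):

import pkg/chronos

type Worker = ref object
  running: bool
  queue: AsyncQueue[int]

proc runLoop(w: Worker) {.async: (raises: []).} =
  # raises: [] means no exception may escape; everything is handled here
  try:
    while w.running:
      let item = await w.queue.get()
      echo "processing ", item
  except CancelledError:
    discard # swallowed: the owner cancels the loop, nothing to propagate
  except CatchableError as e:
    echo "loop failed: ", e.msg

proc start(w: Worker) =
  w.running = true
  let fut = w.runLoop() # Future[void].Raising([])
  # codex registers fut with trackedFutures.track so stop() can cancel it;
  # with raises: [] there is nothing left for asyncSpawn to guard against
  discard fut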

@ -48,7 +48,7 @@ type DiscoveryEngine* = ref object of RootObj
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
discEngineRunning*: bool # Indicates if discovery is running
concurrentDiscReqs: int # Concurrent discovery requests
discoveryLoop*: Future[void] # Discovery loop task handle
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block
@ -57,30 +57,21 @@ type DiscoveryEngine* = ref object of RootObj
# Inflight discovery requests
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
try:
try:
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
await b.discoveryQueue.put(cid)
except CancelledError:
trace "Discovery loop cancelled"
return
except CatchableError as exc:
warn "Exception in discovery loop", exc = exc.msg
try:
logScope:
sleep = b.discoveryLoopSleep
wanted = b.pendingBlocks.len
await sleepAsync(b.discoveryLoopSleep)
except CancelledError:
discard # do not propagate as discoveryQueueLoop was asyncSpawned
except CancelledError:
trace "Discovery loop cancelled"
proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
## Run discovery tasks
##
while b.discEngineRunning:
try:
try:
while b.discEngineRunning:
let cid = await b.discoveryQueue.get()
if cid in b.inFlightDiscReqs:
@ -90,35 +81,28 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
let haves = b.peers.peersHave(cid)
if haves.len < b.minPeersPerBlock:
try:
let request = b.discovery.find(cid).wait(DefaultDiscoveryTimeout)
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
b.inFlightDiscReqs[cid] = request
defer:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
let peers = await request
if (await request.withTimeout(DefaultDiscoveryTimeout)) and
peers =? (await request).catch:
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
for i, f in dialed:
if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId)
finally:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
except CancelledError:
trace "Discovery task cancelled"
return
except CatchableError as exc:
warn "Exception in discovery task runner", exc = exc.msg
except Exception as e:
# Raised by b.discovery.removeProvider somehow...
# This should not be catchable, and we should never get here. Therefore,
# raise a Defect.
raiseAssert "Exception when removing provider"
except CancelledError:
trace "Discovery task cancelled"
return
info "Exiting discovery task runner"
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) =
for cid in cids:
if cid notin b.discoveryQueue:
try:
@ -126,11 +110,11 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
except CatchableError as exc:
warn "Exception queueing discovery request", exc = exc.msg
proc start*(b: DiscoveryEngine) {.async.} =
proc start*(b: DiscoveryEngine) {.async: (raises: []).} =
## Start the discengine task
##
trace "Discovery engine start"
trace "Discovery engine starting"
if b.discEngineRunning:
warn "Starting discovery engine twice"
@ -140,12 +124,13 @@ proc start*(b: DiscoveryEngine) {.async.} =
for i in 0 ..< b.concurrentDiscReqs:
let fut = b.discoveryTaskLoop()
b.trackedFutures.track(fut)
asyncSpawn fut
b.discoveryLoop = b.discoveryQueueLoop()
b.trackedFutures.track(b.discoveryLoop)
proc stop*(b: DiscoveryEngine) {.async.} =
trace "Discovery engine started"
proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
## Stop the discovery engine
##

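The discovery request above swaps wait() (which raises on timeout) for withTimeout, which completes with false instead, combined with catch from questionable to turn a failed future into a Result. A small sketch of the idiom; findProviders is a stand-in for b.discovery.find:

import pkg/chronos
import pkg/questionable
import pkg/questionable/results

proc findProviders(): Future[seq[string]] {.async.} =
  await sleepAsync(10.millis)
  return @["peerA", "peerB"]

proc lookup() {.async.} =
  let request = findProviders()
  # withTimeout completes with false on timeout instead of raising, and
  # catch converts a failed future's value into a Result for =?
  if (await request.withTimeout(1.seconds)) and peers =? (await request).catch:
    echo "dialing ", peers.len, " providers"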

@ -93,12 +93,15 @@ type
price*: UInt256
# attach task scheduler to engine
proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} =
self.taskQueue.pushOrUpdateNoWait(task).isOk()
proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, raises: [].} =
if self.taskQueue.pushOrUpdateNoWait(task).isOk():
trace "Task scheduled for peer", peer = task.id
else:
warn "Unable to schedule task for peer", peer = task.id
proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).}
proc start*(self: BlockExcEngine) {.async.} =
proc start*(self: BlockExcEngine) {.async: (raises: []).} =
## Start the blockexc task
##
@ -115,7 +118,7 @@ proc start*(self: BlockExcEngine) {.async.} =
let fut = self.blockexcTaskRunner()
self.trackedFutures.track(fut)
proc stop*(self: BlockExcEngine) {.async.} =
proc stop*(self: BlockExcEngine) {.async: (raises: []).} =
## Stop the blockexc blockexc
##
@ -135,7 +138,7 @@ proc stop*(self: BlockExcEngine) {.async.} =
proc sendWantHave(
self: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx]
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
for p in peers:
let toAsk = addresses.filterIt(it notin p.peerHave)
trace "Sending wantHave request", toAsk, peer = p.id
@ -144,7 +147,7 @@ proc sendWantHave(
proc sendWantBlock(
self: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
trace "Sending wantBlock request to", addresses, peer = blockPeer.id
await self.network.request.sendWantList(
blockPeer.id, addresses, wantType = WantType.WantBlock
@ -229,7 +232,7 @@ proc requestBlock*(
proc blockPresenceHandler*(
self: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
) {.async.} =
) {.async: (raises: []).} =
trace "Received block presence from peer", peer, blocks = blocks.mapIt($it)
let
peerCtx = self.peers.get(peer)
@ -249,20 +252,23 @@ proc blockPresenceHandler*(
if dontWantCids.len > 0:
peerCtx.cleanPresence(dontWantCids)
let ourWantCids = ourWantList.filter do(address: BlockAddress) -> bool:
if address in peerHave and not self.pendingBlocks.retriesExhausted(address) and
not self.pendingBlocks.isInFlight(address):
self.pendingBlocks.setInFlight(address, true)
self.pendingBlocks.decRetries(address)
true
else:
false
let ourWantCids = ourWantList.filterIt(
it in peerHave and not self.pendingBlocks.retriesExhausted(it) and
not self.pendingBlocks.isInFlight(it)
)
for address in ourWantCids:
self.pendingBlocks.setInFlight(address, true)
self.pendingBlocks.decRetries(address)
if ourWantCids.len > 0:
trace "Peer has blocks in our wantList", peer, wants = ourWantCids
await self.sendWantBlock(ourWantCids, peerCtx)
if err =? catch(await self.sendWantBlock(ourWantCids, peerCtx)).errorOption:
warn "Failed to send wantBlock to peer", peer, err = err.msg
proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
proc scheduleTasks(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
let cids = blocksDelivery.mapIt(it.blk.cid)
# schedule any new peers to provide blocks to
@ -271,15 +277,21 @@ proc scheduleTasks(self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.a
# schedule a peer if it wants at least one cid
# and we have it in our local store
if c in p.peerWantsCids:
if await (c in self.localStore):
if self.scheduleTask(p):
trace "Task scheduled for peer", peer = p.id
else:
warn "Unable to schedule task for peer", peer = p.id
try:
if await (c in self.localStore):
# TODO: the try/except should go away once blockstore tracks exceptions
self.scheduleTask(p)
break
except CancelledError as exc:
warn "Checking local store canceled", cid = c, err = exc.msg
return
except CatchableError as exc:
error "Error checking local store for cid", cid = c, err = exc.msg
raiseAssert "Unexpected error checking local store for cid"
break # do next peer
proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
proc cancelBlocks(
self: BlockExcEngine, addrs: seq[BlockAddress]
) {.async: (raises: [CancelledError]).} =
## Tells neighboring peers that we're no longer interested in a block.
##
@ -289,35 +301,43 @@ proc cancelBlocks(self: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
trace "Sending block request cancellations to peers",
addrs, peers = self.peers.peerIds
proc mapPeers(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} =
let blocks = addrs.filter do(a: BlockAddress) -> bool:
a in peerCtx.blocks
proc processPeer(peerCtx: BlockExcPeerCtx): Future[BlockExcPeerCtx] {.async.} =
await self.network.request.sendWantCancellations(
peer = peerCtx.id, addresses = addrs.filterIt(it in peerCtx)
)
if blocks.len > 0:
trace "Sending block request cancellations to peer", peer = peerCtx.id, blocks
await self.network.request.sendWantCancellations(
peer = peerCtx.id, addresses = blocks
return peerCtx
try:
let (succeededFuts, failedFuts) = await allFinishedFailed(
toSeq(self.peers.peers.values).filterIt(it.peerHave.anyIt(it in addrs)).map(
processPeer
)
)
(await allFinished(succeededFuts)).mapIt(it.read).apply do(peerCtx: BlockExcPeerCtx):
peerCtx.cleanPresence(addrs)
peerCtx
let failed = (await allFinished(map(toSeq(self.peers.peers.values), mapPeers))).filterIt(
it.failed
)
if failed.len > 0:
warn "Failed to send block request cancellations to peers", peers = failed.len
else:
trace "Block request cancellations sent to peers", peers = self.peers.len
if failedFuts.len > 0:
warn "Failed to send block request cancellations to peers", peers = failedFuts.len
else:
trace "Block request cancellations sent to peers", peers = self.peers.len
except CancelledError as exc:
warn "Error sending block request cancellations", error = exc.msg
raise exc
except CatchableError as exc:
warn "Error sending block request cancellations", error = exc.msg
proc resolveBlocks*(
self: BlockExcEngine, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: [CancelledError]).} =
self.pendingBlocks.resolve(blocksDelivery)
await self.scheduleTasks(blocksDelivery)
await self.cancelBlocks(blocksDelivery.mapIt(it.address))
proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} =
proc resolveBlocks*(
self: BlockExcEngine, blocks: seq[Block]
) {.async: (raises: [CancelledError]).} =
await self.resolveBlocks(
blocks.mapIt(
BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid))
@ -326,7 +346,7 @@ proc resolveBlocks*(self: BlockExcEngine, blocks: seq[Block]) {.async.} =
proc payForBlocks(
self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: [CancelledError]).} =
let
sendPayment = self.network.request.sendPayment
price = peer.price(blocksDelivery.mapIt(it.address))
@ -367,7 +387,7 @@ proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =
proc blocksDeliveryHandler*(
self: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: []).} =
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))
var validatedBlocksDelivery: seq[BlockDelivery]
@ -376,41 +396,47 @@ proc blocksDeliveryHandler*(
peer = peer
address = bd.address
if err =? self.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg
continue
if err =? (await self.localStore.putBlock(bd.blk)).errorOption:
error "Unable to store block", err = err.msg
continue
if bd.address.leaf:
without proof =? bd.proof:
error "Proof expected for a leaf block delivery"
try:
if err =? self.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg
continue
if err =? (
await self.localStore.putCidAndProof(
bd.address.treeCid, bd.address.index, bd.blk.cid, proof
)
).errorOption:
error "Unable to store proof and cid for a block"
if err =? (await self.localStore.putBlock(bd.blk)).errorOption:
error "Unable to store block", err = err.msg
continue
if bd.address.leaf:
without proof =? bd.proof:
warn "Proof expected for a leaf block delivery"
continue
if err =? (
await self.localStore.putCidAndProof(
bd.address.treeCid, bd.address.index, bd.blk.cid, proof
)
).errorOption:
warn "Unable to store proof and cid for a block"
continue
except CatchableError as exc:
warn "Error handling block delivery", error = exc.msg
continue
validatedBlocksDelivery.add(bd)
await self.resolveBlocks(validatedBlocksDelivery)
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
let peerCtx = self.peers.get(peer)
if peerCtx != nil:
await self.payForBlocks(peerCtx, blocksDelivery)
## shouldn't we remove them from the want-list instead of this:
peerCtx.cleanPresence(blocksDelivery.mapIt(it.address))
if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
warn "Error paying for blocks", err = err.msg
return
if err =? catch(await self.resolveBlocks(validatedBlocksDelivery)).errorOption:
warn "Error resolving blocks", err = err.msg
return
proc wantListHandler*(
self: BlockExcEngine, peer: PeerId, wantList: WantList
) {.async.} =
) {.async: (raises: []).} =
trace "Received want list from peer", peer, wantList = wantList.entries.len
let peerCtx = self.peers.get(peer)
@ -422,68 +448,81 @@ proc wantListHandler*(
presence: seq[BlockPresence]
schedulePeer = false
for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)
try:
for e in wantList.entries:
let idx = peerCtx.peerWants.findIt(it.address == e.address)
logScope:
peer = peerCtx.id
address = e.address
wantType = $e.wantType
logScope:
peer = peerCtx.id
address = e.address
wantType = $e.wantType
if idx < 0: # Adding new entry to peer wants
let
have = await e.address in self.localStore
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if idx < 0: # Adding new entry to peer wants
let
have =
try:
await e.address in self.localStore
except CatchableError as exc:
# TODO: should not be necessary once we have proper exception tracking on the BlockStore interface
false
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if e.cancel:
trace "Received cancelation for untracked block, skipping", address = e.address
continue
if e.cancel:
trace "Received cancelation for untracked block, skipping",
address = e.address
continue
trace "Processing want list entry", wantList = $e
case e.wantType
of WantType.WantHave:
if have:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
if e.sendDontHave:
trace "Processing want list entry", wantList = $e
case e.wantType
of WantType.WantHave:
if have:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.DontHave, price: price
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
if e.sendDontHave:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.DontHave, price: price
)
)
codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock:
peerCtx.peerWants.add(e)
schedulePeer = true
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
trace "Canceled block request", address = e.address, len = peerCtx.peerWants.len
else:
if e.wantType == WantType.WantBlock:
codex_block_exchange_want_have_lists_received.inc()
of WantType.WantBlock:
peerCtx.peerWants.add(e)
schedulePeer = true
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
trace "Updated block request", address = e.address, len = peerCtx.peerWants.len
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
trace "Canceled block request",
address = e.address, len = peerCtx.peerWants.len
else:
if e.wantType == WantType.WantBlock:
schedulePeer = true
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
trace "Updated block request",
address = e.address, len = peerCtx.peerWants.len
if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
await self.network.request.sendPresence(peer, presence)
if presence.len > 0:
trace "Sending presence to remote", items = presence.mapIt($it).join(",")
await self.network.request.sendPresence(peer, presence)
if schedulePeer and not self.scheduleTask(peerCtx):
warn "Unable to schedule task for peer", peer
if schedulePeer:
self.scheduleTask(peerCtx)
except CancelledError as exc: #TODO: replace with CancelledError
warn "Error processing want list", error = exc.msg
proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.async.} =
proc accountHandler*(
self: BlockExcEngine, peer: PeerId, account: Account
) {.async: (raises: []).} =
let context = self.peers.get(peer)
if context.isNil:
return
@ -492,7 +531,7 @@ proc accountHandler*(self: BlockExcEngine, peer: PeerId, account: Account) {.asy
proc paymentHandler*(
self: BlockExcEngine, peer: PeerId, payment: SignedState
) {.async.} =
) {.async: (raises: []).} =
trace "Handling payments", peer
without context =? self.peers.get(peer).option and account =? context.account:
@ -505,7 +544,9 @@ proc paymentHandler*(
else:
context.paymentChannel = self.wallet.acceptChannel(payment).option
proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} =
proc setupPeer*(
self: BlockExcEngine, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Perform initial setup, such as want
## list exchange
##
@ -524,9 +565,10 @@ proc setupPeer*(self: BlockExcEngine, peer: PeerId) {.async.} =
await self.network.request.sendWantList(peer, cids, full = true)
if address =? self.pricing .? address:
trace "Sending account to peer", peer
await self.network.request.sendAccount(peer, Account(address: address))
proc dropPeer*(self: BlockExcEngine, peer: PeerId) =
proc dropPeer*(self: BlockExcEngine, peer: PeerId) {.raises: [].} =
## Cleanup disconnected peer
##
@ -535,7 +577,9 @@ proc dropPeer*(self: BlockExcEngine, peer: PeerId) =
# drop the peer from the peers table
self.peers.remove(peer)
proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
proc taskHandler*(
self: BlockExcEngine, task: BlockExcPeerCtx
) {.gcsafe, async: (raises: [CancelledError, RetriesExhaustedError]).} =
# Send to the peer blocks he wants to get,
# if they present in our local store
@ -572,8 +616,11 @@ proc taskHandler*(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.}
let
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
blocksDelivery =
blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get)
blocksDelivery = blocksDeliveryFut.filterIt(it.completed and it.value.isOk).mapIt:
if bd =? it.value:
bd
else:
raiseAssert "Unexpected error in local lookup"
# All the wants that failed local lookup must be set to not-in-flight again.
let
@ -595,15 +642,12 @@ proc blockexcTaskRunner(self: BlockExcEngine) {.async: (raises: []).} =
##
trace "Starting blockexc task runner"
while self.blockexcRunning:
try:
try:
while self.blockexcRunning:
let peerCtx = await self.taskQueue.pop()
await self.taskHandler(peerCtx)
except CancelledError:
break # do not propagate as blockexcTaskRunner was asyncSpawned
except CatchableError as e:
error "error running block exchange task", error = e.msgDetail
except CatchableError as exc:
error "error running block exchange task", error = exc.msg
info "Exiting blockexc task runner"
@ -634,7 +678,9 @@ proc new*(
advertiser: advertiser,
)
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
await self.setupPeer(peerId)
else:
@ -644,23 +690,29 @@ proc new*(
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} =
proc blockWantListHandler(
peer: PeerId, wantList: WantList
): Future[void] {.async: (raises: []).} =
self.wantListHandler(peer, wantList)
proc blockPresenceHandler(
peer: PeerId, presence: seq[BlockPresence]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raises: []).} =
self.blockPresenceHandler(peer, presence)
proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raises: []).} =
self.blocksDeliveryHandler(peer, blocksDelivery)
proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} =
proc accountHandler(
peer: PeerId, account: Account
): Future[void] {.async: (raises: []).} =
self.accountHandler(peer, account)
proc paymentHandler(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
proc paymentHandler(
peer: PeerId, payment: SignedState
): Future[void] {.async: (raises: []).} =
self.paymentHandler(peer, payment)
network.handlers = BlockExcHandlers(

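cancelBlocks above fans out one processPeer future per interested peer and partitions the outcomes with allFinishedFailed, a codex helper. A rough stand-in showing the assumed semantics (not the actual implementation):

import pkg/chronos

# hypothetical stand-in for codex's allFinishedFailed: await every future,
# then partition them into succeeded and failed ones
proc allFinishedFailed[T](
    futs: seq[Future[T]]
): Future[tuple[success, failure: seq[Future[T]]]] {.async.} =
  discard await allFinished(futs)
  for f in futs:
    if f.failed:
      result.failure.add f
    else:
      result.success.add f

Presence is then cleaned only for the peers whose cancellation actually completed, while failures are merely counted and logged.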

@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/math
import pkg/nitro
import pkg/questionable/results
@ -15,9 +17,6 @@ import ../peers
export nitro
export results
push:
{.upraises: [].}
const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals


@ -35,13 +35,15 @@ const
DefaultMaxInflight* = 100
type
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
WantListHandler* =
proc(peer: PeerId, wantList: WantList) {.gcsafe, async: (raises: []).}
BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.gcsafe, async: (raises: []).}
BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
proc(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.gcsafe, async: (raises: []).}
PaymentHandler* =
proc(peer: PeerId, payment: SignedState) {.gcsafe, async: (raises: []).}
BlockExcHandlers* = object
onWantList*: WantListHandler
@ -58,15 +60,20 @@ type
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] {.gcsafe.}
WantCancellationSender* =
proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
BlocksDeliverySender* =
proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
PresenceSender* =
proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
) {.async: (raises: [CancelledError]).}
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
async: (raises: [CancelledError])
.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
async: (raises: [CancelledError])
.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
async: (raises: [CancelledError])
.}
AccountSender* =
proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
PaymentSender* =
proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}
BlockExcRequest* = object
sendWantList*: WantListSender
@ -98,7 +105,9 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =
return b.peerId == peer
proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
proc send*(
b: BlockExcNetwork, id: PeerId, msg: pb.Message
) {.async: (raises: [CancelledError]).} =
## Send message to peer
##
@ -106,8 +115,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
trace "Unable to send, peer not found", peerId = id
return
let peer = b.peers[id]
try:
let peer = b.peers[id]
await b.inflightSema.acquire()
await peer.send(msg)
except CancelledError as error:
@ -117,7 +127,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
finally:
b.inflightSema.release()
proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} =
proc handleWantList(
b: BlockExcNetwork, peer: NetworkPeer, list: WantList
) {.async: (raises: []).} =
## Handle incoming want list
##
@ -133,7 +145,7 @@ proc sendWantList*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send a want message to peer
##
@ -154,14 +166,14 @@ proc sendWantList*(
proc sendWantCancellations*(
b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
## Informs a remote peer that we're no longer interested in a set of blocks
##
await b.sendWantList(id = id, addresses = addresses, cancel = true)
proc handleBlocksDelivery(
b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: []).} =
## Handle incoming blocks
##
@ -170,7 +182,7 @@ proc handleBlocksDelivery(
proc sendBlocksDelivery*(
b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send blocks to remote
##
@ -178,7 +190,7 @@ proc sendBlocksDelivery*(
proc handleBlockPresence(
b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
) {.async.} =
) {.async: (raises: []).} =
## Handle block presence
##
@ -187,7 +199,7 @@ proc handleBlockPresence(
proc sendBlockPresence*(
b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence]
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send presence to remote
##
@ -195,20 +207,24 @@ proc sendBlockPresence*(
proc handleAccount(
network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async.} =
) {.async: (raises: []).} =
## Handle account info
##
if not network.handlers.onAccount.isNil:
await network.handlers.onAccount(peer.id, account)
proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] =
proc sendAccount*(
b: BlockExcNetwork, id: PeerId, account: Account
) {.async: (raw: true, raises: [CancelledError]).} =
## Send account info to remote
##
b.send(id, Message(account: AccountMessage.init(account)))
proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] =
proc sendPayment*(
b: BlockExcNetwork, id: PeerId, payment: SignedState
) {.async: (raw: true, raises: [CancelledError]).} =
## Send payment to remote
##
@ -216,7 +232,7 @@ proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[
proc handlePayment(
network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async.} =
) {.async: (raises: []).} =
## Handle payment
##
@ -225,7 +241,7 @@ proc handlePayment(
proc rpcHandler(
b: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: [CatchableError]).} =
) {.async: (raises: []).} =
## handle rpc messages
##
if msg.wantList.entries.len > 0:
@ -250,7 +266,9 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
if peer in b.peers:
return b.peers.getOrDefault(peer, nil)
var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} =
var getConn: ConnProvider = proc(): Future[Connection] {.
async: (raises: [CancelledError])
.} =
try:
trace "Getting new connection stream", peer
return await b.switch.dial(peer, Codec)
@ -262,9 +280,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
if not isNil(b.getConn):
getConn = b.getConn
let rpcHandler = proc(
p: NetworkPeer, msg: Message
) {.async: (raises: [CatchableError]).} =
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await b.rpcHandler(p, msg)
# create new pubsub peer
@ -307,7 +323,9 @@ method init*(self: BlockExcNetwork) =
## Perform protocol initialization
##
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
self.setupPeer(peerId)
else:
@ -316,7 +334,9 @@ method init*(self: BlockExcNetwork) =
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc handler(conn: Connection, proto: string) {.async.} =
proc handler(
conn: Connection, proto: string
): Future[void] {.async: (raises: [CancelledError]).} =
let peerId = conn.peerId
let blockexcPeer = self.getOrCreatePeer(peerId)
await blockexcPeer.readLoop(conn) # attach read loop
@ -353,26 +373,32 @@ proc new*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)
proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantCancellations(id, addresses)
proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlocksDelivery(id, blocksDelivery)
proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
proc sendPresence(
id: PeerId, presence: seq[BlockPresence]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlockPresence(id, presence)
proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
proc sendAccount(
id: PeerId, account: Account
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendAccount(id, account)
proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
proc sendPayment(
id: PeerId, payment: SignedState
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendPayment(id, payment)
self.request = BlockExcRequest(

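The forwarding wrappers above use {.async: (raw: true, raises: [CancelledError]).}: raw: true tells chronos not to build a new async frame, so the body must itself evaluate to the future the caller awaits. A minimal sketch (sendMsg and forwardMsg are illustrative names):

import pkg/chronos

proc sendMsg(id: string): Future[void] {.async: (raises: [CancelledError]).} =
  await sleepAsync(1.millis)

# raw: true - the body is returned as the future itself, avoiding an
# extra async frame for a pure pass-through
proc forwardMsg(id: string): Future[void] {.
    async: (raw: true, raises: [CancelledError])
.} =
  sendMsg(id)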

@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [].}
import pkg/chronos
import pkg/libp2p
@ -18,6 +16,7 @@ import ../protobuf/blockexc
import ../protobuf/message
import ../../errors
import ../../logutils
import ../../utils/trackedfutures
logScope:
topics = "codex blockexcnetworkpeer"
@ -25,11 +24,10 @@ logScope:
const DefaultYieldInterval = 50.millis
type
ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.}
ConnProvider* =
proc(): Future[Connection] {.gcsafe, async: (raises: [CancelledError]).}
RPCHandler* = proc(
peer: NetworkPeer, msg: Message
): Future[void].Raising(CatchableError) {.gcsafe.}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.gcsafe, async: (raises: []).}
NetworkPeer* = ref object of RootObj
id*: PeerId
@ -37,55 +35,60 @@ type
sendConn: Connection
getConn: ConnProvider
yieldInterval*: Duration = DefaultYieldInterval
trackedFutures: TrackedFutures
proc connected*(b: NetworkPeer): bool =
not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof)
proc connected*(self: NetworkPeer): bool =
not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)
proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
if isNil(conn):
trace "No connection to read from", peer = b.id
trace "No connection to read from", peer = self.id
return
trace "Attaching read loop", peer = b.id, connId = conn.oid
trace "Attaching read loop", peer = self.id, connId = conn.oid
try:
var nextYield = Moment.now() + b.yieldInterval
var nextYield = Moment.now() + self.yieldInterval
while not conn.atEof or not conn.closed:
if Moment.now() > nextYield:
nextYield = Moment.now() + b.yieldInterval
nextYield = Moment.now() + self.yieldInterval
trace "Yielding in read loop",
peer = b.id, nextYield = nextYield, interval = b.yieldInterval
peer = self.id, nextYield = nextYield, interval = self.yieldInterval
await sleepAsync(10.millis)
let
data = await conn.readLp(MaxMessageSize.int)
msg = Message.protobufDecode(data).mapFailure().tryGet()
trace "Received message", peer = b.id, connId = conn.oid
await b.handler(b, msg)
trace "Received message", peer = self.id, connId = conn.oid
await self.handler(self, msg)
except CancelledError:
trace "Read loop cancelled"
except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg
finally:
trace "Detaching read loop", peer = b.id, connId = conn.oid
trace "Detaching read loop", peer = self.id, connId = conn.oid
await conn.close()
proc connect*(b: NetworkPeer): Future[Connection] {.async.} =
if b.connected:
trace "Already connected", peer = b.id, connId = b.sendConn.oid
return b.sendConn
proc connect*(
self: NetworkPeer
): Future[Connection] {.async: (raises: [CancelledError]).} =
if self.connected:
trace "Already connected", peer = self.id, connId = self.sendConn.oid
return self.sendConn
b.sendConn = await b.getConn()
asyncSpawn b.readLoop(b.sendConn)
return b.sendConn
self.sendConn = await self.getConn()
self.trackedFutures.track(self.readLoop(self.sendConn))
return self.sendConn
proc send*(b: NetworkPeer, msg: Message) {.async.} =
let conn = await b.connect()
proc send*(
self: NetworkPeer, msg: Message
) {.async: (raises: [CancelledError, LPStreamError]).} =
let conn = await self.connect()
if isNil(conn):
warn "Unable to get send connection for peer message not sent", peer = b.id
warn "Unable to get send connection for peer message not sent", peer = self.id
return
trace "Sending message", peer = b.id, connId = conn.oid
trace "Sending message", peer = self.id, connId = conn.oid
await conn.writeLp(protobufEncode(msg))
func new*(
@ -96,4 +99,9 @@ func new*(
): NetworkPeer =
doAssert(not isNil(connProvider), "should supply connection provider")
NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler)
NetworkPeer(
id: peer,
getConn: connProvider,
handler: rpcHandler,
trackedFutures: TrackedFutures(),
)


@ -7,16 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/sequtils
import std/tables
import std/algorithm
import std/sequtils
import pkg/upraises
push:
{.upraises: [].}
import pkg/chronos
import pkg/libp2p


@ -97,7 +97,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
var ipb = initProtoBuffer(maxSize = MaxBlockSize)
var ipb = initProtoBuffer()
ipb.write(1, value.blk.cid.data.buffer)
ipb.write(2, value.blk.data)
ipb.write(3, value.address)
@ -128,7 +128,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
pb.write(field, ipb)
proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer(maxSize = MaxMessageSize)
var ipb = initProtoBuffer()
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v)
@ -254,16 +254,14 @@ proc decode*(
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
var
value = Message()
pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
pb = initProtoBuffer(msg)
ipb: ProtoBuffer
sublist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist):
for item in sublist:
value.payload.add(
?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))
)
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
if ?pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))


@ -1,8 +1,9 @@
{.push raises: [].}
import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import pkg/upraises
import ./blockexc
export AccountMessage
@ -11,9 +12,6 @@ export StateChannelUpdate
export stint
export nitro
push:
{.upraises: [].}
type Account* = object
address*: EthAddress


@ -1,8 +1,9 @@
{.push raises: [].}
import libp2p
import pkg/stint
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./blockexc
import ../../blocktype
@ -11,9 +12,6 @@ export questionable
export stint
export BlockPresenceType
upraises.push:
{.upraises: [].}
type
PresenceMessage* = blockexc.BlockPresence
Presence* = object


@ -28,8 +28,11 @@ const DefaultChunkSize* = DefaultBlockSize
type
# default reader type
ChunkerError* = object of CatchableError
ChunkBuffer* = ptr UncheckedArray[byte]
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].}
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
gcsafe, async: (raises: [ChunkerError, CancelledError])
.}
# Reader that splits input data into fixed-size chunks
Chunker* = ref object
@ -74,7 +77,7 @@ proc new*(
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
var res = 0
try:
while res < len:
@ -85,7 +88,7 @@ proc new*(
raise error
except LPStreamError as error:
error "LPStream error", err = error.msg
raise error
raise newException(ChunkerError, "LPStream error", error)
except CatchableError as exc:
error "CatchableError exception", exc = exc.msg
raise newException(Defect, exc.msg)
@ -102,7 +105,7 @@ proc new*(
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
var total = 0
try:
while total < len:

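Under the new Reader signature, stream failures must surface as ChunkerError (chained via newException's parent argument, as in the hunk above) instead of escaping or being converted to a Defect. A toy reader honoring that contract; every name here is illustrative rather than the chunker's API:

import pkg/chronos

type
  ChunkerError = object of CatchableError
  ChunkBuffer = ptr UncheckedArray[byte]
  Reader = proc(data: ChunkBuffer, len: int): Future[int] {.
    gcsafe, async: (raises: [ChunkerError, CancelledError])
  .}

proc makeZeroReader(): Reader =
  proc reader(
      data: ChunkBuffer, len: int
  ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
    # fills the buffer with zeroes; a real reader would pull from a stream
    # and wrap stream errors: raise newException(ChunkerError, msg, parent)
    for i in 0 ..< len:
      data[i] = 0'u8
    return len

  reader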

@ -177,14 +177,20 @@ proc start*(s: CodexServer) {.async.} =
proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node"
await allFuturesThrowing(
s.restServer.stop(),
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
let res = await noCancel allFinishedFailed(
@[
s.restServer.stop(),
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
]
)
if res.failure.len > 0:
error "Failed to stop codex node", failures = res.failure.len
raiseAssert "Failed to stop codex node"
proc new*(
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
): CodexServer =

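noCancel shields the stop sequence above: even if stop() itself is cancelled, the subsystem shutdown futures still run to completion, and allFinishedFailed collects failures instead of letting the first one abort the rest. A reduced sketch of the noCancel part, assuming chronos v4 (stopSubsystem is a stand-in):

import pkg/chronos

proc stopSubsystem(name: string) {.async.} =
  await sleepAsync(1.millis)
  echo name, " stopped"

proc shutdown() {.async.} =
  # noCancel keeps the composite future running even if shutdown()
  # is cancelled while awaiting it
  await noCancel allFutures(stopSubsystem("rest"), stopSubsystem("repo"))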

@ -5,6 +5,7 @@ import pkg/chronos
import pkg/stint
import ../clock
import ../conf
import ../utils/trackedfutures
export clock
@ -18,9 +19,12 @@ type OnChainClock* = ref object of Clock
blockNumber: UInt256
started: bool
newBlock: AsyncEvent
trackedFutures: TrackedFutures
proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
OnChainClock(provider: provider, newBlock: newAsyncEvent())
OnChainClock(
provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
)
proc update(clock: OnChainClock, blck: Block) =
if number =? blck.number and number > clock.blockNumber:
@ -32,15 +36,12 @@ proc update(clock: OnChainClock, blck: Block) =
blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
clock.newBlock.fire()
proc update(clock: OnChainClock) {.async.} =
proc update(clock: OnChainClock) {.async: (raises: []).} =
try:
if latest =? (await clock.provider.getBlock(BlockTag.latest)):
clock.update(latest)
except CancelledError as error:
raise error
except CatchableError as error:
debug "error updating clock: ", error = error.msg
discard
method start*(clock: OnChainClock) {.async.} =
if clock.started:
@ -52,7 +53,7 @@ method start*(clock: OnChainClock) {.async.} =
return
# ignore block parameter; hardhat may call this with pending blocks
asyncSpawn clock.update()
clock.trackedFutures.track(clock.update())
await clock.update()
@ -64,6 +65,7 @@ method stop*(clock: OnChainClock) {.async.} =
return
await clock.subscription.unsubscribe()
await clock.trackedFutures.cancelTracked()
clock.started = false
method now*(clock: OnChainClock): SecondsSince1970 =


@ -1,3 +1,4 @@
import std/strformat
import std/strutils
import pkg/ethers
import pkg/upraises
@ -49,11 +50,17 @@ func new*(
proc raiseMarketError(message: string) {.raises: [MarketError].} =
raise newException(MarketError, message)
template convertEthersError(body) =
func prefixWith(suffix, prefix: string, separator = ": "): string =
if prefix.len > 0:
return &"{prefix}{separator}{suffix}"
else:
return suffix
template convertEthersError(msg: string = "", body) =
try:
body
except EthersError as error:
raiseMarketError(error.msgDetail)
raiseMarketError(error.msgDetail.prefixWith(msg))
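For reference, a self-contained rendering of how the template composes; EthersError here is a local stand-in for the pkg/ethers type, and msg is used in place of msgDetail:

import std/strformat

type
  EthersError = object of CatchableError # stand-in for pkg/ethers
  MarketError = object of CatchableError

func prefixWith(suffix, prefix: string, separator = ": "): string =
  if prefix.len > 0: &"{prefix}{separator}{suffix}" else: suffix

template convertEthersError(msg: string = "", body) =
  try:
    body
  except EthersError as error:
    raise newException(MarketError, error.msg.prefixWith(msg))

proc fetchConfig() =
  convertEthersError("Failed to get Marketplace config"):
    raise newException(EthersError, "connection refused")
    # surfaces as MarketError with message
    # "Failed to get Marketplace config: connection refused"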
proc config(
market: OnChainMarket
@ -71,7 +78,7 @@ proc config(
proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
debug "Approving tokens", amount
convertEthersError:
convertEthersError("Failed to approve funds"):
let tokenAddress = await market.contract.token()
let token = Erc20Token.new(tokenAddress, market.signer)
discard await token.increaseAllowance(market.contract.address(), amount).confirm(1)
@ -86,8 +93,7 @@ method loadConfig*(
market.configuration = some fetchedConfig
return success()
except AsyncLockError, EthersError:
let err = getCurrentException()
except EthersError as err:
return failure newException(
MarketError,
"Failed to fetch the config from the Marketplace contract: " & err.msg,
@ -100,13 +106,13 @@ method getZkeyHash*(
return some config.proofs.zkeyHash
method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
convertEthersError:
convertEthersError("Failed to get signer address"):
return await market.signer.getAddress()
method periodicity*(
market: OnChainMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError:
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
let period = config.proofs.period
return Periodicity(seconds: period)
@ -114,47 +120,47 @@ method periodicity*(
method proofTimeout*(
market: OnChainMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError:
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.timeout
method repairRewardPercentage*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError:
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.collateral.repairRewardPercentage
method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
convertEthersError:
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.requestDurationLimit
method proofDowntime*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError:
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.downtime
method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
convertEthersError:
convertEthersError("Failed to get slot pointer"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getPointer(slotId, overrides)
method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
convertEthersError:
convertEthersError("Failed to get my requests"):
return await market.contract.myRequests
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
convertEthersError:
convertEthersError("Failed to get my slots"):
let slots = await market.contract.mySlots()
debug "Fetched my slots", numSlots = len(slots)
return slots
method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} =
convertEthersError:
convertEthersError("Failed to request storage"):
debug "Requesting storage"
await market.approveFunds(request.totalPrice())
discard await market.contract.requestStorage(request).confirm(1)
@ -174,14 +180,14 @@ method getRequest*(
except Marketplace_UnknownRequest, KeyError:
warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
return none StorageRequest
except EthersError, AsyncLockError:
error "Cannot retrieve the request", error = getCurrentExceptionMsg()
except EthersError as e:
error "Cannot retrieve the request", error = e.msg
return none StorageRequest
method requestState*(
market: OnChainMarket, requestId: RequestId
): Future[?RequestState] {.async.} =
convertEthersError:
convertEthersError("Failed to get request state"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return some await market.contract.requestState(requestId, overrides)
@ -191,31 +197,26 @@ method requestState*(
method slotState*(
market: OnChainMarket, slotId: SlotId
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError:
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.slotState(slotId, overrides)
except AsyncLockError as err:
raiseMarketError(
"Failed to fetch the slot state from the Marketplace contract: " & err.msg
)
convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.slotState(slotId, overrides)
method getRequestEnd*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError:
convertEthersError("Failed to get request end"):
return await market.contract.requestEnd(id)
method requestExpiresAt*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError:
convertEthersError("Failed to get request expiry"):
return await market.contract.requestExpiry(id)
method getHost(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.async.} =
convertEthersError:
convertEthersError("Failed to get slot's host"):
let slotId = slotId(requestId, slotIndex)
let address = await market.contract.getHost(slotId)
if address != Address.default:
@ -226,11 +227,11 @@ method getHost(
method currentCollateral*(
market: OnChainMarket, slotId: SlotId
): Future[UInt256] {.async.} =
convertEthersError:
convertEthersError("Failed to get slot's current collateral"):
return await market.contract.currentCollateral(slotId)
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
convertEthersError:
convertEthersError("Failed to get active slot"):
try:
return some await market.contract.getActiveSlot(slotId)
except Marketplace_SlotIsFree:
@ -243,18 +244,24 @@ method fillSlot(
proof: Groth16Proof,
collateral: UInt256,
) {.async.} =
convertEthersError:
convertEthersError("Failed to fill slot"):
logScope:
requestId
slotIndex
await market.approveFunds(collateral)
trace "calling fillSlot on contract"
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
trace "fillSlot transaction completed"
try:
await market.approveFunds(collateral)
trace "calling fillSlot on contract"
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
trace "fillSlot transaction completed"
except Marketplace_SlotNotFree as parent:
raise newException(
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
parent,
)
method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
convertEthersError:
convertEthersError("Failed to free slot"):
var freeSlot: Future[Confirmable]
if rewardRecipient =? market.rewardRecipient:
# If --reward-recipient specified, use it as the reward recipient, and use
@ -273,11 +280,11 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
discard await freeSlot.confirm(1)
method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} =
convertEthersError:
convertEthersError("Failed to withdraw funds"):
discard await market.contract.withdrawFunds(requestId).confirm(1)
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError:
convertEthersError("Failed to get proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.isProofRequired(id, overrides)
@ -285,7 +292,7 @@ method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async
return false
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError:
convertEthersError("Failed to get future proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.willProofBeRequired(id, overrides)
@ -295,18 +302,18 @@ method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.a
method getChallenge*(
market: OnChainMarket, id: SlotId
): Future[ProofChallenge] {.async.} =
convertEthersError:
convertEthersError("Failed to get proof challenge"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getChallenge(id, overrides)
method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} =
convertEthersError:
convertEthersError("Failed to submit proof"):
discard await market.contract.submitProof(id, proof).confirm(1)
method markProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
) {.async.} =
convertEthersError:
convertEthersError("Failed to mark proof as missing"):
discard await market.contract.markProofAsMissing(id, period).confirm(1)
method canProofBeMarkedAsMissing*(
@ -325,20 +332,26 @@ method canProofBeMarkedAsMissing*(
method reserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
) {.async.} =
convertEthersError:
discard await market.contract
.reserveSlot(
requestId,
slotIndex,
# reserveSlot runs out of gas for an unknown reason, but 100k gas covers it
TransactionOverrides(gasLimit: some 100000.u256),
)
.confirm(1)
convertEthersError("Failed to reserve slot"):
try:
discard await market.contract
.reserveSlot(
requestId,
slotIndex,
# reserveSlot runs out of gas for an unknown reason, but 100k gas covers it
TransactionOverrides(gasLimit: some 100000.u256),
)
.confirm(1)
except SlotReservations_ReservationNotAllowed:
raise newException(
SlotReservationNotAllowedError,
"Failed to reserve slot because reservation is not allowed",
)
method canReserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async.} =
convertEthersError:
convertEthersError("Unable to determine if slot can be reserved"):
return await market.contract.canReserveSlot(requestId, slotIndex)
method subscribeRequests*(
@ -351,7 +364,7 @@ method subscribeRequests*(
callback(event.requestId, event.ask, event.expiry)
convertEthersError:
convertEthersError("Failed to subscribe to StorageRequested events"):
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -365,7 +378,7 @@ method subscribeSlotFilled*(
callback(event.requestId, event.slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotFilled events"):
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -379,7 +392,7 @@ method subscribeSlotFilled*(
if eventRequestId == requestId and eventSlotIndex == slotIndex:
callback(requestId, slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotFilled events"):
return await market.subscribeSlotFilled(onSlotFilled)
method subscribeSlotFreed*(
@ -392,7 +405,7 @@ method subscribeSlotFreed*(
callback(event.requestId, event.slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotFreed events"):
let subscription = await market.contract.subscribe(SlotFreed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -407,7 +420,7 @@ method subscribeSlotReservationsFull*(
callback(event.requestId, event.slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotReservationsFull events"):
let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -421,7 +434,7 @@ method subscribeFulfillment(
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -436,7 +449,7 @@ method subscribeFulfillment(
if event.requestId == requestId:
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -450,7 +463,7 @@ method subscribeRequestCancelled*(
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -465,7 +478,7 @@ method subscribeRequestCancelled*(
if event.requestId == requestId:
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -479,7 +492,7 @@ method subscribeRequestFailed*(
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -494,7 +507,7 @@ method subscribeRequestFailed*(
if event.requestId == requestId:
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -508,7 +521,7 @@ method subscribeProofSubmission*(
callback(event.id)
convertEthersError:
convertEthersError("Failed to subscribe to ProofSubmitted events"):
let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -518,13 +531,13 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past SlotFilled events from block"):
return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)
method queryPastSlotFilledEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past SlotFilled events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastSlotFilledEvents(fromBlock)
@ -532,21 +545,21 @@ method queryPastSlotFilledEvents*(
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past SlotFilled events from time"):
let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
method queryPastStorageRequestedEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past StorageRequested events from block"):
return
await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)
method queryPastStorageRequestedEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past StorageRequested events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastStorageRequestedEvents(fromBlock)

View File

@ -53,6 +53,7 @@ type
Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
Proofs_InvalidProbability* = object of SolidityError
Periods_InvalidSecondsPerPeriod* = object of SolidityError
SlotReservations_ReservationNotAllowed* = object of SolidityError
proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.}

View File

@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/algorithm
import std/sequtils
@ -54,70 +56,122 @@ proc toNodeId*(host: ca.Address): NodeId =
readUintBE[256](keccak256.digest(host.toArray).data)
proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
proc findPeer*(
d: Discovery, peerId: PeerId
): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
trace "protocol.resolve..."
## Find peer using the given Discovery object
##
let node = await d.protocol.resolve(toNodeId(peerId))
return
if node.isSome():
node.get().record.data.some
else:
PeerRecord.none
try:
let node = await d.protocol.resolve(toNodeId(peerId))
method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
return
if node.isSome():
node.get().record.data.some
else:
PeerRecord.none
except CancelledError as exc:
warn "Error finding peer", peerId = peerId, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding peer", peerId = peerId, exc = exc.msg
return PeerRecord.none
method find*(
d: Discovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
## Find block providers
##
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
warn "Error finding providers for block", cid, error = error.msg
return providers.filterIt(not (it.data.peerId == d.peerId))
try:
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure,
error:
warn "Error finding providers for block", cid, error = error.msg
method provide*(d: Discovery, cid: Cid) {.async, base.} =
return providers.filterIt(not (it.data.peerId == d.peerId))
except CancelledError as exc:
warn "Error finding providers for block", cid, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding providers for block", cid, exc = exc.msg
method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} =
## Provide a block Cid
##
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
try:
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
if nodes.len <= 0:
warn "Couldn't provide to any nodes!"
if nodes.len <= 0:
warn "Couldn't provide to any nodes!"
except CancelledError as exc:
warn "Error providing block", cid, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error providing block", cid, exc = exc.msg
method find*(
d: Discovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async, base.} =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
## Find host providers
##
trace "Finding providers for host", host = $host
without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
error:
trace "Error finding providers for host", host = $host, exc = error.msg
return
try:
trace "Finding providers for host", host = $host
without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
error:
trace "Error finding providers for host", host = $host, exc = error.msg
return
if providers.len <= 0:
trace "No providers found", host = $host
return
if providers.len <= 0:
trace "No providers found", host = $host
return
providers.sort do(a, b: SignedPeerRecord) -> int:
system.cmp[uint64](a.data.seqNo, b.data.seqNo)
providers.sort do(a, b: SignedPeerRecord) -> int:
system.cmp[uint64](a.data.seqNo, b.data.seqNo)
return providers
return providers
except CancelledError as exc:
warn "Error finding providers for host", host = $host, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding providers for host", host = $host, exc = exc.msg
method provide*(d: Discovery, host: ca.Address) {.async, base.} =
method provide*(
d: Discovery, host: ca.Address
) {.async: (raises: [CancelledError]), base.} =
## Provide hosts
##
trace "Providing host", host = $host
let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
if nodes.len > 0:
trace "Provided to nodes", nodes = nodes.len
try:
trace "Providing host", host = $host
let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
if nodes.len > 0:
trace "Provided to nodes", nodes = nodes.len
except CancelledError as exc:
warn "Error providing host", host = $host, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error providing host", host = $host, exc = exc.msg
method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} =
method removeProvider*(
d: Discovery, peerId: PeerId
): Future[void] {.base, gcsafe, async: (raises: [CancelledError]).} =
## Remove provider from providers table
##
trace "Removing provider", peerId
d.protocol.removeProvidersLocal(peerId)
try:
await d.protocol.removeProvidersLocal(peerId)
except CancelledError as exc:
warn "Error removing provider", peerId = peerId, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error removing provider", peerId = peerId, exc = exc.msg
except Exception as exc: # Something in discv5 is raising Exception
warn "Error removing provider", peerId = peerId, exc = exc.msg
raiseAssert("Unexpected Exception in removeProvider")
proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update providers record
@ -125,7 +179,7 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
d.announceAddrs = @addrs
trace "Updating announce record", addrs = d.announceAddrs
info "Updating announce record", addrs = d.announceAddrs
d.providerRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
.expect("Should construct signed record").some
@ -137,7 +191,7 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update providers record
##
trace "Updating Dht record", addrs = addrs
info "Updating Dht record", addrs = addrs
d.dhtRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, @addrs))
.expect("Should construct signed record").some
@ -145,12 +199,18 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
if not d.protocol.isNil:
d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")
proc start*(d: Discovery) {.async.} =
d.protocol.open()
await d.protocol.start()
proc start*(d: Discovery) {.async: (raises: []).} =
try:
d.protocol.open()
await d.protocol.start()
except CatchableError as exc:
error "Error starting discovery", exc = exc.msg
proc stop*(d: Discovery) {.async.} =
await d.protocol.closeWait()
proc stop*(d: Discovery) {.async: (raises: []).} =
try:
await noCancel d.protocol.closeWait()
except CatchableError as exc:
error "Error stopping discovery", exc = exc.msg
proc new*(
T: type Discovery,

View File

@ -310,10 +310,10 @@ proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
else:
task[].success.store(true)
proc encodeAsync*(
proc asyncEncode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
data: ref seq[seq[byte]],
blocks: ref seq[seq[byte]],
parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
@ -322,21 +322,18 @@ proc encodeAsync*(
defer:
threadPtr.close().expect("closing once works")
var blockData = createDoubleArray(blocksLen, blockSize)
for i in 0 ..< data[].len:
copyMem(blockData[i], addr data[i][0], blockSize)
var data = makeUncheckedArray(blocks)
defer:
freeDoubleArray(blockData, blocksLen)
dealloc(data)
## Create an encode task with block data
## Create an encode task with block data
var task = EncodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
blocks: blockData,
blocks: data,
parity: parity,
signal: threadPtr,
)
@ -348,18 +345,13 @@ proc encodeAsync*(
self.taskPool.spawn leopardEncodeTask(self.taskPool, t)
let threadFut = threadPtr.wait()
try:
await threadFut.join()
except CatchableError as exc:
try:
await threadFut
except AsyncError as asyncExc:
return failure(asyncExc.msg)
finally:
if exc of CancelledError:
raise (ref CancelledError) exc
else:
return failure(exc.msg)
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)
if not t.success.load():
return failure("Leopard encoding failed")
@ -409,7 +401,7 @@ proc encodeData(
try:
if err =? (
await self.encodeAsync(
await self.asyncEncode(
manifest.blockSize.int, params.ecK, params.ecM, data, parity
)
).errorOption:
@ -489,6 +481,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
decoder.release()
discard task[].signal.fireSync()
if (
let res = decoder.decode(
@ -506,9 +499,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
else:
task[].success.store(true)
discard task[].signal.fireSync()
proc decodeAsync*(
proc asyncDecode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks, parity: ref seq[seq[byte]],
@ -521,33 +512,21 @@ proc decodeAsync*(
threadPtr.close().expect("closing once works")
var
blocksData = createDoubleArray(blocksLen, blockSize)
parityData = createDoubleArray(parityLen, blockSize)
for i in 0 ..< blocks[].len:
if blocks[i].len > 0:
copyMem(blocksData[i], addr blocks[i][0], blockSize)
else:
blocksData[i] = nil
for i in 0 ..< parity[].len:
if parity[i].len > 0:
copyMem(parityData[i], addr parity[i][0], blockSize)
else:
parityData[i] = nil
blockData = makeUncheckedArray(blocks)
parityData = makeUncheckedArray(parity)
defer:
freeDoubleArray(blocksData, blocksLen)
freeDoubleArray(parityData, parityLen)
dealloc(blockData)
dealloc(parityData)
## Create a decode task with block data
## Create a decode task with block data
var task = DecodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
recoveredLen: blocksLen,
blocks: blocksData,
blocks: blockData,
parity: parityData,
recovered: recovered,
signal: threadPtr,
@ -560,18 +539,13 @@ proc decodeAsync*(
self.taskPool.spawn leopardDecodeTask(self.taskPool, t)
let threadFut = threadPtr.wait()
try:
await threadFut.join()
except CatchableError as exc:
try:
await threadFut
except AsyncError as asyncExc:
return failure(asyncExc.msg)
finally:
if exc of CancelledError:
raise (ref CancelledError) exc
else:
return failure(exc.msg)
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)
if not t.success.load():
return failure("Leopard encoding failed")
@ -627,7 +601,7 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
trace "Erasure decoding data"
try:
if err =? (
await self.decodeAsync(
await self.asyncDecode(
encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
)
).errorOption:

View File

@ -19,6 +19,8 @@ type
CodexError* = object of CatchableError # base codex error
CodexResult*[T] = Result[T, ref CodexError]
FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]]
template mapFailure*[T, V, E](
exp: Result[T, V], exc: typedesc[E]
): Result[T, ref CatchableError] =
@ -40,35 +42,18 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
else:
T.failure("Option is None")
# allFuturesThrowing was moved to the tests in libp2p
proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
var futs: seq[Future[T]]
for fut in args:
futs &= fut
proc call() {.async.} =
var first: ref CatchableError = nil
futs = await allFinished(futs)
for fut in futs:
if fut.failed:
let err = fut.readError()
if err of Defect:
raise err
else:
if err of CancelledError:
raise err
if isNil(first):
first = err
if not isNil(first):
raise first
proc allFinishedFailed*[T](futs: seq[Future[T]]): Future[FinishedFailed[T]] {.async.} =
## Wait for all futures to settle, then partition them into succeeded and failed
##
## TODO: wip, not sure if we want this - at the minimum,
## we should probably avoid the async transform
return call()
var res: FinishedFailed[T] = (@[], @[])
await allFutures(futs)
for f in futs:
if f.failed:
res.failure.add f
else:
res.success.add f
proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} =
try:
await allFuturesThrowing(fut)
except CancelledError as exc:
raise exc
except CatchableError as exc:
return failure(exc.msg)
return success()
return res
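Unlike the removed `allFuturesThrowing`/`allFutureResult` pair, this helper never raises on behalf of its inputs; callers inspect both partitions instead. A hypothetical call site, mirroring how the node code below uses it:

# futs is any seq of homogeneous futures; nothing is re-raised here.
let res = await allFinishedFailed(futs)
if res.failure.len > 0:
  warn "Some futures failed", failed = res.failure.len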

View File

@ -18,6 +18,8 @@ export periods
type
Market* = ref object of RootObj
MarketError* = object of CodexError
SlotStateMismatchError* = object of MarketError
SlotReservationNotAllowedError* = object of MarketError
Subscription* = ref object of RootObj
OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}

View File

@ -27,11 +27,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint
proc encode*(self: CodexTree): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
var pb = initProtoBuffer()
pb.write(1, self.mcodec.uint64)
pb.write(2, self.leavesCount.uint64)
for node in self.nodes:
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
var nodesPb = initProtoBuffer()
nodesPb.write(1, node)
nodesPb.finish()
pb.write(3, nodesPb)
@ -40,7 +40,7 @@ proc encode*(self: CodexTree): seq[byte] =
pb.buffer
proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
var pb = initProtoBuffer(data)
var mcodecCode: uint64
var leavesCount: uint64
discard ?pb.getField(1, mcodecCode).mapFailure
@ -63,13 +63,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
CodexTree.fromNodes(mcodec, nodes, leavesCount.int)
proc encode*(self: CodexProof): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
var pb = initProtoBuffer()
pb.write(1, self.mcodec.uint64)
pb.write(2, self.index.uint64)
pb.write(3, self.nleaves.uint64)
for node in self.path:
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
var nodesPb = initProtoBuffer()
nodesPb.write(1, node)
nodesPb.finish()
pb.write(4, nodesPb)
@ -78,7 +78,7 @@ proc encode*(self: CodexProof): seq[byte] =
pb.buffer
proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
var pb = initProtoBuffer(data)
var mcodecCode: uint64
var index: uint64
var nleaves: uint64

View File

@ -153,7 +153,11 @@ proc updateExpiry*(
let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt(
self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry)
)
await allFuturesThrowing(ensuringFutures)
let res = await allFinishedFailed(ensuringFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
except CancelledError as exc:
raise exc
except CatchableError as exc:
@ -186,8 +190,10 @@ proc fetchBatched*(
if not (await address in self.networkStore) or fetchLocal:
self.networkStore.getBlock(address)
if blocksErr =? (await allFutureResult(blocks)).errorOption:
return failure(blocksErr)
let res = await allFinishedFailed(blocks)
if res.failure.len > 0:
trace "Some blocks failed to fetch", len = res.failure.len
return failure("Some blocks failed to fetch (" & $res.failure.len & " )")
if not onBatch.isNil and
batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption:
@ -213,6 +219,30 @@ proc fetchBatched*(
let iter = Iter[int].new(0 ..< manifest.blocksCount)
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal)
proc fetchDatasetAsync*(
self: CodexNodeRef, manifest: Manifest, fetchLocal = true
): Future[void] {.async: (raises: []).} =
## Asynchronously fetch a dataset in the background.
## This task will be tracked and cleaned up on node shutdown.
##
try:
if err =? (
await self.fetchBatched(
manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal
)
).errorOption:
error "Unable to fetch blocks", err = err.msg
except CancelledError as exc:
trace "Cancelled fetching blocks", exc = exc.msg
except CatchableError as exc:
error "Error fetching blocks", exc = exc.msg
proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) =
## Start fetching a dataset in the background.
## The task will be tracked and cleaned up on node shutdown.
##
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
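A hypothetical call site (the REST handler further down uses exactly this entry point): the call returns immediately while the fetch continues as a tracked background future that is cancelled on shutdown.

# Fire-and-forget prefetch after a manifest lookup.
node.fetchDatasetAsyncTask(manifest)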
proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} =
## Streams the contents of a single block.
##
@ -223,36 +253,27 @@ proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async
without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
return failure(err)
proc streamOneBlock(): Future[void] {.async.} =
proc streamOneBlock(): Future[void] {.async: (raises: []).} =
try:
defer:
await stream.pushEof()
await stream.pushData(blk.data)
except CatchableError as exc:
trace "Unable to send block", cid, exc = exc.msg
discard
finally:
await stream.pushEof()
self.trackedFutures.track(streamOneBlock())
LPStream(stream).success
proc streamEntireDataset(
self: CodexNodeRef,
manifest: Manifest,
manifestCid: Cid,
prefetchBatch = DefaultFetchBatch,
self: CodexNodeRef, manifest: Manifest, manifestCid: Cid
): Future[?!LPStream] {.async.} =
## Streams the contents of the entire dataset described by the manifest.
## Background jobs (erasure decoding and prefetching) will be cancelled when
## the stream is closed.
##
trace "Retrieving blocks from manifest", manifestCid
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
var jobs: seq[Future[void]]
if manifest.protected:
# Retrieve, decode and save all EC groups to the local store
proc erasureJob(): Future[void] {.async.} =
proc erasureJob(): Future[void] {.async: (raises: []).} =
try:
# Spawn an erasure decoding job
let erasure = Erasure.new(
@ -260,36 +281,17 @@ proc streamEntireDataset(
)
without _ =? (await erasure.decode(manifest)), error:
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
except CancelledError:
trace "Erasure job cancelled", manifestCid
except CatchableError as exc:
trace "Error erasure decoding manifest", manifestCid, exc = exc.msg
jobs.add(erasureJob())
self.trackedFutures.track(erasureJob())
proc prefetch(): Future[void] {.async.} =
try:
if err =?
(await self.fetchBatched(manifest, prefetchBatch, fetchLocal = false)).errorOption:
error "Unable to fetch blocks", err = err.msg
except CancelledError:
trace "Prefetch job cancelled"
except CatchableError as exc:
error "Error fetching blocks", exc = exc.msg
jobs.add(prefetch())
# Monitor stream completion and cancel background jobs when done
proc monitorStream() {.async.} =
try:
await stream.join()
finally:
await allFutures(jobs.mapIt(it.cancelAndWait))
self.trackedFutures.track(monitorStream())
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
# prefetch task should not fetch from local store
# Retrieve all blocks of the dataset sequentially from the local store or network
trace "Creating store stream for manifest", manifestCid
stream.success
LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success
proc retrieve*(
self: CodexNodeRef, cid: Cid, local: bool = true
@ -632,8 +634,11 @@ proc onStore(
let ensureExpiryFutures =
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970))
if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption:
return failure(updateExpiryErr)
let res = await allFinishedFailed(ensureExpiryFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
trace "Unable to process blocks", err = err.msg

View File

@ -65,9 +65,15 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} =
return %RestContentList.init(content)
proc isPending(resp: HttpResponseRef): bool =
## Checks that an HttpResponseRef object is still pending; i.e.,
## that no body has yet been sent. This helps us guard against calling
## sendBody(resp: HttpResponseRef, ...) twice, which is illegal.
return resp.getResponseState() == HttpResponseState.Empty
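A hypothetical error path showing the guard in use, matching the handler change below:

# Only write an error body if nothing has been sent yet; calling
# sendBody twice on the same HttpResponseRef is illegal.
if resp.isPending():
  resp.status = Http500
  await resp.sendBody("internal error")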
proc retrieveCid(
node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef
): Future[RestApiResponse] {.async.} =
): Future[void] {.async: (raises: [CancelledError, HttpWriteError]).} =
## Download a file from the node in a streaming
## manner
##
@ -79,16 +85,21 @@ proc retrieveCid(
without stream =? (await node.retrieve(cid, local)), error:
if error of BlockNotFoundError:
resp.status = Http404
return await resp.sendBody("")
await resp.sendBody(
"The requested CID could not be retrieved (" & error.msg & ")."
)
return
else:
resp.status = Http500
return await resp.sendBody(error.msg)
await resp.sendBody(error.msg)
return
# It is ok to fetch again the manifest because it will hit the cache
without manifest =? (await node.fetchManifest(cid)), err:
error "Failed to fetch manifest", err = err.msg
resp.status = Http404
return await resp.sendBody(err.msg)
await resp.sendBody(err.msg)
return
if manifest.mimetype.isSome:
resp.setHeader("Content-Type", manifest.mimetype.get())
@ -103,7 +114,14 @@ proc retrieveCid(
else:
resp.setHeader("Content-Disposition", "attachment")
await resp.prepareChunked()
# For erasure-coded datasets, we need to return the _original_ length; i.e.,
# the length of the non-erasure-coded dataset, as that's what we will be
# returning to the client.
let contentLength =
if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize
resp.setHeader("Content-Length", $(contentLength.int))
await resp.prepare(HttpResponseStreamType.Plain)
while not stream.atEof:
var
@ -116,13 +134,16 @@ proc retrieveCid(
bytes += buff.len
await resp.sendChunk(addr buff[0], buff.len)
await resp.send(addr buff[0], buff.len)
await resp.finish()
codex_api_downloads.inc()
except CancelledError as exc:
raise exc
except CatchableError as exc:
warn "Error streaming blocks", exc = exc.msg
resp.status = Http500
return await resp.sendBody("")
if resp.isPending():
await resp.sendBody(exc.msg)
finally:
info "Sent bytes", cid = cid, bytes
if not stream.isNil:
@ -299,15 +320,8 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
error "Failed to fetch manifest", err = err.msg
return RestApiResponse.error(Http404, err.msg, headers = headers)
proc fetchDatasetAsync(): Future[void] {.async.} =
try:
if err =? (await node.fetchBatched(manifest)).errorOption:
error "Unable to fetch dataset", cid = cid.get(), err = err.msg
except CatchableError as exc:
error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg
discard
asyncSpawn fetchDatasetAsync()
# Start fetching the dataset in the background
node.fetchDatasetAsyncTask(manifest)
let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json")
@ -328,6 +342,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.setCorsHeaders("GET", corsOrigin)
resp.setHeader("Access-Control-Headers", "X-Requested-With")
resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition")
await node.retrieveCid(cid.get(), local = false, resp = resp)
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do(

View File

@ -285,7 +285,7 @@ proc load*(sales: Sales) {.async.} =
agent.start(SaleUnknown())
sales.agents.add agent
proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
proc OnAvailabilitySaved(sales: Sales, availability: Availability) {.async.} =
## When availabilities are modified or added, the queue should be unpaused if
## it was paused and any slots in the queue should have their `seen` flag
## cleared.
@ -341,48 +341,51 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
trace "slot freed, adding to queue"
proc addSlotToQueue() {.async: (raises: [CancelledError]).} =
proc addSlotToQueue() {.async: (raises: []).} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
without request =? (await market.getRequest(requestId)), err:
error "unknown request in contract", error = err.msgDetail
return
try:
without request =? (await market.getRequest(requestId)), err:
error "unknown request in contract", error = err.msgDetail
return
# Take the repairing state into consideration to calculate the collateral.
# This is particularly needed because it will affect the priority in the queue
# and we want to give the user the ability to tweak the parameters.
# Adding the repairing state directly in the queue priority calculation
# would not allow this flexibility.
without collateral =?
market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
error "Failed to add freed slot to queue: unable to calculate collateral",
error = err.msg
return
# Take the repairing state into consideration to calculate the collateral.
# This is particularly needed because it will affect the priority in the queue
# and we want to give the user the ability to tweak the parameters.
# Adding the repairing state directly in the queue priority calculation
# would not allow this flexibility.
without collateral =?
market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
error "Failed to add freed slot to queue: unable to calculate collateral",
error = err.msg
return
if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return
if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return
without slotQueueItem =?
SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch, err:
warn "Too many slots, cannot add to queue", error = err.msgDetail
return
without slotQueueItem =?
SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
err:
warn "Too many slots, cannot add to queue", error = err.msgDetail
return
if err =? queue.push(slotQueueItem).errorOption:
if err of SlotQueueItemExistsError:
error "Failed to push item to queue becaue it already exists",
error = err.msgDetail
elif err of QueueNotRunningError:
warn "Failed to push item to queue becaue queue is not running",
error = err.msgDetail
if err =? queue.push(slotQueueItem).errorOption:
if err of SlotQueueItemExistsError:
error "Failed to push item to queue becaue it already exists",
error = err.msgDetail
elif err of QueueNotRunningError:
warn "Failed to push item to queue becaue queue is not running",
error = err.msgDetail
except CatchableError as e:
warn "Failed to add slot to queue", error = e.msg
# We could get rid of this by adding the storage ask in the SlotFreed event,
# so we would not need to call getRequest to get the collateralPerSlot.
let fut = addSlotToQueue()
sales.trackedFutures.track(fut)
asyncSpawn fut
proc subscribeRequested(sales: Sales) {.async.} =
let context = sales.context
@ -522,16 +525,18 @@ proc startSlotQueue(sales: Sales) =
let slotQueue = sales.context.slotQueue
let reservations = sales.context.reservations
slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
slotQueue.onProcessSlot = proc(
item: SlotQueueItem, done: Future[void]
) {.async: (raises: []).} =
trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
sales.processSlot(item, done)
slotQueue.start()
proc onAvailabilityAdded(availability: Availability) {.async.} =
await sales.onAvailabilityAdded(availability)
proc OnAvailabilitySaved(availability: Availability) {.async.} =
await sales.OnAvailabilitySaved(availability)
reservations.onAvailabilityAdded = onAvailabilityAdded
reservations.OnAvailabilitySaved = OnAvailabilitySaved
proc subscribe(sales: Sales) {.async.} =
await sales.subscribeRequested()

View File

@ -82,11 +82,11 @@ type
availabilityLock: AsyncLock
# Lock for protecting assertions of availability's sizes when searching for matching availability
repo: RepoStore
onAvailabilityAdded: ?OnAvailabilityAdded
OnAvailabilitySaved: ?OnAvailabilitySaved
GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
OnAvailabilityAdded* =
OnAvailabilitySaved* =
proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
StorableIter* = ref object
finished*: bool
@ -189,10 +189,10 @@ logutils.formatIt(LogFormat.textLines, SomeStorableId):
logutils.formatIt(LogFormat.json, SomeStorableId):
it.to0xHexLog
proc `onAvailabilityAdded=`*(
self: Reservations, onAvailabilityAdded: OnAvailabilityAdded
proc `OnAvailabilitySaved=`*(
self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved
) =
self.onAvailabilityAdded = some onAvailabilityAdded
self.OnAvailabilitySaved = some OnAvailabilitySaved
func key*(id: AvailabilityId): ?!Key =
## sales / reservations / <availabilityId>
@ -268,18 +268,18 @@ proc updateAvailability(
trace "Creating new Availability"
let res = await self.updateImpl(obj)
# inform subscribers that Availability has been added
if onAvailabilityAdded =? self.onAvailabilityAdded:
# when chronos v4 is implemented, and OnAvailabilityAdded is annotated
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
# when chronos v4 is implemented, and OnAvailabilitySaved is annotated
# with async:(raises:[]), we can remove this try/catch as we know, with
# certainty, that nothing will be raised
try:
await onAvailabilityAdded(obj)
await OnAvailabilitySaved(obj)
except CancelledError as e:
raise e
except CatchableError as e:
# we don't have any insight into types of exceptions that
# `onAvailabilityAdded` can raise because it is caller-defined
warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
# `OnAvailabilitySaved` can raise because it is caller-defined
warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg
return res
else:
return failure(err)
@ -300,21 +300,23 @@ proc updateAvailability(
let res = await self.updateImpl(obj)
if oldAvailability.freeSize < obj.freeSize: # availability added
if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
oldAvailability.totalCollateral < obj.totalCollateral: # availability updated
# inform subscribers that Availability has been modified (with increased
# size)
if onAvailabilityAdded =? self.onAvailabilityAdded:
# when chronos v4 is implemented, and OnAvailabilityAdded is annotated
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
# when chronos v4 is implemented, and OnAvailabilitySaved is annotated
# with async:(raises:[]), we can remove this try/catch as we know, with
# certainty, that nothing will be raised
try:
await onAvailabilityAdded(obj)
await OnAvailabilitySaved(obj)
except CancelledError as e:
raise e
except CatchableError as e:
# we don't have any insight into types of exceptions that
# `onAvailabilityAdded` can raise because it is caller-defined
warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
# `OnAvailabilitySaved` can raise because it is caller-defined
warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg
return res

View File

@ -103,7 +103,6 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
error "Error while waiting for expiry to lapse", error = e.msgDetail
data.cancelled = onCancelled()
asyncSpawn data.cancelled
method onFulfilled*(
agent: SalesAgent, requestId: RequestId

View File

@ -3,7 +3,6 @@ import std/tables
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ../errors
import ../clock
import ../logutils
@ -17,8 +16,9 @@ logScope:
topics = "marketplace slotqueue"
type
OnProcessSlot* =
proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].}
OnProcessSlot* = proc(item: SlotQueueItem, done: Future[void]): Future[void] {.
gcsafe, async: (raises: [])
.}
# Non-ref obj copies value when assigned, preventing accidental modification
# of values which could cause an incorrect order (eg
@ -26,7 +26,7 @@ type
# but the heap invariant would no longer be honoured. When non-ref, the
# compiler can ensure that statement will fail).
SlotQueueWorker = object
doneProcessing*: Future[void]
doneProcessing*: Future[void].Raising([])
SlotQueueItem* = object
requestId: RequestId
@ -126,7 +126,17 @@ proc new*(
# `newAsyncQueue` procedure
proc init(_: type SlotQueueWorker): SlotQueueWorker =
SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing"))
let workerFut = Future[void].Raising([]).init(
"slotqueue.worker.processing", {FutureFlag.OwnCancelSchedule}
)
workerFut.cancelCallback = proc(data: pointer) {.raises: [].} =
# this is equivalent to try: ... except CatchableError: ...
if not workerFut.finished:
workerFut.complete()
trace "Cancelling `SlotQueue` worker processing future"
SlotQueueWorker(doneProcessing: workerFut)
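The `OwnCancelSchedule` flag hands cancellation over to the custom callback, which completes the future instead of failing it, so `doneProcessing` can be awaited from a `raises: []` worker. A sketch of the assumed flow:

# Assumed behaviour: cancellation invokes cancelCallback, which completes
# the future, so an awaiting worker resumes normally instead of raising.
let worker = SlotQueueWorker.init()
worker.doneProcessing.cancelSoon()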
proc init*(
_: type SlotQueueItem,
@ -419,7 +429,6 @@ proc run(self: SlotQueue) {.async: (raises: []).} =
let fut = self.dispatch(worker, item)
self.trackedFutures.track(fut)
asyncSpawn fut
await sleepAsync(1.millis) # poll
except CancelledError:
@ -447,7 +456,6 @@ proc start*(self: SlotQueue) =
let fut = self.run()
self.trackedFutures.track(fut)
asyncSpawn fut
proc stop*(self: SlotQueue) {.async.} =
if not self.running:

View File

@ -30,6 +30,7 @@ method run*(
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market
without (request =? data.request):
raiseAssert "Request not set"
@ -42,17 +43,16 @@ method run*(
err:
error "Failure attempting to fill slot: unable to calculate collateral",
error = err.msg
return
return some State(SaleErrored(error: err))
debug "Filling slot"
try:
await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
except SlotStateMismatchError as e:
debug "Slot is already filled, ignoring slot"
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
except MarketError as e:
if e.msg.contains "Slot is not free":
debug "Slot is already filled, ignoring slot"
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State(SaleErrored(error: e))
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the SaleState
return some State(SaleFilled())

View File

@ -44,12 +44,11 @@ method run*(
try:
trace "Reserving slot"
await market.reserveSlot(data.requestId, data.slotIndex)
except SlotReservationNotAllowedError as e:
debug "Slot cannot be reserved, ignoring", error = e.msg
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
except MarketError as e:
if e.msg.contains "SlotReservations_ReservationNotAllowed":
debug "Slot cannot be reserved, ignoring", error = e.msg
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State(SaleErrored(error: e))
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the SaleState
trace "Slot successfully reserved"

View File

@ -315,13 +315,15 @@ proc new*[T, H](
cellSize = cellSize
if (manifest.blocksCount mod manifest.numSlots) != 0:
trace "Number of blocks must be divisable by number of slots."
return failure("Number of blocks must be divisable by number of slots.")
const msg = "Number of blocks must be divisible by number of slots."
trace msg
return failure(msg)
let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize
if (manifest.blockSize mod cellSize) != 0.NBytes:
trace "Block size must be divisable by cell size."
return failure("Block size must be divisable by cell size.")
const msg = "Block size must be divisible by cell size."
trace msg
return failure(msg)
let
numSlotBlocks = manifest.numSlotBlocks

View File

@ -38,7 +38,9 @@ type
AnyProof* = CircomProof
AnySampler* = Poseidon2Sampler
# add any other generic type here, eg. Poseidon2Sampler | ReinforceConcreteSampler
AnyBuilder* = Poseidon2Builder
# add any other generic type here, eg. Poseidon2Builder | ReinforceConcreteBuilder
AnyProofInputs* = ProofInputs[Poseidon2Hash]
Prover* = ref object of RootObj

View File

@ -57,6 +57,8 @@ template withExceptions(body: untyped) =
raise newLPStreamEOFError()
except AsyncStreamError as exc:
raise newException(LPStreamError, exc.msg)
except CatchableError as exc:
raise newException(Defect, "Unexpected error in AsyncStreamWrapper", exc)
method readOnce*(
self: AsyncStreamWrapper, pbytes: pointer, nbytes: int
@ -74,11 +76,13 @@ method readOnce*(
proc completeWrite(
self: AsyncStreamWrapper, fut: Future[void], msgLen: int
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
withExceptions:
await fut
method write*(self: AsyncStreamWrapper, msg: seq[byte]): Future[void] =
method write*(
self: AsyncStreamWrapper, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this
# drives up memory usage

View File

@ -67,13 +67,9 @@ method atEof*(self: StoreStream): bool =
self.offset >= self.size
type LPStreamReadError* = object of LPStreamError
par*: ref CatchableError
proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError =
var w = newException(LPStreamReadError, "Read stream failed")
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
w.par = p
result = w
newException(LPStreamReadError, "Read stream failed", p)
method readOnce*(
self: StoreStream, pbytes: pointer, nbytes: int

View File

@ -23,3 +23,16 @@ proc freeDoubleArray*(
# Free outer array
if not arr.isNil:
deallocShared(arr)
proc makeUncheckedArray*(
data: ref seq[seq[byte]]
): ptr UncheckedArray[ptr UncheckedArray[byte]] =
result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](alloc0(
sizeof(ptr UncheckedArray[byte]) * data[].len
))
for i, blk in data[]:
if blk.len > 0:
result[i] = cast[ptr UncheckedArray[byte]](addr blk[0])
else:
result[i] = nil
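A hypothetical usage sketch: the helper exposes the existing block buffers to the Leopard thread task without copying, so the caller must keep `data` alive for the duration and free only the outer array (the inner pointers are borrowed):

# data must outlive arr; empty seqs become nil entries.
let data = new(seq[seq[byte]])
data[] = @[@[1'u8, 2, 3], newSeq[byte]()]
let arr = makeUncheckedArray(data)
# ... hand arr to the encode/decode task ...
dealloc(arr)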

View File

@ -74,7 +74,6 @@ proc scheduler(machine: Machine) {.async: (raises: []).} =
debug "enter state", state = fromState & " => " & $machine.state
running = machine.run(machine.state)
machine.trackedFutures.track(running)
asyncSpawn running
except CancelledError:
break # do not propagate bc it is asyncSpawned
@ -88,7 +87,6 @@ proc start*(machine: Machine, initialState: State) =
machine.started = true
let fut = machine.scheduler()
machine.trackedFutures.track(fut)
asyncSpawn fut
machine.schedule(Event.transition(machine.state, initialState))
proc stop*(machine: Machine) {.async.} =

View File

@ -50,7 +50,6 @@ method start*(
timer.callback = callback
timer.interval = interval
timer.loopFuture = timerLoop(timer)
asyncSpawn timer.loopFuture
method stop*(timer: Timer) {.async, base.} =
if timer.loopFuture != nil and not timer.loopFuture.finished:

View File

@ -5,9 +5,11 @@ import ../logutils
{.push raises: [].}
type TrackedFutures* = ref object
futures: Table[uint, FutureBase]
cancelling: bool
type
TrackedFuture = Future[void].Raising([])
TrackedFutures* = ref object
futures: Table[uint, TrackedFuture]
cancelling: bool
logScope:
topics = "trackable futures"
@ -15,15 +17,18 @@ logScope:
proc len*(self: TrackedFutures): int =
self.futures.len
proc removeFuture(self: TrackedFutures, future: FutureBase) =
proc removeFuture(self: TrackedFutures, future: TrackedFuture) =
if not self.cancelling and not future.isNil:
self.futures.del(future.id)
proc track*[T](self: TrackedFutures, fut: Future[T]) =
proc track*(self: TrackedFutures, fut: TrackedFuture) =
if self.cancelling:
return
self.futures[fut.id] = FutureBase(fut)
if fut.finished:
return
self.futures[fut.id] = fut
proc cb(udata: pointer) =
self.removeFuture(fut)
@ -33,13 +38,8 @@ proc track*[T](self: TrackedFutures, fut: Future[T]) =
proc cancelTracked*(self: TrackedFutures) {.async: (raises: []).} =
self.cancelling = true
trace "cancelling tracked futures"
var cancellations: seq[FutureBase]
for future in self.futures.values:
if not future.isNil and not future.finished:
cancellations.add future.cancelAndWait()
trace "cancelling tracked futures", len = self.futures.len
let cancellations = self.futures.values.toSeq.mapIt(it.cancelAndWait())
await noCancel allFutures cancellations
self.futures.clear()
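Only futures typed as raising nothing can be tracked now, which is what allows the separate `asyncSpawn` calls deleted throughout this diff to go away. A hypothetical shutdown flow under that assumption:

# track() installs a completion callback that untracks the future;
# cancelTracked() cancels whatever is still pending without raising.
proc job(): Future[void] {.async: (raises: []).} =
  try:
    await sleepAsync(1.seconds)
  except CancelledError:
    discard

let tracked = TrackedFutures()
tracked.track(job())
# later, on shutdown:
# await tracked.cancelTracked()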

View File

@ -142,7 +142,6 @@ proc start*(validation: Validation) {.async.} =
await validation.subscribeSlotFilled()
await validation.restoreHistoricalState()
validation.running = validation.run()
asyncSpawn validation.running
proc stop*(validation: Validation) {.async.} =
if not validation.running.isNil and not validation.running.finished:

View File

@ -56,7 +56,7 @@ in pkgs.gcc13Stdenv.mkDerivation rec {
fakeCargo
];
# Disable CPU optmizations that make binary not portable.
# Disable CPU optimizations that make binary not portable.
NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}";
# Avoid Nim cache permission errors.
XDG_CACHE_HOME = "/tmp";

View File

@ -1,3 +1,3 @@
import pkg/asynctest/chronos/unittest
import pkg/asynctest/chronos/unittest2
export unittest
export unittest2

View File

@ -84,12 +84,12 @@ asyncchecksuite "Block Advertising and Discovery":
blockDiscovery.publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
): Future[void] {.async, gcsafe.} =
): Future[void] {.async: (raises: [CancelledError]).} =
return
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async.} =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
await engine.resolveBlocks(blocks.filterIt(it.cid == cid))
await allFuturesThrowing(allFinished(pendingBlocks))
@ -97,17 +97,17 @@ asyncchecksuite "Block Advertising and Discovery":
await engine.stop()
test "Should advertise trees":
let
cids = @[manifest.treeCid]
advertised = initTable.collect:
for cid in cids:
{cid: newFuture[void]()}
let cids = @[manifest.treeCid]
var advertised = initTable.collect:
for cid in cids:
{cid: newFuture[void]()}
blockDiscovery.publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
) {.async.} =
if cid in advertised and not advertised[cid].finished():
advertised[cid].complete()
) {.async: (raises: [CancelledError]).} =
advertised.withValue(cid, fut):
if not fut[].finished:
fut[].complete()
await engine.start()
await allFuturesThrowing(allFinished(toSeq(advertised.values)))
@ -118,7 +118,7 @@ asyncchecksuite "Block Advertising and Discovery":
blockDiscovery.publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
) {.async.} =
) {.async: (raises: [CancelledError]).} =
check:
cid notin blockCids
@ -138,7 +138,7 @@ asyncchecksuite "Block Advertising and Discovery":
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check false
await engine.start()
@ -221,17 +221,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
): Future[void] {.async.} =
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[1].peerInfo.signedPeerRecord
MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
): Future[void] {.async.} =
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[2].peerInfo.signedPeerRecord
MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
): Future[void] {.async.} =
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[3].peerInfo.signedPeerRecord
discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid)
@ -266,23 +266,21 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async.} =
if cid in advertised:
result.add(advertised[cid])
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
advertised.withValue(cid, val):
result.add(val[])
let futs = collect(newSeq):
for m in mBlocks[0 .. 2]:
blockexc[0].engine.requestBlock(m.cid)
await allFuturesThrowing(
switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start())
)
.wait(10.seconds)
await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds)
await allFutures(futs).wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop()))
.wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds)
await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds)
test "E2E - Should advertise and discover blocks with peers already connected":
# Distribute the blocks amongst 1..3
@ -292,17 +290,17 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
): Future[void] {.async.} =
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[1].peerInfo.signedPeerRecord
MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
): Future[void] {.async.} =
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[2].peerInfo.signedPeerRecord
MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
): Future[void] {.async.} =
) {.async: (raises: [CancelledError]).} =
advertised[cid] = switch[3].peerInfo.signedPeerRecord
discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid)
@ -337,18 +335,16 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async.} =
if cid in advertised:
return @[advertised[cid]]
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
advertised.withValue(cid, val):
return @[val[]]
let futs = mBlocks[0 .. 2].mapIt(blockexc[0].engine.requestBlock(it.cid))
await allFuturesThrowing(
switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start())
)
.wait(10.seconds)
await allFuturesThrowing(switch.mapIt(it.start())).wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.start())).wait(10.seconds)
await allFutures(futs).wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop()))
.wait(10.seconds)
await allFuturesThrowing(blockexc.mapIt(it.engine.stop())).wait(10.seconds)
await allFuturesThrowing(switch.mapIt(it.stop())).wait(10.seconds)

View File

@ -68,7 +68,7 @@ asyncchecksuite "Test Discovery Engine":
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async, gcsafe.} =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
pendingBlocks.resolve(
blocks.filterIt(it.cid == cid).mapIt(
BlockDelivery(blk: it, address: it.address)
@ -94,7 +94,7 @@ asyncchecksuite "Test Discovery Engine":
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async, gcsafe.} =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check cid == blocks[0].cid
if not want.finished:
want.complete()
@ -122,7 +122,7 @@ asyncchecksuite "Test Discovery Engine":
var pendingCids = newSeq[Cid]()
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async, gcsafe.} =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check cid in pendingCids
pendingCids.keepItIf(it != cid)
check peerStore.len < minPeers
@ -159,12 +159,12 @@ asyncchecksuite "Test Discovery Engine":
discoveryLoopSleep = 100.millis,
concurrentDiscReqs = 2,
)
reqs = newFuture[void]()
reqs = Future[void].Raising([CancelledError]).init()
count = 0
blockDiscovery.findBlockProvidersHandler = proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.gcsafe, async.} =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
check cid == blocks[0].cid
if count > 0:
check false

View File

@ -34,7 +34,7 @@ asyncchecksuite "Advertiser":
advertised = newSeq[Cid]()
blockDiscovery.publishBlockProvideHandler = proc(
d: MockDiscovery, cid: Cid
) {.async, gcsafe.} =
) {.async: (raises: [CancelledError]), gcsafe.} =
advertised.add(cid)
advertiser = Advertiser.new(localStore, blockDiscovery)

View File

@ -22,7 +22,7 @@ import ../../examples
const NopSendWantCancellationsProc = proc(
id: PeerId, addresses: seq[BlockAddress]
) {.gcsafe, async.} =
) {.async: (raises: [CancelledError]).} =
discard
asyncchecksuite "NetworkStore engine basic":
@ -66,20 +66,17 @@ asyncchecksuite "NetworkStore engine basic":
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
) {.gcsafe, async.} =
) {.async: (raises: [CancelledError]).} =
check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted
done.complete()
let
network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList))
localStore = CacheStore.new(blocks.mapIt(it))
discovery = DiscoveryEngine.new(
localStore, peerStore, network, blockDiscovery, pendingBlocks
)
advertiser = Advertiser.new(localStore, blockDiscovery)
engine = BlockExcEngine.new(
localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks
)
@ -93,7 +90,9 @@ asyncchecksuite "NetworkStore engine basic":
test "Should send account to new peers":
let pricing = Pricing.example
proc sendAccount(peer: PeerId, account: Account) {.gcsafe, async.} =
proc sendAccount(
peer: PeerId, account: Account
) {.async: (raises: [CancelledError]).} =
check account.address == pricing.address
done.complete()
@ -186,7 +185,9 @@ asyncchecksuite "NetworkStore engine handlers":
done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid))
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
check presence.mapIt(it.address) == wantList.entries.mapIt(it.address)
done.complete()
@ -203,7 +204,9 @@ asyncchecksuite "NetworkStore engine handlers":
done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true)
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
check presence.mapIt(it.address) == wantList.entries.mapIt(it.address)
for p in presence:
check:
@ -222,7 +225,9 @@ asyncchecksuite "NetworkStore engine handlers":
done = newFuture[void]()
wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true)
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
proc sendPresence(
peerId: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
for p in presence:
if p.address.cidOrTreeCid != blocks[0].cid and
p.address.cidOrTreeCid != blocks[1].cid:
@ -266,19 +271,21 @@ asyncchecksuite "NetworkStore engine handlers":
peerContext.account = account.some
peerContext.blocks = blocks.mapIt(
(it.address, Presence(address: it.address, price: rand(uint16).u256))
(it.address, Presence(address: it.address, price: rand(uint16).u256, have: true))
).toTable
engine.network = BlockExcNetwork(
request: BlockExcRequest(
sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} =
sendPayment: proc(
receiver: PeerId, payment: SignedState
) {.async: (raises: [CancelledError]).} =
let
amount = blocks.mapIt(peerContext.blocks[it.address].price).foldl(a + b)
amount =
blocks.mapIt(peerContext.blocks[it.address].catch.get.price).foldl(a + b)
balances = !payment.state.outcome.balances(Asset)
check receiver == peerId
check balances[account.address.toDestination] == amount
check balances[account.address.toDestination].catch.get == amount
done.complete(),
# Install NOP for want list cancellations so they don't cause a crash
@ -286,10 +293,12 @@ asyncchecksuite "NetworkStore engine handlers":
)
)
let requestedBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.address))
await engine.blocksDeliveryHandler(
peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address))
)
await done.wait(100.millis)
await allFuturesThrowing(requestedBlocks).wait(100.millis)
test "Should handle block presence":
var handles:
@ -303,7 +312,7 @@ asyncchecksuite "NetworkStore engine handlers":
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
) {.gcsafe, async.} =
) {.async: (raises: [CancelledError]).} =
engine.pendingBlocks.resolve(
blocks.filterIt(it.address in addresses).mapIt(
BlockDelivery(blk: it, address: it.address)
@ -340,9 +349,9 @@ asyncchecksuite "NetworkStore engine handlers":
proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
) {.gcsafe, async.} =
) {.async: (raises: [CancelledError]).} =
for address in addresses:
cancellations[address].complete()
cancellations[address].catch.expect("address should exist").complete()
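# (editor's note, not part of this diff) `.catch` wraps the raising table
# lookup in a Result, so a missing key fails the test with the given
# message instead of escaping as a KeyError.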
engine.network = BlockExcNetwork(
request: BlockExcRequest(sendWantCancellations: sendWantCancellations)
@ -416,7 +425,7 @@ asyncchecksuite "Block Download":
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
) {.gcsafe, async.} =
) {.async: (raises: [CancelledError]).} =
check wantType == WantHave
check not engine.pendingBlocks.isInFlight(address)
check engine.pendingBlocks.retries(address) == retries
@ -433,7 +442,7 @@ asyncchecksuite "Block Download":
discard (await pending).tryGet()
test "Should retry block request":
let
var
address = BlockAddress.init(blocks[0].cid)
steps = newAsyncEvent()
@ -445,7 +454,7 @@ asyncchecksuite "Block Download":
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
) {.gcsafe, async.} =
) {.async: (raises: [CancelledError]).} =
case wantType
of WantHave:
check engine.pendingBlocks.isInFlight(address) == false
@ -467,7 +476,7 @@ asyncchecksuite "Block Download":
let pending = engine.requestBlock(address)
await steps.wait()
# add blocks presence
# add blocks presence
peerCtx.blocks = blocks.mapIt(
(it.address, Presence(address: it.address, have: true, price: UInt256.example))
).toTable
@ -493,7 +502,7 @@ asyncchecksuite "Block Download":
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
) {.gcsafe, async.} =
) {.async: (raises: [CancelledError]).} =
done.complete()
engine.pendingBlocks.blockRetries = 10
@ -573,7 +582,7 @@ asyncchecksuite "Task Handler":
test "Should send want-blocks in priority order":
proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, async.} =
) {.async: (raises: [CancelledError]).} =
check blocksDelivery.len == 2
check:
blocksDelivery[1].address == blocks[0].address
@ -610,7 +619,7 @@ asyncchecksuite "Task Handler":
test "Should set in-flight for outgoing blocks":
proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, async.} =
) {.async: (raises: [CancelledError]).} =
check peersCtx[0].peerWants[0].inFlight
for blk in blocks:
@ -649,7 +658,9 @@ asyncchecksuite "Task Handler":
let missing = @[Block.new("missing".toBytes).tryGet()]
let price = (!engine.pricing).price
proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
proc sendPresence(
id: PeerId, presence: seq[BlockPresence]
) {.async: (raises: [CancelledError]).} =
check presence.mapIt(!Presence.init(it)) ==
@[
Presence(address: present[0].address, have: true, price: price),

View File

@ -1,10 +1,10 @@
import std/unittest
import pkg/unittest2
import pkg/codex/stores
import ../../examples
import ../../helpers
checksuite "engine payments":
suite "Engine payments":
let address = EthAddress.example
let amount = 42.u256

View File

@ -6,7 +6,7 @@ import ../../../asynctest
import ../../examples
import ../../helpers
checksuite "account protobuf messages":
suite "account protobuf messages":
let account = Account(address: EthAddress.example)
let message = AccountMessage.init(account)
@ -21,7 +21,7 @@ checksuite "account protobuf messages":
incorrect.address.del(0)
check Account.init(incorrect).isNone
checksuite "channel update messages":
suite "channel update messages":
let state = SignedState.example
let update = StateChannelUpdate.init(state)

View File

@ -6,7 +6,7 @@ import ../../../asynctest
import ../../examples
import ../../helpers
checksuite "block presence protobuf messages":
suite "block presence protobuf messages":
let
cid = Cid.example
address = BlockAddress(leaf: false, cid: cid)

View File

@ -26,7 +26,7 @@ asyncchecksuite "Network - Handlers":
blocks: seq[bt.Block]
done: Future[void]
proc getConn(): Future[Connection] {.async.} =
proc getConn(): Future[Connection] {.async: (raises: [CancelledError]).} =
return Connection(buffer)
setup:
@ -45,7 +45,7 @@ asyncchecksuite "Network - Handlers":
discard await networkPeer.connect()
test "Want List handler":
proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} =
proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
@ -72,7 +72,7 @@ asyncchecksuite "Network - Handlers":
test "Blocks Handler":
proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, async.} =
) {.async: (raises: []).} =
check blocks == blocksDelivery.mapIt(it.blk)
done.complete()
@ -85,7 +85,9 @@ asyncchecksuite "Network - Handlers":
await done.wait(500.millis)
test "Presence Handler":
proc presenceHandler(peer: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
proc presenceHandler(
peer: PeerId, presence: seq[BlockPresence]
) {.async: (raises: []).} =
for b in blocks:
check:
b.address in presence
@ -105,7 +107,7 @@ asyncchecksuite "Network - Handlers":
test "Handles account messages":
let account = Account(address: EthAddress.example)
proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} =
proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} =
check received == account
done.complete()
@ -119,7 +121,7 @@ asyncchecksuite "Network - Handlers":
test "Handles payment messages":
let payment = SignedState.example
proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} =
proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} =
check received == payment
done.complete()
@ -165,7 +167,7 @@ asyncchecksuite "Network - Senders":
await allFuturesThrowing(switch1.stop(), switch2.stop())
test "Send want list":
proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} =
proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
@ -195,7 +197,7 @@ asyncchecksuite "Network - Senders":
test "send blocks":
proc blocksDeliveryHandler(
peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, async.} =
) {.async: (raises: []).} =
check blocks == blocksDelivery.mapIt(it.blk)
done.complete()
@ -207,7 +209,9 @@ asyncchecksuite "Network - Senders":
await done.wait(500.millis)
test "send presence":
proc presenceHandler(peer: PeerId, precense: seq[BlockPresence]) {.gcsafe, async.} =
proc presenceHandler(
peer: PeerId, precense: seq[BlockPresence]
) {.async: (raises: []).} =
for b in blocks:
check:
b.address in precense
@ -226,7 +230,7 @@ asyncchecksuite "Network - Senders":
test "send account":
let account = Account(address: EthAddress.example)
proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} =
proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} =
check received == account
done.complete()
@ -238,7 +242,7 @@ asyncchecksuite "Network - Senders":
test "send payment":
let payment = SignedState.example
proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} =
proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} =
check received == payment
done.complete()
@ -276,7 +280,7 @@ asyncchecksuite "Network - Test Limits":
let account = Account(address: EthAddress.example)
network2.handlers.onAccount = proc(
peer: PeerId, received: Account
) {.gcsafe, async.} =
) {.async: (raises: []).} =
check false
let fut = network1.send(

View File

@ -1,7 +1,7 @@
import std/sugar
import std/sequtils
import std/unittest
import pkg/unittest2
import pkg/libp2p
import pkg/codex/blockexchange/peers
@ -11,7 +11,7 @@ import pkg/codex/blockexchange/protobuf/presence
import ../helpers
import ../examples
checksuite "Peer Context Store":
suite "Peer Context Store":
var
store: PeerCtxStore
peerCtx: BlockExcPeerCtx
@ -31,7 +31,7 @@ checksuite "Peer Context Store":
test "Should get peer":
check store.get(peerCtx.id) == peerCtx
checksuite "Peer Context Store Peer Selection":
suite "Peer Context Store Peer Selection":
var
store: PeerCtxStore
peerCtxs: seq[BlockExcPeerCtx]

View File

@ -10,7 +10,7 @@ import pkg/codex/blockexchange
import ../helpers
import ../../asynctest
checksuite "Pending Blocks":
suite "Pending Blocks":
test "Should add want handle":
let
pendingBlocks = PendingBlocksManager.new()

View File

@ -21,7 +21,7 @@ proc new*(
var consumed = 0
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.async, gcsafe, raises: [Defect].} =
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
if consumed >= dataset.len:
return 0
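# (editor's note, not part of this diff) the reader callback now declares
# its failure modes explicitly via `raises: [ChunkerError, CancelledError]`;
# returning 0, as above, signals end of data to the chunker.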

View File

@ -14,29 +14,42 @@ import pkg/codex/discovery
import pkg/contractabi/address as ca
type MockDiscovery* = ref object of Discovery
findBlockProvidersHandler*:
proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.gcsafe.}
publishBlockProvideHandler*: proc(d: MockDiscovery, cid: Cid): Future[void] {.gcsafe.}
findHostProvidersHandler*:
proc(d: MockDiscovery, host: ca.Address): Future[seq[SignedPeerRecord]] {.gcsafe.}
publishHostProvideHandler*:
proc(d: MockDiscovery, host: ca.Address): Future[void] {.gcsafe.}
findBlockProvidersHandler*: proc(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).}
publishBlockProvideHandler*:
proc(d: MockDiscovery, cid: Cid): Future[void] {.async: (raises: [CancelledError]).}
findHostProvidersHandler*: proc(
d: MockDiscovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).}
publishHostProvideHandler*: proc(d: MockDiscovery, host: ca.Address): Future[void] {.
async: (raises: [CancelledError])
.}
proc new*(T: type MockDiscovery): MockDiscovery =
MockDiscovery()
proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
proc findPeer*(
d: Discovery, peerId: PeerId
): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
## mock find a peer - always return none
##
##
return none(PeerRecord)
method find*(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} =
method find*(
d: MockDiscovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
if isNil(d.findBlockProvidersHandler):
return
return await d.findBlockProvidersHandler(d, cid)
method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
method provide*(
d: MockDiscovery, cid: Cid
): Future[void] {.async: (raises: [CancelledError]).} =
if isNil(d.publishBlockProvideHandler):
return
@ -44,13 +57,15 @@ method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
method find*(
d: MockDiscovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async.} =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]).} =
if isNil(d.findHostProvidersHandler):
return
return await d.findHostProvidersHandler(d, host)
method provide*(d: MockDiscovery, host: ca.Address): Future[void] {.async.} =
method provide*(
d: MockDiscovery, host: ca.Address
): Future[void] {.async: (raises: [CancelledError]).} =
if isNil(d.publishHostProvideHandler):
return

View File

@ -46,7 +46,8 @@ type
subscriptions: Subscriptions
config*: MarketplaceConfig
canReserveSlot*: bool
reserveSlotThrowError*: ?(ref MarketError)
errorOnReserveSlot*: ?(ref MarketError)
errorOnFillSlot*: ?(ref CatchableError)
clock: ?Clock
Fulfillment* = object
@ -289,6 +290,9 @@ proc fillSlot*(
host: Address,
collateral = 0.u256,
) =
if error =? market.errorOnFillSlot:
raise error
let slot = MockSlot(
requestId: requestId,
slotIndex: slotIndex,
@ -370,7 +374,7 @@ method canProofBeMarkedAsMissing*(
method reserveSlot*(
market: MockMarket, requestId: RequestId, slotIndex: uint64
) {.async.} =
if error =? market.reserveSlotThrowError:
if error =? market.errorOnReserveSlot:
raise error
method canReserveSlot*(
@ -381,8 +385,19 @@ method canReserveSlot*(
func setCanReserveSlot*(market: MockMarket, canReserveSlot: bool) =
market.canReserveSlot = canReserveSlot
func setReserveSlotThrowError*(market: MockMarket, error: ?(ref MarketError)) =
market.reserveSlotThrowError = error
func setErrorOnReserveSlot*(market: MockMarket, error: ref MarketError) =
market.errorOnReserveSlot =
if error.isNil:
none (ref MarketError)
else:
some error
func setErrorOnFillSlot*(market: MockMarket, error: ref CatchableError) =
market.errorOnFillSlot =
if error.isNil:
none (ref CatchableError)
else:
some error
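# Editor's note (not part of this diff): both setters accept nil to clear a
# previously injected error, e.g.:
#   market.setErrorOnReserveSlot(nil)
#   market.setErrorOnFillSlot(nil)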
method subscribeRequests*(
market: MockMarket, callback: OnRequest

View File

@ -26,7 +26,7 @@ proc new*(
var consumed = 0
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.async, gcsafe, raises: [Defect].} =
): Future[int] {.async: (raises: [ChunkerError, CancelledError]), gcsafe.} =
var alpha = toSeq(byte('A') .. byte('z'))
if consumed >= size:

View File

@ -1,4 +1,4 @@
import std/unittest
import pkg/unittest2
import pkg/codex/merkletree

View File

@ -1,4 +1,4 @@
import std/unittest
import pkg/unittest2
import pkg/questionable/results
import pkg/stew/byteutils
@ -18,7 +18,7 @@ const data = [
"00000000000000000000000000000009".toBytes, "00000000000000000000000000000010".toBytes,
]
checksuite "merkletree - coders":
suite "merkletree - coders":
test "encoding and decoding a tree yields the same tree":
let
tree = CodexTree.init(Sha256HashCodec, data).tryGet()

View File

@ -1,6 +1,6 @@
import std/unittest
import std/sequtils
import pkg/unittest2
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/libp2p

View File

@ -1,7 +1,7 @@
import std/unittest
import std/sequtils
import std/random
import pkg/unittest2
import pkg/poseidon2
import pkg/poseidon2/sponge

View File

@ -1,6 +1,6 @@
import std/unittest
import std/sequtils
import pkg/unittest2
import pkg/poseidon2
import pkg/poseidon2/io
import pkg/questionable/results

View File

@ -1,4 +1,4 @@
import std/unittest
import pkg/unittest2
import pkg/questionable
import pkg/codex/contracts/requests
import pkg/codex/sales/states/cancelled
@ -8,7 +8,7 @@ import pkg/codex/sales/states/filled
import ../../examples
import ../../helpers
checksuite "sales state 'downloading'":
suite "sales state 'downloading'":
let request = StorageRequest.example
let slotIndex = request.ask.slots div 2
var state: SaleDownloading

View File

@ -14,7 +14,7 @@ import ../../helpers/mockmarket
import ../../examples
import ../../helpers
checksuite "sales state 'filled'":
suite "sales state 'filled'":
let request = StorageRequest.example
let slotIndex = request.ask.slots div 2

View File

@ -1,18 +1,31 @@
import std/unittest
import pkg/questionable
import pkg/codex/contracts/requests
import pkg/codex/sales/states/filling
import pkg/codex/sales/states/cancelled
import pkg/codex/sales/states/failed
import pkg/codex/sales/states/ignored
import pkg/codex/sales/states/errored
import pkg/codex/sales/salesagent
import pkg/codex/sales/salescontext
import ../../../asynctest
import ../../examples
import ../../helpers
import ../../helpers/mockmarket
import ../../helpers/mockclock
checksuite "sales state 'filling'":
suite "sales state 'filling'":
let request = StorageRequest.example
let slotIndex = request.ask.slots div 2
var state: SaleFilling
var market: MockMarket
var clock: MockClock
var agent: SalesAgent
setup:
clock = MockClock.new()
market = MockMarket.new()
let context = SalesContext(market: market, clock: clock)
agent = newSalesAgent(context, request.id, slotIndex, request.some)
state = SaleFilling.new()
test "switches to cancelled state when request expires":
@ -22,3 +35,28 @@ checksuite "sales state 'filling'":
test "switches to failed state when request fails":
let next = state.onFailed(request)
check !next of SaleFailed
test "run switches to ignored when slot is not free":
let error = newException(
SlotStateMismatchError, "Failed to fill slot because the slot is not free"
)
market.setErrorOnFillSlot(error)
market.requested.add(request)
market.slotState[request.slotId(slotIndex)] = SlotState.Filled
let next = !(await state.run(agent))
check next of SaleIgnored
check SaleIgnored(next).reprocessSlot == false
check SaleIgnored(next).returnBytes
test "run switches to errored with other error ":
let error = newException(MarketError, "some error")
market.setErrorOnFillSlot(error)
market.requested.add(request)
market.slotState[request.slotId(slotIndex)] = SlotState.Filled
let next = !(await state.run(agent))
check next of SaleErrored
let errored = SaleErrored(next)
check errored.error == error

View File

@ -54,15 +54,16 @@ asyncchecksuite "sales state 'SlotReserving'":
test "run switches to errored when slot reservation errors":
let error = newException(MarketError, "some error")
market.setReserveSlotThrowError(some error)
market.setErrorOnReserveSlot(error)
let next = !(await state.run(agent))
check next of SaleErrored
let errored = SaleErrored(next)
check errored.error == error
test "catches reservation not allowed error":
let error = newException(MarketError, "SlotReservations_ReservationNotAllowed")
market.setReserveSlotThrowError(some error)
test "run switches to ignored when reservation is not allowed":
let error =
newException(SlotReservationNotAllowedError, "Reservation is not allowed")
market.setErrorOnReserveSlot(error)
let next = !(await state.run(agent))
check next of SaleIgnored
check SaleIgnored(next).reprocessSlot == false

View File

@ -14,7 +14,7 @@ import ../../helpers/mockmarket
import ../../examples
import ../../helpers
checksuite "sales state 'unknown'":
suite "sales state 'unknown'":
let request = StorageRequest.example
let slotIndex = request.ask.slots div 2
let slotId = slotId(request.id, slotIndex)

View File

@ -283,35 +283,95 @@ asyncchecksuite "Reservations module":
check updated.isErr
check updated.error of NotExistsError
test "onAvailabilityAdded called when availability is created":
test "OnAvailabilitySaved called when availability is created":
var added: Availability
reservations.onAvailabilityAdded = proc(a: Availability) {.async.} =
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
added = a
let availability = createAvailability()
check added == availability
test "onAvailabilityAdded called when availability size is increased":
test "OnAvailabilitySaved called when availability size is increased":
var availability = createAvailability()
var added: Availability
reservations.onAvailabilityAdded = proc(a: Availability) {.async.} =
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
added = a
availability.freeSize += 1
discard await reservations.update(availability)
check added == availability
test "onAvailabilityAdded is not called when availability size is decreased":
test "OnAvailabilitySaved is not called when availability size is decreased":
var availability = createAvailability()
var called = false
reservations.onAvailabilityAdded = proc(a: Availability) {.async.} =
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
called = true
availability.freeSize -= 1
discard await reservations.update(availability)
check not called
test "OnAvailabilitySaved called when availability duration is increased":
var availability = createAvailability()
var added: Availability
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
added = a
availability.duration += 1
discard await reservations.update(availability)
check added == availability
test "OnAvailabilitySaved is not called when availability duration is decreased":
var availability = createAvailability()
var called = false
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
called = true
availability.duration -= 1
discard await reservations.update(availability)
check not called
test "OnAvailabilitySaved called when availability minPricePerBytePerSecond is increased":
var availability = createAvailability()
var added: Availability
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
added = a
availability.minPricePerBytePerSecond += 1.u256
discard await reservations.update(availability)
check added == availability
test "OnAvailabilitySaved is not called when availability minPricePerBytePerSecond is decreased":
var availability = createAvailability()
var called = false
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
called = true
availability.minPricePerBytePerSecond -= 1.u256
discard await reservations.update(availability)
check not called
test "OnAvailabilitySaved called when availability totalCollateral is increased":
var availability = createAvailability()
var added: Availability
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
added = a
availability.totalCollateral = availability.totalCollateral + 1.u256
discard await reservations.update(availability)
check added == availability
test "OnAvailabilitySaved is not called when availability totalCollateral is decreased":
var availability = createAvailability()
var called = false
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
called = true
availability.totalCollateral = availability.totalCollateral - 1.u256
discard await reservations.update(availability)
check not called
test "availabilities can be found":
let availability = createAvailability()

View File

@ -236,10 +236,17 @@ asyncchecksuite "Sales":
return true
proc addRequestToSaturatedQueue(): Future[StorageRequest] {.async.} =
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
await sleepAsync(10.millis)
itemsProcessed.add item
done.complete()
queue.onProcessSlot = proc(
item: SlotQueueItem, done: Future[void]
) {.async: (raises: []).} =
try:
await sleepAsync(10.millis)
itemsProcessed.add item
except CancelledError as exc:
checkpoint(exc.msg)
finally:
if not done.finished:
done.complete()
var request1 = StorageRequest.example
request1.ask.collateralPerByte = request.ask.collateralPerByte + 1
@ -261,9 +268,12 @@ asyncchecksuite "Sales":
waitFor run()
test "processes all request's slots once StorageRequested emitted":
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
queue.onProcessSlot = proc(
item: SlotQueueItem, done: Future[void]
) {.async: (raises: []).} =
itemsProcessed.add item
done.complete()
if not done.finished:
done.complete()
createAvailability()
await market.requestStorage(request)
let items = SlotQueueItem.init(request, collateral = request.ask.collateralPerSlot)
@ -299,9 +309,12 @@ asyncchecksuite "Sales":
check always (not itemsProcessed.contains(expected))
test "adds slot index to slot queue once SlotFreed emitted":
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
queue.onProcessSlot = proc(
item: SlotQueueItem, done: Future[void]
) {.async: (raises: []).} =
itemsProcessed.add item
done.complete()
if not done.finished:
done.complete()
createAvailability()
market.requested.add request # "contract" must be able to return request

View File

@ -50,12 +50,19 @@ suite "Slot queue start/stop":
suite "Slot queue workers":
var queue: SlotQueue
proc onProcessSlot(item: SlotQueueItem, doneProcessing: Future[void]) {.async.} =
await sleepAsync(1000.millis)
proc onProcessSlot(
item: SlotQueueItem, doneProcessing: Future[void]
) {.async: (raises: []).} =
# Note: this simplifies the realistic scenario, in which the `doneProcessing`
# future is handed off to another context and completed there, so the real
# fix is not as simple as making the callback async
doneProcessing.complete()
try:
await sleepAsync(1000.millis)
except CatchableError as exc:
checkpoint(exc.msg)
finally:
if not doneProcessing.finished:
doneProcessing.complete()
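# Editor's sketch (not part of this diff) of the realistic shape described in
# the note above: the callback hands `doneProcessing` to another task, which
# completes it later from that context.
proc onProcessSlotRealistic(
    item: SlotQueueItem, doneProcessing: Future[void]
) {.async: (raises: []).} =
  proc worker() {.async.} =
    await sleepAsync(10.millis) # stand-in for the actual slot processing
    if not doneProcessing.finished:
      doneProcessing.complete()
  asyncSpawn worker()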
setup:
let request = StorageRequest.example
@ -89,9 +96,14 @@ suite "Slot queue workers":
check eventually queue.activeWorkers == 3
test "discards workers once processing completed":
proc processSlot(item: SlotQueueItem, done: Future[void]) {.async.} =
await sleepAsync(1.millis)
done.complete()
proc processSlot(item: SlotQueueItem, done: Future[void]) {.async: (raises: []).} =
try:
await sleepAsync(1.millis)
except CatchableError as exc:
checkpoint(exc.msg)
finally:
if not done.finished:
done.complete()
queue.onProcessSlot = processSlot
@ -114,11 +126,19 @@ suite "Slot queue":
proc newSlotQueue(maxSize, maxWorkers: int, processSlotDelay = 1.millis) =
queue = SlotQueue.new(maxWorkers, maxSize.uint16)
queue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
await sleepAsync(processSlotDelay)
onProcessSlotCalled = true
onProcessSlotCalledWith.add (item.requestId, item.slotIndex)
done.complete()
queue.onProcessSlot = proc(
item: SlotQueueItem, done: Future[void]
) {.async: (raises: []).} =
try:
await sleepAsync(processSlotDelay)
except CatchableError as exc:
checkpoint(exc.msg)
finally:
onProcessSlotCalled = true
onProcessSlotCalledWith.add (item.requestId, item.slotIndex)
if not done.finished:
done.complete()
queue.start()
setup:

View File

@ -133,7 +133,7 @@ suite "Slot builder":
check:
Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg ==
"Number of blocks must be divisable by number of slots."
"Number of blocks must be divisible by number of slots."
test "Block size must be divisable by cell size":
let mismatchManifest = Manifest.new(
@ -151,7 +151,7 @@ suite "Slot builder":
check:
Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg ==
"Block size must be divisable by cell size."
"Block size must be divisible by cell size."
test "Should build correct slot builder":
builder =

View File

@ -1,6 +1,6 @@
import std/unittest
import std/random
import pkg/unittest2
import pkg/stew/objects
import pkg/questionable
import pkg/questionable/results
@ -11,7 +11,7 @@ import pkg/codex/stores/repostore/coders
import ../../helpers
checksuite "Test coders":
suite "Test coders":
proc rand(T: type NBytes): T =
rand(Natural).NBytes

View File

@ -11,7 +11,7 @@ import ./commonstoretests
import ../../asynctest
import ../helpers
checksuite "Cache Store":
suite "Cache Store":
var
newBlock, newBlock1, newBlock2, newBlock3: Block
store: CacheStore

View File

@ -36,7 +36,7 @@ proc createManifestCid(): ?!Cid =
let cid = ?Cid.init(version, codec, hash).mapFailure
return success cid
checksuite "KeyUtils":
suite "KeyUtils":
test "makePrefixKey should create block key":
let length = 6
let cid = Cid.example

View File

@ -21,7 +21,7 @@ import ../examples
import codex/stores/maintenance
checksuite "BlockMaintainer":
suite "BlockMaintainer":
var mockRepoStore: MockRepoStore
var interval: Duration
var mockTimer: MockTimer

View File

@ -24,7 +24,7 @@ import ../helpers/mockclock
import ../examples
import ./commonstoretests
checksuite "Test RepoStore start/stop":
suite "Test RepoStore start/stop":
var
repoDs: Datastore
metaDs: Datastore

View File

@ -22,7 +22,7 @@ proc toSortedSeq[T](h: AsyncHeapQueue[T], queueType = QueueType.Min): seq[T] =
while tmp.len > 0:
result.add(popNoWait(tmp).tryGet())
checksuite "Synchronous tests":
suite "Synchronous tests":
test "Test pushNoWait - Min":
var heap = newAsyncHeapQueue[int]()
let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]

View File

@ -27,7 +27,7 @@ asyncchecksuite "Chunking":
let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0]
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
let read = min(contents.len - offset, len)
if read == 0:
return 0
@ -97,8 +97,13 @@ asyncchecksuite "Chunking":
discard (await chunker.getBytes())
test "stream should forward LPStreamError":
expect LPStreamError:
try:
await raiseStreamException(newException(LPStreamError, "test error"))
except ChunkerError as exc:
check exc.parent of LPStreamError
except CatchableError as exc:
checkpoint("Unexpected error: " & exc.msg)
fail()
test "stream should catch LPStreamEOFError":
await raiseStreamException(newException(LPStreamEOFError, "test error"))
@ -106,7 +111,3 @@ asyncchecksuite "Chunking":
test "stream should forward CancelledError":
expect CancelledError:
await raiseStreamException(newException(CancelledError, "test error"))
test "stream should forward LPStreamError":
expect LPStreamError:
await raiseStreamException(newException(LPStreamError, "test error"))

View File

@ -1,9 +1,9 @@
import std/unittest
import pkg/unittest2
import codex/clock
import ./helpers
checksuite "Clock":
suite "Clock":
proc testConversion(seconds: SecondsSince1970) =
let asBytes = seconds.toBytes

View File

@ -228,7 +228,7 @@ suite "Erasure encode/decode":
discard (await erasure.decode(encoded)).tryGet()
test "Should concurrently encode/decode multiple datasets":
const iterations = 2
const iterations = 5
let
datasetSize = 1.MiBs
@ -335,18 +335,18 @@ suite "Erasure encode/decode":
for i in 0 ..< parityLen:
paritySeq[i] = cast[seq[byte]](parity[i])
# call encodeAsync to get the parity
# call asyncEncode to get the parity
let encFut =
await erasure.encodeAsync(BlockSize.int, blocksLen, parityLen, data, parity)
await erasure.asyncEncode(BlockSize.int, blocksLen, parityLen, data, parity)
check encFut.isOk
let decFut = await erasure.decodeAsync(
let decFut = await erasure.asyncDecode(
BlockSize.int, blocksLen, parityLen, data, paritySeq, recovered
)
check decFut.isOk
# call encodeAsync and cancel the task
let encodeFut = erasure.encodeAsync(
# call asyncEncode and cancel the task
let encodeFut = erasure.asyncEncode(
BlockSize.int, blocksLen, parityLen, data, cancelledTaskParity
)
encodeFut.cancel()
@ -359,8 +359,8 @@ suite "Erasure encode/decode":
for i in 0 ..< parityLen:
check equalMem(parity[i], cancelledTaskParity[i], BlockSize.int)
# call decodeAsync and cancel the task
let decodeFut = erasure.decodeAsync(
# call asyncDecode and cancel the task
let decodeFut = erasure.asyncDecode(
BlockSize.int, blocksLen, parityLen, data, paritySeq, cancelledTaskRecovered
)
decodeFut.cancel()

View File

@ -1,6 +1,7 @@
import std/options
import std/strutils
import std/unittest
import pkg/unittest2
import pkg/codex/blocktype
import pkg/codex/conf
import pkg/codex/contracts/requests

View File

@ -13,7 +13,7 @@ import ../asynctest
import ./helpers
import ./examples
checksuite "Manifest":
suite "Manifest":
let
manifest =
Manifest.new(treeCid = Cid.example, blockSize = 1.MiBs, datasetSize = 100.MiBs)

View File

@ -116,7 +116,7 @@ asyncchecksuite "Purchasing":
await purchase.wait()
check market.withdrawn == @[request.id]
checksuite "Purchasing state machine":
suite "Purchasing state machine":
var purchasing: Purchasing
var market: MockMarket
var clock: MockClock

View File

@ -1,10 +1,10 @@
import std/times
import std/unittest
import codex/systemclock
import pkg/unittest2
import pkg/codex/systemclock
import ./helpers
checksuite "SystemClock":
suite "SystemClock":
test "Should get now":
let clock = SystemClock.new()

View File

@ -7,7 +7,7 @@ import pkg/codex/utils/iter
import ../../asynctest
import ../helpers
checksuite "Test Iter":
suite "Test Iter":
test "Should be finished":
let iter = Iter[int].empty()

View File

@ -1,12 +1,14 @@
import std/unittest
import std/os
import codex/utils/keyutils
import pkg/unittest2
import pkg/codex/utils/keyutils
import ../helpers
when defined(windows):
import stew/windows/acl
checksuite "keyutils":
suite "keyutils":
let path = getTempDir() / "CodexTest"
setup:

View File

@ -1,8 +1,9 @@
import std/unittest
import codex/utils/options
import pkg/unittest2
import pkg/codex/utils/options
import ../helpers
checksuite "optional casts":
suite "optional casts":
test "casting value to same type works":
check 42 as int == some 42
@ -31,7 +32,7 @@ checksuite "optional casts":
check 42.some as string == string.none
check int.none as int == int.none
checksuite "Optionalize":
suite "Optionalize":
test "does not except non-object types":
static:
doAssert not compiles(Optionalize(int))

View File

@ -17,47 +17,71 @@ asyncchecksuite "tracked futures":
check module.trackedFutures.len == 0
test "tracks unfinished futures":
let fut = newFuture[void]("test")
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
module.trackedFutures.track(fut)
check module.trackedFutures.len == 1
test "does not track completed futures":
let fut = newFuture[void]("test")
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.complete()
module.trackedFutures.track(fut)
check eventually module.trackedFutures.len == 0
test "does not track failed futures":
let fut = newFuture[void]("test")
fut.fail((ref CatchableError)(msg: "some error"))
module.trackedFutures.track(fut)
check eventually module.trackedFutures.len == 0
check module.trackedFutures.len == 0
test "does not track cancelled futures":
let fut = newFuture[void]("test")
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.cancelCallback = proc(data: pointer) =
fut.cancelAndSchedule() # manually schedule the cancel
await fut.cancelAndWait()
module.trackedFutures.track(fut)
check eventually module.trackedFutures.len == 0
test "removes tracked future when finished":
let fut = newFuture[void]("test")
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
module.trackedFutures.track(fut)
check module.trackedFutures.len == 1
fut.complete()
check eventually module.trackedFutures.len == 0
test "removes tracked future when cancelled":
let fut = newFuture[void]("test")
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.cancelCallback = proc(data: pointer) =
fut.cancelAndSchedule() # manually schedule the cancel
module.trackedFutures.track(fut)
check module.trackedFutures.len == 1
await fut.cancelAndWait()
check eventually module.trackedFutures.len == 0
test "completed and removes future on cancel":
let fut = Future[void].Raising([]).init("test", {FutureFlag.OwnCancelSchedule})
fut.cancelCallback = proc(data: pointer) =
fut.complete()
module.trackedFutures.track(fut)
check module.trackedFutures.len == 1
await fut.cancelAndWait()
check eventually module.trackedFutures.len == 0
test "cancels and removes all tracked futures":
let fut1 = newFuture[void]("test1")
let fut2 = newFuture[void]("test2")
let fut3 = newFuture[void]("test3")
let fut1 = Future[void].Raising([]).init("test1", {FutureFlag.OwnCancelSchedule})
fut1.cancelCallback = proc(data: pointer) =
fut1.cancelAndSchedule() # manually schedule the cancel
let fut2 = Future[void].Raising([]).init("test2", {FutureFlag.OwnCancelSchedule})
fut2.cancelCallback = proc(data: pointer) =
fut2.cancelAndSchedule() # manually schedule the cancel
let fut3 = Future[void].Raising([]).init("test3", {FutureFlag.OwnCancelSchedule})
fut3.cancelCallback = proc(data: pointer) =
fut3.cancelAndSchedule() # manually schedule the cancel
module.trackedFutures.track(fut1)
check module.trackedFutures.len == 1
module.trackedFutures.track(fut2)
check module.trackedFutures.len == 2
module.trackedFutures.track(fut3)
check module.trackedFutures.len == 3
await module.trackedFutures.cancelTracked()
check eventually fut1.cancelled
check eventually fut2.cancelled

View File

@ -1,4 +1,4 @@
import std/unittest
import pkg/unittest2
import pkg/codex/utils

View File

@ -2,4 +2,36 @@ import helpers/multisetup
import helpers/trackers
import helpers/templeveldb
import std/sequtils, chronos
export multisetup, trackers, templeveldb
### taken from libp2p errorhelpers.nim
proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] =
# This proc is only meant for use in tests / not suitable for general use.
# - Swallowing errors arbitrarily instead of aggregating them is bad design
# - It raises `CatchableError` instead of the union of the `futs` errors,
# inflating the caller's `raises` list unnecessarily. `macro` could fix it
let futs = @args
(
proc() {.async: (raises: [CatchableError]).} =
await allFutures(futs)
var firstErr: ref CatchableError
for fut in futs:
if fut.failed:
let err = fut.error()
if err of CancelledError:
raise err
if firstErr == nil:
firstErr = err
if firstErr != nil:
raise firstErr
)()
proc allFuturesThrowing*[T](futs: varargs[Future[T]]): Future[void] =
allFuturesThrowing(futs.mapIt(FutureBase(it)))
proc allFuturesThrowing*[T, E]( # https://github.com/nim-lang/Nim/issues/23432
futs: varargs[InternalRaisesFuture[T, E]]
): Future[void] =
allFuturesThrowing(futs.mapIt(FutureBase(it)))
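# Editor's example (not part of this diff): typical use in a test. A
# CancelledError is re-raised immediately; otherwise the first failure wins.
proc exampleUsage() {.async.} =
  let futs = @[sleepAsync(1.millis), sleepAsync(2.millis)]
  await allFuturesThrowing(futs)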

View File

@ -1,5 +1,5 @@
import pkg/codex/streams/storestream
import std/unittest
import pkg/unittest2
# From libp2p/tests/helpers
const trackerNames = [StoreStreamTrackerName]

View File

@ -4,115 +4,216 @@ import std/strutils
from pkg/libp2p import Cid, `$`, init
import pkg/stint
import pkg/questionable/results
import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient]
import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient, httptable]
import pkg/codex/logutils
import pkg/codex/rest/json
import pkg/codex/purchasing
import pkg/codex/errors
import pkg/codex/sales/reservations
export purchasing
export purchasing, httptable, httpclient
type CodexClient* = ref object
http: HttpClient
baseurl: string
session: HttpSessionRef
type CodexClientError* = object of CatchableError
const HttpClientTimeoutMs = 60 * 1000
proc new*(_: type CodexClient, baseurl: string): CodexClient =
CodexClient(
http: newHttpClient(timeout = HttpClientTimeoutMs),
baseurl: baseurl,
session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}),
)
CodexClient(session: HttpSessionRef.new(), baseurl: baseurl)
proc info*(client: CodexClient): ?!JsonNode =
let url = client.baseurl & "/debug/info"
JsonNode.parse(client.http.getContent(url))
proc close*(self: CodexClient): Future[void] {.async: (raises: []).} =
await self.session.closeWait()
proc setLogLevel*(client: CodexClient, level: string) =
let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level
let headers = newHttpHeaders({"Content-Type": "text/plain"})
let response = client.http.request(url, httpMethod = HttpPost, headers = headers)
assert response.status == "200 OK"
proc request(
self: CodexClient,
httpMethod: httputils.HttpMethod,
url: string,
body: openArray[char] = [],
headers: openArray[HttpHeaderTuple] = [],
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
HttpClientRequestRef
.new(
self.session,
url,
httpMethod,
version = HttpVersion11,
flags = {},
maxResponseHeadersSize = HttpMaxHeadersSize,
headers = headers,
body = body.toOpenArrayByte(0, len(body) - 1),
).get
.send()
proc upload*(client: CodexClient, contents: string): ?!Cid =
let response = client.http.post(client.baseurl & "/data", contents)
assert response.status == "200 OK"
Cid.init(response.body).mapFailure
proc post(
self: CodexClient,
url: string,
body: string = "",
headers: seq[HttpHeaderTuple] = @[],
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodPost, url, headers = headers, body = body)
proc upload*(client: CodexClient, bytes: seq[byte]): ?!Cid =
client.upload(string.fromBytes(bytes))
proc get(
self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodGet, url, headers = headers)
proc download*(client: CodexClient, cid: Cid, local = false): ?!string =
let response = client.http.get(
client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream")
)
proc delete(
self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodDelete, url, headers = headers)
if response.status != "200 OK":
return failure(response.status)
proc patch(
self: CodexClient,
url: string,
body: string = "",
headers: seq[HttpHeaderTuple] = @[],
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return self.request(MethodPatch, url, headers = headers, body = body)
success response.body
proc body*(
response: HttpClientResponseRef
): Future[string] {.async: (raises: [CancelledError, HttpError]).} =
return bytesToString (await response.getBodyBytes())
proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string =
let response = client.http.get(client.baseurl & "/data/" & $cid & "/network/manifest")
proc getContent(
client: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
): Future[string] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.get(url, headers)
return await response.body
if response.status != "200 OK":
return failure(response.status)
proc info*(
client: CodexClient
): Future[?!JsonNode] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.get(client.baseurl & "/debug/info")
return JsonNode.parse(await response.body)
success response.body
proc setLogLevel*(
client: CodexClient, level: string
): Future[void] {.async: (raises: [CancelledError, HttpError]).} =
let
url = client.baseurl & "/debug/chronicles/loglevel?level=" & level
headers = @[("Content-Type", "text/plain")]
response = await client.post(url, headers = headers, body = "")
assert response.status == 200
proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string =
let response = client.http.post(client.baseurl & "/data/" & $cid & "/network")
proc uploadRaw*(
client: CodexClient, contents: string, headers: seq[HttpHeaderTuple] = @[]
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return client.post(client.baseurl & "/data", body = contents, headers = headers)
if response.status != "200 OK":
return failure(response.status)
proc upload*(
client: CodexClient, contents: string
): Future[?!Cid] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.uploadRaw(contents)
assert response.status == 200
Cid.init(await response.body).mapFailure
success response.body
proc upload*(
client: CodexClient, bytes: seq[byte]
): Future[?!Cid] {.async: (raw: true).} =
return client.upload(string.fromBytes(bytes))
proc downloadRaw*(
client: CodexClient, cid: string, local = false
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return
client.get(client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"))
proc downloadBytes*(
client: CodexClient, cid: Cid, local = false
): Future[?!seq[byte]] {.async.} =
let uri =
parseUri(client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream"))
): Future[?!seq[byte]] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.downloadRaw($cid, local = local)
let (status, bytes) = await client.session.fetch(uri)
if response.status != 200:
return failure($response.status)
if status != 200:
return failure("fetch failed with status " & $status)
success await response.getBodyBytes()
success bytes
proc download*(
client: CodexClient, cid: Cid, local = false
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
without response =? await client.downloadBytes(cid, local = local), err:
return failure(err)
return success bytesToString(response)
proc delete*(client: CodexClient, cid: Cid): ?!void =
let
url = client.baseurl & "/data/" & $cid
response = client.http.delete(url)
proc downloadNoStream*(
client: CodexClient, cid: Cid
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.post(client.baseurl & "/data/" & $cid & "/network")
if response.status != "204 No Content":
return failure(response.status)
if response.status != 200:
return failure($response.status)
success await response.body
proc downloadManifestOnly*(
client: CodexClient, cid: Cid
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
let response =
await client.get(client.baseurl & "/data/" & $cid & "/network/manifest")
if response.status != 200:
return failure($response.status)
success await response.body
proc deleteRaw*(
client: CodexClient, cid: string
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return client.delete(client.baseurl & "/data/" & cid)
proc delete*(
client: CodexClient, cid: Cid
): Future[?!void] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.deleteRaw($cid)
if response.status != 204:
return failure($response.status)
success()
proc list*(client: CodexClient): ?!RestContentList =
let url = client.baseurl & "/data"
let response = client.http.get(url)
proc listRaw*(
client: CodexClient
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
return client.get(client.baseurl & "/data")
if response.status != "200 OK":
return failure(response.status)
proc list*(
client: CodexClient
): Future[?!RestContentList] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.listRaw()
RestContentList.fromJson(response.body)
if response.status != 200:
return failure($response.status)
proc space*(client: CodexClient): ?!RestRepoStore =
RestContentList.fromJson(await response.body)
proc space*(
client: CodexClient
): Future[?!RestRepoStore] {.async: (raises: [CancelledError, HttpError]).} =
let url = client.baseurl & "/space"
let response = client.http.get(url)
let response = await client.get(url)
if response.status != "200 OK":
return failure(response.status)
if response.status != 200:
return failure($response.status)
RestRepoStore.fromJson(response.body)
RestRepoStore.fromJson(await response.body)
proc requestStorageRaw*(
client: CodexClient,
@ -124,7 +225,9 @@ proc requestStorageRaw*(
expiry: uint64 = 0,
nodes: uint = 3,
tolerance: uint = 1,
): Response =
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
## Call request storage REST endpoint
##
let url = client.baseurl & "/storage/request/" & $cid
@ -141,7 +244,7 @@ proc requestStorageRaw*(
if expiry != 0:
json["expiry"] = %($expiry)
return client.http.post(url, $json)
return client.post(url, $json)
proc requestStorage*(
client: CodexClient,
@ -153,43 +256,45 @@ proc requestStorage*(
collateralPerByte: UInt256,
nodes: uint = 3,
tolerance: uint = 1,
): ?!PurchaseId =
): Future[?!PurchaseId] {.async: (raises: [CancelledError, HttpError]).} =
## Call request storage REST endpoint
##
let response = client.requestStorageRaw(
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
nodes, tolerance,
)
if response.status != "200 OK":
doAssert(false, response.body)
PurchaseId.fromHex(response.body).catch
let
response = await client.requestStorageRaw(
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
nodes, tolerance,
)
body = await response.body
proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase =
if response.status != 200:
doAssert(false, body)
PurchaseId.fromHex(body).catch
proc getPurchase*(
client: CodexClient, purchaseId: PurchaseId
): Future[?!RestPurchase] {.async: (raises: [CancelledError, HttpError]).} =
let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex
try:
let body = client.http.getContent(url)
let body = await client.getContent(url)
return RestPurchase.fromJson(body)
except CatchableError as e:
return failure e.msg
proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent =
proc getSalesAgent*(
client: CodexClient, slotId: SlotId
): Future[?!RestSalesAgent] {.async: (raises: [CancelledError, HttpError]).} =
let url = client.baseurl & "/sales/slots/" & slotId.toHex
try:
let body = client.http.getContent(url)
let body = await client.getContent(url)
return RestSalesAgent.fromJson(body)
except CatchableError as e:
return failure e.msg
proc getSlots*(client: CodexClient): ?!seq[Slot] =
let url = client.baseurl & "/sales/slots"
let body = client.http.getContent(url)
seq[Slot].fromJson(body)
proc postAvailability*(
client: CodexClient,
totalSize, duration: uint64,
minPricePerBytePerSecond, totalCollateral: UInt256,
): ?!Availability =
): Future[?!Availability] {.async: (raises: [CancelledError, HttpError]).} =
## Post sales availability endpoint
##
let url = client.baseurl & "/sales/availability"
@ -200,17 +305,21 @@ proc postAvailability*(
"minPricePerBytePerSecond": minPricePerBytePerSecond,
"totalCollateral": totalCollateral,
}
let response = client.http.post(url, $json)
doAssert response.status == "201 Created",
"expected 201 Created, got " & response.status & ", body: " & response.body
Availability.fromJson(response.body)
let response = await client.post(url, $json)
let body = await response.body
doAssert response.status == 201,
"expected 201 Created, got " & $response.status & ", body: " & body
Availability.fromJson(body)
proc patchAvailabilityRaw*(
client: CodexClient,
availabilityId: AvailabilityId,
totalSize, freeSize, duration: ?uint64 = uint64.none,
minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none,
): Response =
): Future[HttpClientResponseRef] {.
async: (raw: true, raises: [CancelledError, HttpError])
.} =
## Updates availability
##
let url = client.baseurl & "/sales/availability/" & $availabilityId
@ -233,68 +342,50 @@ proc patchAvailabilityRaw*(
if totalCollateral =? totalCollateral:
json["totalCollateral"] = %totalCollateral
client.http.patch(url, $json)
client.patch(url, $json)
proc patchAvailability*(
client: CodexClient,
availabilityId: AvailabilityId,
totalSize, duration: ?uint64 = uint64.none,
minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none,
): void =
let response = client.patchAvailabilityRaw(
): Future[void] {.async: (raises: [CancelledError, HttpError]).} =
let response = await client.patchAvailabilityRaw(
availabilityId,
totalSize = totalSize,
duration = duration,
minPricePerBytePerSecond = minPricePerBytePerSecond,
totalCollateral = totalCollateral,
)
doAssert response.status == "200 OK", "expected 200 OK, got " & response.status
doAssert response.status == 200, "expected 200 OK, got " & $response.status
proc getAvailabilities*(client: CodexClient): ?!seq[Availability] =
proc getAvailabilities*(
client: CodexClient
): Future[?!seq[Availability]] {.async: (raises: [CancelledError, HttpError]).} =
## Call sales availability REST endpoint
let url = client.baseurl & "/sales/availability"
let body = client.http.getContent(url)
let body = await client.getContent(url)
seq[Availability].fromJson(body)
proc getAvailabilityReservations*(
client: CodexClient, availabilityId: AvailabilityId
): ?!seq[Reservation] =
): Future[?!seq[Reservation]] {.async: (raises: [CancelledError, HttpError]).} =
## Retrieves Availability's Reservations
let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations"
let body = client.http.getContent(url)
let body = await client.getContent(url)
seq[Reservation].fromJson(body)
proc close*(client: CodexClient) =
client.http.close()
proc purchaseStateIs*(
client: CodexClient, id: PurchaseId, state: string
): Future[bool] {.async: (raises: [CancelledError, HttpError]).} =
(await client.getPurchase(id)).option .? state == some state
proc restart*(client: CodexClient) =
client.http.close()
client.http = newHttpClient(timeout = HttpClientTimeoutMs)
proc saleStateIs*(
client: CodexClient, id: SlotId, state: string
): Future[bool] {.async: (raises: [CancelledError, HttpError]).} =
(await client.getSalesAgent(id)).option .? state == some state
proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool =
client.getPurchase(id).option .? state == some state
proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool =
client.getSalesAgent(id).option .? state == some state
proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId =
return client.getPurchase(id).option .? requestId
proc uploadRaw*(
client: CodexClient, contents: string, headers = newHttpHeaders()
): Response =
return client.http.request(
client.baseurl & "/data", body = contents, httpMethod = HttpPost, headers = headers
)
proc listRaw*(client: CodexClient): Response =
return client.http.request(client.baseurl & "/data", httpMethod = HttpGet)
proc downloadRaw*(client: CodexClient, cid: string, local = false): Response =
return client.http.request(
client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"),
httpMethod = HttpGet,
)
proc deleteRaw*(client: CodexClient, cid: string): Response =
return client.http.request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete)
proc requestId*(
client: CodexClient, id: PurchaseId
): Future[?RequestId] {.async: (raises: [CancelledError, HttpError]).} =
return (await client.getPurchase(id)).option .? requestId
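Taken together, the client changes above replace blocking std/httpclient calls with chronos futures carrying explicit raises lists; the Raw variants use `raw: true` so they hand back the `HttpClientResponseRef` future directly. A minimal usage sketch under those assumptions (the proc name and echo output are illustrative, not part of this change):

proc listReservations(
    client: CodexClient
) {.async: (raises: [CancelledError, HttpError]).} =
  # every helper now returns a Future and must be awaited;
  # ?!-typed results are unwrapped with .get, as in the tests below
  let availabilities = (await client.getAvailabilities()).get
  for availability in availabilities:
    let reservations =
      (await client.getAvailabilityReservations(availability.id)).get
    echo "availability has ", reservations.len, " reservation(s)"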

View File

@ -68,7 +68,7 @@ method stop*(node: CodexProcess) {.async.} =
trace "stopping codex client"
if client =? node.client:
client.close()
await client.close()
node.client = none CodexClient
method removeDataDir*(node: CodexProcess) =

View File

@ -60,13 +60,13 @@ template marketplacesuite*(name: string, body: untyped) =
duration: uint64,
collateralPerByte: UInt256,
minPricePerBytePerSecond: UInt256,
) =
): Future[void] {.async: (raises: [CancelledError, HttpError, ConfigurationError]).} =
let totalCollateral = datasetSize.u256 * collateralPerByte
# post availability to each provider
for i in 0 ..< providers().len:
let provider = providers()[i].client
discard provider.postAvailability(
discard await provider.postAvailability(
totalSize = datasetSize,
duration = duration.uint64,
minPricePerBytePerSecond = minPricePerBytePerSecond,
@ -83,16 +83,18 @@ template marketplacesuite*(name: string, body: untyped) =
expiry: uint64 = 4.periods,
nodes = providers().len,
tolerance = 0,
): Future[PurchaseId] {.async.} =
let id = client.requestStorage(
cid,
expiry = expiry,
duration = duration,
proofProbability = proofProbability,
collateralPerByte = collateralPerByte,
pricePerBytePerSecond = pricePerBytePerSecond,
nodes = nodes.uint,
tolerance = tolerance.uint,
): Future[PurchaseId] {.async: (raises: [CancelledError, HttpError]).} =
let id = (
await client.requestStorage(
cid,
expiry = expiry,
duration = duration,
proofProbability = proofProbability,
collateralPerByte = collateralPerByte,
pricePerBytePerSecond = pricePerBytePerSecond,
nodes = nodes.uint,
tolerance = tolerance.uint,
)
).get
return id
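Note the parenthesization above: in Nim, dot calls bind tighter than the `await` keyword, so `await client.requestStorage(...).get` would apply `.get` to the Future itself rather than to its value. A hedged reminder sketch (parameter values are illustrative; `Cid` is assumed to be the content-id type returned by upload):

proc requestAndUnwrap(
    client: CodexClient, cid: Cid
): Future[PurchaseId] {.async: (raises: [CancelledError, HttpError]).} =
  # parenthesize so the await resolves the Future first,
  # then .get unwraps the ?!PurchaseId result
  let id = (
    await client.requestStorage(
      cid,
      duration = 100.uint64,
      pricePerBytePerSecond = 1.u256,
      proofProbability = 3.u256,
      expiry = 10.uint64,
      collateralPerByte = 1.u256,
    )
  ).get
  return id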

View File

@ -275,8 +275,10 @@ template multinodesuite*(name: string, body: untyped) =
fail()
quit(1)
proc updateBootstrapNodes(node: CodexProcess) =
without ninfo =? node.client.info():
proc updateBootstrapNodes(
node: CodexProcess
): Future[void] {.async: (raises: [CatchableError]).} =
without ninfo =? await node.client.info():
# raise CatchableError instead of Defect (with .get or !) so we
# can gracefully shut down and prevent zombies
raiseMultiNodeSuiteError "Failed to get node info"
@ -315,14 +317,14 @@ template multinodesuite*(name: string, body: untyped) =
for config in clients.configs:
let node = await startClientNode(config)
running.add RunningNode(role: Role.Client, node: node)
CodexProcess(node).updateBootstrapNodes()
await CodexProcess(node).updateBootstrapNodes()
if var providers =? nodeConfigs.providers:
failAndTeardownOnError "failed to start provider nodes":
for config in providers.configs.mitems:
let node = await startProviderNode(config)
running.add RunningNode(role: Role.Provider, node: node)
CodexProcess(node).updateBootstrapNodes()
await CodexProcess(node).updateBootstrapNodes()
if var validators =? nodeConfigs.validators:
failAndTeardownOnError "failed to start validator nodes":
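The reworked updateBootstrapNodes keeps nim-questionable's `without` binding while awaiting the call. A minimal sketch of that pattern under the same assumptions (proc name and message are illustrative; `info()` is assumed to return a ?!-style result as above):

proc fetchInfo(node: CodexProcess) {.async: (raises: [CatchableError]).} =
  # `without x =? expr:` runs the block when expr is a failure;
  # raising CatchableError here, instead of tripping a Defect via
  # .get or !, lets the suite tear nodes down instead of leaving zombies
  without ninfo =? await node.client.info():
    raise newException(CatchableError, "failed to get node info")
  echo "node info: ", ninfo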

View File

@ -18,11 +18,11 @@ multinodesuite "Node block expiration tests":
let client = clients()[0]
let clientApi = client.client
let contentId = clientApi.upload(content).get
let contentId = (await clientApi.upload(content)).get
await sleepAsync(2.seconds)
let download = clientApi.download(contentId, local = true)
let download = await clientApi.download(contentId, local = true)
check:
download.isOk
@ -39,12 +39,12 @@ multinodesuite "Node block expiration tests":
let client = clients()[0]
let clientApi = client.client
let contentId = clientApi.upload(content).get
let contentId = (await clientApi.upload(content)).get
await sleepAsync(3.seconds)
let download = clientApi.download(contentId, local = true)
let download = await clientApi.download(contentId, local = true)
check:
download.isFailure
download.error.msg == "404 Not Found"
download.error.msg == "404"

View File

@ -13,21 +13,18 @@ marketplacesuite "Bug #821 - node crashes during erasure coding":
.withLogFile()
# uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("node", "erasure", "marketplace").some,
providers: CodexConfigs.init(nodes = 0)
# .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock")
.some,
providers: CodexConfigs.init(nodes = 0).some,
):
let pricePerBytePerSecond = 1.u256
let duration = 20.periods
let collateralPerByte = 1.u256
let expiry = 10.periods
let data = await RandomChunker.example(blocks = 8)
let client = clients()[0]
let clientApi = client.client
let
pricePerBytePerSecond = 1.u256
duration = 20.periods
collateralPerByte = 1.u256
expiry = 10.periods
data = await RandomChunker.example(blocks = 8)
client = clients()[0]
clientApi = client.client
let cid = clientApi.upload(data).get
let cid = (await clientApi.upload(data)).get
var requestId = none RequestId
proc onStorageRequested(eventResult: ?!StorageRequested) =
@ -49,9 +46,11 @@ marketplacesuite "Bug #821 - node crashes during erasure coding":
check eventually(requestId.isSome, timeout = expiry.int * 1000)
let request = await marketplace.getRequest(requestId.get)
let cidFromRequest = request.content.cid
let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true)
let
request = await marketplace.getRequest(requestId.get)
cidFromRequest = request.content.cid
downloaded = await clientApi.downloadBytes(cidFromRequest, local = true)
check downloaded.isOk
check downloaded.get.toHex == data.toHex
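These tests lean on nim-questionable throughout: `?!T` results in event callbacks, `.get` and `!` unwraps, and `=?` bindings. A short primer sketch, assuming nim-questionable/nim-results semantics:

import std/options
import pkg/questionable
import pkg/questionable/results

proc operatorPrimer() =
  let maybeId: ?int = some 42
  let id = !maybeId          # `!` unwraps an Option, raising a Defect on none
  let res: ?!int = success 7 # `?!T` is a Result with a CatchableError error
  if value =? res:           # `=?` binds the value on success
    echo value + id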

View File

@ -37,15 +37,17 @@ marketplacesuite "Marketplace":
let size = 0xFFFFFF.uint64
let data = await RandomChunker.example(blocks = blocks)
# host makes storage available
let availability = host.postAvailability(
totalSize = size,
duration = 20 * 60.uint64,
minPricePerBytePerSecond = minPricePerBytePerSecond,
totalCollateral = size.u256 * minPricePerBytePerSecond,
let availability = (
await host.postAvailability(
totalSize = size,
duration = 20 * 60.uint64,
minPricePerBytePerSecond = minPricePerBytePerSecond,
totalCollateral = size.u256 * minPricePerBytePerSecond,
)
).get
# client requests storage
let cid = client.upload(data).get
let cid = (await client.upload(data)).get
let id = await client.requestStorage(
cid,
duration = 20 * 60.uint64,
@ -57,15 +59,17 @@ marketplacesuite "Marketplace":
tolerance = ecTolerance,
)
check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000)
let purchase = client.getPurchase(id).get
check eventually(
await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000
)
let purchase = (await client.getPurchase(id)).get
check purchase.error == none string
let availabilities = host.getAvailabilities().get
let availabilities = (await host.getAvailabilities()).get
check availabilities.len == 1
let newSize = availabilities[0].freeSize
check newSize > 0 and newSize < size
let reservations = host.getAvailabilityReservations(availability.id).get
let reservations = (await host.getAvailabilityReservations(availability.id)).get
check reservations.len == 3
check reservations[0].requestId == purchase.requestId
@ -80,15 +84,17 @@ marketplacesuite "Marketplace":
# host makes storage available
let startBalanceHost = await token.balanceOf(hostAccount)
discard host.postAvailability(
totalSize = size,
duration = 20 * 60.uint64,
minPricePerBytePerSecond = minPricePerBytePerSecond,
totalCollateral = size.u256 * minPricePerBytePerSecond,
discard (
await host.postAvailability(
totalSize = size,
duration = 20 * 60.uint64,
minPricePerBytePerSecond = minPricePerBytePerSecond,
totalCollateral = size.u256 * minPricePerBytePerSecond,
)
).get
# client requests storage
let cid = client.upload(data).get
let cid = (await client.upload(data)).get
let id = await client.requestStorage(
cid,
duration = duration,
@ -100,8 +106,10 @@ marketplacesuite "Marketplace":
tolerance = ecTolerance,
)
check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000)
let purchase = client.getPurchase(id).get
check eventually(
await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000
)
let purchase = (await client.getPurchase(id)).get
check purchase.error == none string
let clientBalanceBeforeFinished = await token.balanceOf(clientAccount)
@ -158,7 +166,7 @@ marketplacesuite "Marketplace payouts":
# provider makes storage available
let datasetSize = datasetSize(blocks, ecNodes, ecTolerance)
let totalAvailabilitySize = (datasetSize div 2).truncate(uint64)
discard providerApi.postAvailability(
discard await providerApi.postAvailability(
# make availability size small enough that we can't fill all the slots,
# thus causing a cancellation
totalSize = totalAvailabilitySize,
@ -167,7 +175,7 @@ marketplacesuite "Marketplace payouts":
totalCollateral = collateralPerByte * totalAvailabilitySize.u256,
)
let cid = clientApi.upload(data).get
let cid = (await clientApi.upload(data)).get
var slotIdxFilled = none uint64
proc onSlotFilled(eventResult: ?!SlotFilled) =
@ -189,11 +197,11 @@ marketplacesuite "Marketplace payouts":
# wait until one slot is filled
check eventually(slotIdxFilled.isSome, timeout = expiry.int * 1000)
let slotId = slotId(!clientApi.requestId(id), !slotIdxFilled)
let slotId = slotId(!(await clientApi.requestId(id)), !slotIdxFilled)
# wait until sale is cancelled
await ethProvider.advanceTime(expiry.u256)
check eventually providerApi.saleStateIs(slotId, "SaleCancelled")
check eventually await providerApi.saleStateIs(slotId, "SaleCancelled")
await advanceToNextPeriod()
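`check eventually(await providerApi.saleStateIs(...))` works because `eventually` is a template that re-evaluates its condition on each poll, re-awaiting the call every pass. A hedged sketch of such a polling template (the real helper in the test utilities may differ):

template eventuallySketch(condition: untyped, timeout = 5000): bool =
  # must expand inside an async proc, since it awaits between polls
  block:
    let deadline = Moment.now() + timeout.milliseconds
    var ok = false
    while not ok and Moment.now() < deadline:
      ok = condition         # an `await f()` argument is re-run each iteration
      if not ok:
        await sleepAsync(100.milliseconds)
    ok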

View File

@ -42,14 +42,14 @@ marketplacesuite "Hosts submit regular proofs":
let data = await RandomChunker.example(blocks = blocks)
let datasetSize =
datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance)
createAvailabilities(
await createAvailabilities(
datasetSize.truncate(uint64),
duration,
collateralPerByte,
minPricePerBytePerSecond,
)
let cid = client0.upload(data).get
let cid = (await client0.upload(data)).get
let purchaseId = await client0.requestStorage(
cid,
@ -59,13 +59,13 @@ marketplacesuite "Hosts submit regular proofs":
tolerance = ecTolerance,
)
let purchase = client0.getPurchase(purchaseId).get
let purchase = (await client0.getPurchase(purchaseId)).get
check purchase.error == none string
let slotSize = slotSize(blocks, ecNodes, ecTolerance)
check eventually(
client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000
await client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000
)
var proofWasSubmitted = false
@ -119,27 +119,29 @@ marketplacesuite "Simulate invalid proofs":
let data = await RandomChunker.example(blocks = blocks)
let datasetSize =
datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance)
createAvailabilities(
await createAvailabilities(
datasetSize.truncate(uint64),
duration,
collateralPerByte,
minPricePerBytePerSecond,
)
let cid = client0.upload(data).get
let cid = (await client0.upload(data)).get
let purchaseId = await client0.requestStorage(
cid,
expiry = expiry,
duration = duration,
nodes = ecNodes,
tolerance = ecTolerance,
proofProbability = 1.u256,
let purchaseId = (
await client0.requestStorage(
cid,
expiry = expiry,
duration = duration,
nodes = ecNodes,
tolerance = ecTolerance,
proofProbability = 1.u256,
)
)
let requestId = client0.requestId(purchaseId).get
let requestId = (await client0.requestId(purchaseId)).get
check eventually(
client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000
await client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000
)
var slotWasFreed = false
@ -182,14 +184,14 @@ marketplacesuite "Simulate invalid proofs":
let data = await RandomChunker.example(blocks = blocks)
let datasetSize =
datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance)
createAvailabilities(
await createAvailabilities(
datasetSize.truncate(uint64),
duration,
collateralPerByte,
minPricePerBytePerSecond,
)
let cid = client0.upload(data).get
let cid = (await client0.upload(data)).get
let purchaseId = await client0.requestStorage(
cid,
@ -199,7 +201,7 @@ marketplacesuite "Simulate invalid proofs":
tolerance = ecTolerance,
proofProbability = 1.u256,
)
let requestId = client0.requestId(purchaseId).get
let requestId = (await client0.requestId(purchaseId)).get
var slotWasFilled = false
proc onSlotFilled(eventResult: ?!SlotFilled) =

View File

@ -8,22 +8,26 @@ import ../examples
twonodessuite "Purchasing":
test "node handles storage request", twoNodesConfig:
let data = await RandomChunker.example(blocks = 2)
let cid = client1.upload(data).get
let id1 = client1.requestStorage(
cid,
duration = 100.uint64,
pricePerBytePerSecond = 1.u256,
proofProbability = 3.u256,
expiry = 10.uint64,
collateralPerByte = 1.u256,
let cid = (await client1.upload(data)).get
let id1 = (
await client1.requestStorage(
cid,
duration = 100.uint64,
pricePerBytePerSecond = 1.u256,
proofProbability = 3.u256,
expiry = 10.uint64,
collateralPerByte = 1.u256,
)
).get
let id2 = client1.requestStorage(
cid,
duration = 400.uint64,
pricePerBytePerSecond = 2.u256,
proofProbability = 6.u256,
expiry = 10.uint64,
collateralPerByte = 2.u256,
let id2 = (
await client1.requestStorage(
cid,
duration = 400.uint64,
pricePerBytePerSecond = 2.u256,
proofProbability = 6.u256,
expiry = 10.uint64,
collateralPerByte = 2.u256,
)
).get
check id1 != id2
@ -34,19 +38,21 @@ twonodessuite "Purchasing":
rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2
)
let data = await chunker.getBytes()
let cid = client1.upload(byteutils.toHex(data)).get
let id = client1.requestStorage(
cid,
duration = 100.uint64,
pricePerBytePerSecond = 1.u256,
proofProbability = 3.u256,
expiry = 30.uint64,
collateralPerByte = 1.u256,
nodes = 3,
tolerance = 1,
let cid = (await client1.upload(byteutils.toHex(data))).get
let id = (
await client1.requestStorage(
cid,
duration = 100.uint64,
pricePerBytePerSecond = 1.u256,
proofProbability = 3.u256,
expiry = 30.uint64,
collateralPerByte = 1.u256,
nodes = 3,
tolerance = 1,
)
).get
let request = client1.getPurchase(id).get.request.get
let request = (await client1.getPurchase(id)).get.request.get
check request.content.cid.data.buffer.len > 0
check request.ask.duration == 100.uint64
@ -75,24 +81,29 @@ twonodessuite "Purchasing":
test "node remembers purchase status after restart", twoNodesConfig:
let data = await RandomChunker.example(blocks = 2)
let cid = client1.upload(data).get
let id = client1.requestStorage(
cid,
duration = 10 * 60.uint64,
pricePerBytePerSecond = 1.u256,
proofProbability = 3.u256,
expiry = 5 * 60.uint64,
collateralPerByte = 1.u256,
nodes = 3.uint,
tolerance = 1.uint,
let cid = (await client1.upload(data)).get
let id = (
await client1.requestStorage(
cid,
duration = 10 * 60.uint64,
pricePerBytePerSecond = 1.u256,
proofProbability = 3.u256,
expiry = 5 * 60.uint64,
collateralPerByte = 1.u256,
nodes = 3.uint,
tolerance = 1.uint,
)
).get
check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000)
check eventually(
await client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000
)
await node1.restart()
client1.restart()
check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000)
let request = client1.getPurchase(id).get.request.get
check eventually(
await client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000
)
let request = (await client1.getPurchase(id)).get.request.get
check request.ask.duration == (10 * 60).uint64
check request.ask.pricePerBytePerSecond == 1.u256
check request.ask.proofProbability == 3.u256
@ -103,19 +114,19 @@ twonodessuite "Purchasing":
test "node requires expiry and its value to be in future", twoNodesConfig:
let data = await RandomChunker.example(blocks = 2)
let cid = client1.upload(data).get
let cid = (await client1.upload(data)).get
let responseMissing = client1.requestStorageRaw(
let responseMissing = await client1.requestStorageRaw(
cid,
duration = 1.uint64,
pricePerBytePerSecond = 1.u256,
proofProbability = 3.u256,
collateralPerByte = 1.u256,
)
check responseMissing.status == "400 Bad Request"
check responseMissing.body == "Expiry required"
check responseMissing.status == 400
check (await responseMissing.body) == "Expiry required"
let responseBefore = client1.requestStorageRaw(
let responseBefore = await client1.requestStorageRaw(
cid,
duration = 10.uint64,
pricePerBytePerSecond = 1.u256,
@ -123,6 +134,6 @@ twonodessuite "Purchasing":
collateralPerByte = 1.u256,
expiry = 10.uint64,
)
check responseBefore.status == "400 Bad Request"
check responseBefore.status == 400
check "Expiry needs value bigger then zero and smaller then the request's duration" in
responseBefore.body
(await responseBefore.body)
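The raw-response assertions above check the numeric status first and then await the body separately, since the body is itself a Future. A minimal sketch of that shape, assuming requestStorageRaw keeps the parameters shown (the wrapper proc is illustrative):

proc expectExpiryRequired(
    client: CodexClient, cid: Cid
) {.async: (raises: [CancelledError, HttpError]).} =
  let response = await client.requestStorageRaw(
    cid,
    duration = 1.uint64,
    pricePerBytePerSecond = 1.u256,
    proofProbability = 3.u256,
    collateralPerByte = 1.u256,
  )
  doAssert response.status == 400             # numeric, not "400 Bad Request"
  doAssert (await response.body) == "Expiry required"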

Some files were not shown because too many files have changed in this diff