style: nph implementation

Adam Uhlíř 2025-01-09 11:19:09 +01:00
parent 39e8e6e6fa
commit ab566e700a
No known key found for this signature in database
GPG Key ID: 1D17A9E81F76155B
274 changed files with 7294 additions and 7614 deletions


@ -47,6 +47,19 @@ jobs:
matrix: ${{ needs.matrix.outputs.matrix }}
cache_nonce: ${{ needs.matrix.outputs.cache_nonce }}
linting:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- uses: actions/checkout@v4
- name: Check `nph` formatting
uses: arnetheduck/nph-action@v1
with:
version: latest
options: "codex/ tests/"
fail: true
suggest: true
coverage:
# Force to stick to ubuntu 20.04 for coverage because
# lcov was updated to 2.x version in ubuntu-latest
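The `linting` job above runs `nph` over `codex/` and `tests/` on every pull request. A rough local equivalent — a sketch that assumes the `nph` binary is already built and on your `PATH` (see the `build-nph` Make target introduced below) — is:

```bash
# Run the formatter over the same directories the CI lint job checks.
# nph rewrites files in place, so a clean diff afterwards means the
# tree already matched the expected style.
nph codex/ tests/
git diff --exit-code
```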

.gitmodules

@ -218,3 +218,6 @@
[submodule "vendor/nim-zippy"]
path = vendor/nim-zippy
url = https://github.com/status-im/nim-zippy.git
[submodule "vendor/nph"]
path = vendor/nph
url = https://github.com/arnetheduck/nph.git


@ -17,6 +17,7 @@
# version pinned by nimbus-build-system.
#PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
PINNED_NIM_VERSION := v2.0.14
ifeq ($(NIM_COMMIT),)
NIM_COMMIT := $(PINNED_NIM_VERSION)
else ifeq ($(NIM_COMMIT),pinned)
@ -199,4 +200,42 @@ ifneq ($(USE_LIBBACKTRACE), 0)
+ $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT)
endif
############
## Format ##
############
.PHONY: build-nph install-nph-hook clean-nph print-nph-path
# Default location for nph binary shall be next to nim binary to make it available on the path.
NPH:=$(shell dirname $(NIM_BINARY))/nph
build-nph:
ifeq ("$(wildcard $(NPH))","")
$(ENV_SCRIPT) nim c vendor/nph/src/nph.nim && \
mv vendor/nph/src/nph $(shell dirname $(NPH))
echo "nph utility is available at " $(NPH)
endif
GIT_PRE_COMMIT_HOOK := .git/hooks/pre-commit
install-nph-hook: build-nph
ifeq ("$(wildcard $(GIT_PRE_COMMIT_HOOK))","")
cp ./tools/scripts/git_pre_commit_format.sh $(GIT_PRE_COMMIT_HOOK)
else
echo "$(GIT_PRE_COMMIT_HOOK) already present, will NOT override"
exit 1
endif
nph/%: build-nph
echo -e $(FORMAT_MSG) "nph/$*" && \
$(NPH) $*
clean-nph:
rm -f $(NPH)
# To avoid hardcoding nph binary location in several places
print-nph-path:
echo "$(NPH)"
clean: | clean-nph
endif # "variables.mk" was not included


@ -31,6 +31,7 @@ Run the client with:
```bash
build/codex
```
## Configuration
It is possible to configure a Codex node in several ways:
@ -51,3 +52,15 @@ To get acquainted with Codex, consider:
## API
The client exposes a REST API that can be used to interact with it. An overview of the API can be found at [api.codex.storage](https://api.codex.storage).
## Contributing and development
Feel free to dive in; contributions are welcome! Open an issue or submit PRs.
### Linting and formatting
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) to format its code, and adhering to its styling is required.
If you are setting up a fresh environment, run `make build-nph` to get `nph`.
To format files, run `make nph/<file or folder you want to format>`.
If you want, you can install a Git pre-commit hook using `make install-nph-hook`, which will format modified files prior to committing them.
If you are using VSCode with the NimLang extension, you can enable "Format On Save", which will format files using `nph`.
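For example, a first-time setup and formatting pass could look like this (a sketch; the folder path is illustrative):

```bash
make build-nph                # one-time: compile nph from vendor/nph
make nph/codex/blockexchange  # format a folder (or a single .nim file)
make install-nph-hook         # optional: auto-format modified files on commit
```

The `nph/%` pattern target simply runs the built `nph` binary on the given path, so local formatting matches what CI enforces.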


@ -1,10 +1,5 @@
import ./blockexchange/[
network,
engine,
peers]
import ./blockexchange/[network, engine, peers]
import ./blockexchange/protobuf/[
blockexc,
presence]
import ./blockexchange/protobuf/[blockexc, presence]
export network, engine, blockexc, presence, peers


@ -34,20 +34,19 @@ const
DefaultConcurrentAdvertRequests = 10
DefaultAdvertiseLoopSleep = 30.minutes
type
Advertiser* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
discovery*: Discovery # Discovery interface
type Advertiser* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
discovery*: Discovery # Discovery interface
advertiserRunning*: bool # Indicates if discovery is running
concurrentAdvReqs: int # Concurrent advertise requests
advertiserRunning*: bool # Indicates if discovery is running
concurrentAdvReqs: int # Concurrent advertise requests
advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle
advertiseQueue*: AsyncQueue[Cid] # Advertise queue
trackedFutures*: TrackedFutures # Advertise tasks futures
advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle
advertiseQueue*: AsyncQueue[Cid] # Advertise queue
trackedFutures*: TrackedFutures # Advertise tasks futures
advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} =
if cid notin b.advertiseQueue:
@ -83,7 +82,6 @@ proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
trace "Advertiser iterating blocks finished."
await sleepAsync(b.advertiseLocalStoreLoopSleep)
except CancelledError:
break # do not propagate as advertiseLocalStoreLoop was asyncSpawned
except CatchableError as e:
@ -94,20 +92,17 @@ proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning:
try:
let
cid = await b.advertiseQueue.get()
let cid = await b.advertiseQueue.get()
if cid in b.inFlightAdvReqs:
continue
try:
let
request = b.discovery.provide(cid)
let request = b.discovery.provide(cid)
b.inFlightAdvReqs[cid] = request
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
await request
finally:
b.inFlightAdvReqs.del(cid)
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
@ -125,7 +120,7 @@ proc start*(b: Advertiser) {.async.} =
trace "Advertiser start"
proc onBlock(cid: Cid) {.async.} =
proc onBlock(cid: Cid) {.async.} =
await b.advertiseBlock(cid)
doAssert(b.localStore.onBlockStored.isNone())
@ -136,7 +131,7 @@ proc start*(b: Advertiser) {.async.} =
return
b.advertiserRunning = true
for i in 0..<b.concurrentAdvReqs:
for i in 0 ..< b.concurrentAdvReqs:
let fut = b.processQueueLoop()
b.trackedFutures.track(fut)
asyncSpawn fut
@ -166,7 +161,7 @@ proc new*(
localStore: BlockStore,
discovery: Discovery,
concurrentAdvReqs = DefaultConcurrentAdvertRequests,
advertiseLocalStoreLoopSleep = DefaultAdvertiseLoopSleep
advertiseLocalStoreLoopSleep = DefaultAdvertiseLoopSleep,
): Advertiser =
## Create an advertiser instance
##
@ -177,4 +172,5 @@ proc new*(
advertiseQueue: newAsyncQueue[Cid](concurrentAdvReqs),
trackedFutures: TrackedFutures.new(),
inFlightAdvReqs: initTable[Cid, Future[void]](),
advertiseLocalStoreLoopSleep: advertiseLocalStoreLoopSleep)
advertiseLocalStoreLoopSleep: advertiseLocalStoreLoopSleep,
)


@ -40,21 +40,21 @@ const
DefaultMinPeersPerBlock = 3
DefaultDiscoveryLoopSleep = 3.seconds
type
DiscoveryEngine* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
peers*: PeerCtxStore # Peer context store
network*: BlockExcNetwork # Network interface
discovery*: Discovery # Discovery interface
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
discEngineRunning*: bool # Indicates if discovery is running
concurrentDiscReqs: int # Concurrent discovery requests
discoveryLoop*: Future[void] # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block
discoveryLoopSleep: Duration # Discovery loop sleep
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]] # Inflight discovery requests
type DiscoveryEngine* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
peers*: PeerCtxStore # Peer context store
network*: BlockExcNetwork # Network interface
discovery*: Discovery # Discovery interface
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
discEngineRunning*: bool # Indicates if discovery is running
concurrentDiscReqs: int # Concurrent discovery requests
discoveryLoop*: Future[void] # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block
discoveryLoopSleep: Duration # Discovery loop sleep
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
# Inflight discovery requests
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
while b.discEngineRunning:
@ -81,36 +81,27 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
while b.discEngineRunning:
try:
let
cid = await b.discoveryQueue.get()
let cid = await b.discoveryQueue.get()
if cid in b.inFlightDiscReqs:
trace "Discovery request already in progress", cid
continue
let
haves = b.peers.peersHave(cid)
let haves = b.peers.peersHave(cid)
if haves.len < b.minPeersPerBlock:
try:
let
request = b.discovery
.find(cid)
.wait(DefaultDiscoveryTimeout)
let request = b.discovery.find(cid).wait(DefaultDiscoveryTimeout)
b.inFlightDiscReqs[cid] = request
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
let
peers = await request
let peers = await request
let
dialed = await allFinished(
peers.mapIt( b.network.dialPeer(it.data) ))
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
for i, f in dialed:
if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId)
finally:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
@ -146,7 +137,7 @@ proc start*(b: DiscoveryEngine) {.async.} =
return
b.discEngineRunning = true
for i in 0..<b.concurrentDiscReqs:
for i in 0 ..< b.concurrentDiscReqs:
let fut = b.discoveryTaskLoop()
b.trackedFutures.track(fut)
asyncSpawn fut
@ -180,7 +171,7 @@ proc new*(
pendingBlocks: PendingBlocksManager,
concurrentDiscReqs = DefaultConcurrentDiscRequests,
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
minPeersPerBlock = DefaultMinPeersPerBlock
minPeersPerBlock = DefaultMinPeersPerBlock,
): DiscoveryEngine =
## Create a discovery engine instance for advertising services
##
@ -195,4 +186,5 @@ proc new*(
trackedFutures: TrackedFutures.new(),
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
discoveryLoopSleep: discoveryLoopSleep,
minPeersPerBlock: minPeersPerBlock)
minPeersPerBlock: minPeersPerBlock,
)


@ -44,12 +44,24 @@ export peers, pendingblocks, payments, discovery
logScope:
topics = "codex blockexcengine"
declareCounter(codex_block_exchange_want_have_lists_sent, "codex blockexchange wantHave lists sent")
declareCounter(codex_block_exchange_want_have_lists_received, "codex blockexchange wantHave lists received")
declareCounter(codex_block_exchange_want_block_lists_sent, "codex blockexchange wantBlock lists sent")
declareCounter(codex_block_exchange_want_block_lists_received, "codex blockexchange wantBlock lists received")
declareCounter(
codex_block_exchange_want_have_lists_sent, "codex blockexchange wantHave lists sent"
)
declareCounter(
codex_block_exchange_want_have_lists_received,
"codex blockexchange wantHave lists received",
)
declareCounter(
codex_block_exchange_want_block_lists_sent, "codex blockexchange wantBlock lists sent"
)
declareCounter(
codex_block_exchange_want_block_lists_received,
"codex blockexchange wantBlock lists received",
)
declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sent")
declareCounter(codex_block_exchange_blocks_received, "codex blockexchange blocks received")
declareCounter(
codex_block_exchange_blocks_received, "codex blockexchange blocks received"
)
const
DefaultMaxPeersPerRequest* = 10
@ -67,18 +79,19 @@ type
TaskScheduler* = proc(task: BlockExcPeerCtx): bool {.gcsafe.}
BlockExcEngine* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
network*: BlockExcNetwork # Network interface
peers*: PeerCtxStore # Peers we're currently actively exchanging with
taskQueue*: AsyncHeapQueue[BlockExcPeerCtx] # Peers we're currently processing tasks for
concurrentTasks: int # Number of concurrent peers we're serving at any given time
trackedFutures: TrackedFutures # Tracks futures of blockexc tasks
blockexcRunning: bool # Indicates if the blockexc task is running
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
peersPerRequest: int # Max number of peers to request from
wallet*: WalletRef # Nitro wallet for micropayments
pricing*: ?Pricing # Optional bandwidth pricing
blockFetchTimeout*: Duration # Timeout for fetching blocks over the network
localStore*: BlockStore # Local block store for this instance
network*: BlockExcNetwork # Network interface
peers*: PeerCtxStore # Peers we're currently actively exchanging with
taskQueue*: AsyncHeapQueue[BlockExcPeerCtx]
# Peers we're currently processing tasks for
concurrentTasks: int # Number of concurrent peers we're serving at any given time
trackedFutures: TrackedFutures # Tracks futures of blockexc tasks
blockexcRunning: bool # Indicates if the blockexc task is running
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
peersPerRequest: int # Max number of peers to request from
wallet*: WalletRef # Nitro wallet for micropayments
pricing*: ?Pricing # Optional bandwidth pricing
blockFetchTimeout*: Duration # Timeout for fetching blocks over the network
discovery*: DiscoveryEngine
advertiser*: Advertiser
@ -87,7 +100,7 @@ type
price*: UInt256
# attach task scheduler to engine
proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe} =
proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} =
b.taskQueue.pushOrUpdateNoWait(task).isOk()
proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).}
@ -105,7 +118,7 @@ proc start*(b: BlockExcEngine) {.async.} =
return
b.blockexcRunning = true
for i in 0..<b.concurrentTasks:
for i in 0 ..< b.concurrentTasks:
let fut = b.blockexcTaskRunner()
b.trackedFutures.track(fut)
asyncSpawn fut
@ -128,35 +141,26 @@ proc stop*(b: BlockExcEngine) {.async.} =
trace "NetworkStore stopped"
proc sendWantHave(
b: BlockExcEngine,
addresses: seq[BlockAddress],
peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
b: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx]
): Future[void] {.async.} =
for p in peers:
let toAsk = addresses.filterIt(it notin p.peerHave)
trace "Sending wantHave request", toAsk, peer = p.id
await b.network.request.sendWantList(
p.id,
toAsk,
wantType = WantType.WantHave)
await b.network.request.sendWantList(p.id, toAsk, wantType = WantType.WantHave)
codex_block_exchange_want_have_lists_sent.inc()
proc sendWantBlock(
b: BlockExcEngine,
addresses: seq[BlockAddress],
blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
b: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx
): Future[void] {.async.} =
trace "Sending wantBlock request to", addresses, peer = blockPeer.id
await b.network.request.sendWantList(
blockPeer.id,
addresses,
wantType = WantType.WantBlock) # we want this remote to send us a block
blockPeer.id, addresses, wantType = WantType.WantBlock
) # we want this remote to send us a block
codex_block_exchange_want_block_lists_sent.inc()
proc monitorBlockHandle(
b: BlockExcEngine,
handle: Future[Block],
address: BlockAddress,
peerId: PeerId) {.async.} =
b: BlockExcEngine, handle: Future[Block], address: BlockAddress, peerId: PeerId
) {.async.} =
try:
discard await handle
except CancelledError as exc:
@ -175,12 +179,13 @@ proc monitorBlockHandle(
await b.network.switch.disconnect(peerId)
b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
proc pickPseudoRandom(address: BlockAddress, peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
proc pickPseudoRandom(
address: BlockAddress, peers: seq[BlockExcPeerCtx]
): BlockExcPeerCtx =
return peers[hash(address) mod peers.len]
proc requestBlock*(
b: BlockExcEngine,
address: BlockAddress,
b: BlockExcEngine, address: BlockAddress
): Future[?!Block] {.async.} =
let blockFuture = b.pendingBlocks.getWantHandle(address, b.blockFetchTimeout)
@ -204,16 +209,12 @@ proc requestBlock*(
except AsyncTimeoutError as err:
failure err
proc requestBlock*(
b: BlockExcEngine,
cid: Cid
): Future[?!Block] =
proc requestBlock*(b: BlockExcEngine, cid: Cid): Future[?!Block] =
b.requestBlock(BlockAddress.init(cid))
proc blockPresenceHandler*(
b: BlockExcEngine,
peer: PeerId,
blocks: seq[BlockPresence]) {.async.} =
b: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
) {.async.} =
let
peerCtx = b.peers.get(peer)
wantList = toSeq(b.pendingBlocks.wantList)
@ -227,17 +228,12 @@ proc blockPresenceHandler*(
let
peerHave = peerCtx.peerHave
dontWantCids = peerHave.filterIt(
it notin wantList
)
dontWantCids = peerHave.filterIt(it notin wantList)
if dontWantCids.len > 0:
peerCtx.cleanPresence(dontWantCids)
let
wantCids = wantList.filterIt(
it in peerHave
)
let wantCids = wantList.filterIt(it in peerHave)
if wantCids.len > 0:
trace "Peer has blocks in our wantList", peer, wants = wantCids
@ -246,13 +242,12 @@ proc blockPresenceHandler*(
# if none of the connected peers report our wants in their have list,
# fire up discovery
b.discovery.queueFindBlocksReq(
toSeq(b.pendingBlocks.wantListCids)
.filter do(cid: Cid) -> bool:
not b.peers.anyIt( cid in it.peerHaveCids ))
toSeq(b.pendingBlocks.wantListCids).filter do(cid: Cid) -> bool:
not b.peers.anyIt(cid in it.peerHaveCids)
)
proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
let
cids = blocksDelivery.mapIt( it.blk.cid )
let cids = blocksDelivery.mapIt(it.blk.cid)
# schedule any new peers to provide blocks to
for p in b.peers:
@ -270,14 +265,16 @@ proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.asyn
proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
## Tells neighboring peers that we're no longer interested in a block.
trace "Sending block request cancellations to peers", addrs, peers = b.peers.mapIt($it.id)
trace "Sending block request cancellations to peers",
addrs, peers = b.peers.mapIt($it.id)
let failed = (await allFinished(
b.peers.mapIt(
b.network.request.sendWantCancellations(
peer = it.id,
addresses = addrs))))
.filterIt(it.failed)
let failed = (
await allFinished(
b.peers.mapIt(
b.network.request.sendWantCancellations(peer = it.id, addresses = addrs)
)
)
).filterIt(it.failed)
if failed.len > 0:
warn "Failed to send block request cancellations to peers", peers = failed.len
@ -290,12 +287,13 @@ proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.asy
proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} =
await b.resolveBlocks(
blocks.mapIt(
BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)
)))
BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid))
)
)
proc payForBlocks(engine: BlockExcEngine,
peer: BlockExcPeerCtx,
blocksDelivery: seq[BlockDelivery]) {.async.} =
proc payForBlocks(
engine: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery]
) {.async.} =
let
sendPayment = engine.network.request.sendPayment
price = peer.price(blocksDelivery.mapIt(it.address))
@ -304,9 +302,7 @@ proc payForBlocks(engine: BlockExcEngine,
trace "Sending payment for blocks", price, len = blocksDelivery.len
await sendPayment(peer.id, payment)
proc validateBlockDelivery(
b: BlockExcEngine,
bd: BlockDelivery): ?!void =
proc validateBlockDelivery(b: BlockExcEngine, bd: BlockDelivery): ?!void =
if bd.address notin b.pendingBlocks:
return failure("Received block is not currently a pending block")
@ -315,33 +311,36 @@ proc validateBlockDelivery(
return failure("Missing proof")
if proof.index != bd.address.index:
return failure("Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index)
return failure(
"Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index
)
without leaf =? bd.blk.cid.mhash.mapFailure, err:
return failure("Unable to get mhash from cid for block, nested err: " & err.msg)
without treeRoot =? bd.address.treeCid.mhash.mapFailure, err:
return failure("Unable to get mhash from treeCid for block, nested err: " & err.msg)
return
failure("Unable to get mhash from treeCid for block, nested err: " & err.msg)
if err =? proof.verify(leaf, treeRoot).errorOption:
return failure("Unable to verify proof for block, nested err: " & err.msg)
else: # not leaf
if bd.address.cid != bd.blk.cid:
return failure("Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid)
return failure(
"Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid
)
return success()
proc blocksDeliveryHandler*(
b: BlockExcEngine,
peer: PeerId,
blocksDelivery: seq[BlockDelivery]) {.async.} =
b: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async.} =
trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))
var validatedBlocksDelivery: seq[BlockDelivery]
for bd in blocksDelivery:
logScope:
peer = peer
peer = peer
address = bd.address
if err =? b.validateBlockDelivery(bd).errorOption:
@ -356,12 +355,11 @@ proc blocksDeliveryHandler*(
without proof =? bd.proof:
error "Proof expected for a leaf block delivery"
continue
if err =? (await b.localStore.putCidAndProof(
bd.address.treeCid,
bd.address.index,
bd.blk.cid,
proof)).errorOption:
if err =? (
await b.localStore.putCidAndProof(
bd.address.treeCid, bd.address.index, bd.blk.cid, proof
)
).errorOption:
error "Unable to store proof and cid for a block"
continue
@ -370,20 +368,15 @@ proc blocksDeliveryHandler*(
await b.resolveBlocks(validatedBlocksDelivery)
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
let
peerCtx = b.peers.get(peer)
let peerCtx = b.peers.get(peer)
if peerCtx != nil:
await b.payForBlocks(peerCtx, blocksDelivery)
## shouldn't we remove them from the want-list instead of this:
peerCtx.cleanPresence(blocksDelivery.mapIt( it.address ))
peerCtx.cleanPresence(blocksDelivery.mapIt(it.address))
proc wantListHandler*(
b: BlockExcEngine,
peer: PeerId,
wantList: WantList) {.async.} =
let
peerCtx = b.peers.get(peer)
proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.async.} =
let peerCtx = b.peers.get(peer)
if peerCtx.isNil:
return
@ -393,35 +386,32 @@ proc wantListHandler*(
schedulePeer = false
for e in wantList.entries:
let
idx = peerCtx.peerWants.findIt(it.address == e.address)
let idx = peerCtx.peerWants.findIt(it.address == e.address)
logScope:
peer = peerCtx.id
address = e.address
wantType = $e.wantType
peer = peerCtx.id
address = e.address
wantType = $e.wantType
if idx < 0: # Adding new entry to peer wants
let
have = await e.address in b.localStore
price = @(
b.pricing.get(Pricing(price: 0.u256))
.price.toBytesBE)
price = @(b.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if e.wantType == WantType.WantHave:
if have:
presence.add(
BlockPresence(
address: e.address,
`type`: BlockPresenceType.Have,
price: price))
address: e.address, `type`: BlockPresenceType.Have, price: price
)
)
else:
if e.sendDontHave:
presence.add(
BlockPresence(
address: e.address,
`type`: BlockPresenceType.DontHave,
price: price))
address: e.address, `type`: BlockPresenceType.DontHave, price: price
)
)
peerCtx.peerWants.add(e)
codex_block_exchange_want_have_lists_received.inc()
@ -446,31 +436,24 @@ proc wantListHandler*(
if not b.scheduleTask(peerCtx):
warn "Unable to schedule task for peer", peer
proc accountHandler*(
engine: BlockExcEngine,
peer: PeerId,
account: Account) {.async.} =
let
context = engine.peers.get(peer)
proc accountHandler*(engine: BlockExcEngine, peer: PeerId, account: Account) {.async.} =
let context = engine.peers.get(peer)
if context.isNil:
return
context.account = account.some
proc paymentHandler*(
engine: BlockExcEngine,
peer: PeerId,
payment: SignedState) {.async.} =
engine: BlockExcEngine, peer: PeerId, payment: SignedState
) {.async.} =
trace "Handling payments", peer
without context =? engine.peers.get(peer).option and
account =? context.account:
without context =? engine.peers.get(peer).option and account =? context.account:
trace "No context or account for peer", peer
return
if channel =? context.paymentChannel:
let
sender = account.address
let sender = account.address
discard engine.wallet.acceptPayment(channel, Asset, sender, payment)
else:
context.paymentChannel = engine.wallet.acceptChannel(payment).option
@ -484,19 +467,16 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
if peer notin b.peers:
trace "Setting up new peer", peer
b.peers.add(BlockExcPeerCtx(
id: peer
))
b.peers.add(BlockExcPeerCtx(id: peer))
trace "Added peer", peers = b.peers.len
# broadcast our want list, the other peer will do the same
if b.pendingBlocks.wantListLen > 0:
trace "Sending our want list to a peer", peer
let cids = toSeq(b.pendingBlocks.wantList)
await b.network.request.sendWantList(
peer, cids, full = true)
await b.network.request.sendWantList(peer, cids, full = true)
if address =? b.pricing.?address:
if address =? b.pricing .? address:
await b.network.request.sendAccount(peer, Account(address: address))
proc dropPeer*(b: BlockExcEngine, peer: PeerId) =
@ -515,10 +495,8 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
# TODO: There should be all sorts of accounting of
# bytes sent/received here
var
wantsBlocks = task.peerWants.filterIt(
it.wantType == WantType.WantBlock and not it.inFlight
)
var wantsBlocks =
task.peerWants.filterIt(it.wantType == WantType.WantBlock and not it.inFlight)
proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) =
for peerWant in task.peerWants.mitems:
@ -535,18 +513,20 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
if e.address.leaf:
(await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(blkAndProof: (Block, CodexProof)) =>
BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some)
BlockDelivery(
address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some
)
)
else:
(await b.localStore.getBlock(e.address)).map(
(blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
(blk: Block) =>
BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
)
let
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
blocksDelivery = blocksDeliveryFut
.filterIt(it.completed and it.read.isOk)
.mapIt(it.read.get)
blocksDelivery =
blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get)
# All the wants that failed local lookup must be set to not-in-flight again.
let
@ -555,11 +535,9 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
updateInFlight(failedAddresses, false)
if blocksDelivery.len > 0:
trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
await b.network.request.sendBlocksDelivery(
task.id,
blocksDelivery
)
trace "Sending blocks to peer",
peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
await b.network.request.sendBlocksDelivery(task.id, blocksDelivery)
codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
@ -572,8 +550,7 @@ proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).} =
trace "Starting blockexc task runner"
while b.blockexcRunning:
try:
let
peerCtx = await b.taskQueue.pop()
let peerCtx = await b.taskQueue.pop()
await b.taskHandler(peerCtx)
except CancelledError:
@ -599,20 +576,20 @@ proc new*(
## Create new block exchange engine instance
##
let
engine = BlockExcEngine(
localStore: localStore,
peers: peerStore,
pendingBlocks: pendingBlocks,
peersPerRequest: peersPerRequest,
network: network,
wallet: wallet,
concurrentTasks: concurrentTasks,
trackedFutures: TrackedFutures.new(),
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
discovery: discovery,
advertiser: advertiser,
blockFetchTimeout: blockFetchTimeout)
let engine = BlockExcEngine(
localStore: localStore,
peers: peerStore,
pendingBlocks: pendingBlocks,
peersPerRequest: peersPerRequest,
network: network,
wallet: wallet,
concurrentTasks: concurrentTasks,
trackedFutures: TrackedFutures.new(),
taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
discovery: discovery,
advertiser: advertiser,
blockFetchTimeout: blockFetchTimeout,
)
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
if event.kind == PeerEventKind.Joined:
@ -624,19 +601,17 @@ proc new*(
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc blockWantListHandler(
peer: PeerId,
wantList: WantList): Future[void] {.gcsafe.} =
proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} =
engine.wantListHandler(peer, wantList)
proc blockPresenceHandler(
peer: PeerId,
presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
peer: PeerId, presence: seq[BlockPresence]
): Future[void] {.gcsafe.} =
engine.blockPresenceHandler(peer, presence)
proc blocksDeliveryHandler(
peer: PeerId,
blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
peer: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} =
engine.blocksDeliveryHandler(peer, blocksDelivery)
proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} =
@ -650,6 +625,7 @@ proc new*(
onBlocksDelivery: blocksDeliveryHandler,
onPresence: blockPresenceHandler,
onAccount: accountHandler,
onPayment: paymentHandler)
onPayment: paymentHandler,
)
return engine


@ -15,15 +15,16 @@ import ../peers
export nitro
export results
push: {.upraises: [].}
push:
{.upraises: [].}
const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64^18).u256 # 1 asset, ERC20 default is 18 decimals
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals
func openLedgerChannel*(wallet: WalletRef,
hub: EthAddress,
asset: EthAddress): ?!ChannelId =
func openLedgerChannel*(
wallet: WalletRef, hub: EthAddress, asset: EthAddress
): ?!ChannelId =
wallet.openLedgerChannel(hub, ChainId, asset, AmountPerChannel)
func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
@ -36,9 +37,7 @@ func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
else:
failure "no account set for peer"
func pay*(wallet: WalletRef,
peer: BlockExcPeerCtx,
amount: UInt256): ?!SignedState =
func pay*(wallet: WalletRef, peer: BlockExcPeerCtx, amount: UInt256): ?!SignedState =
if account =? peer.account:
let asset = Asset
let receiver = account.address


@ -12,7 +12,8 @@ import std/monotimes
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import pkg/chronos
import pkg/libp2p
@ -25,11 +26,15 @@ import ../../logutils
logScope:
topics = "codex pendingblocks"
declareGauge(codex_block_exchange_pending_block_requests, "codex blockexchange pending block requests")
declareGauge(codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us")
declareGauge(
codex_block_exchange_pending_block_requests,
"codex blockexchange pending block requests",
)
declareGauge(
codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us"
)
const
DefaultBlockTimeout* = 10.minutes
const DefaultBlockTimeout* = 10.minutes
type
BlockReq* = object
@ -44,10 +49,11 @@ proc updatePendingBlockGauge(p: PendingBlocksManager) =
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*(
p: PendingBlocksManager,
address: BlockAddress,
timeout = DefaultBlockTimeout,
inFlight = false): Future[Block] {.async.} =
p: PendingBlocksManager,
address: BlockAddress,
timeout = DefaultBlockTimeout,
inFlight = false,
): Future[Block] {.async.} =
## Add an event for a block
##
@ -56,7 +62,8 @@ proc getWantHandle*(
p.blocks[address] = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight,
startTime: getMonoTime().ticks)
startTime: getMonoTime().ticks,
)
p.updatePendingBlockGauge()
return await p.blocks[address].handle.wait(timeout)
@ -72,15 +79,13 @@ proc getWantHandle*(
p.updatePendingBlockGauge()
proc getWantHandle*(
p: PendingBlocksManager,
cid: Cid,
timeout = DefaultBlockTimeout,
inFlight = false): Future[Block] =
p: PendingBlocksManager, cid: Cid, timeout = DefaultBlockTimeout, inFlight = false
): Future[Block] =
p.getWantHandle(BlockAddress.init(cid), timeout, inFlight)
proc resolve*(
p: PendingBlocksManager,
blocksDelivery: seq[BlockDelivery]) {.gcsafe, raises: [].} =
p: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, raises: [].} =
## Resolve pending blocks
##
@ -101,19 +106,14 @@ proc resolve*(
else:
trace "Block handle already finished", address = bd.address
proc setInFlight*(
p: PendingBlocksManager,
address: BlockAddress,
inFlight = true) =
proc setInFlight*(p: PendingBlocksManager, address: BlockAddress, inFlight = true) =
## Set inflight status for a block
##
p.blocks.withValue(address, pending):
pending[].inFlight = inFlight
proc isInFlight*(
p: PendingBlocksManager,
address: BlockAddress): bool =
proc isInFlight*(p: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block is in flight
##


@ -35,8 +35,10 @@ const
type
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
@ -54,10 +56,14 @@ type
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false): Future[void] {.gcsafe.}
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
sendDontHave: bool = false,
): Future[void] {.gcsafe.}
WantCancellationSender* =
proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
BlocksDeliverySender* =
proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
PresenceSender* =
proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
@ -108,10 +114,7 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
finally:
b.inflightSema.release()
proc handleWantList(
b: BlockExcNetwork,
peer: NetworkPeer,
list: WantList) {.async.} =
proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} =
## Handle incoming want list
##
@ -119,14 +122,15 @@ proc handleWantList(
await b.handlers.onWantList(peer.id, list)
proc sendWantList*(
b: BlockExcNetwork,
id: PeerId,
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false): Future[void] =
b: BlockExcNetwork,
id: PeerId,
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] =
## Send a want message to peer
##
@ -137,43 +141,41 @@ proc sendWantList*(
priority: priority,
cancel: cancel,
wantType: wantType,
sendDontHave: sendDontHave) ),
full: full)
sendDontHave: sendDontHave,
)
),
full: full,
)
b.send(id, Message(wantlist: msg))
proc sendWantCancellations*(
b: BlockExcNetwork,
id: PeerId,
addresses: seq[BlockAddress]): Future[void] {.async.} =
b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async.} =
## Informs a remote peer that we're no longer interested in a set of blocks
##
await b.sendWantList(id = id, addresses = addresses, cancel = true)
proc handleBlocksDelivery(
b: BlockExcNetwork,
peer: NetworkPeer,
blocksDelivery: seq[BlockDelivery]) {.async.} =
b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async.} =
## Handle incoming blocks
##
if not b.handlers.onBlocksDelivery.isNil:
await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)
proc sendBlocksDelivery*(
b: BlockExcNetwork,
id: PeerId,
blocksDelivery: seq[BlockDelivery]): Future[void] =
b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] =
## Send blocks to remote
##
b.send(id, pb.Message(payload: blocksDelivery))
proc handleBlockPresence(
b: BlockExcNetwork,
peer: NetworkPeer,
presence: seq[BlockPresence]) {.async.} =
b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
) {.async.} =
## Handle block presence
##
@ -181,56 +183,44 @@ proc handleBlockPresence(
await b.handlers.onPresence(peer.id, presence)
proc sendBlockPresence*(
b: BlockExcNetwork,
id: PeerId,
presence: seq[BlockPresence]): Future[void] =
b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence]
): Future[void] =
## Send presence to remote
##
b.send(id, Message(blockPresences: @presence))
proc handleAccount(
network: BlockExcNetwork,
peer: NetworkPeer,
account: Account) {.async.} =
network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async.} =
## Handle account info
##
if not network.handlers.onAccount.isNil:
await network.handlers.onAccount(peer.id, account)
proc sendAccount*(
b: BlockExcNetwork,
id: PeerId,
account: Account): Future[void] =
proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] =
## Send account info to remote
##
b.send(id, Message(account: AccountMessage.init(account)))
proc sendPayment*(
b: BlockExcNetwork,
id: PeerId,
payment: SignedState): Future[void] =
proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] =
## Send payment to remote
##
b.send(id, Message(payment: StateChannelUpdate.init(payment)))
proc handlePayment(
network: BlockExcNetwork,
peer: NetworkPeer,
payment: SignedState) {.async.} =
network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async.} =
## Handle payment
##
if not network.handlers.onPayment.isNil:
await network.handlers.onPayment(peer.id, payment)
proc rpcHandler(
b: BlockExcNetwork,
peer: NetworkPeer,
msg: Message) {.raises: [].} =
proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.raises: [].} =
## handle rpc messages
##
if msg.wantList.entries.len > 0:
@ -266,7 +256,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
if not isNil(b.getConn):
getConn = b.getConn
let rpcHandler = proc (p: NetworkPeer, msg: Message) {.async.} =
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async.} =
b.rpcHandler(p, msg)
# create new pubsub peer
@ -316,41 +306,43 @@ method init*(b: BlockExcNetwork) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
let peerId = conn.peerId
let blockexcPeer = b.getOrCreatePeer(peerId)
await blockexcPeer.readLoop(conn) # attach read loop
await blockexcPeer.readLoop(conn) # attach read loop
b.handler = handle
b.codec = Codec
proc new*(
T: type BlockExcNetwork,
switch: Switch,
connProvider: ConnProvider = nil,
maxInflight = MaxInflight): BlockExcNetwork =
T: type BlockExcNetwork,
switch: Switch,
connProvider: ConnProvider = nil,
maxInflight = MaxInflight,
): BlockExcNetwork =
## Create a new BlockExcNetwork instance
##
let
self = BlockExcNetwork(
switch: switch,
getConn: connProvider,
inflightSema: newAsyncSemaphore(maxInflight))
let self = BlockExcNetwork(
switch: switch, getConn: connProvider, inflightSema: newAsyncSemaphore(maxInflight)
)
proc sendWantList(
id: PeerId,
cids: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false): Future[void] {.gcsafe.} =
self.sendWantList(
id, cids, priority, cancel,
wantType, full, sendDontHave)
id: PeerId,
cids: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] {.gcsafe.} =
self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)
proc sendWantCancellations(id: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} =
proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.gcsafe.} =
self.sendWantCancellations(id, addresses)
proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} =
self.sendBlocksDelivery(id, blocksDelivery)
proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
@ -368,7 +360,8 @@ proc new*(
sendBlocksDelivery: sendBlocksDelivery,
sendPresence: sendPresence,
sendAccount: sendAccount,
sendPayment: sendPayment)
sendPayment: sendPayment,
)
self.init()
return self

View File

@ -8,7 +8,8 @@
## those terms.
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import pkg/chronos
import pkg/libp2p
@ -33,8 +34,7 @@ type
getConn: ConnProvider
proc connected*(b: NetworkPeer): bool =
not(isNil(b.sendConn)) and
not(b.sendConn.closed or b.sendConn.atEof)
not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof)
proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
if isNil(conn):
@ -80,15 +80,11 @@ proc broadcast*(b: NetworkPeer, msg: Message) =
asyncSpawn sendAwaiter()
func new*(
T: type NetworkPeer,
peer: PeerId,
connProvider: ConnProvider,
rpcHandler: RPCHandler): NetworkPeer =
T: type NetworkPeer,
peer: PeerId,
connProvider: ConnProvider,
rpcHandler: RPCHandler,
): NetworkPeer =
doAssert(not isNil(connProvider), "should supply connection provider")
doAssert(not isNil(connProvider),
"should supply connection provider")
NetworkPeer(
id: peer,
getConn: connProvider,
handler: rpcHandler)
NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler)

View File

@ -25,15 +25,14 @@ import ../../logutils
export payments, nitro
type
BlockExcPeerCtx* = ref object of RootObj
id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
peerWants*: seq[WantListEntry] # remote peers want lists
exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us
account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id
type BlockExcPeerCtx* = ref object of RootObj
id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
peerWants*: seq[WantListEntry] # remote peers want lists
exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us
account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
toSeq(self.blocks.keys)


@ -13,7 +13,8 @@ import std/algorithm
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import pkg/chronos
import pkg/libp2p
@ -22,7 +23,6 @@ import ../protobuf/blockexc
import ../../blocktype
import ../../logutils
import ./peercontext
export peercontext
@ -32,6 +32,7 @@ logScope:
type
PeerCtxStore* = ref object of RootObj
peers*: OrderedTable[PeerId, BlockExcPeerCtx]
PeersForBlock* = object of RootObj
with*: seq[BlockExcPeerCtx]
without*: seq[BlockExcPeerCtx]
@ -44,7 +45,7 @@ proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =
## Convenience method to check for peer presence
##
a.anyIt( it.id == b )
a.anyIt(it.id == b)
func contains*(self: PeerCtxStore, peerId: PeerId): bool =
peerId in self.peers
@ -62,21 +63,21 @@ func len*(self: PeerCtxStore): int =
self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == address ) )
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address))
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it.cidOrTreeCid == cid ) )
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it == address ) )
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address))
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.address.cidOrTreeCid == cid ) )
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid))
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res = PeersForBlock()
for peer in self:
if peer.peerHave.anyIt( it == address ):
if peer.peerHave.anyIt(it == address):
res.with.add(peer)
else:
res.without.add(peer)


@ -42,7 +42,6 @@ proc `==`*(a: WantListEntry, b: BlockAddress): bool =
proc `<`*(a, b: WantListEntry): bool =
a.priority < b.priority
proc `==`*(a: BlockPresence, b: BlockAddress): bool =
return a.address == b


@ -20,40 +20,40 @@ const
type
WantType* = enum
WantBlock = 0,
WantBlock = 0
WantHave = 1
WantListEntry* = object
address*: BlockAddress
priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false
inFlight*: bool # Whether block sending is in progress. Not serialized.
priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false
inFlight*: bool # Whether block sending is in progress. Not serialized.
WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries
full*: bool # Whether this is the full wantList. default to false
entries*: seq[WantListEntry] # A list of wantList entries
full*: bool # Whether this is the full wantList. default to false
BlockDelivery* = object
blk*: Block
address*: BlockAddress
proof*: ?CodexProof # Present only if `address.leaf` is true
proof*: ?CodexProof # Present only if `address.leaf` is true
BlockPresenceType* = enum
Have = 0,
Have = 0
DontHave = 1
BlockPresence* = object
address*: BlockAddress
`type`*: BlockPresenceType
price*: seq[byte] # Amount of assets to pay for the block (UInt256)
price*: seq[byte] # Amount of assets to pay for the block (UInt256)
AccountMessage* = object
address*: seq[byte] # Ethereum address to which payments should be made
address*: seq[byte] # Ethereum address to which payments should be made
StateChannelUpdate* = object
update*: seq[byte] # Signed Nitro state, serialized as JSON
update*: seq[byte] # Signed Nitro state, serialized as JSON
Message* = object
wantList*: WantList
@ -140,7 +140,6 @@ proc protobufEncode*(value: Message): seq[byte] =
ipb.finish()
ipb.buffer
#
# Decoding Message from seq[byte] in Protobuf format
#
@ -151,22 +150,22 @@ proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] =
field: uint64
cidBuf = newSeq[byte]()
if ? pb.getField(1, field):
if ?pb.getField(1, field):
leaf = bool(field)
if leaf:
var
treeCid: Cid
index: Natural
if ? pb.getField(2, cidBuf):
treeCid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ? pb.getField(3, field):
if ?pb.getField(2, cidBuf):
treeCid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(3, field):
index = field
value = BlockAddress(leaf: true, treeCid: treeCid, index: index)
else:
var cid: Cid
if ? pb.getField(4, cidBuf):
cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(4, cidBuf):
cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
value = BlockAddress(leaf: false, cid: cid)
ok(value)
@ -176,15 +175,15 @@ proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry]
value = WantListEntry()
field: uint64
ipb: ProtoBuffer
if ? pb.getField(1, ipb):
value.address = ? BlockAddress.decode(ipb)
if ? pb.getField(2, field):
if ?pb.getField(1, ipb):
value.address = ?BlockAddress.decode(ipb)
if ?pb.getField(2, field):
value.priority = int32(field)
if ? pb.getField(3, field):
if ?pb.getField(3, field):
value.cancel = bool(field)
if ? pb.getField(4, field):
if ?pb.getField(4, field):
value.wantType = WantType(field)
if ? pb.getField(5, field):
if ?pb.getField(5, field):
value.sendDontHave = bool(field)
ok(value)
@ -193,10 +192,10 @@ proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
value = WantList()
field: uint64
sublist: seq[seq[byte]]
if ? pb.getRepeatedField(1, sublist):
if ?pb.getRepeatedField(1, sublist):
for item in sublist:
value.entries.add(? WantListEntry.decode(initProtoBuffer(item)))
if ? pb.getField(2, field):
value.entries.add(?WantListEntry.decode(initProtoBuffer(item)))
if ?pb.getField(2, field):
value.full = bool(field)
ok(value)
@ -208,17 +207,18 @@ proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery]
cid: Cid
ipb: ProtoBuffer
if ? pb.getField(1, cidBuf):
cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ? pb.getField(2, dataBuf):
value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
if ? pb.getField(3, ipb):
value.address = ? BlockAddress.decode(ipb)
if ?pb.getField(1, cidBuf):
cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(2, dataBuf):
value.blk =
?Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(3, ipb):
value.address = ?BlockAddress.decode(ipb)
if value.address.leaf:
var proofBuf = newSeq[byte]()
if ? pb.getField(4, proofBuf):
let proof = ? CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(4, proofBuf):
let proof = ?CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
value.proof = proof.some
else:
value.proof = CodexProof.none
@ -232,23 +232,23 @@ proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence]
value = BlockPresence()
field: uint64
ipb: ProtoBuffer
if ? pb.getField(1, ipb):
value.address = ? BlockAddress.decode(ipb)
if ? pb.getField(2, field):
if ?pb.getField(1, ipb):
value.address = ?BlockAddress.decode(ipb)
if ?pb.getField(2, field):
value.`type` = BlockPresenceType(field)
discard ? pb.getField(3, value.price)
discard ?pb.getField(3, value.price)
ok(value)
proc decode*(_: type AccountMessage, pb: ProtoBuffer): ProtoResult[AccountMessage] =
var
value = AccountMessage()
discard ? pb.getField(1, value.address)
var value = AccountMessage()
discard ?pb.getField(1, value.address)
ok(value)
proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChannelUpdate] =
var
value = StateChannelUpdate()
discard ? pb.getField(1, value.update)
proc decode*(
_: type StateChannelUpdate, pb: ProtoBuffer
): ProtoResult[StateChannelUpdate] =
var value = StateChannelUpdate()
discard ?pb.getField(1, value.update)
ok(value)
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
@ -257,17 +257,19 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
ipb: ProtoBuffer
sublist: seq[seq[byte]]
if ? pb.getField(1, ipb):
value.wantList = ? WantList.decode(ipb)
if ? pb.getRepeatedField(3, sublist):
if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist):
for item in sublist:
value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)))
if ? pb.getRepeatedField(4, sublist):
value.payload.add(
?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))
)
if ?pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(? BlockPresence.decode(initProtoBuffer(item)))
discard ? pb.getField(5, value.pendingBytes)
if ? pb.getField(6, ipb):
value.account = ? AccountMessage.decode(ipb)
if ? pb.getField(7, ipb):
value.payment = ? StateChannelUpdate.decode(ipb)
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
discard ?pb.getField(5, value.pendingBytes)
if ?pb.getField(6, ipb):
value.account = ?AccountMessage.decode(ipb)
if ?pb.getField(7, ipb):
value.payment = ?StateChannelUpdate.decode(ipb)
ok(value)


@ -11,11 +11,11 @@ export StateChannelUpdate
export stint
export nitro
push: {.upraises: [].}
push:
{.upraises: [].}
type
Account* = object
address*: EthAddress
type Account* = object
address*: EthAddress
func init*(_: type AccountMessage, account: Account): AccountMessage =
AccountMessage(address: @(account.address.toArray))
@ -24,7 +24,7 @@ func parse(_: type EthAddress, bytes: seq[byte]): ?EthAddress =
var address: array[20, byte]
if bytes.len != address.len:
return EthAddress.none
for i in 0..<address.len:
for i in 0 ..< address.len:
address[i] = bytes[i]
EthAddress(address).some


@ -11,7 +11,8 @@ export questionable
export stint
export BlockPresenceType
upraises.push: {.upraises: [].}
upraises.push:
{.upraises: [].}
type
PresenceMessage* = blockexc.BlockPresence
@ -32,15 +33,12 @@ func init*(_: type Presence, message: PresenceMessage): ?Presence =
some Presence(
address: message.address,
have: message.`type` == BlockPresenceType.Have,
price: price
price: price,
)
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
PresenceMessage(
address: presence.address,
`type`: if presence.have:
BlockPresenceType.Have
else:
BlockPresenceType.DontHave,
price: @(presence.price.toBytesBE)
`type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave,
price: @(presence.price.toBytesBE),
)


@ -14,7 +14,8 @@ export tables
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils
@ -49,16 +50,16 @@ logutils.formatIt(LogFormat.textLines, BlockAddress):
else:
"cid: " & shortLog($it.cid)
logutils.formatIt(LogFormat.json, BlockAddress): %it
logutils.formatIt(LogFormat.json, BlockAddress):
%it
proc `==`*(a, b: BlockAddress): bool =
a.leaf == b.leaf and
(
if a.leaf:
a.treeCid == b.treeCid and a.index == b.index
else:
a.cid == b.cid
)
a.leaf == b.leaf and (
if a.leaf:
a.treeCid == b.treeCid and a.index == b.index
else:
a.cid == b.cid
)
proc `$`*(a: BlockAddress): string =
if a.leaf:
@ -67,10 +68,7 @@ proc `$`*(a: BlockAddress): string =
"cid: " & $a.cid
proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf:
a.treeCid
else:
a.cid
if a.leaf: a.treeCid else: a.cid
proc address*(b: Block): BlockAddress =
BlockAddress(leaf: false, cid: b.cid)
@ -86,57 +84,55 @@ proc `$`*(b: Block): string =
result &= "\ndata: " & string.fromBytes(b.data)
func new*(
T: type Block,
data: openArray[byte] = [],
version = CIDv1,
mcodec = Sha256HashCodec,
codec = BlockCodec): ?!Block =
T: type Block,
data: openArray[byte] = [],
version = CIDv1,
mcodec = Sha256HashCodec,
codec = BlockCodec,
): ?!Block =
## creates a new block for both storage and network IO
##
let
hash = ? MultiHash.digest($mcodec, data).mapFailure
cid = ? Cid.init(version, codec, hash).mapFailure
hash = ?MultiHash.digest($mcodec, data).mapFailure
cid = ?Cid.init(version, codec, hash).mapFailure
# TODO: If the hash is `>=` to the data,
# use the Cid as a container!
Block(
cid: cid,
data: @data).success
Block(cid: cid, data: @data).success
proc new*(
T: type Block,
cid: Cid,
data: openArray[byte],
verify: bool = true
T: type Block, cid: Cid, data: openArray[byte], verify: bool = true
): ?!Block =
## creates a new block for both storage and network IO
##
if verify:
let
mhash = ? cid.mhash.mapFailure
computedMhash = ? MultiHash.digest($mhash.mcodec, data).mapFailure
computedCid = ? Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure
mhash = ?cid.mhash.mapFailure
computedMhash = ?MultiHash.digest($mhash.mcodec, data).mapFailure
computedCid = ?Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure
if computedCid != cid:
return "Cid doesn't match the data".failure
return Block(
cid: cid,
data: @data
).success
return Block(cid: cid, data: @data).success
proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block =
emptyCid(version, hcodec, BlockCodec)
.flatMap((cid: Cid) => Block.new(cid = cid, data = @[]))
emptyCid(version, hcodec, BlockCodec).flatMap(
(cid: Cid) => Block.new(cid = cid, data = @[])
)
proc emptyBlock*(cid: Cid): ?!Block =
cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
emptyBlock(cid.cidver, mhash.mcodec))
cid.mhash.mapFailure.flatMap(
(mhash: MultiHash) => emptyBlock(cid.cidver, mhash.mcodec)
)
proc isEmpty*(cid: Cid): bool =
success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
emptyCid(cid.cidver, mhash.mcodec, cid.mcodec))
success(cid) ==
cid.mhash.mapFailure.flatMap(
(mhash: MultiHash) => emptyCid(cid.cidver, mhash.mcodec, cid.mcodec)
)
proc isEmpty*(blk: Block): bool =
blk.cid.isEmpty
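Two more conventions show up throughout blocktype.nim: the early-return `?` operator loses the space before its operand, and long lambdas move inside the `flatMap` call parentheses. A minimal sketch of the `?` rule, assuming the `questionable` package already used across this codebase (`half` and `quarter` are invented helpers):

```nim
import pkg/questionable/results

proc half(n: int): ?!int =
  if n mod 2 == 1:
    return failure("odd input")  # same `.failure` shape as in the hunks above
  success(n div 2)

proc quarter(n: int): ?!int =
  # before nph: let h = ? half(n)
  let h = ?half(n)  # after nph, `?` binds tightly to its operand
  half(h)
```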

View File

@@ -11,7 +11,8 @@
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import pkg/questionable
import pkg/questionable/results
@@ -23,8 +24,7 @@ import ./logutils
export blocktype
const
DefaultChunkSize* = DefaultBlockSize
const DefaultChunkSize* = DefaultBlockSize
type
# default reader type
@@ -33,10 +33,10 @@ type
# Reader that splits input data into fixed-size chunks
Chunker* = ref object
reader*: Reader # Procedure called to actually read the data
offset*: int # Bytes read so far (position in the stream)
chunkSize*: NBytes # Size of each chunk
pad*: bool # Pad last chunk to chunkSize?
reader*: Reader # Procedure called to actually read the data
offset*: int # Bytes read so far (position in the stream)
chunkSize*: NBytes # Size of each chunk
pad*: bool # Pad last chunk to chunkSize?
FileChunker* = Chunker
LPStreamChunker* = Chunker
@@ -60,30 +60,21 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} =
return move buff
proc new*(
T: type Chunker,
reader: Reader,
chunkSize = DefaultChunkSize,
pad = true
T: type Chunker, reader: Reader, chunkSize = DefaultChunkSize, pad = true
): Chunker =
## create a new Chunker instance
##
Chunker(
reader: reader,
offset: 0,
chunkSize: chunkSize,
pad: pad)
Chunker(reader: reader, offset: 0, chunkSize: chunkSize, pad: pad)
proc new*(
T: type LPStreamChunker,
stream: LPStream,
chunkSize = DefaultChunkSize,
pad = true
T: type LPStreamChunker, stream: LPStream, chunkSize = DefaultChunkSize, pad = true
): LPStreamChunker =
## create the default LPStream chunker
##
proc reader(data: ChunkBuffer, len: int): Future[int]
{.gcsafe, async, raises: [Defect].} =
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
var res = 0
try:
while res < len:
@@ -101,22 +92,17 @@ proc new*(
return res
LPStreamChunker.new(
reader = reader,
chunkSize = chunkSize,
pad = pad)
LPStreamChunker.new(reader = reader, chunkSize = chunkSize, pad = pad)
proc new*(
T: type FileChunker,
file: File,
chunkSize = DefaultChunkSize,
pad = true
T: type FileChunker, file: File, chunkSize = DefaultChunkSize, pad = true
): FileChunker =
## create the default File chunker
##
proc reader(data: ChunkBuffer, len: int): Future[int]
{.gcsafe, async, raises: [Defect].} =
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
var total = 0
try:
while total < len:
@@ -135,7 +121,4 @@ proc new*(
return total
FileChunker.new(
reader = reader,
chunkSize = chunkSize,
pad = pad)
FileChunker.new(reader = reader, chunkSize = chunkSize, pad = pad)
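The chunker hunks show how nph re-flows a signature that no longer fits on one line: break after the opening parenthesis, indent the parameters four spaces, and keep `): ReturnType {.pragmas.} =` together on the closing line. A compilable sketch using std/asyncdispatch in place of the project's async runtime:

```nim
import std/asyncdispatch

# before nph:
#   proc reader(data: string, want: int): Future[int]
#     {.gcsafe, async.} =
# after nph: break after `(`, keep `): T {.pragmas.} =` together
proc reader(
    data: string, want: int
): Future[int] {.gcsafe, async.} =
  # stand-in body; the real reader copies bytes from a stream or file
  return min(data.len, want)

echo waitFor reader("hello", 3) # -> 3
```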

View File

@@ -20,9 +20,9 @@ method start*(clock: Clock) {.base, async.} =
method stop*(clock: Clock) {.base, async.} =
discard
proc withTimeout*(future: Future[void],
clock: Clock,
expiry: SecondsSince1970) {.async.} =
proc withTimeout*(
future: Future[void], clock: Clock, expiry: SecondsSince1970
) {.async.} =
let timeout = clock.waitUntil(expiry)
try:
await future or timeout
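`withTimeout` works by racing the awaited future against a clock-driven timeout with the `or` combinator. A minimal sketch of that pattern using chronos, as the rest of the codebase does; the two sleeps stand in for a real future and a real expiry:

```nim
import pkg/chronos

proc demo() {.async.} =
  let work = sleepAsync(2.seconds)
  let timeout = sleepAsync(1.seconds)
  await work or timeout # resumes as soon as either future completes

waitFor demo()
```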

View File

@@ -68,8 +68,7 @@ proc waitForSync(provider: Provider): Future[void] {.async.} =
inc sleepTime
trace "Ethereum provider is synced."
proc bootstrapInteractions(
s: CodexServer): Future[void] {.async.} =
proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
## bootstrap interactions and return contracts
## using clients, hosts, validators pairings
##
@@ -137,12 +136,12 @@ proc bootstrapInteractions(
host = some HostInteractions.new(clock, sales)
if config.validator:
without validationConfig =? ValidationConfig.init(
config.validatorMaxSlots,
config.validatorGroups,
config.validatorGroupIndex), err:
error "Invalid validation parameters", err = err.msg
quit QuitFailure
without validationConfig =?
ValidationConfig.init(
config.validatorMaxSlots, config.validatorGroups, config.validatorGroupIndex
), err:
error "Invalid validation parameters", err = err.msg
quit QuitFailure
let validation = Validation.new(clock, market, validationConfig)
validator = some ValidatorInteractions.new(clock, validation)
@@ -156,10 +155,9 @@ proc start*(s: CodexServer) {.async.} =
await s.codexNode.switch.start()
let (announceAddrs,discoveryAddrs)= nattedAddress(
s.config.nat,
s.codexNode.switch.peerInfo.addrs,
s.config.discoveryPort)
let (announceAddrs, discoveryAddrs) = nattedAddress(
s.config.nat, s.codexNode.switch.peerInfo.addrs, s.config.discoveryPort
)
s.codexNode.discovery.updateAnnounceRecord(announceAddrs)
s.codexNode.discovery.updateDhtRecord(discoveryAddrs)
@@ -176,15 +174,14 @@ proc stop*(s: CodexServer) {.async.} =
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop())
s.maintenance.stop(),
)
proc new*(
T: type CodexServer,
config: CodexConf,
privateKey: CodexPrivateKey): CodexServer =
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
): CodexServer =
## create CodexServer including setting up datastore, repostore, etc
let
switch = SwitchBuilder
let switch = SwitchBuilder
.new()
.withPrivateKey(privateKey)
.withAddresses(config.listenAddrs)
@@ -197,80 +194,107 @@ proc new*(
.withTcpTransport({ServerFlags.ReuseAddr})
.build()
var
cache: CacheStore = nil
var cache: CacheStore = nil
if config.cacheSize > 0'nb:
cache = CacheStore.new(cacheSize = config.cacheSize)
## Is unused?
let
discoveryDir = config.dataDir / CodexDhtNamespace
let discoveryDir = config.dataDir / CodexDhtNamespace
if io2.createPath(discoveryDir).isErr:
trace "Unable to create discovery directory for block store", discoveryDir = discoveryDir
trace "Unable to create discovery directory for block store",
discoveryDir = discoveryDir
raise (ref Defect)(
msg: "Unable to create discovery directory for block store: " & discoveryDir)
msg: "Unable to create discovery directory for block store: " & discoveryDir
)
let
discoveryStore = Datastore(
LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace)
.expect("Should create discovery datastore!"))
LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace).expect(
"Should create discovery datastore!"
)
)
discovery = Discovery.new(
switch.peerInfo.privateKey,
announceAddrs = config.listenAddrs,
bindPort = config.discoveryPort,
bootstrapNodes = config.bootstrapNodes,
store = discoveryStore)
store = discoveryStore,
)
wallet = WalletRef.new(EthPrivateKey.random())
network = BlockExcNetwork.new(switch)
repoData = case config.repoKind
of repoFS: Datastore(FSDatastore.new($config.dataDir, depth = 5)
.expect("Should create repo file data store!"))
of repoSQLite: Datastore(SQLiteDatastore.new($config.dataDir)
.expect("Should create repo SQLite data store!"))
of repoLevelDb: Datastore(LevelDbDatastore.new($config.dataDir)
.expect("Should create repo LevelDB data store!"))
repoData =
case config.repoKind
of repoFS:
Datastore(
FSDatastore.new($config.dataDir, depth = 5).expect(
"Should create repo file data store!"
)
)
of repoSQLite:
Datastore(
SQLiteDatastore.new($config.dataDir).expect(
"Should create repo SQLite data store!"
)
)
of repoLevelDb:
Datastore(
LevelDbDatastore.new($config.dataDir).expect(
"Should create repo LevelDB data store!"
)
)
repoStore = RepoStore.new(
repoDs = repoData,
metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace)
.expect("Should create metadata store!"),
metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace).expect(
"Should create metadata store!"
),
quotaMaxBytes = config.storageQuota,
blockTtl = config.blockTtl)
blockTtl = config.blockTtl,
)
maintenance = BlockMaintainer.new(
repoStore,
interval = config.blockMaintenanceInterval,
numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks)
numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks,
)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
advertiser = Advertiser.new(repoStore, discovery)
blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks)
blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
engine = BlockExcEngine.new(
repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks
)
store = NetworkStore.new(engine, repoStore)
prover = if config.prover:
let backend = config.initializeBackend().expect("Unable to create prover backend.")
some Prover.new(store, backend, config.numProofSamples)
else:
none Prover
prover =
if config.prover:
let backend =
config.initializeBackend().expect("Unable to create prover backend.")
some Prover.new(store, backend, config.numProofSamples)
else:
none Prover
codexNode = CodexNodeRef.new(
switch = switch,
networkStore = store,
engine = engine,
discovery = discovery,
prover = prover)
prover = prover,
)
restServer = RestServerRef.new(
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
initTAddress(config.apiBindAddress , config.apiPort),
bufferSize = (1024 * 64),
maxRequestBodySize = int.high)
restServer = RestServerRef
.new(
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
initTAddress(config.apiBindAddress, config.apiPort),
bufferSize = (1024 * 64),
maxRequestBodySize = int.high,
)
.expect("Should start rest server!")
switch.mount(network)
@@ -280,4 +304,5 @@ proc new*(
codexNode: codexNode,
restServer: restServer,
repoStore: repoStore,
maintenance: maintenance)
maintenance: maintenance,
)
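The dominant change in this file is nph's layout for expression initializers: `repoData = case config.repoKind ...` and `prover = if config.prover: ...` now start the `case` or `if` on a fresh line indented under the `=`. A reduced sketch of the shape, with an enum and strings standing in for the real datastore and prover types:

```nim
import std/options

type RepoKind = enum
  repoFS, repoSQLite, repoLevelDb

let kind = repoFS

# nph indents the whole selecting expression one level under the `=`
let repoData =
  case kind
  of repoFS: "fs datastore"
  of repoSQLite: "sqlite datastore"
  of repoLevelDb: "leveldb datastore"

let prover =
  if kind == repoFS:
    some "prover backend"
  else:
    none string
```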

View File

@@ -25,15 +25,15 @@ export tables
const
# Size of blocks for storage / network exchange,
DefaultBlockSize* = NBytes 1024*64
DefaultBlockSize* = NBytes 1024 * 64
DefaultCellSize* = NBytes 2048
# Proving defaults
DefaultMaxSlotDepth* = 32
DefaultMaxSlotDepth* = 32
DefaultMaxDatasetDepth* = 8
DefaultBlockDepth* = 5
DefaultCellElms* = 67
DefaultSamplesNum* = 5
DefaultBlockDepth* = 5
DefaultCellElms* = 67
DefaultSamplesNum* = 5
# hashes
Sha256HashCodec* = multiCodec("sha2-256")
@@ -48,18 +48,10 @@ const
SlotProvingRootCodec* = multiCodec("codex-proving-root")
CodexSlotCellCodec* = multiCodec("codex-slot-cell")
CodexHashesCodecs* = [
Sha256HashCodec,
Pos2Bn128SpngCodec,
Pos2Bn128MrklCodec
]
CodexHashesCodecs* = [Sha256HashCodec, Pos2Bn128SpngCodec, Pos2Bn128MrklCodec]
CodexPrimitivesCodecs* = [
ManifestCodec,
DatasetRootCodec,
BlockCodec,
SlotRootCodec,
SlotProvingRootCodec,
ManifestCodec, DatasetRootCodec, BlockCodec, SlotRootCodec, SlotProvingRootCodec,
CodexSlotCellCodec,
]
@@ -74,40 +66,34 @@ proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
let
emptyData: seq[byte] = @[]
PadHashes = {
Sha256HashCodec: ? MultiHash.digest($Sha256HashCodec, emptyData).mapFailure,
Sha512HashCodec: ? MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
Sha256HashCodec: ?MultiHash.digest($Sha256HashCodec, emptyData).mapFailure,
Sha512HashCodec: ?MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
}.toTable
var
table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()
var table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()
for hcodec, mhash in PadHashes.pairs:
table[(CIDv1, hcodec, BlockCodec)] = ? Cid.init(CIDv1, BlockCodec, mhash).mapFailure
table[(CIDv1, hcodec, BlockCodec)] = ?Cid.init(CIDv1, BlockCodec, mhash).mapFailure
success table
proc emptyCid*(
version: CidVersion,
hcodec: MultiCodec,
dcodec: MultiCodec): ?!Cid =
proc emptyCid*(version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec): ?!Cid =
## Returns cid representing empty content,
## given cid version, hash codec and data codec
##
var
table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]
var table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]
once:
table = ? initEmptyCidTable()
table = ?initEmptyCidTable()
table[(version, hcodec, dcodec)].catch
proc emptyDigest*(
version: CidVersion,
hcodec: MultiCodec,
dcodec: MultiCodec): ?!MultiHash =
version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec
): ?!MultiHash =
## Returns hash representing empty content,
## given cid version, hash codec and data codec
##
emptyCid(version, hcodec, dcodec)
.flatMap((cid: Cid) => cid.mhash.mapFailure)
emptyCid(version, hcodec, dcodec).flatMap((cid: Cid) => cid.mhash.mapFailure)
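`emptyCid` above memoizes its lookup table in a `{.global, threadvar.}` variable that is filled under `once:`. A stripped-down sketch of that lazy, per-thread cache pattern, with string keys standing in for the codec tuples:

```nim
import std/tables

proc cachedEmpty(key: string): string =
  # per-thread cache, initialized only on first call (mirrors emptyCid above)
  var cache {.global, threadvar.}: Table[string, string]
  once:
    cache = {"sha2-256": "<empty-cid>"}.toTable
  cache.getOrDefault(key, "<unknown>")

echo cachedEmpty("sha2-256")
```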

View File

@@ -50,18 +50,17 @@ export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots
export
DefaultQuotaBytes,
DefaultBlockTtl,
DefaultBlockMaintenanceInterval,
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval,
DefaultNumberOfBlocksToMaintainPerInterval
proc defaultDataDir*(): string =
let dataDir = when defined(windows):
"AppData" / "Roaming" / "Codex"
elif defined(macosx):
"Library" / "Application Support" / "Codex"
else:
".cache" / "codex"
let dataDir =
when defined(windows):
"AppData" / "Roaming" / "Codex"
elif defined(macosx):
"Library" / "Application Support" / "Codex"
else:
".cache" / "codex"
getHomeDir() / dataDir
@@ -96,320 +95,341 @@ type
CodexConf* = object
configFile* {.
desc: "Loads the configuration from a TOML file"
defaultValueDesc: "none"
defaultValue: InputFile.none
name: "config-file" }: Option[InputFile]
desc: "Loads the configuration from a TOML file",
defaultValueDesc: "none",
defaultValue: InputFile.none,
name: "config-file"
.}: Option[InputFile]
logLevel* {.
defaultValue: "info"
desc: "Sets the log level",
name: "log-level" }: string
logLevel* {.defaultValue: "info", desc: "Sets the log level", name: "log-level".}:
string
logFormat* {.
desc: "Specifies what kind of logs should be written to stdout (auto, " &
"colors, nocolors, json)"
defaultValueDesc: "auto"
defaultValue: LogKind.Auto
name: "log-format" }: LogKind
desc:
"Specifies what kind of logs should be written to stdout (auto, " &
"colors, nocolors, json)",
defaultValueDesc: "auto",
defaultValue: LogKind.Auto,
name: "log-format"
.}: LogKind
metricsEnabled* {.
desc: "Enable the metrics server"
defaultValue: false
name: "metrics" }: bool
desc: "Enable the metrics server", defaultValue: false, name: "metrics"
.}: bool
metricsAddress* {.
desc: "Listening address of the metrics server"
defaultValue: defaultAddress(config)
defaultValueDesc: "127.0.0.1"
name: "metrics-address" }: IpAddress
desc: "Listening address of the metrics server",
defaultValue: defaultAddress(config),
defaultValueDesc: "127.0.0.1",
name: "metrics-address"
.}: IpAddress
metricsPort* {.
desc: "Listening HTTP port of the metrics server"
defaultValue: 8008
name: "metrics-port" }: Port
desc: "Listening HTTP port of the metrics server",
defaultValue: 8008,
name: "metrics-port"
.}: Port
dataDir* {.
desc: "The directory where codex will store configuration and data"
defaultValue: DefaultDataDir
defaultValueDesc: $DefaultDataDir
abbr: "d"
name: "data-dir" }: OutDir
desc: "The directory where codex will store configuration and data",
defaultValue: DefaultDataDir,
defaultValueDesc: $DefaultDataDir,
abbr: "d",
name: "data-dir"
.}: OutDir
listenAddrs* {.
desc: "Multi Addresses to listen on"
defaultValue: @[
MultiAddress.init("/ip4/0.0.0.0/tcp/0")
.expect("Should init multiaddress")]
defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
abbr: "i"
name: "listen-addrs" }: seq[MultiAddress]
desc: "Multi Addresses to listen on",
defaultValue:
@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").expect("Should init multiaddress")],
defaultValueDesc: "/ip4/0.0.0.0/tcp/0",
abbr: "i",
name: "listen-addrs"
.}: seq[MultiAddress]
nat* {.
desc: "Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>"
defaultValue: defaultNatConfig()
defaultValueDesc: "any"
name: "nat" }: NatConfig
desc:
"Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>",
defaultValue: defaultNatConfig(),
defaultValueDesc: "any",
name: "nat"
.}: NatConfig
discoveryPort* {.
desc: "Discovery (UDP) port"
defaultValue: 8090.Port
defaultValueDesc: "8090"
abbr: "u"
name: "disc-port" }: Port
desc: "Discovery (UDP) port",
defaultValue: 8090.Port,
defaultValueDesc: "8090",
abbr: "u",
name: "disc-port"
.}: Port
netPrivKeyFile* {.
desc: "Source of network (secp256k1) private key file path or name"
defaultValue: "key"
name: "net-privkey" }: string
desc: "Source of network (secp256k1) private key file path or name",
defaultValue: "key",
name: "net-privkey"
.}: string
bootstrapNodes* {.
desc: "Specifies one or more bootstrap nodes to use when " &
"connecting to the network"
abbr: "b"
name: "bootstrap-node" }: seq[SignedPeerRecord]
desc:
"Specifies one or more bootstrap nodes to use when " &
"connecting to the network",
abbr: "b",
name: "bootstrap-node"
.}: seq[SignedPeerRecord]
maxPeers* {.
desc: "The maximum number of peers to connect to"
defaultValue: 160
name: "max-peers" }: int
desc: "The maximum number of peers to connect to",
defaultValue: 160,
name: "max-peers"
.}: int
agentString* {.
defaultValue: "Codex"
desc: "Node agent string which is used as identifier in network"
name: "agent-string" }: string
defaultValue: "Codex",
desc: "Node agent string which is used as identifier in network",
name: "agent-string"
.}: string
apiBindAddress* {.
desc: "The REST API bind address"
defaultValue: "127.0.0.1"
name: "api-bindaddr"
}: string
desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr"
.}: string
apiPort* {.
desc: "The REST Api port",
defaultValue: 8080.Port
defaultValueDesc: "8080"
name: "api-port"
abbr: "p" }: Port
defaultValue: 8080.Port,
defaultValueDesc: "8080",
name: "api-port",
abbr: "p"
.}: Port
apiCorsAllowedOrigin* {.
desc: "The REST Api CORS allowed origin for downloading data. " &
desc:
"The REST Api CORS allowed origin for downloading data. " &
"'*' will allow all origins, '' will allow none.",
defaultValue: string.none
defaultValueDesc: "Disallow all cross origin requests to download data"
name: "api-cors-origin" }: Option[string]
defaultValue: string.none,
defaultValueDesc: "Disallow all cross origin requests to download data",
name: "api-cors-origin"
.}: Option[string]
repoKind* {.
desc: "Backend for main repo store (fs, sqlite, leveldb)"
defaultValueDesc: "fs"
defaultValue: repoFS
name: "repo-kind" }: RepoKind
desc: "Backend for main repo store (fs, sqlite, leveldb)",
defaultValueDesc: "fs",
defaultValue: repoFS,
name: "repo-kind"
.}: RepoKind
storageQuota* {.
desc: "The size of the total storage quota dedicated to the node"
defaultValue: DefaultQuotaBytes
defaultValueDesc: $DefaultQuotaBytes
name: "storage-quota"
abbr: "q" }: NBytes
desc: "The size of the total storage quota dedicated to the node",
defaultValue: DefaultQuotaBytes,
defaultValueDesc: $DefaultQuotaBytes,
name: "storage-quota",
abbr: "q"
.}: NBytes
blockTtl* {.
desc: "Default block timeout in seconds - 0 disables the ttl"
defaultValue: DefaultBlockTtl
defaultValueDesc: $DefaultBlockTtl
name: "block-ttl"
abbr: "t" }: Duration
desc: "Default block timeout in seconds - 0 disables the ttl",
defaultValue: DefaultBlockTtl,
defaultValueDesc: $DefaultBlockTtl,
name: "block-ttl",
abbr: "t"
.}: Duration
blockMaintenanceInterval* {.
desc: "Time interval in seconds - determines frequency of block " &
"maintenance cycle: how often blocks are checked " &
"for expiration and cleanup"
defaultValue: DefaultBlockMaintenanceInterval
defaultValueDesc: $DefaultBlockMaintenanceInterval
name: "block-mi" }: Duration
desc:
"Time interval in seconds - determines frequency of block " &
"maintenance cycle: how often blocks are checked " & "for expiration and cleanup",
defaultValue: DefaultBlockMaintenanceInterval,
defaultValueDesc: $DefaultBlockMaintenanceInterval,
name: "block-mi"
.}: Duration
blockMaintenanceNumberOfBlocks* {.
desc: "Number of blocks to check every maintenance cycle"
defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
name: "block-mn" }: int
desc: "Number of blocks to check every maintenance cycle",
defaultValue: DefaultNumberOfBlocksToMaintainPerInterval,
defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval,
name: "block-mn"
.}: int
cacheSize* {.
desc: "The size of the block cache, 0 disables the cache - " &
"might help on slow hardrives"
defaultValue: 0
defaultValueDesc: "0"
name: "cache-size"
abbr: "c" }: NBytes
desc:
"The size of the block cache, 0 disables the cache - " &
"might help on slow hardrives",
defaultValue: 0,
defaultValueDesc: "0",
name: "cache-size",
abbr: "c"
.}: NBytes
logFile* {.
desc: "Logs to file"
defaultValue: string.none
name: "log-file"
hidden
.}: Option[string]
desc: "Logs to file", defaultValue: string.none, name: "log-file", hidden
.}: Option[string]
case cmd* {.
defaultValue: noCmd
command }: StartUpCmd
case cmd* {.defaultValue: noCmd, command.}: StartUpCmd
of persistence:
ethProvider* {.
desc: "The URL of the JSON-RPC API of the Ethereum node"
defaultValue: "ws://localhost:8545"
desc: "The URL of the JSON-RPC API of the Ethereum node",
defaultValue: "ws://localhost:8545",
name: "eth-provider"
.}: string
ethAccount* {.
desc: "The Ethereum account that is used for storage contracts"
defaultValue: EthAddress.none
defaultValueDesc: ""
desc: "The Ethereum account that is used for storage contracts",
defaultValue: EthAddress.none,
defaultValueDesc: "",
name: "eth-account"
.}: Option[EthAddress]
ethPrivateKey* {.
desc: "File containing Ethereum private key for storage contracts"
defaultValue: string.none
defaultValueDesc: ""
desc: "File containing Ethereum private key for storage contracts",
defaultValue: string.none,
defaultValueDesc: "",
name: "eth-private-key"
.}: Option[string]
marketplaceAddress* {.
desc: "Address of deployed Marketplace contract"
defaultValue: EthAddress.none
defaultValueDesc: ""
desc: "Address of deployed Marketplace contract",
defaultValue: EthAddress.none,
defaultValueDesc: "",
name: "marketplace-address"
.}: Option[EthAddress]
# TODO: should go behind a feature flag
simulateProofFailures* {.
desc: "Simulates proof failures once every N proofs. 0 = disabled."
defaultValue: 0
name: "simulate-proof-failures"
hidden
.}: int
desc: "Simulates proof failures once every N proofs. 0 = disabled.",
defaultValue: 0,
name: "simulate-proof-failures",
hidden
.}: int
validator* {.
desc: "Enables validator, requires an Ethereum node"
defaultValue: false
desc: "Enables validator, requires an Ethereum node",
defaultValue: false,
name: "validator"
.}: bool
validatorMaxSlots* {.
desc: "Maximum number of slots that the validator monitors"
longDesc: "If set to 0, the validator will not limit " &
"the maximum number of slots it monitors"
defaultValue: 1000
desc: "Maximum number of slots that the validator monitors",
longDesc:
"If set to 0, the validator will not limit " &
"the maximum number of slots it monitors",
defaultValue: 1000,
name: "validator-max-slots"
.}: MaxSlots
validatorGroups* {.
desc: "Slot validation groups"
longDesc: "A number indicating total number of groups into " &
desc: "Slot validation groups",
longDesc:
"A number indicating total number of groups into " &
"which the whole slot id space will be divided. " &
"The value must be in the range [2, 65535]. " &
"If not provided, the validator will observe " &
"the whole slot id space and the value of " &
"the --validator-group-index parameter will be ignored. " &
"Powers of twos are advised for even distribution"
defaultValue: ValidationGroups.none
"Powers of twos are advised for even distribution",
defaultValue: ValidationGroups.none,
name: "validator-groups"
.}: Option[ValidationGroups]
validatorGroupIndex* {.
desc: "Slot validation group index"
longDesc: "The value provided must be in the range " &
desc: "Slot validation group index",
longDesc:
"The value provided must be in the range " &
"[0, validatorGroups). Ignored when --validator-groups " &
"is not provided. Only slot ids satisfying condition " &
"[(slotId mod validationGroups) == groupIndex] will be " &
"observed by the validator"
defaultValue: 0
"observed by the validator",
defaultValue: 0,
name: "validator-group-index"
.}: uint16
rewardRecipient* {.
desc: "Address to send payouts to (eg rewards and refunds)"
desc: "Address to send payouts to (eg rewards and refunds)",
name: "reward-recipient"
.}: Option[EthAddress]
case persistenceCmd* {.
defaultValue: noCmd
command }: PersistenceCmd
case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
of PersistenceCmd.prover:
circuitDir* {.
desc: "Directory where Codex will store proof circuit data"
defaultValue: DefaultCircuitDir
defaultValueDesc: $DefaultCircuitDir
abbr: "cd"
name: "circuit-dir" }: OutDir
desc: "Directory where Codex will store proof circuit data",
defaultValue: DefaultCircuitDir,
defaultValueDesc: $DefaultCircuitDir,
abbr: "cd",
name: "circuit-dir"
.}: OutDir
circomR1cs* {.
desc: "The r1cs file for the storage circuit"
defaultValue: $DefaultCircuitDir / "proof_main.r1cs"
defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs"
desc: "The r1cs file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.r1cs",
defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs",
name: "circom-r1cs"
.}: InputFile
circomWasm* {.
desc: "The wasm file for the storage circuit"
defaultValue: $DefaultCircuitDir / "proof_main.wasm"
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm"
desc: "The wasm file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.wasm",
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm",
name: "circom-wasm"
.}: InputFile
circomZkey* {.
desc: "The zkey file for the storage circuit"
defaultValue: $DefaultCircuitDir / "proof_main.zkey"
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey"
desc: "The zkey file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.zkey",
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey",
name: "circom-zkey"
.}: InputFile
# TODO: should probably be hidden and behind a feature flag
circomNoZkey* {.
desc: "Ignore the zkey file - use only for testing!"
defaultValue: false
desc: "Ignore the zkey file - use only for testing!",
defaultValue: false,
name: "circom-no-zkey"
.}: bool
numProofSamples* {.
desc: "Number of samples to prove"
defaultValue: DefaultSamplesNum
defaultValueDesc: $DefaultSamplesNum
name: "proof-samples" }: int
desc: "Number of samples to prove",
defaultValue: DefaultSamplesNum,
defaultValueDesc: $DefaultSamplesNum,
name: "proof-samples"
.}: int
maxSlotDepth* {.
desc: "The maximum depth of the slot tree"
defaultValue: DefaultMaxSlotDepth
defaultValueDesc: $DefaultMaxSlotDepth
name: "max-slot-depth" }: int
desc: "The maximum depth of the slot tree",
defaultValue: DefaultMaxSlotDepth,
defaultValueDesc: $DefaultMaxSlotDepth,
name: "max-slot-depth"
.}: int
maxDatasetDepth* {.
desc: "The maximum depth of the dataset tree"
defaultValue: DefaultMaxDatasetDepth
defaultValueDesc: $DefaultMaxDatasetDepth
name: "max-dataset-depth" }: int
desc: "The maximum depth of the dataset tree",
defaultValue: DefaultMaxDatasetDepth,
defaultValueDesc: $DefaultMaxDatasetDepth,
name: "max-dataset-depth"
.}: int
maxBlockDepth* {.
desc: "The maximum depth of the network block merkle tree"
defaultValue: DefaultBlockDepth
defaultValueDesc: $DefaultBlockDepth
name: "max-block-depth" }: int
desc: "The maximum depth of the network block merkle tree",
defaultValue: DefaultBlockDepth,
defaultValueDesc: $DefaultBlockDepth,
name: "max-block-depth"
.}: int
maxCellElms* {.
desc: "The maximum number of elements in a cell"
defaultValue: DefaultCellElms
defaultValueDesc: $DefaultCellElms
name: "max-cell-elements" }: int
desc: "The maximum number of elements in a cell",
defaultValue: DefaultCellElms,
defaultValueDesc: $DefaultCellElms,
name: "max-cell-elements"
.}: int
of PersistenceCmd.noCmd:
discard
of StartUpCmd.noCmd:
discard # end of persistence
EthAddress* = ethers.Address
logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
logutils.formatIt(LogFormat.json, EthAddress): %it
logutils.formatIt(LogFormat.textLines, EthAddress):
it.short0xHexLog
logutils.formatIt(LogFormat.json, EthAddress):
%it
func defaultAddress*(conf: CodexConf): IpAddress =
result = static parseIpAddress("127.0.0.1")
@@ -443,20 +463,19 @@ const
nimBanner* = getNimBanner()
codexFullVersion* =
"Codex version: " & codexVersion & "\p" &
"Codex revision: " & codexRevision & "\p" &
"Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" &
nimBanner
proc parseCmdArg*(T: typedesc[MultiAddress],
input: string): MultiAddress
{.upraises: [ValueError] .} =
proc parseCmdArg*(
T: typedesc[MultiAddress], input: string
): MultiAddress {.upraises: [ValueError].} =
var ma: MultiAddress
try:
let res = MultiAddress.init(input)
if res.isOk:
ma = res.get()
else:
warn "Invalid MultiAddress", input=input, error = res.error()
warn "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure
except LPError as exc:
warn "Invalid MultiAddress uri", uri = input, error = exc.msg
@@ -478,28 +497,28 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
res
func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} =
case p.toLowerAscii:
of "any":
NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
of "none":
NatConfig(hasExtIp: false, nat: NatStrategy.NatNone)
of "upnp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)
of "pmp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp)
else:
if p.startsWith("extip:"):
try:
let ip = parseIpAddress(p[6..^1])
NatConfig(hasExtIp: true, extIp: ip)
except ValueError:
let error = "Not a valid IP address: " & p[6..^1]
raise newException(ValueError, error)
else:
let error = "Not a valid NAT option: " & p
case p.toLowerAscii
of "any":
NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
of "none":
NatConfig(hasExtIp: false, nat: NatStrategy.NatNone)
of "upnp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)
of "pmp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp)
else:
if p.startsWith("extip:"):
try:
let ip = parseIpAddress(p[6 ..^ 1])
NatConfig(hasExtIp: true, extIp: ip)
except ValueError:
let error = "Not a valid IP address: " & p[6 ..^ 1]
raise newException(ValueError, error)
else:
let error = "Not a valid NAT option: " & p
raise newException(ValueError, error)
proc completeCmdArg*(T: type NatConfig; val: string): seq[string] =
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
return @[]
proc parseCmdArg*(T: type EthAddress, address: string): T =
@@ -509,20 +528,21 @@ proc parseCmdArg*(T: type NBytes, val: string): T =
var num = 0'i64
let count = parseSize(val, num, alwaysBin = true)
if count == 0:
warn "Invalid number of bytes", nbytes = val
quit QuitFailure
warn "Invalid number of bytes", nbytes = val
quit QuitFailure
NBytes(num)
proc parseCmdArg*(T: type Duration, val: string): T =
var dur: Duration
let count = parseDuration(val, dur)
if count == 0:
warn "Cannot parse duration", dur = dur
quit QuitFailure
warn "Cannot parse duration", dur = dur
quit QuitFailure
dur
proc readValue*(r: var TomlReader, val: var EthAddress)
{.upraises: [SerializationError, IOError].} =
proc readValue*(
r: var TomlReader, val: var EthAddress
) {.upraises: [SerializationError, IOError].} =
val = EthAddress.init(r.readValue(string)).get()
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
@@ -545,11 +565,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) =
if res.isOk:
val = res.get()
else:
warn "Invalid MultiAddress", input=input, error=res.error()
warn "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure
proc readValue*(r: var TomlReader, val: var NBytes)
{.upraises: [SerializationError, IOError].} =
proc readValue*(
r: var TomlReader, val: var NBytes
) {.upraises: [SerializationError, IOError].} =
var value = 0'i64
var str = r.readValue(string)
let count = parseSize(str, value, alwaysBin = true)
@@ -558,8 +579,9 @@ proc readValue*(r: var TomlReader, val: var NBytes)
quit QuitFailure
val = NBytes(value)
proc readValue*(r: var TomlReader, val: var Duration)
{.upraises: [SerializationError, IOError].} =
proc readValue*(
r: var TomlReader, val: var Duration
) {.upraises: [SerializationError, IOError].} =
var str = r.readValue(string)
var dur: Duration
let count = parseDuration(str, dur)
@@ -568,20 +590,23 @@ proc readValue*(r: var TomlReader, val: var Duration)
quit QuitFailure
val = dur
proc readValue*(r: var TomlReader, val: var NatConfig)
{.raises: [SerializationError].} =
val = try: parseCmdArg(NatConfig, r.readValue(string))
except CatchableError as err:
raise newException(SerializationError, err.msg)
proc readValue*(
r: var TomlReader, val: var NatConfig
) {.raises: [SerializationError].} =
val =
try:
parseCmdArg(NatConfig, r.readValue(string))
except CatchableError as err:
raise newException(SerializationError, err.msg)
# no idea why confutils needs this:
proc completeCmdArg*(T: type EthAddress; val: string): seq[string] =
proc completeCmdArg*(T: type EthAddress, val: string): seq[string] =
discard
proc completeCmdArg*(T: type NBytes; val: string): seq[string] =
proc completeCmdArg*(T: type NBytes, val: string): seq[string] =
discard
proc completeCmdArg*(T: type Duration; val: string): seq[string] =
proc completeCmdArg*(T: type Duration, val: string): seq[string] =
discard
# silly chronicles, colors is a compile-time property
@@ -603,7 +628,7 @@ proc stripAnsi*(v: string): string =
if c2 != '[':
break
else:
if c2 in {'0'..'9'} + {';'}:
if c2 in {'0' .. '9'} + {';'}:
discard # keep looking
elif c2 == 'm':
i = x + 1
@@ -627,12 +652,12 @@ proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
setLogLevel(parseEnum[LogLevel](directives[0].toUpperAscii))
except ValueError:
raise (ref ValueError)(
msg: "Please specify one of: trace, debug, " &
"info, notice, warn, error or fatal"
msg:
"Please specify one of: trace, debug, " & "info, notice, warn, error or fatal"
)
if directives.len > 1:
for topicName, settings in parseTopicDirectives(directives[1..^1]):
for topicName, settings in parseTopicDirectives(directives[1 ..^ 1]):
if not setTopicState(topicName, settings.state, settings.logLevel):
warn "Unrecognized logging topic", topic = topicName
@@ -641,7 +666,9 @@ proc setupLogging*(conf: CodexConf) =
warn "Logging configuration options not enabled in the current build"
else:
var logFile: ?IoHandle
proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard
proc noOutput(logLevel: LogLevel, msg: LogOutputStr) =
discard
proc writeAndFlush(f: File, msg: LogOutputStr) =
try:
f.write(msg)
@@ -662,14 +689,11 @@ proc setupLogging*(conf: CodexConf) =
defaultChroniclesStream.outputs[2].writer = noOutput
if logFilePath =? conf.logFile and logFilePath.len > 0:
let logFileHandle = openFile(
logFilePath,
{OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}
)
let logFileHandle =
openFile(logFilePath, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate})
if logFileHandle.isErr:
error "failed to open log file",
path = logFilePath,
errorCode = $logFileHandle.error
path = logFilePath, errorCode = $logFileHandle.error
else:
logFile = logFileHandle.option
defaultChroniclesStream.outputs[2].writer = fileFlush
@@ -677,14 +701,13 @@ proc setupLogging*(conf: CodexConf) =
defaultChroniclesStream.outputs[1].writer = noOutput
let writer =
case conf.logFormat:
case conf.logFormat
of LogKind.Auto:
if isatty(stdout):
stdoutFlush
else:
noColorsFlush
of LogKind.Colors: stdoutFlush
of LogKind.NoColors: noColorsFlush
if isatty(stdout): stdoutFlush else: noColorsFlush
of LogKind.Colors:
stdoutFlush
of LogKind.NoColors:
noColorsFlush
of LogKind.Json:
defaultChroniclesStream.outputs[1].writer = stdoutFlush
noOutput
@@ -695,8 +718,9 @@ proc setupLogging*(conf: CodexConf) =
var counter = 0.uint64
proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
inc(counter)
let withoutNewLine = msg[0..^2]
let withoutNewLine = msg[0 ..^ 2]
writer(logLevel, withoutNewLine & " count=" & $counter & "\n")
defaultChroniclesStream.outputs[0].writer = numberedWriter
else:
defaultChroniclesStream.outputs[0].writer = writer
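For the confutils-style option definitions in this file, nph settles on comma-separated pragma fields: short options stay on a single line, while longer ones break after `{.` and close with `.}` on its own line before the field type. A sketch with an invented two-option config, assuming the confutils package used by conf.nim:

```nim
import pkg/confutils

type DemoConf* = object
  # short enough to stay on one line
  logLevel* {.defaultValue: "info", desc: "Sets the log level", name: "log-level".}:
    string

  # longer option: fields one per line, `.}` closes on its own line
  dataDir* {.
    desc: "The directory where demo data is stored",
    defaultValue: "/tmp/demo",
    name: "data-dir"
  .}: string
```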

View File

@@ -11,14 +11,13 @@ export clock
logScope:
topics = "contracts clock"
type
OnChainClock* = ref object of Clock
provider: Provider
subscription: Subscription
offset: times.Duration
blockNumber: UInt256
started: bool
newBlock: AsyncEvent
type OnChainClock* = ref object of Clock
provider: Provider
subscription: Subscription
offset: times.Duration
blockNumber: UInt256
started: bool
newBlock: AsyncEvent
proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
OnChainClock(provider: provider, newBlock: newAsyncEvent())
@@ -29,7 +28,8 @@ proc update(clock: OnChainClock, blck: Block) =
let computerTime = getTime()
clock.offset = blockTime - computerTime
clock.blockNumber = number
trace "updated clock", blockTime=blck.timestamp, blockNumber=number, offset=clock.offset
trace "updated clock",
blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
clock.newBlock.fire()
proc update(clock: OnChainClock) {.async.} =
@@ -39,7 +39,7 @@ proc update(clock: OnChainClock) {.async.} =
except CancelledError as error:
raise error
except CatchableError as error:
debug "error updating clock: ", error=error.msg
debug "error updating clock: ", error = error.msg
discard
method start*(clock: OnChainClock) {.async.} =
@@ -48,7 +48,7 @@ method start*(clock: OnChainClock) {.async.} =
proc onBlock(blckResult: ?!Block) =
if eventError =? blckResult.errorOption:
error "There was an error in block subscription", msg=eventError.msg
error "There was an error in block subscription", msg = eventError.msg
return
# ignore block parameter; hardhat may call this with pending blocks

View File

@@ -8,11 +8,14 @@
MarketplaceConfig* = object
collateral*: CollateralConfig
proofs*: ProofConfig
CollateralConfig* = object
repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed
repairRewardPercentage*: uint8
# percentage of remaining collateral slot has after it has been freed
maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
slashCriterion*: uint16 # amount of proofs missed that lead to slashing
slashPercentage*: uint8 # percentage of the collateral that is slashed
ProofConfig* = object
period*: UInt256 # proofs requirements are calculated per period (in seconds)
timeout*: UInt256 # mark proofs as missing before the timeout (in seconds)
@@ -23,14 +26,13 @@ type
# blocks. Should be a prime number to ensure there are no cycles.
downtimeProduct*: uint8
func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
ProofConfig(
period: tupl[0],
timeout: tupl[1],
downtime: tupl[2],
zkeyHash: tupl[3],
downtimeProduct: tupl[4]
downtimeProduct: tupl[4],
)
func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
@@ -38,14 +40,11 @@ func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
repairRewardPercentage: tupl[0],
maxNumberOfSlashes: tupl[1],
slashCriterion: tupl[2],
slashPercentage: tupl[3]
slashPercentage: tupl[3],
)
func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
MarketplaceConfig(
collateral: tupl[0],
proofs: tupl[1]
)
MarketplaceConfig(collateral: tupl[0], proofs: tupl[1])
func solidityType*(_: type ProofConfig): string =
solidityType(ProofConfig.fieldTypes)
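The `fromTuple` helpers decode a positional Solidity return tuple into a typed Nim object. A self-contained sketch of the same idea with simplified stand-in fields:

```nim
type DemoProofConfig = object
  period: int
  timeout: int
  downtime: uint8

# `tupl: tuple` makes this implicitly generic over any tuple shape;
# fields are picked out by position, as in the contract config above
func fromTuple(_: type DemoProofConfig, tupl: tuple): DemoProofConfig =
  DemoProofConfig(
    period: tupl[0],
    timeout: tupl[1],
    downtime: tupl[2],
  )

let cfg = DemoProofConfig.fromTuple((10, 30, 64'u8))
```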

View File

@@ -12,23 +12,20 @@ type Deployment* = ref object
config: CodexConf
const knownAddresses = {
# Hardhat localhost network
"31337": {
"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44"),
}.toTable,
# Taiko Alpha-3 Testnet
"167005": {
"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")
}.toTable,
# Codex Testnet - Nov 25 2024 18:41:29 PM (+00:00 UTC)
"789987": {
"Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648")
}.toTable
# Hardhat localhost network
"31337":
{"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44")}.toTable,
# Taiko Alpha-3 Testnet
"167005":
{"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
# Codex Testnet - Nov 25 2024 18:41:29 PM (+00:00 UTC)
"789987":
{"Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648")}.toTable,
}.toTable
proc getKnownAddress(T: type, chainId: UInt256): ?Address =
let id = chainId.toString(10)
notice "Looking for well-known contract address with ChainID ", chainId=id
notice "Looking for well-known contract address with ChainID ", chainId = id
if not (id in knownAddresses):
return none Address
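`knownAddresses` is a two-level table, keyed first by chain id and then by contract name, and nph now indents each chain's inner `{...}.toTable` entry under its key. A runnable sketch of the lookup with placeholder addresses:

```nim
import std/[tables, options]

let knownAddresses = {
  "31337": {"Marketplace": "0x3228...6c44"}.toTable,
  "789987": {"Marketplace": "0xAB03...7648"}.toTable,
}.toTable

proc getKnownAddress(chainId: string): Option[string] =
  if chainId notin knownAddresses:
    return none(string)
  some knownAddresses[chainId].getOrDefault("Marketplace")
```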

View File

@@ -9,13 +9,12 @@ import ./interactions
export purchasing
export logutils
type
ClientInteractions* = ref object of ContractInteractions
purchasing*: Purchasing
type ClientInteractions* = ref object of ContractInteractions
purchasing*: Purchasing
proc new*(_: type ClientInteractions,
clock: OnChainClock,
purchasing: Purchasing): ClientInteractions =
proc new*(
_: type ClientInteractions, clock: OnChainClock, purchasing: Purchasing
): ClientInteractions =
ClientInteractions(clock: clock, purchasing: purchasing)
proc start*(self: ClientInteractions) {.async.} =

View File

@@ -7,15 +7,10 @@ import ./interactions
export sales
export logutils
type
HostInteractions* = ref object of ContractInteractions
sales*: Sales
type HostInteractions* = ref object of ContractInteractions
sales*: Sales
proc new*(
_: type HostInteractions,
clock: Clock,
sales: Sales
): HostInteractions =
proc new*(_: type HostInteractions, clock: Clock, sales: Sales): HostInteractions =
## Create a new HostInteractions instance
##
HostInteractions(clock: clock, sales: sales)

View File

@@ -5,9 +5,8 @@ import ../market
export clock
type
ContractInteractions* = ref object of RootObj
clock*: Clock
type ContractInteractions* = ref object of RootObj
clock*: Clock
method start*(self: ContractInteractions) {.async, base.} =
discard

View File

@@ -3,13 +3,12 @@ import ../../validation
export validation
type
ValidatorInteractions* = ref object of ContractInteractions
validation: Validation
type ValidatorInteractions* = ref object of ContractInteractions
validation: Validation
proc new*(_: type ValidatorInteractions,
clock: OnChainClock,
validation: Validation): ValidatorInteractions =
proc new*(
_: type ValidatorInteractions, clock: OnChainClock, validation: Validation
): ValidatorInteractions =
ValidatorInteractions(clock: clock, validation: validation)
proc start*(self: ValidatorInteractions) {.async.} =

View File

@@ -27,18 +27,12 @@ type
eventSubscription: EventSubscription
func new*(
_: type OnChainMarket,
contract: Marketplace,
rewardRecipient = Address.none): OnChainMarket =
_: type OnChainMarket, contract: Marketplace, rewardRecipient = Address.none
): OnChainMarket =
without signer =? contract.signer:
raiseAssert("Marketplace contract should have a signer")
OnChainMarket(
contract: contract,
signer: signer,
rewardRecipient: rewardRecipient
)
OnChainMarket(contract: contract, signer: signer, rewardRecipient: rewardRecipient)
proc raiseMarketError(message: string) {.raises: [MarketError].} =
raise newException(MarketError, message)
@@ -105,18 +99,19 @@ method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
convertEthersError:
let slots = await market.contract.mySlots()
debug "Fetched my slots", numSlots=len(slots)
debug "Fetched my slots", numSlots = len(slots)
return slots
method requestStorage(market: OnChainMarket, request: StorageRequest){.async.} =
method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} =
convertEthersError:
debug "Requesting storage"
await market.approveFunds(request.price())
discard await market.contract.requestStorage(request).confirm(1)
method getRequest*(market: OnChainMarket,
id: RequestId): Future[?StorageRequest] {.async.} =
method getRequest*(
market: OnChainMarket, id: RequestId
): Future[?StorageRequest] {.async.} =
convertEthersError:
try:
return some await market.contract.getRequest(id)
@@ -125,8 +120,9 @@ method getRequest*(market: OnChainMarket,
return none StorageRequest
raise e
method requestState*(market: OnChainMarket,
requestId: RequestId): Future[?RequestState] {.async.} =
method requestState*(
market: OnChainMarket, requestId: RequestId
): Future[?RequestState] {.async.} =
convertEthersError:
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
@@ -136,25 +132,26 @@ method requestState*(market: OnChainMarket,
return none RequestState
raise e
method slotState*(market: OnChainMarket,
slotId: SlotId): Future[SlotState] {.async.} =
method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} =
convertEthersError:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.slotState(slotId, overrides)
method getRequestEnd*(market: OnChainMarket,
id: RequestId): Future[SecondsSince1970] {.async.} =
method getRequestEnd*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError:
return await market.contract.requestEnd(id)
method requestExpiresAt*(market: OnChainMarket,
id: RequestId): Future[SecondsSince1970] {.async.} =
method requestExpiresAt*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError:
return await market.contract.requestExpiry(id)
method getHost(market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256): Future[?Address] {.async.} =
method getHost(
market: OnChainMarket, requestId: RequestId, slotIndex: UInt256
): Future[?Address] {.async.} =
convertEthersError:
let slotId = slotId(requestId, slotIndex)
let address = await market.contract.getHost(slotId)
@@ -163,8 +160,7 @@ method getHost(market: OnChainMarket,
else:
return none Address
method getActiveSlot*(market: OnChainMarket,
slotId: SlotId): Future[?Slot] {.async.} =
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
convertEthersError:
try:
return some await market.contract.getActiveSlot(slotId)
@@ -173,11 +169,13 @@ method getActiveSlot*(market: OnChainMarket,
return none Slot
raise e
method fillSlot(market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256,
proof: Groth16Proof,
collateral: UInt256) {.async.} =
method fillSlot(
market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256,
proof: Groth16Proof,
collateral: UInt256,
) {.async.} =
convertEthersError:
logScope:
requestId
@@ -197,9 +195,9 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
let collateralRecipient = await market.getSigner()
freeSlot = market.contract.freeSlot(
slotId,
rewardRecipient, # --reward-recipient
collateralRecipient) # SP's address
rewardRecipient, # --reward-recipient
collateralRecipient,
) # SP's address
else:
# Otherwise, use the SP's address as both the reward and collateral
# recipient (the contract will use msg.sender for both)
@@ -207,14 +205,11 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
discard await freeSlot.confirm(1)
method withdrawFunds(market: OnChainMarket,
requestId: RequestId) {.async.} =
method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} =
convertEthersError:
discard await market.contract.withdrawFunds(requestId).confirm(1)
method isProofRequired*(market: OnChainMarket,
id: SlotId): Future[bool] {.async.} =
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError:
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
@@ -224,8 +219,7 @@ method isProofRequired*(market: OnChainMarket,
return false
raise e
method willProofBeRequired*(market: OnChainMarket,
id: SlotId): Future[bool] {.async.} =
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError:
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
@@ -235,27 +229,25 @@ method willProofBeRequired*(market: OnChainMarket,
return false
raise e
method getChallenge*(market: OnChainMarket, id: SlotId): Future[ProofChallenge] {.async.} =
method getChallenge*(
market: OnChainMarket, id: SlotId
): Future[ProofChallenge] {.async.} =
convertEthersError:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getChallenge(id, overrides)
method submitProof*(market: OnChainMarket,
id: SlotId,
proof: Groth16Proof) {.async.} =
method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} =
convertEthersError:
discard await market.contract.submitProof(id, proof).confirm(1)
method markProofAsMissing*(market: OnChainMarket,
id: SlotId,
period: Period) {.async.} =
method markProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
) {.async.} =
convertEthersError:
discard await market.contract.markProofAsMissing(id, period).confirm(1)
method canProofBeMarkedAsMissing*(
market: OnChainMarket,
id: SlotId,
period: Period
market: OnChainMarket, id: SlotId, period: Period
): Future[bool] {.async.} =
let provider = market.contract.provider
let contractWithoutSigner = market.contract.connect(provider)
@@ -268,46 +260,42 @@ method canProofBeMarkedAsMissing*(
return false
method reserveSlot*(
market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256) {.async.} =
market: OnChainMarket, requestId: RequestId, slotIndex: UInt256
) {.async.} =
convertEthersError:
discard await market.contract.reserveSlot(
discard await market.contract
.reserveSlot(
requestId,
slotIndex,
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it
TransactionOverrides(gasLimit: some 100000.u256)
).confirm(1)
TransactionOverrides(gasLimit: some 100000.u256),
)
.confirm(1)
method canReserveSlot*(
market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256): Future[bool] {.async.} =
market: OnChainMarket, requestId: RequestId, slotIndex: UInt256
): Future[bool] {.async.} =
convertEthersError:
return await market.contract.canReserveSlot(requestId, slotIndex)
method subscribeRequests*(market: OnChainMarket,
callback: OnRequest):
Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!StorageRequested) {.upraises:[].} =
method subscribeRequests*(
market: OnChainMarket, callback: OnRequest
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in Request subscription", msg = eventErr.msg
return
callback(event.requestId,
event.ask,
event.expiry)
callback(event.requestId, event.ask, event.expiry)
convertEthersError:
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(market: OnChainMarket,
callback: OnSlotFilled):
Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFilled) {.upraises:[].} =
method subscribeSlotFilled*(
market: OnChainMarket, callback: OnSlotFilled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFilled subscription", msg = eventErr.msg
return
@@ -318,11 +306,12 @@ method subscribeSlotFilled*(market: OnChainMarket,
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256,
callback: OnSlotFilled):
Future[MarketSubscription] {.async.} =
method subscribeSlotFilled*(
market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256,
callback: OnSlotFilled,
): Future[MarketSubscription] {.async.} =
proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) =
if eventRequestId == requestId and eventSlotIndex == slotIndex:
callback(requestId, slotIndex)
@@ -330,10 +319,10 @@ method subscribeSlotFilled*(market: OnChainMarket,
convertEthersError:
return await market.subscribeSlotFilled(onSlotFilled)
method subscribeSlotFreed*(market: OnChainMarket,
callback: OnSlotFreed):
Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFreed) {.upraises:[].} =
method subscribeSlotFreed*(
market: OnChainMarket, callback: OnSlotFreed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFreed subscription", msg = eventErr.msg
return
@@ -345,12 +334,12 @@ method subscribeSlotFreed*(market: OnChainMarket,
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotReservationsFull*(
market: OnChainMarket,
callback: OnSlotReservationsFull): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises:[].} =
market: OnChainMarket, callback: OnSlotReservationsFull
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotReservationsFull subscription", msg = eventErr.msg
error "There was an error in SlotReservationsFull subscription",
msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
@@ -359,10 +348,10 @@ method subscribeSlotReservationsFull*(
let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(market: OnChainMarket,
callback: OnFulfillment):
Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises:[].} =
method subscribeFulfillment(
market: OnChainMarket, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
@@ -373,11 +362,10 @@ method subscribeFulfillment(market: OnChainMarket,
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(market: OnChainMarket,
requestId: RequestId,
callback: OnFulfillment):
Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises:[].} =
method subscribeFulfillment(
market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
@@ -389,10 +377,10 @@ method subscribeFulfillment(market: OnChainMarket,
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(market: OnChainMarket,
callback: OnRequestCancelled):
Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises:[].} =
method subscribeRequestCancelled*(
market: OnChainMarket, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
@@ -403,11 +391,10 @@ method subscribeRequestCancelled*(market: OnChainMarket,
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(market: OnChainMarket,
requestId: RequestId,
callback: OnRequestCancelled):
Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises:[].} =
method subscribeRequestCancelled*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
@@ -419,10 +406,10 @@ method subscribeRequestCancelled*(market: OnChainMarket,
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(market: OnChainMarket,
callback: OnRequestFailed):
Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises:[]} =
method subscribeRequestFailed*(
market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
@ -433,11 +420,10 @@ method subscribeRequestFailed*(market: OnChainMarket,
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(market: OnChainMarket,
requestId: RequestId,
callback: OnRequestFailed):
Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises:[]} =
method subscribeRequestFailed*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
@ -449,9 +435,9 @@ method subscribeRequestFailed*(market: OnChainMarket,
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeProofSubmission*(market: OnChainMarket,
callback: OnProofSubmitted):
Future[MarketSubscription] {.async.} =
method subscribeProofSubmission*(
market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} =
without event =? eventResult, eventErr:
error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
@ -467,48 +453,37 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
await subscription.eventSubscription.unsubscribe()
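
All of these subscription methods share one shape: wrap the typed contract event in an `onEvent` adapter, register it, and hand back an `OnChainMarketSubscription` that owns the log filter until `unsubscribe` is called. A minimal usage sketch, not part of this diff, assuming `OnFulfillment` is a callback receiving the request id:

```nim
# Hypothetical wiring; assumes a connected `market: OnChainMarket`.
proc watchFulfillment(market: OnChainMarket) {.async.} =
  proc onFulfilled(requestId: RequestId) {.upraises: [].} =
    echo "request fulfilled: ", requestId.toHex

  let subscription = await market.subscribeFulfillment(onFulfilled)
  # ... later, tear the log filter down again:
  await subscription.unsubscribe()
```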
method queryPastSlotFilledEvents*(
market: OnChainMarket,
fromBlock: BlockTag): Future[seq[SlotFilled]] {.async.} =
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
return await market.contract.queryFilter(SlotFilled,
fromBlock,
BlockTag.latest)
return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)
method queryPastSlotFilledEvents*(
market: OnChainMarket,
blocksAgo: int): Future[seq[SlotFilled]] {.async.} =
market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
let fromBlock =
await market.contract.provider.pastBlockTag(blocksAgo)
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastSlotFilledEvents(fromBlock)
method queryPastSlotFilledEvents*(
market: OnChainMarket,
fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.async.} =
market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
let fromBlock =
await market.contract.provider.blockNumberForEpoch(fromTime)
let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
method queryPastStorageRequestedEvents*(
market: OnChainMarket,
fromBlock: BlockTag): Future[seq[StorageRequested]] {.async.} =
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} =
convertEthersError:
return await market.contract.queryFilter(StorageRequested,
fromBlock,
BlockTag.latest)
return
await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)
method queryPastStorageRequestedEvents*(
market: OnChainMarket,
blocksAgo: int): Future[seq[StorageRequested]] {.async.} =
market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} =
convertEthersError:
let fromBlock =
await market.contract.provider.pastBlockTag(blocksAgo)
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastStorageRequestedEvents(fromBlock)
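
The three `queryPastSlotFilledEvents` overloads take a block tag, a relative block count, or a wall-clock timestamp, and all resolve to the same underlying `queryFilter` call. A hedged sketch of the equivalent entry points (all values are placeholders):

```nim
import std/times # getTime/toUnix

proc recentFills(market: OnChainMarket): Future[seq[SlotFilled]] {.async.} =
  discard await market.queryPastSlotFilledEvents(BlockTag.earliest) # by tag
  discard await market.queryPastSlotFilledEvents(blocksAgo = 1000) # by offset
  # by timestamp: everything from the last hour
  return await market.queryPastSlotFilledEvents(fromTime = getTime().toUnix - 3600)
```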

View File

@ -14,8 +14,7 @@ export erc20 except `%`, `%*`, toJson
export config
export requests
type
Marketplace* = ref object of Contract
type Marketplace* = ref object of Contract
proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.}
@ -23,22 +22,54 @@ proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.}
proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.}
proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.}
proc requestStorage*(marketplace: Marketplace, request: StorageRequest): Confirmable {.contract.}
proc fillSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256, proof: Groth16Proof): Confirmable {.contract.}
proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId): Confirmable {.contract.}
proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address): Confirmable {.contract.}
proc requestStorage*(
marketplace: Marketplace, request: StorageRequest
): Confirmable {.contract.}
proc fillSlot*(
marketplace: Marketplace,
requestId: RequestId,
slotIndex: UInt256,
proof: Groth16Proof,
): Confirmable {.contract.}
proc withdrawFunds*(
marketplace: Marketplace, requestId: RequestId
): Confirmable {.contract.}
proc withdrawFunds*(
marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address
): Confirmable {.contract.}
proc freeSlot*(marketplace: Marketplace, id: SlotId): Confirmable {.contract.}
proc freeSlot*(marketplace: Marketplace, id: SlotId, rewardRecipient: Address, collateralRecipient: Address): Confirmable {.contract.}
proc getRequest*(marketplace: Marketplace, id: RequestId): StorageRequest {.contract, view.}
proc freeSlot*(
marketplace: Marketplace,
id: SlotId,
rewardRecipient: Address,
collateralRecipient: Address,
): Confirmable {.contract.}
proc getRequest*(
marketplace: Marketplace, id: RequestId
): StorageRequest {.contract, view.}
proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
proc getActiveSlot*(marketplace: Marketplace, id: SlotId): Slot {.contract, view.}
proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
proc requestState*(marketplace: Marketplace, requestId: RequestId): RequestState {.contract, view.}
proc requestState*(
marketplace: Marketplace, requestId: RequestId
): RequestState {.contract, view.}
proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
proc requestEnd*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
proc requestExpiry*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
proc requestEnd*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}
proc requestExpiry*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}
proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.}
@ -46,11 +77,24 @@ proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc getChallenge*(marketplace: Marketplace, id: SlotId): array[32, byte] {.contract, view.}
proc getChallenge*(
marketplace: Marketplace, id: SlotId
): array[32, byte] {.contract, view.}
proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}
proc submitProof*(marketplace: Marketplace, id: SlotId, proof: Groth16Proof): Confirmable {.contract.}
proc markProofAsMissing*(marketplace: Marketplace, id: SlotId, period: UInt256): Confirmable {.contract.}
proc submitProof*(
marketplace: Marketplace, id: SlotId, proof: Groth16Proof
): Confirmable {.contract.}
proc reserveSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256): Confirmable {.contract.}
proc canReserveSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256): bool {.contract, view.}
proc markProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: UInt256
): Confirmable {.contract.}
proc reserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256
): Confirmable {.contract.}
proc canReserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256
): bool {.contract, view.}
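
These `{.contract.}` declarations are the whole on-chain surface the client touches; nim-ethers synthesizes the ABI encoding behind each one, so a bound instance is called like a plain object. A sketch under the assumption that `Marketplace.new(address, provider)` binds the deployed contract (all arguments are placeholders):

```nim
proc checkSlot(
    provider: Provider, address: Address, requestId: RequestId, slotIndex: UInt256
) {.async.} =
  let marketplace = Marketplace.new(address, provider)
  discard await marketplace.configuration() # view call, no gas spent
  if await marketplace.canReserveSlot(requestId, slotIndex):
    discard await marketplace.reserveSlot(requestId, slotIndex) # sends a tx
```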

View File

@ -7,13 +7,16 @@ type
a*: G1Point
b*: G2Point
c*: G1Point
G1Point* = object
x*: UInt256
y*: UInt256
# A field element F_{p^2} encoded as `real + i * imag`
Fp2Element* = object
real*: UInt256
imag*: UInt256
G2Point* = object
x*: Fp2Element
y*: Fp2Element

View File

@ -12,8 +12,9 @@ logScope:
proc raiseProviderError(message: string) {.raises: [ProviderError].} =
raise newException(ProviderError, message)
proc blockNumberAndTimestamp*(provider: Provider, blockTag: BlockTag):
Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} =
proc blockNumberAndTimestamp*(
provider: Provider, blockTag: BlockTag
): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} =
without latestBlock =? await provider.getBlock(blockTag):
raiseProviderError("Could not get latest block")
@ -23,14 +24,10 @@ proc blockNumberAndTimestamp*(provider: Provider, blockTag: BlockTag):
return (latestBlockNumber, latestBlock.timestamp)
proc binarySearchFindClosestBlock(
provider: Provider,
epochTime: int,
low: UInt256,
high: UInt256): Future[UInt256] {.async: (raises: [ProviderError]).} =
let (_, lowTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.init(low))
let (_, highTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.init(high))
provider: Provider, epochTime: int, low: UInt256, high: UInt256
): Future[UInt256] {.async: (raises: [ProviderError]).} =
let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low))
let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high))
if abs(lowTimestamp.truncate(int) - epochTime) <
abs(highTimestamp.truncate(int) - epochTime):
return low
@ -41,8 +38,8 @@ proc binarySearchBlockNumberForEpoch(
provider: Provider,
epochTime: UInt256,
latestBlockNumber: UInt256,
earliestBlockNumber: UInt256): Future[UInt256]
{.async: (raises: [ProviderError]).} =
earliestBlockNumber: UInt256,
): Future[UInt256] {.async: (raises: [ProviderError]).} =
var low = earliestBlockNumber
var high = latestBlockNumber
@ -52,7 +49,7 @@ proc binarySearchBlockNumberForEpoch(
let mid = (low + high) div 2
let (midBlockNumber, midBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.init(mid))
if midBlockTimestamp < epochTime:
low = mid + 1
elif midBlockTimestamp > epochTime:
@ -63,16 +60,16 @@ proc binarySearchBlockNumberForEpoch(
# low is always greater than high - this is why we use high, where
# intuitively we would use low:
await provider.binarySearchFindClosestBlock(
epochTime.truncate(int), low=high, high=low)
epochTime.truncate(int), low = high, high = low
)
proc blockNumberForEpoch*(
provider: Provider,
epochTime: SecondsSince1970): Future[UInt256]
{.async: (raises: [ProviderError]).} =
provider: Provider, epochTime: SecondsSince1970
): Future[UInt256] {.async: (raises: [ProviderError]).} =
let epochTimeUInt256 = epochTime.u256
let (latestBlockNumber, latestBlockTimestamp) =
let (latestBlockNumber, latestBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.latest)
let (earliestBlockNumber, earliestBlockTimestamp) =
let (earliestBlockNumber, earliestBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.earliest)
# Initially we used the average block time to predict
@ -109,18 +106,18 @@ proc blockNumberForEpoch*(
return latestBlockNumber
if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
let availableHistoryInDays =
(latestBlockTimestamp - earliestBlockTimestamp) div
1.days.secs.u256
warn "Short block history detected.", earliestBlockTimestamp =
earliestBlockTimestamp, days = availableHistoryInDays
let availableHistoryInDays =
(latestBlockTimestamp - earliestBlockTimestamp) div 1.days.secs.u256
warn "Short block history detected.",
earliestBlockTimestamp = earliestBlockTimestamp, days = availableHistoryInDays
return earliestBlockNumber
return await provider.binarySearchBlockNumberForEpoch(
epochTimeUInt256, latestBlockNumber, earliestBlockNumber)
epochTimeUInt256, latestBlockNumber, earliestBlockNumber
)
proc pastBlockTag*(provider: Provider,
blocksAgo: int):
Future[BlockTag] {.async: (raises: [ProviderError]).} =
proc pastBlockTag*(
provider: Provider, blocksAgo: int
): Future[BlockTag] {.async: (raises: [ProviderError]).} =
let head = await provider.getBlockNumber()
return BlockTag.init(head - blocksAgo.abs.u256)
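
Together these helpers turn "events since some point in time" into a concrete block tag: `blockNumberForEpoch` binary-searches block timestamps, while `pastBlockTag` simply offsets the chain head. A sketch of the intended use, assuming the same time helpers this module already relies on:

```nim
import std/times # getTime/toUnix

proc fromBlockForLastDay(provider: Provider): Future[BlockTag] {.async.} =
  # Find the block mined closest to 24 hours ago.
  let cutoff = getTime().toUnix - 1.days.secs
  let blockNumber = await provider.blockNumberForEpoch(cutoff)
  return BlockTag.init(blockNumber)
```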

View File

@ -18,6 +18,7 @@ type
content* {.serialize.}: StorageContent
expiry* {.serialize.}: UInt256
nonce*: Nonce
StorageAsk* = object
slots* {.serialize.}: uint64
slotSize* {.serialize.}: UInt256
@ -26,12 +27,15 @@ type
reward* {.serialize.}: UInt256
collateral* {.serialize.}: UInt256
maxSlotLoss* {.serialize.}: uint64
StorageContent* = object
cid* {.serialize.}: string
merkleRoot*: array[32, byte]
Slot* = object
request* {.serialize.}: StorageRequest
slotIndex* {.serialize.}: UInt256
SlotId* = distinct array[32, byte]
RequestId* = distinct array[32, byte]
Nonce* = distinct array[32, byte]
@ -41,6 +45,7 @@ type
Cancelled
Finished
Failed
SlotState* {.pure.} = enum
Free
Filled
@ -80,27 +85,26 @@ proc toHex*[T: distinct](id: T): string =
type baseType = T.distinctBase
baseType(id).toHex
logutils.formatIt(LogFormat.textLines, Nonce): it.short0xHexLog
logutils.formatIt(LogFormat.textLines, RequestId): it.short0xHexLog
logutils.formatIt(LogFormat.textLines, SlotId): it.short0xHexLog
logutils.formatIt(LogFormat.json, Nonce): it.to0xHexLog
logutils.formatIt(LogFormat.json, RequestId): it.to0xHexLog
logutils.formatIt(LogFormat.json, SlotId): it.to0xHexLog
logutils.formatIt(LogFormat.textLines, Nonce):
it.short0xHexLog
logutils.formatIt(LogFormat.textLines, RequestId):
it.short0xHexLog
logutils.formatIt(LogFormat.textLines, SlotId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, Nonce):
it.to0xHexLog
logutils.formatIt(LogFormat.json, RequestId):
it.to0xHexLog
logutils.formatIt(LogFormat.json, SlotId):
it.to0xHexLog
func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
StorageRequest(
client: tupl[0],
ask: tupl[1],
content: tupl[2],
expiry: tupl[3],
nonce: tupl[4]
client: tupl[0], ask: tupl[1], content: tupl[2], expiry: tupl[3], nonce: tupl[4]
)
func fromTuple(_: type Slot, tupl: tuple): Slot =
Slot(
request: tupl[0],
slotIndex: tupl[1]
)
Slot(request: tupl[0], slotIndex: tupl[1])
func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
StorageAsk(
@ -110,14 +114,11 @@ func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
proofProbability: tupl[3],
reward: tupl[4],
collateral: tupl[5],
maxSlotLoss: tupl[6]
maxSlotLoss: tupl[6],
)
func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
StorageContent(
cid: tupl[0],
merkleRoot: tupl[1]
)
StorageContent(cid: tupl[0], merkleRoot: tupl[1])
func solidityType*(_: type StorageContent): string =
solidityType(StorageContent.fieldTypes)
@ -160,7 +161,7 @@ func decode*(decoder: var AbiDecoder, T: type Slot): ?!T =
success Slot.fromTuple(tupl)
func id*(request: StorageRequest): RequestId =
let encoding = AbiEncoder.encode((request, ))
let encoding = AbiEncoder.encode((request,))
RequestId(keccak256.digest(encoding).data)
func slotId*(requestId: RequestId, slotIndex: UInt256): SlotId =

View File

@ -32,15 +32,15 @@ export discv5
logScope:
topics = "codex discovery"
type
Discovery* = ref object of RootObj
protocol*: discv5.Protocol # dht protocol
key: PrivateKey # private key
peerId: PeerId # the peer id of the local node
announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records
providerRecord*: ?SignedPeerRecord # record to advertise node connection information, this carries any
# address that the node can be connected on
dhtRecord*: ?SignedPeerRecord # record to advertise DHT connection information
type Discovery* = ref object of RootObj
protocol*: discv5.Protocol # dht protocol
key: PrivateKey # private key
peerId: PeerId # the peer id of the local node
announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records
providerRecord*: ?SignedPeerRecord
# record to advertise node connection information, this carries any
# address that the node can be connected on
dhtRecord*: ?SignedPeerRecord # record to advertise DHT connection information
proc toNodeId*(cid: Cid): NodeId =
## Cid to discovery id
@ -54,14 +54,11 @@ proc toNodeId*(host: ca.Address): NodeId =
readUintBE[256](keccak256.digest(host.toArray).data)
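
Both overloads hash into the same 256-bit keyspace, so content lookups (by CID) and host lookups (by Ethereum address) share one DHT routing table. An illustrative sketch:

```nim
proc dhtKeys(cid: Cid, host: ca.Address): (NodeId, NodeId) =
  # Both values map through keccak256 into the 256-bit NodeId space.
  (cid.toNodeId(), host.toNodeId())
```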
proc findPeer*(
d: Discovery,
peerId: PeerId): Future[?PeerRecord] {.async.} =
proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
trace "protocol.resolve..."
## Find peer using the given Discovery object
##
let
node = await d.protocol.resolve(toNodeId(peerId))
let node = await d.protocol.resolve(toNodeId(peerId))
return
if node.isSome():
@ -69,37 +66,31 @@ proc findPeer*(
else:
PeerRecord.none
method find*(
d: Discovery,
cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
## Find block providers
##
without providers =?
(await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
warn "Error finding providers for block", cid, error = error.msg
return providers.filterIt( not (it.data.peerId == d.peerId) )
return providers.filterIt(not (it.data.peerId == d.peerId))
method provide*(d: Discovery, cid: Cid) {.async, base.} =
## Provide a block Cid
##
let
nodes = await d.protocol.addProvider(
cid.toNodeId(), d.providerRecord.get)
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
if nodes.len <= 0:
warn "Couldn't provide to any nodes!"
method find*(
d: Discovery,
host: ca.Address): Future[seq[SignedPeerRecord]] {.async, base.} =
d: Discovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async, base.} =
## Find host providers
##
trace "Finding providers for host", host = $host
without var providers =?
(await d.protocol.getProviders(host.toNodeId())).mapFailure, error:
without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
error:
trace "Error finding providers for host", host = $host, exc = error.msg
return
@ -117,15 +108,11 @@ method provide*(d: Discovery, host: ca.Address) {.async, base.} =
##
trace "Providing host", host = $host
let
nodes = await d.protocol.addProvider(
host.toNodeId(), d.providerRecord.get)
let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
if nodes.len > 0:
trace "Provided to nodes", nodes = nodes.len
method removeProvider*(
d: Discovery,
peerId: PeerId): Future[void] {.base, gcsafe.} =
method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} =
## Remove provider from providers table
##
@ -139,26 +126,24 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
d.announceAddrs = @addrs
trace "Updating announce record", addrs = d.announceAddrs
d.providerRecord = SignedPeerRecord.init(
d.key, PeerRecord.init(d.peerId, d.announceAddrs))
.expect("Should construct signed record").some
d.providerRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
.expect("Should construct signed record").some
if not d.protocol.isNil:
d.protocol.updateRecord(d.providerRecord)
.expect("Should update SPR")
d.protocol.updateRecord(d.providerRecord).expect("Should update SPR")
proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update DHT record
##
trace "Updating Dht record", addrs = addrs
d.dhtRecord = SignedPeerRecord.init(
d.key, PeerRecord.init(d.peerId, @addrs))
.expect("Should construct signed record").some
d.dhtRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, @addrs))
.expect("Should construct signed record").some
if not d.protocol.isNil:
d.protocol.updateRecord(d.dhtRecord)
.expect("Should update SPR")
d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")
proc start*(d: Discovery) {.async.} =
d.protocol.open()
@ -174,15 +159,13 @@ proc new*(
bindPort = 0.Port,
announceAddrs: openArray[MultiAddress],
bootstrapNodes: openArray[SignedPeerRecord] = [],
store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!")
store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!"),
): Discovery =
## Create a new Discovery node instance for the given key and datastore
##
var
self = Discovery(
key: key,
peerId: PeerId.init(key).expect("Should construct PeerId"))
var self =
Discovery(key: key, peerId: PeerId.init(key).expect("Should construct PeerId"))
self.updateAnnounceRecord(announceAddrs)
@ -190,11 +173,8 @@ proc new*(
# FIXME disable IP limits temporarily so we can run our workshop. Re-enable
# and figure out proper solution.
let discoveryConfig = DiscoveryConfig(
tableIpLimits: TableIpLimits(
tableIpLimit: high(uint),
bucketIpLimit:high(uint)
),
bitsPerHop: DefaultBitsPerHop
tableIpLimits: TableIpLimits(tableIpLimit: high(uint), bucketIpLimit: high(uint)),
bitsPerHop: DefaultBitsPerHop,
)
# --------------------------------------------------------------------------
@ -206,6 +186,7 @@ proc new*(
bootstrapRecords = bootstrapNodes,
rng = Rng.instance(),
providers = ProvidersManager.new(store),
config = discoveryConfig)
config = discoveryConfig,
)
self

View File

@ -23,7 +23,8 @@ logScope:
topics = "codex asyncerasure"
const
CompletitionTimeout = 1.seconds # Maximum await time for completion after receiving a signal
CompletitionTimeout = 1.seconds
# Maximum await time for completion after receiving a signal
CompletitionRetryDelay = 10.millis
type
@ -62,12 +63,9 @@ proc encodeTask(args: EncodeTaskArgs, data: seq[seq[byte]]): EncodeTaskResult =
let
resDataSize = parity.len * args.blockSize
resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
arrHolder = SharedArrayHolder[byte](
data: resData,
size: resDataSize
)
arrHolder = SharedArrayHolder[byte](data: resData, size: resDataSize)
for i in 0..<parity.len:
for i in 0 ..< parity.len:
copyMem(addr resData[i * args.blockSize], addr parity[i][0], args.blockSize)
return ok(arrHolder)
@ -79,7 +77,9 @@ proc encodeTask(args: EncodeTaskArgs, data: seq[seq[byte]]): EncodeTaskResult =
if err =? args.signal.fireSync().mapFailure.errorOption():
error "Error firing signal", msg = err.msg
proc decodeTask(args: DecodeTaskArgs, data: seq[seq[byte]], parity: seq[seq[byte]]): DecodeTaskResult =
proc decodeTask(
args: DecodeTaskArgs, data: seq[seq[byte]], parity: seq[seq[byte]]
): DecodeTaskResult =
var
data = data.unsafeAddr
parity = parity.unsafeAddr
@ -92,12 +92,9 @@ proc decodeTask(args: DecodeTaskArgs, data: seq[seq[byte]], parity: seq[seq[byte
let
resDataSize = recovered.len * args.blockSize
resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
arrHolder = SharedArrayHolder[byte](
data: resData,
size: resDataSize
)
arrHolder = SharedArrayHolder[byte](data: resData, size: resDataSize)
for i in 0..<recovered.len:
for i in 0 ..< recovered.len:
copyMem(addr resData[i * args.blockSize], addr recovered[i][0], args.blockSize)
return ok(arrHolder)
@ -110,9 +107,7 @@ proc decodeTask(args: DecodeTaskArgs, data: seq[seq[byte]], parity: seq[seq[byte
error "Error firing signal", msg = err.msg
proc proxySpawnEncodeTask(
tp: Taskpool,
args: EncodeTaskArgs,
data: ref seq[seq[byte]]
tp: Taskpool, args: EncodeTaskArgs, data: ref seq[seq[byte]]
): Flowvar[EncodeTaskResult] =
# FIXME Uncomment the code below after addressing an issue:
# https://github.com/codex-storage/nim-codex/issues/854
@ -124,41 +119,45 @@ proc proxySpawnEncodeTask(
return fv
proc proxySpawnDecodeTask(
tp: Taskpool,
args: DecodeTaskArgs,
data: ref seq[seq[byte]],
parity: ref seq[seq[byte]]
tp: Taskpool,
args: DecodeTaskArgs,
data: ref seq[seq[byte]],
parity: ref seq[seq[byte]],
): Flowvar[DecodeTaskResult] =
# FIXME Uncomment the code below after addressing an issue:
# https://github.com/codex-storage/nim-codex/issues/854
# tp.spawn decodeTask(args, data[], parity[])
let fv = DecodeTaskResult.newFlowVar
fv.readyWith(decodeTask(args, data[], parity[]))
return fv
proc awaitResult[T](signal: ThreadSignalPtr, handle: Flowvar[T]): Future[?!T] {.async.} =
proc awaitResult[T](
signal: ThreadSignalPtr, handle: Flowvar[T]
): Future[?!T] {.async.} =
await wait(signal)
var
res: T
awaitTotal: Duration
while awaitTotal < CompletitionTimeout:
if handle.tryComplete(res):
return success(res)
else:
awaitTotal += CompletitionRetryDelay
await sleepAsync(CompletitionRetryDelay)
if handle.tryComplete(res):
return success(res)
else:
awaitTotal += CompletitionRetryDelay
await sleepAsync(CompletitionRetryDelay)
return failure("Task signaled finish but didn't return any result within " & $CompletitionRetryDelay)
return failure(
"Task signaled finish but didn't return any result within " & $CompletitionRetryDelay
)
proc asyncEncode*(
tp: Taskpool,
backend: EncoderBackend,
data: ref seq[seq[byte]],
blockSize: int,
ecM: int
tp: Taskpool,
backend: EncoderBackend,
data: ref seq[seq[byte]],
blockSize: int,
ecM: int,
): Future[?!ref seq[seq[byte]]] {.async.} =
without signal =? ThreadSignalPtr.new().mapFailure, err:
return failure(err)
@ -166,7 +165,9 @@ proc asyncEncode*(
try:
let
blockSize = data[0].len
args = EncodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecM: ecM)
args = EncodeTaskArgs(
signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecM: ecM
)
handle = proxySpawnEncodeTask(tp, args, data)
without res =? await awaitResult(signal, handle), err:
@ -176,7 +177,7 @@ proc asyncEncode*(
var parity = seq[seq[byte]].new()
parity[].setLen(ecM)
for i in 0..<parity[].len:
for i in 0 ..< parity[].len:
parity[i] = newSeq[byte](blockSize)
copyMem(addr parity[i][0], addr res.value.data[i * blockSize], blockSize)
@ -190,10 +191,10 @@ proc asyncEncode*(
error "Error closing signal", msg = $err.msg
proc asyncDecode*(
tp: Taskpool,
backend: DecoderBackend,
data, parity: ref seq[seq[byte]],
blockSize: int
tp: Taskpool,
backend: DecoderBackend,
data, parity: ref seq[seq[byte]],
blockSize: int,
): Future[?!ref seq[seq[byte]]] {.async.} =
without signal =? ThreadSignalPtr.new().mapFailure, err:
return failure(err)
@ -201,7 +202,9 @@ proc asyncDecode*(
try:
let
ecK = data[].len
args = DecodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecK: ecK)
args = DecodeTaskArgs(
signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecK: ecK
)
handle = proxySpawnDecodeTask(tp, args, data, parity)
without res =? await awaitResult(signal, handle), err:
@ -211,7 +214,7 @@ proc asyncDecode*(
var recovered = seq[seq[byte]].new()
recovered[].setLen(ecK)
for i in 0..<recovered[].len:
for i in 0 ..< recovered[].len:
recovered[i] = newSeq[byte](blockSize)
copyMem(addr recovered[i][0], addr res.value.data[i * blockSize], blockSize)

View File

@ -9,15 +9,16 @@
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import ../stores
type
ErasureBackend* = ref object of RootObj
blockSize*: int # block size in bytes
buffers*: int # number of original pieces
parity*: int # number of redundancy pieces
buffers*: int # number of original pieces
parity*: int # number of redundancy pieces
EncoderBackend* = ref object of ErasureBackend
DecoderBackend* = ref object of ErasureBackend
@ -28,19 +29,14 @@ method release*(self: ErasureBackend) {.base, gcsafe.} =
raiseAssert("not implemented!")
method encode*(
self: EncoderBackend,
buffers,
parity: var openArray[seq[byte]]
self: EncoderBackend, buffers, parity: var openArray[seq[byte]]
): Result[void, cstring] {.base, gcsafe.} =
## encode buffers using a backend
##
raiseAssert("not implemented!")
method decode*(
self: DecoderBackend,
buffers,
parity,
recovered: var openArray[seq[byte]]
self: DecoderBackend, buffers, parity, recovered: var openArray[seq[byte]]
): Result[void, cstring] {.base, gcsafe.} =
## decode buffers using a backend
##

View File

@ -22,19 +22,16 @@ type
decoder*: Option[LeoDecoder]
method encode*(
self: LeoEncoderBackend,
data,
parity: var openArray[seq[byte]]): Result[void, cstring] =
self: LeoEncoderBackend, data, parity: var openArray[seq[byte]]
): Result[void, cstring] =
## Encode data using Leopard backend
if parity.len == 0:
return ok()
var encoder = if self.encoder.isNone:
self.encoder = (? LeoEncoder.init(
self.blockSize,
self.buffers,
self.parity)).some
var encoder =
if self.encoder.isNone:
self.encoder = (?LeoEncoder.init(self.blockSize, self.buffers, self.parity)).some
self.encoder.get()
else:
self.encoder.get()
@ -42,18 +39,13 @@ method encode*(
encoder.encode(data, parity)
method decode*(
self: LeoDecoderBackend,
data,
parity,
recovered: var openArray[seq[byte]]): Result[void, cstring] =
self: LeoDecoderBackend, data, parity, recovered: var openArray[seq[byte]]
): Result[void, cstring] =
## Decode data using the given Leopard backend
var decoder =
if self.decoder.isNone:
self.decoder = (? LeoDecoder.init(
self.blockSize,
self.buffers,
self.parity)).some
self.decoder = (?LeoDecoder.init(self.blockSize, self.buffers, self.parity)).some
self.decoder.get()
else:
self.decoder.get()
@ -69,25 +61,15 @@ method release*(self: LeoDecoderBackend) =
self.decoder.get().free()
proc new*(
T: type LeoEncoderBackend,
blockSize,
buffers,
parity: int): LeoEncoderBackend =
T: type LeoEncoderBackend, blockSize, buffers, parity: int
): LeoEncoderBackend =
## Create an instance of a Leopard Encoder backend
##
LeoEncoderBackend(
blockSize: blockSize,
buffers: buffers,
parity: parity)
LeoEncoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)
proc new*(
T: type LeoDecoderBackend,
blockSize,
buffers,
parity: int): LeoDecoderBackend =
T: type LeoDecoderBackend, blockSize, buffers, parity: int
): LeoDecoderBackend =
## Create an instance of a Leopard Decoder backend
##
LeoDecoderBackend(
blockSize: blockSize,
buffers: buffers,
parity: parity)
LeoDecoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)

View File

@ -9,7 +9,8 @@
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import std/sequtils
import std/sugar
@ -60,12 +61,11 @@ type
## columns (with up to M blocks missing per column),
## or any combination thereof.
##
EncoderProvider* =
proc(size, blocks, parity: int): EncoderBackend {.raises: [Defect], noSideEffect.}
EncoderProvider* = proc(size, blocks, parity: int): EncoderBackend
{.raises: [Defect], noSideEffect.}
DecoderProvider* = proc(size, blocks, parity: int): DecoderBackend
{.raises: [Defect], noSideEffect.}
DecoderProvider* =
proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.}
Erasure* = ref object
encoderProvider*: EncoderProvider
@ -98,21 +98,22 @@ func indexToPos(steps, idx, step: int): int {.inline.} =
(idx - step) div steps
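
A worked instance of that arithmetic: with `steps = 3`, the stepped iteration for `step = 1` visits indices 1, 4, 7, and `indexToPos` maps them back onto contiguous buffer positions 0, 1, 2 (illustrative, in-module only since the func is unexported):

```nim
doAssert indexToPos(steps = 3, idx = 4, step = 1) == 1 # (4 - 1) div 3
doAssert indexToPos(steps = 3, idx = 7, step = 1) == 2 # (7 - 1) div 3
```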
proc getPendingBlocks(
self: Erasure,
manifest: Manifest,
indicies: seq[int]): AsyncIter[(?!bt.Block, int)] =
self: Erasure, manifest: Manifest, indicies: seq[int]
): AsyncIter[(?!bt.Block, int)] =
## Get pending blocks iterator
##
var
# request blocks from the store
pendingBlocks = indicies.map( (i: int) =>
self.store.getBlock(
BlockAddress.init(manifest.treeCid, i)
).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
pendingBlocks = indicies.map(
(i: int) =>
self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map(
(r: ?!bt.Block) => (r, i)
) # Get the data blocks (first K)
)
proc isFinished(): bool = pendingBlocks.len == 0
proc isFinished(): bool =
pendingBlocks.len == 0
proc genNext(): Future[(?!bt.Block, int)] {.async.} =
let completedFut = await one(pendingBlocks)
@ -123,29 +124,31 @@ proc getPendingBlocks(
let (_, index) = await completedFut
raise newException(
CatchableError,
"Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index)
"Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " &
$index,
)
AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)
proc prepareEncodingData(
self: Erasure,
manifest: Manifest,
params: EncodingParams,
step: Natural,
data: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte]): Future[?!Natural] {.async.} =
self: Erasure,
manifest: Manifest,
params: EncodingParams,
step: Natural,
data: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte],
): Future[?!Natural] {.async.} =
## Prepare data for encoding
##
let
strategy = params.strategy.init(
firstIndex = 0,
lastIndex = params.rounded - 1,
iterations = params.steps
firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
)
indicies = toSeq(strategy.getIndicies(step))
pendingBlocksIter = self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
pendingBlocksIter =
self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
var resolved = 0
for fut in pendingBlocksIter:
@ -164,20 +167,22 @@ proc prepareEncodingData(
let pos = indexToPos(params.steps, idx, step)
trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock)
without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err:
without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec),
err:
return failure(err)
cids[idx] = emptyBlockCid
success(resolved.Natural)
proc prepareDecodingData(
self: Erasure,
encoded: Manifest,
step: Natural,
data: ref seq[seq[byte]],
parityData: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte]): Future[?!(Natural, Natural)] {.async.} =
self: Erasure,
encoded: Manifest,
step: Natural,
data: ref seq[seq[byte]],
parityData: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte],
): Future[?!(Natural, Natural)] {.async.} =
## Prepare data for decoding
## `encoded` - the encoded manifest
## `step` - the current step
@ -189,9 +194,7 @@ proc prepareDecodingData(
let
strategy = encoded.protectedStrategy.init(
firstIndex = 0,
lastIndex = encoded.blocksCount - 1,
iterations = encoded.steps
firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
)
indicies = toSeq(strategy.getIndicies(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
@ -211,20 +214,21 @@ proc prepareDecodingData(
trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
continue
let
pos = indexToPos(encoded.steps, idx, step)
let pos = indexToPos(encoded.steps, idx, step)
logScope:
cid = blk.cid
idx = idx
pos = pos
step = step
cid = blk.cid
idx = idx
pos = pos
step = step
empty = blk.isEmpty
cids[idx] = blk.cid
if idx >= encoded.rounded:
trace "Retrieved parity block"
shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
shallowCopy(
parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data
)
parityPieces.inc
else:
trace "Retrieved data block"
@ -236,17 +240,19 @@ proc prepareDecodingData(
return success (dataPieces.Natural, parityPieces.Natural)
proc init*(
_: type EncodingParams,
manifest: Manifest,
ecK: Natural, ecM: Natural,
strategy: StrategyType): ?!EncodingParams =
_: type EncodingParams,
manifest: Manifest,
ecK: Natural,
ecM: Natural,
strategy: StrategyType,
): ?!EncodingParams =
if ecK > manifest.blocksCount:
let exc = (ref InsufficientBlocksError)(
msg: "Unable to encode manifest, not enough blocks, ecK = " &
$ecK &
", blocksCount = " &
$manifest.blocksCount,
minSize: ecK.NBytes * manifest.blockSize)
msg:
"Unable to encode manifest, not enough blocks, ecK = " & $ecK &
", blocksCount = " & $manifest.blocksCount,
minSize: ecK.NBytes * manifest.blockSize,
)
return failure(exc)
let
@ -260,25 +266,23 @@ proc init*(
rounded: rounded,
steps: steps,
blocksCount: blocksCount,
strategy: strategy
strategy: strategy,
)
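
A hypothetical worked example of the derived parameters (the exact formulas are elided above; this assumes `rounded` pads the block count up to a multiple of `ecK`, `steps = rounded div ecK`, and each step contributes `ecM` parity blocks):

```nim
# manifest.blocksCount = 10, ecK = 4, ecM = 2
#   rounded     = 12            # next multiple of ecK
#   steps       = 3             # 12 div 4
#   blocksCount = 12 + 2 * 3    # 18 blocks in the protected dataset
```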
proc encodeData(
self: Erasure,
manifest: Manifest,
params: EncodingParams
): Future[?!Manifest] {.async.} =
self: Erasure, manifest: Manifest, params: EncodingParams
): Future[?!Manifest] {.async.} =
## Encode blocks pointed to by the protected manifest
##
## `manifest` - the manifest to encode
##
logScope:
steps = params.steps
rounded_blocks = params.rounded
blocks_count = params.blocksCount
ecK = params.ecK
ecM = params.ecM
steps = params.steps
rounded_blocks = params.rounded
blocks_count = params.blocksCount
ecK = params.ecK
ecM = params.ecM
var
cids = seq[Cid].new()
@ -288,11 +292,12 @@ proc encodeData(
cids[].setLen(params.blocksCount)
try:
for step in 0..<params.steps:
for step in 0 ..< params.steps:
# TODO: Don't allocate a new seq every time, allocate once and zero out
var
data = seq[seq[byte]].new() # number of blocks to encode
parityData = newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int))
parityData =
newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int))
data[].setLen(params.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow
@ -301,20 +306,19 @@ proc encodeData(
await sleepAsync(10.millis)
without resolved =?
(await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err:
trace "Unable to prepare data", error = err.msg
return failure(err)
(await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)),
err:
trace "Unable to prepare data", error = err.msg
return failure(err)
trace "Erasure coding data", data = data[].len, parity = parityData.len
if (
let res = encoder.encode(data[], parityData);
res.isErr):
if (let res = encoder.encode(data[], parityData); res.isErr):
trace "Unable to encode manifest!", error = $res.error
return failure($res.error)
var idx = params.rounded + step
for j in 0..<params.ecM:
for j in 0 ..< params.ecM:
without blk =? bt.Block.new(parityData[j]), error:
trace "Unable to create parity block", err = error.msg
return failure(error)
@ -341,7 +345,7 @@ proc encodeData(
datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
ecK = params.ecK,
ecM = params.ecM,
strategy = params.strategy
strategy = params.strategy,
)
trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
@ -356,11 +360,12 @@ proc encodeData(
encoder.release()
proc encode*(
self: Erasure,
manifest: Manifest,
blocks: Natural,
parity: Natural,
strategy = SteppedStrategy): Future[?!Manifest] {.async.} =
self: Erasure,
manifest: Manifest,
blocks: Natural,
parity: Natural,
strategy = SteppedStrategy,
): Future[?!Manifest] {.async.} =
## Encode a manifest into one that is erasure protected.
##
## `manifest` - the original manifest to be encoded
@ -376,9 +381,7 @@ proc encode*(
return success encodedManifest
proc decode*(
self: Erasure,
encoded: Manifest): Future[?!Manifest] {.async.} =
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into its original
## manifest
##
@ -387,9 +390,9 @@ proc decode*(
##
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.blocksCount
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.blocksCount
var
cids = seq[Cid].new()
@ -399,22 +402,26 @@ proc decode*(
cids[].setLen(encoded.blocksCount)
try:
for step in 0..<encoded.steps:
for step in 0 ..< encoded.steps:
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
var
data = seq[seq[byte]].new()
parityData = seq[seq[byte]].new()
recovered = newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int))
recovered =
newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int))
data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M
data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M
without (dataPieces, _) =?
(await self.prepareDecodingData(encoded, step, data, parityData, cids, emptyBlock)), err:
without (dataPieces, _) =? (
await self.prepareDecodingData(
encoded, step, data, parityData, cids, emptyBlock
)
), err:
trace "Unable to prepare data", error = err.msg
return failure(err)
@ -424,13 +431,11 @@ proc decode*(
trace "Erasure decoding data"
if (
let err = decoder.decode(data[], parityData[], recovered);
err.isErr):
if (let err = decoder.decode(data[], parityData[], recovered); err.isErr):
trace "Unable to decode data!", err = $err.error
return failure($err.error)
for i in 0..<encoded.ecK:
for i in 0 ..< encoded.ecK:
let idx = i * encoded.steps + step
if data[i].len <= 0 and not cids[idx].isEmpty:
without blk =? bt.Block.new(recovered[i]), error:
@ -453,20 +458,22 @@ proc decode*(
finally:
decoder.release()
without tree =? CodexTree.init(cids[0..<encoded.originalBlocksCount]), err:
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure("Original tree root differs from the tree root computed out of recovered data")
return failure(
"Original tree root differs from the tree root computed out of recovered data"
)
let idxIter = Iter[Natural].new(recoveredIndices)
.filter((i: Natural) => i < tree.leavesCount)
let idxIter =
Iter[Natural].new(recoveredIndices).filter((i: Natural) => i < tree.leavesCount)
if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
return failure(err)
return failure(err)
let decoded = Manifest.new(encoded)
@ -479,14 +486,14 @@ proc stop*(self: Erasure) {.async.} =
return
proc new*(
T: type Erasure,
store: BlockStore,
encoderProvider: EncoderProvider,
decoderProvider: DecoderProvider): Erasure =
T: type Erasure,
store: BlockStore,
encoderProvider: EncoderProvider,
decoderProvider: DecoderProvider,
): Erasure =
## Create a new Erasure instance for encoding and decoding manifests
##
Erasure(
store: store,
encoderProvider: encoderProvider,
decoderProvider: decoderProvider)
store: store, encoderProvider: encoderProvider, decoderProvider: decoderProvider
)
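
A sketch of putting the pieces together, assuming `leoEncoderProvider`/`leoDecoderProvider` helpers exist that wrap the Leopard backends shown earlier:

```nim
proc protect(store: BlockStore, manifest: Manifest): Future[?!Manifest] {.async.} =
  # 4 data blocks per encoding group, 2 parity blocks on top.
  let erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
  return await erasure.encode(manifest, blocks = 4, parity = 2)
```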

View File

@ -20,13 +20,15 @@ type
CodexResult*[T] = Result[T, ref CodexError]
template mapFailure*[T, V, E](
exp: Result[T, V],
exc: typedesc[E],
exp: Result[T, V], exc: typedesc[E]
): Result[T, ref CatchableError] =
## Convert `Result[T, V]` to `Result[T, ref CatchableError]`
##
exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e))
exp.mapErr(
proc(e: V): ref CatchableError =
(ref exc)(msg: $e)
)
template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] =
mapFailure(exp, CodexError)
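
`mapFailure` is the bridge between third-party `Result` error types and the `?!T` alias used across the codebase; once converted, a value composes with the `?` and `without` operators. A small sketch (`bytes` is a placeholder buffer):

```nim
proc parseCid(bytes: seq[byte]): ?!Cid =
  # nim-libp2p's Cid error becomes a ref CodexError here.
  without cid =? Cid.init(bytes).mapFailure, err:
    return failure(err)
  success cid
```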

View File

@ -10,7 +10,7 @@ type
# 0 => 0, 1, 2
# 1 => 3, 4, 5
# 2 => 6, 7, 8
LinearStrategy,
LinearStrategy
# Stepped indexing:
# 0 => 0, 3, 6
@ -21,31 +21,32 @@ type
# Representing a strategy for grouping indices (of blocks usually)
# Given an iteration-count as input, will produce an iterator over the
# selected indices.
IndexingError* = object of CodexError
IndexingWrongIndexError* = object of IndexingError
IndexingWrongIterationsError* = object of IndexingError
IndexingStrategy* = object
strategyType*: StrategyType
firstIndex*: int # Lowest index that can be returned
lastIndex*: int # Highest index that can be returned
iterations*: int # getIndices(iteration) will run from 0 ..< iterations
firstIndex*: int # Lowest index that can be returned
lastIndex*: int # Highest index that can be returned
iterations*: int # getIndices(iteration) will run from 0 ..< iterations
step*: int
func checkIteration(self: IndexingStrategy, iteration: int): void {.raises: [IndexingError].} =
func checkIteration(
self: IndexingStrategy, iteration: int
): void {.raises: [IndexingError].} =
if iteration >= self.iterations:
raise newException(
IndexingError,
"Indexing iteration can't be greater than or equal to iterations.")
IndexingError, "Indexing iteration can't be greater than or equal to iterations."
)
func getIter(first, last, step: int): Iter[int] =
{.cast(noSideEffect).}:
Iter[int].new(first, last, step)
func getLinearIndicies(
self: IndexingStrategy,
iteration: int): Iter[int] {.raises: [IndexingError].} =
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
let
@ -55,8 +56,8 @@ func getLinearIndicies(
getIter(first, last, 1)
func getSteppedIndicies(
self: IndexingStrategy,
iteration: int): Iter[int] {.raises: [IndexingError].} =
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
let
@ -66,9 +67,8 @@ func getSteppedIndicies(
getIter(first, last, self.iterations)
func getIndicies*(
self: IndexingStrategy,
iteration: int): Iter[int] {.raises: [IndexingError].} =
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
case self.strategyType
of StrategyType.LinearStrategy:
self.getLinearIndicies(iteration)
@ -76,22 +76,25 @@ func getIndicies*(
self.getSteppedIndicies(iteration)
func init*(
strategy: StrategyType,
firstIndex, lastIndex, iterations: int): IndexingStrategy {.raises: [IndexingError].} =
strategy: StrategyType, firstIndex, lastIndex, iterations: int
): IndexingStrategy {.raises: [IndexingError].} =
if firstIndex > lastIndex:
raise newException(
IndexingWrongIndexError,
"firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex & ")")
"firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex &
")",
)
if iterations <= 0:
raise newException(
IndexingWrongIterationsError,
"iterations (" & $iterations & ") must be greater than zero.")
"iterations (" & $iterations & ") must be greater than zero.",
)
IndexingStrategy(
strategyType: strategy,
firstIndex: firstIndex,
lastIndex: lastIndex,
iterations: iterations,
step: divUp((lastIndex - firstIndex + 1), iterations))
step: divUp((lastIndex - firstIndex + 1), iterations),
)
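
The two behaviours documented at the top of the file, checked over indices 0..8 split into three iterations (`toSeq` from std/sequtils, as used elsewhere in the codebase):

```nim
let linear = LinearStrategy.init(firstIndex = 0, lastIndex = 8, iterations = 3)
let stepped = SteppedStrategy.init(firstIndex = 0, lastIndex = 8, iterations = 3)
doAssert toSeq(linear.getIndicies(0)) == @[0, 1, 2] # contiguous run
doAssert toSeq(stepped.getIndicies(0)) == @[0, 3, 6] # strided by iterations
```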

View File

@ -123,8 +123,9 @@ func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string =
short
func shortHexLog*(long: string): string =
if long[0..1] == "0x": result &= "0x"
result &= long[2..long.high].shortLog("..", 4, 4)
if long[0 .. 1] == "0x":
result &= "0x"
result &= long[2 .. long.high].shortLog("..", 4, 4)
func short0xHexLog*[N: static[int], T: array[N, byte]](v: T): string =
v.to0xHex.shortHexLog
@ -182,12 +183,16 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
let v = opts.map(opt => opt.formatJsonOption)
setProperty(r, key, json.`%`(v))
proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) {.raises:[ValueError, IOError].} =
proc setProperty*(
r: var JsonRecord, key: string, val: seq[T]
) {.raises: [ValueError, IOError].} =
var it {.inject, used.}: T
let v = val.map(it => body)
setProperty(r, key, json.`%`(v))
proc setProperty*(r: var JsonRecord, key: string, val: T) {.raises:[ValueError, IOError].} =
proc setProperty*(
r: var JsonRecord, key: string, val: T
) {.raises: [ValueError, IOError].} =
var it {.inject, used.}: T = val
let v = body
setProperty(r, key, json.`%`(v))
@ -218,23 +223,35 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
let v = opts.map(opt => opt.formatTextLineOption)
setProperty(r, key, v.formatTextLineSeq)
proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) {.raises:[ValueError, IOError].} =
proc setProperty*(
r: var TextLineRecord, key: string, val: seq[T]
) {.raises: [ValueError, IOError].} =
var it {.inject, used.}: T
let v = val.map(it => body)
setProperty(r, key, v.formatTextLineSeq)
proc setProperty*(r: var TextLineRecord, key: string, val: T) {.raises:[ValueError, IOError].} =
proc setProperty*(
r: var TextLineRecord, key: string, val: T
) {.raises: [ValueError, IOError].} =
var it {.inject, used.}: T = val
let v = body
setProperty(r, key, v)
template formatIt*(T: type, body: untyped) {.dirty.} =
formatIt(LogFormat.textLines, T): body
formatIt(LogFormat.json, T): body
formatIt(LogFormat.textLines, T):
body
formatIt(LogFormat.json, T):
body
formatIt(LogFormat.textLines, Cid): shortLog($it)
formatIt(LogFormat.json, Cid): $it
formatIt(UInt256): $it
formatIt(MultiAddress): $it
formatIt(LogFormat.textLines, array[32, byte]): it.short0xHexLog
formatIt(LogFormat.json, array[32, byte]): it.to0xHex
formatIt(LogFormat.textLines, Cid):
shortLog($it)
formatIt(LogFormat.json, Cid):
$it
formatIt(UInt256):
$it
formatIt(MultiAddress):
$it
formatIt(LogFormat.textLines, array[32, byte]):
it.short0xHexLog
formatIt(LogFormat.json, array[32, byte]):
it.to0xHex

View File

@ -10,9 +10,10 @@
# This module implements serialization and deserialization of Manifest
import pkg/upraises
import times
import times
push: {.upraises: [].}
push:
{.upraises: [].}
import std/tables
import std/sequtils
@ -33,7 +34,7 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
## multicodec container (Dag-pb) for now
##
? manifest.verify()
?manifest.verify()
var pbNode = initProtoBuffer()
# NOTE: The `Data` field in the `dag-pb`
@ -98,7 +99,7 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
if manifest.filename.isSome:
header.write(8, manifest.filename.get())
if manifest.mimetype.isSome:
if manifest.mimetype.isSome:
header.write(9, manifest.mimetype.get())
if manifest.uploadedAt.isSome:
@ -206,15 +207,14 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
return failure("Unable to decode `verifiableStrategy` from manifest!")
let
treeCid = ? Cid.init(treeCidBuf).mapFailure
let treeCid = ?Cid.init(treeCidBuf).mapFailure
var filenameOption = if filename.len == 0: string.none else: filename.some
var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some
var uploadedAtOption = if uploadedAt == 0: int64.none else: uploadedAt.int64.some
let
self = if protected:
let self =
if protected:
Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
@ -224,37 +224,39 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
codec = codec.MultiCodec,
ecK = ecK.int,
ecM = ecM.int,
originalTreeCid = ? Cid.init(originalTreeCid).mapFailure,
originalTreeCid = ?Cid.init(originalTreeCid).mapFailure,
originalDatasetSize = originalDatasetSize.NBytes,
strategy = StrategyType(protectedStrategy),
filename = filenameOption,
mimetype = mimetypeOption,
uploadedAt = uploadedAtOption)
else:
Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec,
filename = filenameOption,
mimetype = mimetypeOption,
uploadedAt = uploadedAtOption)
uploadedAt = uploadedAtOption,
)
else:
Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec,
filename = filenameOption,
mimetype = mimetypeOption,
uploadedAt = uploadedAtOption,
)
? self.verify()
?self.verify()
if verifiable:
let
verifyRootCid = ? Cid.init(verifyRoot).mapFailure
slotRootCids = slotRoots.mapIt(? Cid.init(it).mapFailure)
verifyRootCid = ?Cid.init(verifyRoot).mapFailure
slotRootCids = slotRoots.mapIt(?Cid.init(it).mapFailure)
return Manifest.new(
manifest = self,
verifyRoot = verifyRootCid,
slotRoots = slotRootCids,
cellSize = cellSize.NBytes,
strategy = StrategyType(verifiableStrategy)
strategy = StrategyType(verifiableStrategy),
)
self.success
@ -263,7 +265,7 @@ func decode*(_: type Manifest, blk: Block): ?!Manifest =
## Decode a manifest using `decoder`
##
if not ? blk.cid.isManifest:
if not ?blk.cid.isManifest:
return failure "Cid not a manifest codec"
Manifest.decode(blk.data)

View File

@ -11,7 +11,8 @@
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/[cid, multihash, multicodec]
@ -25,37 +26,37 @@ import ../blocktype
import ../indexingstrategy
import ../logutils
# TODO: Manifest should be reworked to more concrete types,
# perhaps using inheritance
type
Manifest* = ref object of RootObj
treeCid {.serialize.}: Cid # Root of the merkle tree
datasetSize {.serialize.}: NBytes # Total size of all blocks
blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
codec: MultiCodec # Dataset codec
hcodec: MultiCodec # Multihash codec
version: CidVersion # Cid version
filename {.serialize.}: ?string # The filename of the content uploaded (optional)
mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
uploadedAt {.serialize.}: ?int64 # The UTC creation timestamp in seconds
case protected {.serialize.}: bool # Protected datasets have erasure coded info
type Manifest* = ref object of RootObj
treeCid {.serialize.}: Cid # Root of the merkle tree
datasetSize {.serialize.}: NBytes # Total size of all blocks
blockSize {.serialize.}: NBytes
# Size of each contained block (might not be needed if blocks are len-prefixed)
codec: MultiCodec # Dataset codec
hcodec: MultiCodec # Multihash codec
version: CidVersion # Cid version
filename {.serialize.}: ?string # The filename of the content uploaded (optional)
mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
uploadedAt {.serialize.}: ?int64 # The UTC creation timestamp in seconds
case protected {.serialize.}: bool # Protected datasets have erasure coded info
of true:
ecK: int # Number of blocks to encode
ecM: int # Number of resulting parity blocks
originalTreeCid: Cid # The original root of the dataset being erasure coded
originalDatasetSize: NBytes
protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
case verifiable {.serialize.}: bool
# Verifiable datasets can be used to generate storage proofs
of true:
ecK: int # Number of blocks to encode
ecM: int # Number of resulting parity blocks
originalTreeCid: Cid # The original root of the dataset being erasure coded
originalDatasetSize: NBytes
protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
case verifiable {.serialize.}: bool # Verifiable datasets can be used to generate storage proofs
of true:
verifyRoot: Cid # Root of the top level merkle tree built from slot roots
slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
cellSize: NBytes # Size of each slot cell
verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
else:
discard
verifyRoot: Cid # Root of the top level merkle tree built from slot roots
slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
cellSize: NBytes # Size of each slot cell
verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
else:
discard
else:
discard
############################################################
# Accessors
@ -137,7 +138,7 @@ func uploadedAt*(self: Manifest): ?int64 =
############################################################
func isManifest*(cid: Cid): ?!bool =
success (ManifestCodec == ? cid.contentType().mapFailure(CodexError))
success (ManifestCodec == ?cid.contentType().mapFailure(CodexError))
func isManifest*(mc: MultiCodec): ?!bool =
success mc == ManifestCodec
@ -159,7 +160,8 @@ func verify*(self: Manifest): ?!void =
##
if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
return
failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
return success()
@ -167,41 +169,32 @@ func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} =
self.treeCid.success
func `==`*(a, b: Manifest): bool =
(a.treeCid == b.treeCid) and
(a.datasetSize == b.datasetSize) and
(a.blockSize == b.blockSize) and
(a.version == b.version) and
(a.hcodec == b.hcodec) and
(a.codec == b.codec) and
(a.protected == b.protected) and
(a.filename == b.filename) and
(a.mimetype == b.mimetype) and
(a.uploadedAt == b.uploadedAt) and
(if a.protected:
(a.ecK == b.ecK) and
(a.ecM == b.ecM) and
(a.originalTreeCid == b.originalTreeCid) and
(a.originalDatasetSize == b.originalDatasetSize) and
(a.protectedStrategy == b.protectedStrategy) and
(a.verifiable == b.verifiable) and
(if a.verifiable:
(a.verifyRoot == b.verifyRoot) and
(a.slotRoots == b.slotRoots) and
(a.cellSize == b.cellSize) and
(a.verifiableStrategy == b.verifiableStrategy)
(a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
(a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
(a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and
(a.mimetype == b.mimetype) and (a.uploadedAt == b.uploadedAt) and (
if a.protected:
(a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and
(a.originalDatasetSize == b.originalDatasetSize) and
(a.protectedStrategy == b.protectedStrategy) and (a.verifiable == b.verifiable) and
(
if a.verifiable:
(a.verifyRoot == b.verifyRoot) and (a.slotRoots == b.slotRoots) and
(a.cellSize == b.cellSize) and (
a.verifiableStrategy == b.verifiableStrategy
)
else:
true)
true
)
else:
true)
true
)
func `$`*(self: Manifest): string =
result = "treeCid: " & $self.treeCid &
", datasetSize: " & $self.datasetSize &
", blockSize: " & $self.blockSize &
", version: " & $self.version &
", hcodec: " & $self.hcodec &
", codec: " & $self.codec &
", protected: " & $self.protected
result =
"treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " &
$self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec &
", codec: " & $self.codec & ", protected: " & $self.protected
if self.filename.isSome:
result &= ", filename: " & $self.filename
@ -212,19 +205,19 @@ func `$`*(self: Manifest): string =
if self.uploadedAt.isSome:
result &= ", uploadedAt: " & $self.uploadedAt
result &= (if self.protected:
", ecK: " & $self.ecK &
", ecM: " & $self.ecM &
", originalTreeCid: " & $self.originalTreeCid &
", originalDatasetSize: " & $self.originalDatasetSize &
", verifiable: " & $self.verifiable &
(if self.verifiable:
", verifyRoot: " & $self.verifyRoot &
", slotRoots: " & $self.slotRoots
result &= (
if self.protected:
", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " &
$self.originalTreeCid & ", originalDatasetSize: " & $self.originalDatasetSize &
", verifiable: " & $self.verifiable & (
if self.verifiable:
", verifyRoot: " & $self.verifyRoot & ", slotRoots: " & $self.slotRoots
else:
""
)
else:
"")
else:
"")
""
)
return result
@ -233,18 +226,18 @@ func `$`*(self: Manifest): string =
############################################################
func new*(
T: type Manifest,
treeCid: Cid,
blockSize: NBytes,
datasetSize: NBytes,
version: CidVersion = CIDv1,
hcodec = Sha256HashCodec,
codec = BlockCodec,
protected = false,
filename: ?string = string.none,
mimetype: ?string = string.none,
uploadedAt: ?int64 = int64.none): Manifest =
T: type Manifest,
treeCid: Cid,
blockSize: NBytes,
datasetSize: NBytes,
version: CidVersion = CIDv1,
hcodec = Sha256HashCodec,
codec = BlockCodec,
protected = false,
filename: ?string = string.none,
mimetype: ?string = string.none,
uploadedAt: ?int64 = int64.none,
): Manifest =
T(
treeCid: treeCid,
blockSize: blockSize,
@ -255,15 +248,17 @@ func new*(
protected: protected,
filename: filename,
mimetype: mimetype,
uploadedAt: uploadedAt)
uploadedAt: uploadedAt,
)
func new*(
T: type Manifest,
manifest: Manifest,
treeCid: Cid,
datasetSize: NBytes,
ecK, ecM: int,
strategy = SteppedStrategy): Manifest =
T: type Manifest,
manifest: Manifest,
treeCid: Cid,
datasetSize: NBytes,
ecK, ecM: int,
strategy = SteppedStrategy,
): Manifest =
## Create an erasure protected dataset from an
## unprotected one
##
@ -276,18 +271,17 @@ func new*(
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: true,
ecK: ecK, ecM: ecM,
ecK: ecK,
ecM: ecM,
originalTreeCid: manifest.treeCid,
originalDatasetSize: manifest.datasetSize,
protectedStrategy: strategy,
filename: manifest.filename,
mimetype: manifest.mimetype,
uploadedAt: manifest.uploadedAt
)
uploadedAt: manifest.uploadedAt,
)
func new*(
T: type Manifest,
manifest: Manifest): Manifest =
func new*(T: type Manifest, manifest: Manifest): Manifest =
## Create an unprotected dataset from an
## erasure protected one
##
@ -302,25 +296,26 @@ func new*(
protected: false,
filename: manifest.filename,
mimetype: manifest.mimetype,
uploadedAt: manifest.uploadedAt)
uploadedAt: manifest.uploadedAt,
)
func new*(
T: type Manifest,
treeCid: Cid,
datasetSize: NBytes,
blockSize: NBytes,
version: CidVersion,
hcodec: MultiCodec,
codec: MultiCodec,
ecK: int,
ecM: int,
originalTreeCid: Cid,
originalDatasetSize: NBytes,
strategy = SteppedStrategy,
filename: ?string = string.none,
mimetype: ?string = string.none,
uploadedAt: ?int64 = int64.none): Manifest =
T: type Manifest,
treeCid: Cid,
datasetSize: NBytes,
blockSize: NBytes,
version: CidVersion,
hcodec: MultiCodec,
codec: MultiCodec,
ecK: int,
ecM: int,
originalTreeCid: Cid,
originalDatasetSize: NBytes,
strategy = SteppedStrategy,
filename: ?string = string.none,
mimetype: ?string = string.none,
uploadedAt: ?int64 = int64.none,
): Manifest =
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
@ -334,28 +329,30 @@ func new*(
originalTreeCid: originalTreeCid,
originalDatasetSize: originalDatasetSize,
protectedStrategy: strategy,
filename: filename,
filename: filename,
mimetype: mimetype,
uploadedAt: uploadedAt)
uploadedAt: uploadedAt,
)
func new*(
T: type Manifest,
manifest: Manifest,
verifyRoot: Cid,
slotRoots: openArray[Cid],
cellSize = DefaultCellSize,
strategy = LinearStrategy): ?!Manifest =
T: type Manifest,
manifest: Manifest,
verifyRoot: Cid,
slotRoots: openArray[Cid],
cellSize = DefaultCellSize,
strategy = LinearStrategy,
): ?!Manifest =
## Create a verifiable dataset from a
## protected one
##
if not manifest.protected:
return failure newException(
CodexError, "Can create verifiable manifest only from protected manifest.")
CodexError, "Can create verifiable manifest only from protected manifest."
)
if slotRoots.len != manifest.numSlots:
return failure newException(
CodexError, "Wrong number of slot roots.")
return failure newException(CodexError, "Wrong number of slot roots.")
success Manifest(
treeCid: manifest.treeCid,
@ -377,12 +374,10 @@ func new*(
verifiableStrategy: strategy,
filename: manifest.filename,
mimetype: manifest.mimetype,
uploadedAt: manifest.uploadedAt
)
uploadedAt: manifest.uploadedAt,
)
func new*(
T: type Manifest,
data: openArray[byte]): ?!Manifest =
func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =
## Create a manifest instance from given data
##

View File

@ -19,16 +19,17 @@ type
Market* = ref object of RootObj
MarketError* = object of CodexError
Subscription* = ref object of RootObj
OnRequest* = proc(id: RequestId,
ask: StorageAsk,
expiry: UInt256) {.gcsafe, upraises:[].}
OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: UInt256) {.gcsafe, upraises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises:[].}
OnSlotFilled* =
proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
OnSlotReservationsFull* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises:[].}
OnSlotReservationsFull* =
proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
ProofChallenge* = array[32, byte]
# Marketplace events -- located here due to the Market abstraction
@ -37,21 +38,28 @@ type
requestId*: RequestId
ask*: StorageAsk
expiry*: UInt256
SlotFilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: UInt256
SlotFreed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: UInt256
SlotReservationsFull* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: UInt256
RequestFulfilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
RequestCancelled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
RequestFailed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
ProofSubmitted* = object of MarketplaceEvent
id*: SlotId
@ -81,8 +89,7 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
let pntr = await market.getPointer(slotId)
return pntr < downtime
method requestStorage*(market: Market,
request: StorageRequest) {.base, async.} =
method requestStorage*(market: Market, request: StorageRequest) {.base, async.} =
raiseAssert("not implemented")
method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
@ -91,182 +98,168 @@ method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
raiseAssert("not implemented")
method getRequest*(market: Market,
id: RequestId):
Future[?StorageRequest] {.base, async.} =
method getRequest*(
market: Market, id: RequestId
): Future[?StorageRequest] {.base, async.} =
raiseAssert("not implemented")
method requestState*(market: Market,
requestId: RequestId): Future[?RequestState] {.base, async.} =
method requestState*(
market: Market, requestId: RequestId
): Future[?RequestState] {.base, async.} =
raiseAssert("not implemented")
method slotState*(market: Market,
slotId: SlotId): Future[SlotState] {.base, async.} =
method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} =
raiseAssert("not implemented")
method getRequestEnd*(market: Market,
id: RequestId): Future[SecondsSince1970] {.base, async.} =
method getRequestEnd*(
market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")
method requestExpiresAt*(market: Market,
id: RequestId): Future[SecondsSince1970] {.base, async.} =
method requestExpiresAt*(
market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")
method getHost*(market: Market,
requestId: RequestId,
slotIndex: UInt256): Future[?Address] {.base, async.} =
method getHost*(
market: Market, requestId: RequestId, slotIndex: UInt256
): Future[?Address] {.base, async.} =
raiseAssert("not implemented")
method getActiveSlot*(
market: Market,
slotId: SlotId): Future[?Slot] {.base, async.} =
method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
raiseAssert("not implemented")
method fillSlot*(market: Market,
requestId: RequestId,
slotIndex: UInt256,
proof: Groth16Proof,
collateral: UInt256) {.base, async.} =
method fillSlot*(
market: Market,
requestId: RequestId,
slotIndex: UInt256,
proof: Groth16Proof,
collateral: UInt256,
) {.base, async.} =
raiseAssert("not implemented")
method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
raiseAssert("not implemented")
method withdrawFunds*(market: Market,
requestId: RequestId) {.base, async.} =
method withdrawFunds*(market: Market, requestId: RequestId) {.base, async.} =
raiseAssert("not implemented")
method subscribeRequests*(market: Market,
callback: OnRequest):
Future[Subscription] {.base, async.} =
method subscribeRequests*(
market: Market, callback: OnRequest
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method isProofRequired*(market: Market,
id: SlotId): Future[bool] {.base, async.} =
method isProofRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method willProofBeRequired*(market: Market,
id: SlotId): Future[bool] {.base, async.} =
method willProofBeRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method getChallenge*(market: Market, id: SlotId): Future[ProofChallenge] {.base, async.} =
method getChallenge*(
market: Market, id: SlotId
): Future[ProofChallenge] {.base, async.} =
raiseAssert("not implemented")
method submitProof*(market: Market,
id: SlotId,
proof: Groth16Proof) {.base, async.} =
method submitProof*(market: Market, id: SlotId, proof: Groth16Proof) {.base, async.} =
raiseAssert("not implemented")
method markProofAsMissing*(market: Market,
id: SlotId,
period: Period) {.base, async.} =
method markProofAsMissing*(market: Market, id: SlotId, period: Period) {.base, async.} =
raiseAssert("not implemented")
method canProofBeMarkedAsMissing*(market: Market,
id: SlotId,
period: Period): Future[bool] {.base, async.} =
method canProofBeMarkedAsMissing*(
market: Market, id: SlotId, period: Period
): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method reserveSlot*(
market: Market,
requestId: RequestId,
slotIndex: UInt256) {.base, async.} =
market: Market, requestId: RequestId, slotIndex: UInt256
) {.base, async.} =
raiseAssert("not implemented")
method canReserveSlot*(
market: Market,
requestId: RequestId,
slotIndex: UInt256): Future[bool] {.base, async.} =
market: Market, requestId: RequestId, slotIndex: UInt256
): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method subscribeFulfillment*(market: Market,
callback: OnFulfillment):
Future[Subscription] {.base, async.} =
method subscribeFulfillment*(
market: Market, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeFulfillment*(market: Market,
requestId: RequestId,
callback: OnFulfillment):
Future[Subscription] {.base, async.} =
method subscribeFulfillment*(
market: Market, requestId: RequestId, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFilled*(market: Market,
callback: OnSlotFilled):
Future[Subscription] {.base, async.} =
method subscribeSlotFilled*(
market: Market, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFilled*(market: Market,
requestId: RequestId,
slotIndex: UInt256,
callback: OnSlotFilled):
Future[Subscription] {.base, async.} =
method subscribeSlotFilled*(
market: Market, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFreed*(market: Market,
callback: OnSlotFreed):
Future[Subscription] {.base, async.} =
method subscribeSlotFreed*(
market: Market, callback: OnSlotFreed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotReservationsFull*(
market: Market,
callback: OnSlotReservationsFull): Future[Subscription] {.base, async.} =
market: Market, callback: OnSlotReservationsFull
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestCancelled*(market: Market,
callback: OnRequestCancelled):
Future[Subscription] {.base, async.} =
method subscribeRequestCancelled*(
market: Market, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestCancelled*(market: Market,
requestId: RequestId,
callback: OnRequestCancelled):
Future[Subscription] {.base, async.} =
method subscribeRequestCancelled*(
market: Market, requestId: RequestId, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestFailed*(market: Market,
callback: OnRequestFailed):
Future[Subscription] {.base, async.} =
method subscribeRequestFailed*(
market: Market, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestFailed*(market: Market,
requestId: RequestId,
callback: OnRequestFailed):
Future[Subscription] {.base, async.} =
method subscribeRequestFailed*(
market: Market, requestId: RequestId, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeProofSubmission*(market: Market,
callback: OnProofSubmitted):
Future[Subscription] {.base, async.} =
method subscribeProofSubmission*(
market: Market, callback: OnProofSubmitted
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} =
method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market,
fromBlock: BlockTag): Future[seq[SlotFilled]] {.base, async.} =
market: Market, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market,
blocksAgo: int): Future[seq[SlotFilled]] {.base, async.} =
market: Market, blocksAgo: int
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market,
fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.base, async.} =
market: Market, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequestedEvents*(
market: Market,
fromBlock: BlockTag): Future[seq[StorageRequested]] {.base, async.} =
market: Market, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequestedEvents*(
market: Market,
blocksAgo: int): Future[seq[StorageRequested]] {.base, async.} =
market: Market, blocksAgo: int
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
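Taken together, the callback types and `subscribe*` methods form the event side of the `Market` API. A minimal caller sketch, assuming some concrete `Market` implementation (the base methods above only `raiseAssert`):

```nim
# Hedged sketch: watch SlotFilled events on a concrete Market, then detach.
proc watchSlotFills(market: Market) {.async.} =
  let onFilled: OnSlotFilled = proc(requestId: RequestId, slotIndex: UInt256) =
    echo "slot filled at index ", slotIndex
  let subscription = await market.subscribeSlotFilled(onFilled)
  # ... run for as long as the events are needed, then:
  await subscription.unsubscribe()
```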

View File

@ -9,7 +9,8 @@
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import pkg/libp2p
import pkg/questionable
@ -42,8 +43,8 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
var mcodecCode: uint64
var leavesCount: uint64
discard ? pb.getField(1, mcodecCode).mapFailure
discard ? pb.getField(2, leavesCount).mapFailure
discard ?pb.getField(1, mcodecCode).mapFailure
discard ?pb.getField(2, leavesCount).mapFailure
let mcodec = MultiCodec.codec(mcodecCode.int)
if mcodec == InvalidMultiCodec:
@ -53,10 +54,10 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
nodesBuff: seq[seq[byte]]
nodes: seq[ByteHash]
if ? pb.getRepeatedField(3, nodesBuff).mapFailure:
if ?pb.getRepeatedField(3, nodesBuff).mapFailure:
for nodeBuff in nodesBuff:
var node: ByteHash
discard ? initProtoBuffer(nodeBuff).getField(1, node).mapFailure
discard ?initProtoBuffer(nodeBuff).getField(1, node).mapFailure
nodes.add node
CodexTree.fromNodes(mcodec, nodes, leavesCount.int)
@ -81,32 +82,29 @@ proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
var mcodecCode: uint64
var index: uint64
var nleaves: uint64
discard ? pb.getField(1, mcodecCode).mapFailure
discard ?pb.getField(1, mcodecCode).mapFailure
let mcodec = MultiCodec.codec(mcodecCode.int)
if mcodec == InvalidMultiCodec:
return failure("Invalid MultiCodec code " & $mcodecCode)
discard ? pb.getField(2, index).mapFailure
discard ? pb.getField(3, nleaves).mapFailure
discard ?pb.getField(2, index).mapFailure
discard ?pb.getField(3, nleaves).mapFailure
var
nodesBuff: seq[seq[byte]]
nodes: seq[ByteHash]
if ? pb.getRepeatedField(4, nodesBuff).mapFailure:
if ?pb.getRepeatedField(4, nodesBuff).mapFailure:
for nodeBuff in nodesBuff:
var node: ByteHash
let nodePb = initProtoBuffer(nodeBuff)
discard ? nodePb.getField(1, node).mapFailure
discard ?nodePb.getField(1, node).mapFailure
nodes.add node
CodexProof.init(mcodec, index.int, nleaves.int, nodes)
proc fromJson*(
_: type CodexProof,
json: JsonNode
): ?!CodexProof =
proc fromJson*(_: type CodexProof, json: JsonNode): ?!CodexProof =
expectJsonKind(Cid, JString, json)
var bytes: seq[byte]
try:
@ -116,4 +114,5 @@ proc fromJson*(
CodexProof.decode(bytes)
func `%`*(proof: CodexProof): JsonNode = % byteutils.toHex(proof.encode())
func `%`*(proof: CodexProof): JsonNode =
%byteutils.toHex(proof.encode())
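Since `encode` and `decode` above are inverses, a proof should survive a round trip losslessly; a quick sanity sketch (`proof` is an assumed, valid `CodexProof`):

```nim
# Hedged round-trip sketch for the protobuf codec above.
proc roundTrip(proof: CodexProof): ?!void =
  let restored = ? CodexProof.decode(proof.encode())
  doAssert restored.index == proof.index and restored.nleaves == proof.nleaves
  success()
```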

View File

@ -32,10 +32,10 @@ logScope:
type
ByteTreeKey* {.pure.} = enum
KeyNone = 0x0.byte
KeyBottomLayer = 0x1.byte
KeyOdd = 0x2.byte
KeyOddAndBottomLayer = 0x3.byte
KeyNone = 0x0.byte
KeyBottomLayer = 0x1.byte
KeyOdd = 0x2.byte
KeyOddAndBottomLayer = 0x3.byte
ByteHash* = seq[byte]
ByteTree* = MerkleTree[ByteHash, ByteTreeKey]
@ -56,8 +56,7 @@ proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
const CodeHashes = initMultiHashCodeTable()
func mhash*(mcodec: MultiCodec): ?!MHash =
let
mhash = CodeHashes.getOrDefault(mcodec)
let mhash = CodeHashes.getOrDefault(mcodec)
if isNil(mhash.coder):
return failure "Invalid multihash codec"
@ -71,10 +70,9 @@ func digestSize*(self: (CodexTree or CodexProof)): int =
self.mhash.size
func getProof*(self: CodexTree, index: int): ?!CodexProof =
var
proof = CodexProof(mcodec: self.mcodec)
var proof = CodexProof(mcodec: self.mcodec)
? self.getProof(index, proof)
?self.getProof(index, proof)
success proof
@ -86,83 +84,66 @@ func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool =
rootBytes = root.digestBytes
leafBytes = leaf.digestBytes
if self.mcodec != root.mcodec or
self.mcodec != leaf.mcodec:
if self.mcodec != root.mcodec or self.mcodec != leaf.mcodec:
return failure "Hash codec mismatch"
if rootBytes.len != root.size and
leafBytes.len != leaf.size:
if rootBytes.len != root.size and leafBytes.len != leaf.size:
return failure "Invalid hash length"
self.verify(leafBytes, rootBytes)
func verify*(self: CodexProof, leaf: Cid, root: Cid): ?!bool =
self.verify(? leaf.mhash.mapFailure, ? root.mhash.mapFailure)
self.verify(?leaf.mhash.mapFailure, ?root.mhash.mapFailure)
proc rootCid*(
self: CodexTree,
version = CIDv1,
dataCodec = DatasetRootCodec): ?!Cid =
if (? self.root).len == 0:
proc rootCid*(self: CodexTree, version = CIDv1, dataCodec = DatasetRootCodec): ?!Cid =
if (?self.root).len == 0:
return failure "Empty root"
let
mhash = ? MultiHash.init(self.mcodec, ? self.root).mapFailure
let mhash = ?MultiHash.init(self.mcodec, ?self.root).mapFailure
Cid.init(version, dataCodec, mhash).mapFailure
func getLeafCid*(
self: CodexTree,
i: Natural,
version = CIDv1,
dataCodec = BlockCodec): ?!Cid =
self: CodexTree, i: Natural, version = CIDv1, dataCodec = BlockCodec
): ?!Cid =
if i >= self.leavesCount:
return failure "Invalid leaf index " & $i
let
leaf = self.leaves[i]
mhash = ? MultiHash.init($self.mcodec, leaf).mapFailure
mhash = ?MultiHash.init($self.mcodec, leaf).mapFailure
Cid.init(version, dataCodec, mhash).mapFailure
proc `$`*(self: CodexTree): string =
let root = if self.root.isOk: byteutils.toHex(self.root.get) else: "none"
"CodexTree(" &
" root: " & root &
", leavesCount: " & $self.leavesCount &
", levels: " & $self.levels &
", mcodec: " & $self.mcodec & " )"
let root =
if self.root.isOk:
byteutils.toHex(self.root.get)
else:
"none"
"CodexTree(" & " root: " & root & ", leavesCount: " & $self.leavesCount & ", levels: " &
$self.levels & ", mcodec: " & $self.mcodec & " )"
proc `$`*(self: CodexProof): string =
"CodexProof(" &
" nleaves: " & $self.nleaves &
", index: " & $self.index &
", path: " & $self.path.mapIt( byteutils.toHex(it) ) &
", mcodec: " & $self.mcodec & " )"
"CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
$self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"
func compress*(
x, y: openArray[byte],
key: ByteTreeKey,
mhash: MHash): ?!ByteHash =
func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash =
## Compress two hashes
##
var digest = newSeq[byte](mhash.size)
mhash.coder(@x & @y & @[ key.byte ], digest)
mhash.coder(@x & @y & @[key.byte], digest)
success digest
func init*(
_: type CodexTree,
mcodec: MultiCodec = Sha256HashCodec,
leaves: openArray[ByteHash]): ?!CodexTree =
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mhash = ? mcodec.mhash()
mhash = ?mcodec.mhash()
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
Zero: ByteHash = newSeq[byte](mhash.size)
@ -170,48 +151,42 @@ func init*(
if mhash.size != leaves[0].len:
return failure "Invalid hash length"
var
self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
self.layers = ? merkleTreeWorker(self, leaves, isBottomLayer = true)
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self
func init*(
_: type CodexTree,
leaves: openArray[MultiHash]): ?!CodexTree =
func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mcodec = leaves[0].mcodec
leaves = leaves.mapIt( it.digestBytes )
leaves = leaves.mapIt(it.digestBytes)
CodexTree.init(mcodec, leaves)
func init*(
_: type CodexTree,
leaves: openArray[Cid]): ?!CodexTree =
func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mcodec = (? leaves[0].mhash.mapFailure).mcodec
leaves = leaves.mapIt( (? it.mhash.mapFailure).digestBytes )
mcodec = (?leaves[0].mhash.mapFailure).mcodec
leaves = leaves.mapIt((?it.mhash.mapFailure).digestBytes)
CodexTree.init(mcodec, leaves)
proc fromNodes*(
_: type CodexTree,
mcodec: MultiCodec = Sha256HashCodec,
nodes: openArray[ByteHash],
nleaves: int): ?!CodexTree =
_: type CodexTree,
mcodec: MultiCodec = Sha256HashCodec,
nodes: openArray[ByteHash],
nleaves: int,
): ?!CodexTree =
if nodes.len == 0:
return failure "Empty nodes"
let
mhash = ? mcodec.mhash()
mhash = ?mcodec.mhash()
Zero = newSeq[byte](mhash.size)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
@ -225,31 +200,31 @@ proc fromNodes*(
pos = 0
while pos < nodes.len:
self.layers.add( nodes[pos..<(pos + layer)] )
self.layers.add(nodes[pos ..< (pos + layer)])
pos += layer
layer = divUp(layer, 2)
let
index = Rng.instance.rand(nleaves - 1)
proof = ? self.getProof(index)
proof = ?self.getProof(index)
if not ? proof.verify(self.leaves[index], ? self.root): # sanity check
if not ?proof.verify(self.leaves[index], ?self.root): # sanity check
return failure "Unable to verify tree built from nodes"
success self
func init*(
_: type CodexProof,
mcodec: MultiCodec = Sha256HashCodec,
index: int,
nleaves: int,
nodes: openArray[ByteHash]): ?!CodexProof =
_: type CodexProof,
mcodec: MultiCodec = Sha256HashCodec,
index: int,
nleaves: int,
nodes: openArray[ByteHash],
): ?!CodexProof =
if nodes.len == 0:
return failure "Empty nodes"
let
mhash = ? mcodec.mhash()
mhash = ?mcodec.mhash()
Zero = newSeq[byte](mhash.size)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
compress(x, y, key, mhash)
@ -260,4 +235,5 @@ func init*(
mcodec: mcodec,
index: index,
nleaves: nleaves,
path: @nodes)
path: @nodes,
)

View File

@ -16,19 +16,19 @@ import pkg/questionable/results
import ../errors
type
CompressFn*[H, K] = proc (x, y: H, key: K): ?!H {.noSideEffect, raises: [].}
CompressFn*[H, K] = proc(x, y: H, key: K): ?!H {.noSideEffect, raises: [].}
MerkleTree*[H, K] = ref object of RootObj
layers* : seq[seq[H]]
layers*: seq[seq[H]]
compress*: CompressFn[H, K]
zero* : H
zero*: H
MerkleProof*[H, K] = ref object of RootObj
index* : int # linear index of the leaf, starting from 0
path* : seq[H] # order: from the bottom to the top
nleaves* : int # number of leaves in the tree (=size of input)
compress*: CompressFn[H, K] # compress function
zero* : H # zero value
index*: int # linear index of the leaf, starting from 0
path*: seq[H] # order: from the bottom to the top
nleaves*: int # number of leaves in the tree (=size of input)
compress*: CompressFn[H, K] # compress function
zero*: H # zero value
func depth*[H, K](self: MerkleTree[H, K]): int =
return self.layers.len - 1
@ -59,36 +59,38 @@ func root*[H, K](self: MerkleTree[H, K]): ?!H =
return success last[0]
func getProof*[H, K](
self: MerkleTree[H, K],
index: int,
proof: MerkleProof[H, K]): ?!void =
let depth = self.depth
self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K]
): ?!void =
let depth = self.depth
let nleaves = self.leavesCount
if not (index >= 0 and index < nleaves):
return failure "index out of bounds"
var path : seq[H] = newSeq[H](depth)
var path: seq[H] = newSeq[H](depth)
var k = index
var m = nleaves
for i in 0..<depth:
for i in 0 ..< depth:
let j = k xor 1
path[i] = if (j < m): self.layers[i][j] else: self.zero
k = k shr 1
path[i] =
if (j < m):
self.layers[i][j]
else:
self.zero
k = k shr 1
m = (m + 1) shr 1
proof.index = index
proof.path = path
proof.path = path
proof.nleaves = nleaves
proof.compress = self.compress
success()
func getProof*[H, K](self: MerkleTree[H, K], index: int): ?!MerkleProof[H, K] =
var
proof = MerkleProof[H, K]()
var proof = MerkleProof[H, K]()
? self.getProof(index, proof)
?self.getProof(index, proof)
success proof
@ -100,41 +102,39 @@ func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H =
bottomFlag = K.KeyBottomLayer
for p in proof.path:
let oddIndex : bool = (bitand(j,1) != 0)
let oddIndex: bool = (bitand(j, 1) != 0)
if oddIndex:
# the index of the child is odd, so the node itself can't be odd (a bit counterintuitive, yeah :)
h = ? proof.compress( p, h, bottomFlag )
h = ?proof.compress(p, h, bottomFlag)
else:
if j == m - 1:
# single child => odd node
h = ? proof.compress( h, p, K(bottomFlag.ord + 2) )
h = ?proof.compress(h, p, K(bottomFlag.ord + 2))
else:
# even node
h = ? proof.compress( h , p, bottomFlag )
h = ?proof.compress(h, p, bottomFlag)
bottomFlag = K.KeyNone
j = j shr 1
m = (m+1) shr 1
j = j shr 1
m = (m + 1) shr 1
return success h
func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
success bool(root == ? proof.reconstructRoot(leaf))
success bool(root == ?proof.reconstructRoot(leaf))
func merkleTreeWorker*[H, K](
self: MerkleTree[H, K],
xs: openArray[H],
isBottomLayer: static bool): ?!seq[seq[H]] =
self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool
): ?!seq[seq[H]] =
let a = low(xs)
let b = high(xs)
let m = b - a + 1
when not isBottomLayer:
if m == 1:
return success @[ @xs ]
return success @[@xs]
let halfn: int = m div 2
let n : int = 2 * halfn
let halfn: int = m div 2
let n: int = 2 * halfn
let isOdd: bool = (n != m)
var ys: seq[H]
@ -143,11 +143,11 @@ func merkleTreeWorker*[H, K](
else:
ys = newSeq[H](halfn + 1)
for i in 0..<halfn:
for i in 0 ..< halfn:
const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone
ys[i] = ? self.compress( xs[a + 2 * i], xs[a + 2 * i + 1], key = key )
ys[i] = ?self.compress(xs[a + 2 * i], xs[a + 2 * i + 1], key = key)
if isOdd:
const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd
ys[halfn] = ? self.compress( xs[n], self.zero, key = key )
ys[halfn] = ?self.compress(xs[n], self.zero, key = key)
success @[ @xs ] & ? self.merkleTreeWorker(ys, isBottomLayer = false)
success @[@xs] & ?self.merkleTreeWorker(ys, isBottomLayer = false)
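End to end: the worker builds the layers bottom-up, `getProof` collects the sibling on each level, and `reconstructRoot` folds the path back to the top. A sketch over the byte-hash instantiation, assuming an already built `CodexTree`:

```nim
# Hedged sketch: prove one leaf, then check it against the root.
proc checkLeaf(tree: CodexTree, index: int): ?!bool =
  let proof = ? tree.getProof(index) # sibling path, bottom to top
  proof.verify(tree.leaves[index], ? tree.root)
```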

View File

@ -24,9 +24,9 @@ import ./merkletree
export merkletree, poseidon2
const
KeyNoneF = F.fromHex("0x0")
KeyBottomLayerF = F.fromHex("0x1")
KeyOddF = F.fromHex("0x2")
KeyNoneF = F.fromHex("0x0")
KeyBottomLayerF = F.fromHex("0x1")
KeyOddF = F.fromHex("0x2")
KeyOddAndBottomLayerF = F.fromHex("0x3")
Poseidon2Zero* = zero
@ -35,7 +35,7 @@ type
Bn254Fr* = F
Poseidon2Hash* = Bn254Fr
PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
KeyNone
KeyBottomLayer
KeyOdd
@ -46,65 +46,50 @@ type
proc `$`*(self: Poseidon2Tree): string =
let root = if self.root.isOk: self.root.get.toHex else: "none"
"Poseidon2Tree(" &
" root: " & root &
", leavesCount: " & $self.leavesCount &
"Poseidon2Tree(" & " root: " & root & ", leavesCount: " & $self.leavesCount &
", levels: " & $self.levels & " )"
proc `$`*(self: Poseidon2Proof): string =
"Poseidon2Proof(" &
" nleaves: " & $self.nleaves &
", index: " & $self.index &
", path: " & $self.path.mapIt( it.toHex ) & " )"
"Poseidon2Proof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index &
", path: " & $self.path.mapIt(it.toHex) & " )"
func toArray32*(bytes: openArray[byte]): array[32, byte] =
result[0..<bytes.len] = bytes[0..<bytes.len]
result[0 ..< bytes.len] = bytes[0 ..< bytes.len]
converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
case key:
case key
of KeyNone: KeyNoneF
of KeyBottomLayer: KeyBottomLayerF
of KeyOdd: KeyOddF
of KeyOddAndBottomLayer: KeyOddAndBottomLayerF
func init*(
_: type Poseidon2Tree,
leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
if leaves.len == 0:
return failure "Empty leaves"
let
compressor = proc(
x, y: Poseidon2Hash,
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
success compress( x, y, key.toKey )
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
var
self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
var self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
self.layers = ? merkleTreeWorker(self, leaves, isBottomLayer = true)
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self
func init*(
_: type Poseidon2Tree,
leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
Poseidon2Tree.init(
leaves.mapIt( Poseidon2Hash.fromBytes(it) ))
func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it)))
proc fromNodes*(
_: type Poseidon2Tree,
nodes: openArray[Poseidon2Hash],
nleaves: int): ?!Poseidon2Tree =
_: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int
): ?!Poseidon2Tree =
if nodes.len == 0:
return failure "Empty nodes"
let
compressor = proc(
x, y: Poseidon2Hash,
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
success compress( x, y, key.toKey )
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
var
self = Poseidon2Tree(compress: compressor, zero: zero)
@ -112,37 +97,34 @@ proc fromNodes*(
pos = 0
while pos < nodes.len:
self.layers.add( nodes[pos..<(pos + layer)] )
self.layers.add(nodes[pos ..< (pos + layer)])
pos += layer
layer = divUp(layer, 2)
let
index = Rng.instance.rand(nleaves - 1)
proof = ? self.getProof(index)
proof = ?self.getProof(index)
if not ? proof.verify(self.leaves[index], ? self.root): # sanity check
if not ?proof.verify(self.leaves[index], ?self.root): # sanity check
return failure "Unable to verify tree built from nodes"
success self
func init*(
_: type Poseidon2Proof,
index: int,
nleaves: int,
nodes: openArray[Poseidon2Hash]): ?!Poseidon2Proof =
_: type Poseidon2Proof, index: int, nleaves: int, nodes: openArray[Poseidon2Hash]
): ?!Poseidon2Proof =
if nodes.len == 0:
return failure "Empty nodes"
let
compressor = proc(
x, y: Poseidon2Hash,
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
success compress( x, y, key.toKey )
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
success Poseidon2Proof(
compress: compressor,
zero: Poseidon2Zero,
index: index,
nleaves: nleaves,
path: @nodes)
path: @nodes,
)
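A small end-to-end sketch of the constructors above: 31-byte leaves in, a field-element root out (leaf values are arbitrary):

```nim
# Hedged sketch of Poseidon2Tree.init over array[31, byte] leaves.
proc demo(): ?!void =
  var a, b: array[31, byte]
  a[0] = 1
  b[0] = 2
  let tree = ? Poseidon2Tree.init(@[a, b])
  echo "root: ", (? tree.root).toHex
  success()
```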

View File

@ -9,16 +9,17 @@
const
# Namespaces
CodexMetaNamespace* = "meta" # meta info stored here
CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
CodexBlockTotalNamespace* = CodexMetaNamespace & "/total" # number of blocks in the repo
CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
CodexMetaNamespace* = "meta" # meta info stored here
CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
CodexBlockTotalNamespace* = CodexMetaNamespace & "/total"
# number of blocks in the repo
CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace
CodexBlocksTtlNamespace* = # Cid TTL
CodexBlocksTtlNamespace* = # Cid TTL
CodexMetaNamespace & "/ttl"
CodexBlockProofNamespace* = # Cid and Proof
CodexBlockProofNamespace* = # Cid and Proof
CodexMetaNamespace & "/proof"
CodexDhtNamespace* = "dht" # Dht namespace
CodexDhtProvidersNamespace* = # Dht providers namespace
CodexDhtNamespace* = "dht" # Dht namespace
CodexDhtProvidersNamespace* = # Dht providers namespace
CodexDhtNamespace & "/providers"
CodexQuotaNamespace* = CodexMetaNamespace & "/quota" # quota's namespace
CodexQuotaNamespace* = CodexMetaNamespace & "/quota" # quota's namespace
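Expanded by hand, the constants compose into these datastore prefixes (the `<cid>` suffix is an assumed key shape, not part of this diff):

```nim
# CodexBlocksNamespace     == "repo/blocks"
# CodexManifestNamespace   == "repo/manifests"
# CodexBlocksTtlNamespace  == "meta/ttl"
# CodexBlockProofNamespace == "meta/proof"
# so a block's TTL entry would live under a key like "meta/ttl/<cid>".
```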

View File

@ -9,10 +9,12 @@
{.push raises: [].}
import
std/[options, os, strutils, times, net],stew/shims/net as stewNet,
stew/[objects,results], nat_traversal/[miniupnpc, natpmp],
std/[options, os, strutils, times, net],
stew/shims/net as stewNet,
stew/[objects, results],
nat_traversal/[miniupnpc, natpmp],
json_serialization/std/net
import pkg/chronos
import pkg/chronicles
import pkg/libp2p
@ -38,13 +40,12 @@ var
logScope:
topics = "nat"
type
PrefSrcStatus = enum
NoRoutingInfo
PrefSrcIsPublic
PrefSrcIsPrivate
BindAddressIsPublic
BindAddressIsPrivate
type PrefSrcStatus = enum
NoRoutingInfo
PrefSrcIsPublic
PrefSrcIsPrivate
BindAddressIsPublic
BindAddressIsPrivate
## Also does threadvar initialisation.
## Must be called before redirectPorts() in each thread.
@ -63,18 +64,20 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress]
var
msg: cstring
canContinue = true
case upnp.selectIGD():
of IGDNotFound:
msg = "Internet Gateway Device not found. Giving up."
canContinue = false
of IGDFound:
msg = "Internet Gateway Device found."
of IGDNotConnected:
msg = "Internet Gateway Device found but it's not connected. Trying anyway."
of NotAnIGD:
msg = "Some device found, but it's not recognised as an Internet Gateway Device. Trying anyway."
of IGDIpNotRoutable:
msg = "Internet Gateway Device found and is connected, but with a reserved or non-routable IP. Trying anyway."
case upnp.selectIGD()
of IGDNotFound:
msg = "Internet Gateway Device not found. Giving up."
canContinue = false
of IGDFound:
msg = "Internet Gateway Device found."
of IGDNotConnected:
msg = "Internet Gateway Device found but it's not connected. Trying anyway."
of NotAnIGD:
msg =
"Some device found, but it's not recognised as an Internet Gateway Device. Trying anyway."
of IGDIpNotRoutable:
msg =
"Internet Gateway Device found and is connected, but with a reserved or non-routable IP. Trying anyway."
if not quiet:
debug "UPnP", msg
if canContinue:
@ -116,8 +119,7 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress]
# Further more, we check if the bind address (user provided, or a "0.0.0.0"
# default) is a public IP. That's a long shot, because code paths involving a
# user-provided bind address are not supposed to get here.
proc getRoutePrefSrc(
bindIp: IpAddress): (Option[IpAddress], PrefSrcStatus) =
proc getRoutePrefSrc(bindIp: IpAddress): (Option[IpAddress], PrefSrcStatus) =
let bindAddress = initTAddress(bindIp, Port(0))
if bindAddress.isAnyLocal():
@ -137,18 +139,22 @@ proc getRoutePrefSrc(
return (none(IpAddress), BindAddressIsPrivate)
# Try to detect a public IP assigned to this host, before trying NAT traversal.
proc getPublicRoutePrefSrcOrExternalIP*(natStrategy: NatStrategy, bindIp: IpAddress, quiet = true): Option[IpAddress] =
proc getPublicRoutePrefSrcOrExternalIP*(
natStrategy: NatStrategy, bindIp: IpAddress, quiet = true
): Option[IpAddress] =
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)
case prefSrcStatus:
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return prefSrcIp
of PrefSrcIsPrivate, BindAddressIsPrivate:
let extIp = getExternalIP(natStrategy, quiet)
if extIp.isSome:
return some(extIp.get)
case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return prefSrcIp
of PrefSrcIsPrivate, BindAddressIsPrivate:
let extIp = getExternalIP(natStrategy, quiet)
if extIp.isSome:
return some(extIp.get)
proc doPortMapping(tcpPort, udpPort: Port, description: string): Option[(Port, Port)] {.gcsafe.} =
proc doPortMapping(
tcpPort, udpPort: Port, description: string
): Option[(Port, Port)] {.gcsafe.} =
var
extTcpPort: Port
extUdpPort: Port
@ -157,47 +163,54 @@ proc doPortMapping(tcpPort, udpPort: Port, description: string): Option[(Port, P
for t in [(tcpPort, UPNPProtocol.TCP), (udpPort, UPNPProtocol.UDP)]:
let
(port, protocol) = t
pmres = upnp.addPortMapping(externalPort = $port,
protocol = protocol,
internalHost = upnp.lanAddr,
internalPort = $port,
desc = description,
leaseDuration = 0)
pmres = upnp.addPortMapping(
externalPort = $port,
protocol = protocol,
internalHost = upnp.lanAddr,
internalPort = $port,
desc = description,
leaseDuration = 0,
)
if pmres.isErr:
error "UPnP port mapping", msg = pmres.error, port
return
else:
# let's check it
let cres = upnp.getSpecificPortMapping(externalPort = $port,
protocol = protocol)
let cres =
upnp.getSpecificPortMapping(externalPort = $port, protocol = protocol)
if cres.isErr:
warn "UPnP port mapping check failed. Assuming the check itself is broken and the port mapping was done.", msg = cres.error
warn "UPnP port mapping check failed. Assuming the check itself is broken and the port mapping was done.",
msg = cres.error
info "UPnP: added port mapping", externalPort = port, internalPort = port, protocol = protocol
case protocol:
of UPNPProtocol.TCP:
extTcpPort = port
of UPNPProtocol.UDP:
extUdpPort = port
info "UPnP: added port mapping",
externalPort = port, internalPort = port, protocol = protocol
case protocol
of UPNPProtocol.TCP:
extTcpPort = port
of UPNPProtocol.UDP:
extUdpPort = port
elif strategy == NatStrategy.NatPmp:
for t in [(tcpPort, NatPmpProtocol.TCP), (udpPort, NatPmpProtocol.UDP)]:
let
(port, protocol) = t
pmres = npmp.addPortMapping(eport = port.cushort,
iport = port.cushort,
protocol = protocol,
lifetime = NATPMP_LIFETIME)
pmres = npmp.addPortMapping(
eport = port.cushort,
iport = port.cushort,
protocol = protocol,
lifetime = NATPMP_LIFETIME,
)
if pmres.isErr:
error "NAT-PMP port mapping", msg = pmres.error, port
return
else:
let extPort = Port(pmres.value)
info "NAT-PMP: added port mapping", externalPort = extPort, internalPort = port, protocol = protocol
case protocol:
of NatPmpProtocol.TCP:
extTcpPort = extPort
of NatPmpProtocol.UDP:
extUdpPort = extPort
info "NAT-PMP: added port mapping",
externalPort = extPort, internalPort = port, protocol = protocol
case protocol
of NatPmpProtocol.TCP:
extTcpPort = extPort
of NatPmpProtocol.UDP:
extUdpPort = extPort
return some((extTcpPort, extUdpPort))
type PortMappingArgs = tuple[tcpPort, udpPort: Port, description: string]
@ -223,8 +236,11 @@ proc repeatPortMapping(args: PortMappingArgs) {.thread, raises: [ValueError].} =
while true:
# we're being silly here with this channel polling because we can't
# select on Nim channels like on Go ones
let (dataAvailable, _) = try: natCloseChan.tryRecv()
except Exception: (false, false)
let (dataAvailable, _) =
try:
natCloseChan.tryRecv()
except Exception:
(false, false)
if dataAvailable:
return
else:
@ -255,26 +271,33 @@ proc stopNatThread() {.noconv.} =
let ipres = getExternalIP(strategy, quiet = true)
if ipres.isSome:
if strategy == NatStrategy.NatUpnp:
for t in [(externalTcpPort, internalTcpPort, UPNPProtocol.TCP), (externalUdpPort, internalUdpPort, UPNPProtocol.UDP)]:
for t in [
(externalTcpPort, internalTcpPort, UPNPProtocol.TCP),
(externalUdpPort, internalUdpPort, UPNPProtocol.UDP),
]:
let
(eport, iport, protocol) = t
pmres = upnp.deletePortMapping(externalPort = $eport,
protocol = protocol)
pmres = upnp.deletePortMapping(externalPort = $eport, protocol = protocol)
if pmres.isErr:
error "UPnP port mapping deletion", msg = pmres.error
else:
debug "UPnP: deleted port mapping", externalPort = eport, internalPort = iport, protocol = protocol
debug "UPnP: deleted port mapping",
externalPort = eport, internalPort = iport, protocol = protocol
elif strategy == NatStrategy.NatPmp:
for t in [(externalTcpPort, internalTcpPort, NatPmpProtocol.TCP), (externalUdpPort, internalUdpPort, NatPmpProtocol.UDP)]:
for t in [
(externalTcpPort, internalTcpPort, NatPmpProtocol.TCP),
(externalUdpPort, internalUdpPort, NatPmpProtocol.UDP),
]:
let
(eport, iport, protocol) = t
pmres = npmp.deletePortMapping(eport = eport.cushort,
iport = iport.cushort,
protocol = protocol)
pmres = npmp.deletePortMapping(
eport = eport.cushort, iport = iport.cushort, protocol = protocol
)
if pmres.isErr:
error "NAT-PMP port mapping deletion", msg = pmres.error
else:
debug "NAT-PMP: deleted port mapping", externalPort = eport, internalPort = iport, protocol = protocol
debug "NAT-PMP: deleted port mapping",
externalPort = eport, internalPort = iport, protocol = protocol
proc redirectPorts*(tcpPort, udpPort: Port, description: string): Option[(Port, Port)] =
result = doPortMapping(tcpPort, udpPort, description)
@ -288,15 +311,17 @@ proc redirectPorts*(tcpPort, udpPort: Port, description: string): Option[(Port,
# these mappings.
natCloseChan.open()
try:
natThread.createThread(repeatPortMapping, (externalTcpPort, externalUdpPort, description))
natThread.createThread(
repeatPortMapping, (externalTcpPort, externalUdpPort, description)
)
# atexit() in disguise
addQuitProc(stopNatThread)
except Exception as exc:
warn "Failed to create NAT port mapping renewal thread", exc = exc.msg
proc setupNat*(natStrategy: NatStrategy, tcpPort, udpPort: Port,
clientId: string):
tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] =
proc setupNat*(
natStrategy: NatStrategy, tcpPort, udpPort: Port, clientId: string
): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] =
## Setup NAT port mapping and get external IP address.
## If any of this fails, we don't return any IP address but do return the
## original ports as best effort.
@ -304,10 +329,10 @@ proc setupNat*(natStrategy: NatStrategy, tcpPort, udpPort: Port,
let extIp = getExternalIP(natStrategy)
if extIp.isSome:
let ip = extIp.get
let extPorts = ({.gcsafe.}:
redirectPorts(tcpPort = tcpPort,
udpPort = udpPort,
description = clientId))
let extPorts = (
{.gcsafe.}:
redirectPorts(tcpPort = tcpPort, udpPort = udpPort, description = clientId)
)
if extPorts.isSome:
let (extTcpPort, extUdpPort) = extPorts.get()
(ip: some(ip), tcpPort: some(extTcpPort), udpPort: some(extUdpPort))
@ -318,16 +343,14 @@ proc setupNat*(natStrategy: NatStrategy, tcpPort, udpPort: Port,
warn "UPnP/NAT-PMP not available"
(ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))
type
NatConfig* = object
case hasExtIp*: bool
of true: extIp*: IpAddress
of false: nat*: NatStrategy
type NatConfig* = object
case hasExtIp*: bool
of true: extIp*: IpAddress
of false: nat*: NatStrategy
proc setupAddress*(natConfig: NatConfig, bindIp: IpAddress,
tcpPort, udpPort: Port, clientId: string):
tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]]
{.gcsafe.} =
proc setupAddress*(
natConfig: NatConfig, bindIp: IpAddress, tcpPort, udpPort: Port, clientId: string
): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] {.gcsafe.} =
## Set-up of the external address via any of the ways as configured in
## `NatConfig`. In case all fails an error is logged and the bind ports are
## selected also as external ports, as best effort and in hope that the
@ -338,58 +361,57 @@ proc setupAddress*(natConfig: NatConfig, bindIp: IpAddress,
# any required port redirection must be done by hand
return (some(natConfig.extIp), some(tcpPort), some(udpPort))
case natConfig.nat:
of NatStrategy.NatAny:
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)
case natConfig.nat
of NatStrategy.NatAny:
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)
case prefSrcStatus:
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return (prefSrcIp, some(tcpPort), some(udpPort))
of PrefSrcIsPrivate, BindAddressIsPrivate:
return setupNat(natConfig.nat, tcpPort, udpPort, clientId)
of NatStrategy.NatNone:
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)
case prefSrcStatus:
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return (prefSrcIp, some(tcpPort), some(udpPort))
of PrefSrcIsPrivate:
error "No public IP address found. Should not use --nat:none option"
return (none(IpAddress), some(tcpPort), some(udpPort))
of BindAddressIsPrivate:
error "Bind IP is not a public IP address. Should not use --nat:none option"
return (none(IpAddress), some(tcpPort), some(udpPort))
of NatStrategy.NatUpnp, NatStrategy.NatPmp:
case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return (prefSrcIp, some(tcpPort), some(udpPort))
of PrefSrcIsPrivate, BindAddressIsPrivate:
return setupNat(natConfig.nat, tcpPort, udpPort, clientId)
of NatStrategy.NatNone:
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)
proc nattedAddress*(natConfig: NatConfig, addrs: seq[MultiAddress], udpPort: Port): tuple[libp2p, discovery: seq[MultiAddress]] =
case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return (prefSrcIp, some(tcpPort), some(udpPort))
of PrefSrcIsPrivate:
error "No public IP address found. Should not use --nat:none option"
return (none(IpAddress), some(tcpPort), some(udpPort))
of BindAddressIsPrivate:
error "Bind IP is not a public IP address. Should not use --nat:none option"
return (none(IpAddress), some(tcpPort), some(udpPort))
of NatStrategy.NatUpnp, NatStrategy.NatPmp:
return setupNat(natConfig.nat, tcpPort, udpPort, clientId)
proc nattedAddress*(
natConfig: NatConfig, addrs: seq[MultiAddress], udpPort: Port
): tuple[libp2p, discovery: seq[MultiAddress]] =
## Takes a NAT configuration, a sequence of multiaddresses, and a UDP port and returns:
## - Modified multiaddresses with NAT-mapped addresses for libp2p
## - Discovery addresses with NAT-mapped UDP ports
var discoveryAddrs = newSeq[MultiAddress](0)
let
newAddrs = addrs.mapIt:
block:
# Extract IP address and port from the multiaddress
let (ipPart, port) = getAddressAndPort(it)
if ipPart.isSome and port.isSome:
# Try to setup NAT mapping for the address
let (newIP, tcp, udp) = setupAddress(natConfig, ipPart.get, port.get, udpPort, "codex")
if newIP.isSome:
# NAT mapping successful - add discovery address with mapped UDP port
discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(newIP.get, udp.get))
# Remap original address with NAT IP and TCP port
it.remapAddr(ip = newIP, port = tcp)
else:
# NAT mapping failed - use original address
echo "Failed to get external IP, using original address", it
discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(ipPart.get, udpPort))
it
let newAddrs = addrs.mapIt:
block:
# Extract IP address and port from the multiaddress
let (ipPart, port) = getAddressAndPort(it)
if ipPart.isSome and port.isSome:
# Try to setup NAT mapping for the address
let (newIP, tcp, udp) =
setupAddress(natConfig, ipPart.get, port.get, udpPort, "codex")
if newIP.isSome:
# NAT mapping successful - add discovery address with mapped UDP port
discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(newIP.get, udp.get))
# Remap original address with NAT IP and TCP port
it.remapAddr(ip = newIP, port = tcp)
else:
# Invalid multiaddress format - return as is
# NAT mapping failed - use original address
echo "Failed to get external IP, using original address", it
discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(ipPart.get, udpPort))
it
else:
# Invalid multiaddress format - return as is
it
(newAddrs, discoveryAddrs)
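A caller-side sketch of the helpers above (all values hypothetical): pick a strategy, let `setupAddress` attempt the mapping, and fall back to the bind ports when no external IP turns up:

```nim
# Hedged sketch of a UPnP-first NAT configuration.
let
  conf = NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)
  bindIp = parseIpAddress("0.0.0.0") # made-up bind address
  (ip, tcpPort, udpPort) = setupAddress(conf, bindIp, Port(8070), Port(8090), "codex")
if ip.isSome:
  echo "reachable at external IP ", ip.get
else:
  echo "no external IP; advertising bind ports as best effort"
```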

View File

@ -50,14 +50,15 @@ export logutils
logScope:
topics = "codex node"
const
FetchBatch = 200
const FetchBatch = 200
type
Contracts* = tuple
client: ?ClientInteractions
host: ?HostInteractions
validator: ?ValidatorInteractions
Contracts* =
tuple[
client: ?ClientInteractions,
host: ?HostInteractions,
validator: ?ValidatorInteractions,
]
CodexNode* = object
switch: Switch
@ -88,8 +89,8 @@ func discovery*(self: CodexNodeRef): Discovery =
return self.discovery
proc storeManifest*(
self: CodexNodeRef,
manifest: Manifest): Future[?!bt.Block] {.async.} =
self: CodexNodeRef, manifest: Manifest
): Future[?!bt.Block] {.async.} =
without encodedVerifiable =? manifest.encode(), err:
trace "Unable to encode manifest"
return failure(err)
@ -104,9 +105,7 @@ proc storeManifest*(
success blk
proc fetchManifest*(
self: CodexNodeRef,
cid: Cid): Future[?!Manifest] {.async.} =
proc fetchManifest*(self: CodexNodeRef, cid: Cid): Future[?!Manifest] {.async.} =
## Fetch and decode a manifest block
##
@ -129,33 +128,27 @@ proc fetchManifest*(
return manifest.success
proc findPeer*(
self: CodexNodeRef,
peerId: PeerId): Future[?PeerRecord] {.async.} =
proc findPeer*(self: CodexNodeRef, peerId: PeerId): Future[?PeerRecord] {.async.} =
## Find peer using the discovery service from the given CodexNode
##
return await self.discovery.findPeer(peerId)
proc connect*(
self: CodexNodeRef,
peerId: PeerId,
addrs: seq[MultiAddress]
self: CodexNodeRef, peerId: PeerId, addrs: seq[MultiAddress]
): Future[void] =
self.switch.connect(peerId, addrs)
proc updateExpiry*(
self: CodexNodeRef,
manifestCid: Cid,
expiry: SecondsSince1970): Future[?!void] {.async.} =
self: CodexNodeRef, manifestCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async.} =
without manifest =? await self.fetchManifest(manifestCid), error:
trace "Unable to fetch manifest for cid", manifestCid
return failure(error)
try:
let
ensuringFutures = Iter[int].new(0..<manifest.blocksCount)
.mapIt(self.networkStore.localStore.ensureExpiry( manifest.treeCid, it, expiry ))
let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt(
self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry)
)
await allFuturesThrowing(ensuringFutures)
except CancelledError as exc:
raise exc
@ -165,11 +158,12 @@ proc updateExpiry*(
return success()
proc fetchBatched*(
self: CodexNodeRef,
cid: Cid,
iter: Iter[int],
batchSize = FetchBatch,
onBatch: BatchProc = nil): Future[?!void] {.async, gcsafe.} =
self: CodexNodeRef,
cid: Cid,
iter: Iter[int],
batchSize = FetchBatch,
onBatch: BatchProc = nil,
): Future[?!void] {.async, gcsafe.} =
## Fetch blocks in batches of `batchSize`
##
@ -181,7 +175,7 @@ proc fetchBatched*(
while not iter.finished:
let blocks = collect:
for i in 0..<batchSize:
for i in 0 ..< batchSize:
if not iter.finished:
self.networkStore.getBlock(BlockAddress.init(cid, iter.next()))
@ -189,34 +183,31 @@ proc fetchBatched*(
return failure(blocksErr)
if not onBatch.isNil and
batchErr =? (await onBatch(blocks.mapIt( it.read.get ))).errorOption:
batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption:
return failure(batchErr)
success()
proc fetchBatched*(
self: CodexNodeRef,
manifest: Manifest,
batchSize = FetchBatch,
onBatch: BatchProc = nil): Future[?!void] =
self: CodexNodeRef,
manifest: Manifest,
batchSize = FetchBatch,
onBatch: BatchProc = nil,
): Future[?!void] =
## Fetch manifest in batches of `batchSize`
##
trace "Fetching blocks in batches of", size = batchSize
let iter = Iter[int].new(0..<manifest.blocksCount)
let iter = Iter[int].new(0 ..< manifest.blocksCount)
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch)
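From the caller's side the batched fetch looks like this (hypothetical `node` and `manifest`; the `onBatch` signature is inferred from how the body awaits it):

```nim
# Hedged sketch: fetch a dataset 64 blocks at a time, tracking progress.
proc fetchWithProgress(node: CodexNodeRef, manifest: Manifest) {.async.} =
  var fetched = 0
  proc onBatch(blocks: seq[bt.Block]): Future[?!void] {.async.} =
    fetched += blocks.len
    trace "Fetch progress", fetched, total = manifest.blocksCount
    return success()
  if err =? (await node.fetchBatched(manifest, batchSize = 64, onBatch = onBatch)).errorOption:
    error "Batched fetch failed", msg = err.msg
```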
proc streamSingleBlock(
self: CodexNodeRef,
cid: Cid
): Future[?!LPStream] {.async.} =
proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} =
## Streams the contents of a single block.
##
trace "Streaming single block", cid = cid
let
stream = BufferStream.new()
let stream = BufferStream.new()
without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
return failure(err)
@ -234,9 +225,7 @@ proc streamSingleBlock(
LPStream(stream).success
proc streamEntireDataset(
self: CodexNodeRef,
manifest: Manifest,
manifestCid: Cid,
self: CodexNodeRef, manifest: Manifest, manifestCid: Cid
): Future[?!LPStream] {.async.} =
## Streams the contents of the entire dataset described by the manifest.
##
@ -246,11 +235,8 @@ proc streamEntireDataset(
# Retrieve, decode and save to the local store all EC groups
proc erasureJob(): Future[?!void] {.async.} =
# Spawn an erasure decoding job
let
erasure = Erasure.new(
self.networkStore,
leoEncoderProvider,
leoDecoderProvider)
let erasure =
Erasure.new(self.networkStore, leoEncoderProvider, leoDecoderProvider)
without _ =? (await erasure.decode(manifest)), error:
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
return failure(error)
@ -265,9 +251,8 @@ proc streamEntireDataset(
LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success
proc retrieve*(
self: CodexNodeRef,
cid: Cid,
local: bool = true): Future[?!LPStream] {.async.} =
self: CodexNodeRef, cid: Cid, local: bool = true
): Future[?!LPStream] {.async.} =
## Retrieve by Cid a single block or an entire dataset described by manifest
##
@ -283,11 +268,12 @@ proc retrieve*(
await self.streamEntireDataset(manifest, cid)
proc store*(
self: CodexNodeRef,
stream: LPStream,
filename: ?string = string.none,
mimetype: ?string = string.none,
blockSize = DefaultBlockSize): Future[?!Cid] {.async.} =
self: CodexNodeRef,
stream: LPStream,
filename: ?string = string.none,
mimetype: ?string = string.none,
blockSize = DefaultBlockSize,
): Future[?!Cid] {.async.} =
## Save stream contents as dataset with given blockSize
## to node's BlockStore, and return Cid of its manifest
##
@ -301,10 +287,7 @@ proc store*(
var cids: seq[Cid]
try:
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
while (let chunk = await chunker.getBytes(); chunk.len > 0):
without mhash =? MultiHash.digest($hcodec, chunk).mapFailure, err:
return failure(err)
@ -335,7 +318,8 @@ proc store*(
for index, cid in cids:
without proof =? tree.getProof(index), err:
return failure(err)
if err =? (await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption:
if err =?
(await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption:
# TODO add log here
return failure(err)
@ -348,18 +332,20 @@ proc store*(
codec = dataCodec,
filename = filename,
mimetype = mimetype,
uploadedAt = now().utc.toTime.toUnix.some)
uploadedAt = now().utc.toTime.toUnix.some,
)
without manifestBlk =? await self.storeManifest(manifest), err:
error "Unable to store manifest"
return failure(err)
info "Stored data", manifestCid = manifestBlk.cid,
treeCid = treeCid,
blocks = manifest.blocksCount,
datasetSize = manifest.datasetSize,
filename = manifest.filename,
mimetype = manifest.mimetype
info "Stored data",
manifestCid = manifestBlk.cid,
treeCid = treeCid,
blocks = manifest.blocksCount,
datasetSize = manifest.datasetSize,
filename = manifest.filename,
mimetype = manifest.mimetype
return manifestBlk.cid.success
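
A sketch of the upload path from the caller's perspective (a hypothetical fragment inside some async proc; `node` and `stream` are assumed, and the filename/mimetype values are purely illustrative):

  # Hypothetical: store an incoming stream as a dataset and log its manifest Cid.
  without cid =? (await node.store(stream, filename = some "report.pdf",
                                   mimetype = some "application/pdf")), err:
    error "Upload failed", err = err.msg
    return
  info "Dataset stored", cid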
@ -381,15 +367,16 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =
onManifest(cid, manifest)
proc setupRequest(
self: CodexNodeRef,
cid: Cid,
duration: UInt256,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
reward: UInt256,
collateral: UInt256,
expiry: UInt256): Future[?!StorageRequest] {.async.} =
self: CodexNodeRef,
cid: Cid,
duration: UInt256,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
reward: UInt256,
collateral: UInt256,
expiry: UInt256,
): Future[?!StorageRequest] {.async.} =
## Set up slots for a given dataset
##
@ -398,16 +385,16 @@ proc setupRequest(
ecM = tolerance
logScope:
cid = cid
duration = duration
nodes = nodes
tolerance = tolerance
reward = reward
proofProbability = proofProbability
collateral = collateral
expiry = expiry
ecK = ecK
ecM = ecM
cid = cid
duration = duration
nodes = nodes
tolerance = tolerance
reward = reward
proofProbability = proofProbability
collateral = collateral
expiry = expiry
ecK = ecK
ecM = ecM
trace "Setting up slots"
@ -416,11 +403,8 @@ proc setupRequest(
return failure error
# Erasure code the dataset according to provided parameters
let
erasure = Erasure.new(
self.networkStore.localStore,
leoEncoderProvider,
leoDecoderProvider)
let erasure =
Erasure.new(self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider)
without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
trace "Unable to erasure code dataset"
@ -441,9 +425,9 @@ proc setupRequest(
let
verifyRoot =
if builder.verifyRoot.isNone:
return failure("No slots root")
else:
builder.verifyRoot.get.toBytes
return failure("No slots root")
else:
builder.verifyRoot.get.toBytes
request = StorageRequest(
ask: StorageAsk(
@ -453,42 +437,43 @@ proc setupRequest(
proofProbability: proofProbability,
reward: reward,
collateral: collateral,
maxSlotLoss: tolerance
maxSlotLoss: tolerance,
),
content: StorageContent(
cid: $manifestBlk.cid, # TODO: why string?
merkleRoot: verifyRoot
merkleRoot: verifyRoot,
),
expiry: expiry
expiry: expiry,
)
trace "Request created", request = $request
success request
proc requestStorage*(
self: CodexNodeRef,
cid: Cid,
duration: UInt256,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
reward: UInt256,
collateral: UInt256,
expiry: UInt256): Future[?!PurchaseId] {.async.} =
self: CodexNodeRef,
cid: Cid,
duration: UInt256,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
reward: UInt256,
collateral: UInt256,
expiry: UInt256,
): Future[?!PurchaseId] {.async.} =
## Initiate a request-for-storage sequence; this might
## be a multistep procedure.
##
logScope:
cid = cid
duration = duration
nodes = nodes
tolerance = tolerance
reward = reward
proofProbability = proofProbability
collateral = collateral
expiry = expiry.truncate(int64)
now = self.clock.now
cid = cid
duration = duration
nodes = nodes
tolerance = tolerance
reward = reward
proofProbability = proofProbability
collateral = collateral
expiry = expiry.truncate(int64)
now = self.clock.now
trace "Received a request for storage!"
@ -496,16 +481,11 @@ proc requestStorage*(
trace "Purchasing not available"
return failure "Purchasing not available"
without request =?
(await self.setupRequest(
cid,
duration,
proofProbability,
nodes,
tolerance,
reward,
collateral,
expiry)), err:
without request =? (
await self.setupRequest(
cid, duration, proofProbability, nodes, tolerance, reward, collateral, expiry
)
), err:
trace "Unable to setup request"
return failure err
@ -513,10 +493,8 @@ proc requestStorage*(
success purchase.id
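
A sketch of kicking off a purchase (hypothetical fragment; `node` and `cid` are assumed, all numbers are illustrative, and note that `tolerance` maps to `ecM` in `setupRequest` above):

  # Hypothetical: request storage on 5 nodes, tolerating the loss of 2.
  without purchaseId =? (await node.requestStorage(
      cid,
      duration = 3600.u256,
      proofProbability = 100.u256,
      nodes = 5,
      tolerance = 2,
      reward = 100.u256,
      collateral = 200.u256,
      expiry = 1800.u256,
    )), err:
    error "Storage request failed", err = err.msg
    return
  trace "Purchase started", purchaseId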
proc onStore(
self: CodexNodeRef,
request: StorageRequest,
slotIdx: UInt256,
blocksCb: BlocksCb): Future[?!void] {.async.} =
self: CodexNodeRef, request: StorageRequest, slotIdx: UInt256, blocksCb: BlocksCb
): Future[?!void] {.async.} =
## Store data in local storage
##
@ -534,9 +512,8 @@ proc onStore(
trace "Unable to fetch manifest for cid", cid, err = err.msg
return failure(err)
without builder =? Poseidon2Builder.new(
self.networkStore, manifest, manifest.verifiableStrategy
), err:
without builder =?
Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy), err:
trace "Unable to create slots builder", err = err.msg
return failure(err)
@ -551,7 +528,8 @@ proc onStore(
proc updateExpiry(blocks: seq[bt.Block]): Future[?!void] {.async.} =
trace "Updating expiry for blocks", blocks = blocks.len
let ensureExpiryFutures = blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
let ensureExpiryFutures =
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption:
return failure(updateExpiryErr)
@ -561,8 +539,9 @@ proc onStore(
return success()
without indexer =? manifest.verifiableStrategy.init(
0, manifest.blocksCount - 1, manifest.numSlots).catch, err:
without indexer =?
manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)
@ -570,10 +549,9 @@ proc onStore(
trace "Unable to get indicies from strategy", err = err.msg
return failure(err)
if err =? (await self.fetchBatched(
manifest.treeCid,
blksIter,
onBatch = updateExpiry)).errorOption:
if err =? (
await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
).errorOption:
trace "Unable to fetch blocks", err = err.msg
return failure(err)
@ -584,7 +562,8 @@ proc onStore(
trace "Slot successfully retrieved and reconstructed"
if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx.int]:
trace "Slot root mismatch", manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
trace "Slot root mismatch",
manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
return failure(newException(CodexError, "Slot root mismatch"))
trace "Slot successfully retrieved and reconstructed"
@ -592,9 +571,8 @@ proc onStore(
return success()
proc onProve(
self: CodexNodeRef,
slot: Slot,
challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} =
self: CodexNodeRef, slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async.} =
## Generates a proof for a given slot and challenge
##
@ -648,9 +626,8 @@ proc onProve(
failure "Prover not enabled"
proc onExpiryUpdate(
self: CodexNodeRef,
rootCid: string,
expiry: SecondsSince1970): Future[?!void] {.async.} =
self: CodexNodeRef, rootCid: string, expiry: SecondsSince1970
): Future[?!void] {.async.} =
without cid =? Cid.init(rootCid):
trace "Unable to parse Cid", cid
let error = newException(CodexError, "Unable to parse Cid")
@ -658,11 +635,8 @@ proc onExpiryUpdate(
return await self.updateExpiry(cid, expiry)
proc onClear(
self: CodexNodeRef,
request: StorageRequest,
slotIndex: UInt256) =
# TODO: remove data from local storage
proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: UInt256) =
# TODO: remove data from local storage
discard
proc start*(self: CodexNodeRef) {.async.} =
@ -676,32 +650,32 @@ proc start*(self: CodexNodeRef) {.async.} =
await self.clock.start()
if hostContracts =? self.contracts.host:
hostContracts.sales.onStore =
proc(
request: StorageRequest,
slot: UInt256,
onBatch: BatchProc): Future[?!void] = self.onStore(request, slot, onBatch)
hostContracts.sales.onStore = proc(
request: StorageRequest, slot: UInt256, onBatch: BatchProc
): Future[?!void] =
self.onStore(request, slot, onBatch)
hostContracts.sales.onExpiryUpdate =
proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] =
self.onExpiryUpdate(rootCid, expiry)
hostContracts.sales.onExpiryUpdate = proc(
rootCid: string, expiry: SecondsSince1970
): Future[?!void] =
self.onExpiryUpdate(rootCid, expiry)
hostContracts.sales.onClear =
proc(request: StorageRequest, slotIndex: UInt256) =
hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: UInt256) =
# TODO: remove data from local storage
self.onClear(request, slotIndex)
hostContracts.sales.onProve =
proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] =
# TODO: generate proof
self.onProve(slot, challenge)
hostContracts.sales.onProve = proc(
slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] =
# TODO: generate proof
self.onProve(slot, challenge)
try:
await hostContracts.start()
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start host contract interactions", error=error.msg
error "Unable to start host contract interactions", error = error.msg
self.contracts.host = HostInteractions.none
if clientContracts =? self.contracts.client:
@ -710,7 +684,7 @@ proc start*(self: CodexNodeRef) {.async.} =
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start client contract interactions: ", error=error.msg
error "Unable to start client contract interactions: ", error = error.msg
self.contracts.client = ClientInteractions.none
if validatorContracts =? self.contracts.validator:
@ -719,7 +693,7 @@ proc start*(self: CodexNodeRef) {.async.} =
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start validator contract interactions: ", error=error.msg
error "Unable to start validator contract interactions: ", error = error.msg
self.contracts.validator = ValidatorInteractions.none
self.networkId = self.switch.peerInfo.peerId
@ -750,13 +724,14 @@ proc stop*(self: CodexNodeRef) {.async.} =
await self.networkStore.close
proc new*(
T: type CodexNodeRef,
switch: Switch,
networkStore: NetworkStore,
engine: BlockExcEngine,
discovery: Discovery,
prover = Prover.none,
contracts = Contracts.default): CodexNodeRef =
T: type CodexNodeRef,
switch: Switch,
networkStore: NetworkStore,
engine: BlockExcEngine,
discovery: Discovery,
prover = Prover.none,
contracts = Contracts.default,
): CodexNodeRef =
## Create a new instance of a Codex node; call `start` to run it
##
@ -766,4 +741,5 @@ proc new*(
engine: engine,
prover: prover,
discovery: discovery,
contracts: contracts)
contracts: contracts,
)

View File

@ -3,6 +3,7 @@ import pkg/stint
type
Periodicity* = object
seconds*: UInt256
Period* = UInt256
Timestamp* = UInt256

View File

@ -18,16 +18,13 @@ type
clock: Clock
purchases: Table[PurchaseId, Purchase]
proofProbability*: UInt256
PurchaseTimeout* = Timeout
const DefaultProofProbability = 100.u256
proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
Purchasing(
market: market,
clock: clock,
proofProbability: DefaultProofProbability,
)
Purchasing(market: market, clock: clock, proofProbability: DefaultProofProbability)
proc load*(purchasing: Purchasing) {.async.} =
let market = purchasing.market
@ -43,9 +40,9 @@ proc start*(purchasing: Purchasing) {.async.} =
proc stop*(purchasing: Purchasing) {.async.} =
discard
proc populate*(purchasing: Purchasing,
request: StorageRequest
): Future[StorageRequest] {.async.} =
proc populate*(
purchasing: Purchasing, request: StorageRequest
): Future[StorageRequest] {.async.} =
result = request
if result.ask.proofProbability == 0.u256:
result.ask.proofProbability = purchasing.proofProbability
@ -55,9 +52,9 @@ proc populate*(purchasing: Purchasing,
result.nonce = Nonce(id)
result.client = await purchasing.market.getSigner()
proc purchase*(purchasing: Purchasing,
request: StorageRequest
): Future[Purchase] {.async.} =
proc purchase*(
purchasing: Purchasing, request: StorageRequest
): Future[Purchase] {.async.} =
let request = await purchasing.populate(request)
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
purchase.start()
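
A sketch of what `populate` guarantees before a purchase starts, based directly on the code above (hypothetical fragment inside an async proc; `market` and `clock` are assumed):

  # Hypothetical: a zero proofProbability is replaced by the default (100),
  # and a fresh random nonce plus the market signer are attached.
  let purchasing = Purchasing.new(market, clock)
  var request = StorageRequest(ask: StorageAsk(proofProbability: 0.u256))
  let populated = await purchasing.populate(request)
  assert populated.ask.proofProbability == 100.u256   # DefaultProofProbability
  assert populated.client == (await market.getSigner())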
@ -75,4 +72,3 @@ func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] =
for key in purchasing.purchases.keys:
pIds.add(key)
return pIds

View File

@ -25,10 +25,7 @@ export purchaseid
export statemachine
func new*(
_: type Purchase,
requestId: RequestId,
market: Market,
clock: Clock
_: type Purchase, requestId: RequestId, market: Market, clock: Clock
): Purchase =
## create a new instance of a Purchase
##
@ -42,10 +39,7 @@ func new*(
return purchase
func new*(
_: type Purchase,
request: StorageRequest,
market: Market,
clock: Clock
_: type Purchase, request: StorageRequest, market: Market, clock: Clock
): Purchase =
## Create a new purchase using the given market and clock
let purchase = Purchase.new(request.id, market, clock)
@ -76,4 +70,5 @@ func error*(purchase: Purchase): ?(ref CatchableError) =
func state*(purchase: Purchase): ?string =
proc description(state: State): string =
$state
purchase.query(description)

View File

@ -3,9 +3,12 @@ import ../logutils
type PurchaseId* = distinct array[32, byte]
logutils.formatIt(LogFormat.textLines, PurchaseId): it.short0xHexLog
logutils.formatIt(LogFormat.json, PurchaseId): it.to0xHexLog
logutils.formatIt(LogFormat.textLines, PurchaseId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, PurchaseId):
it.to0xHexLog
proc hash*(x: PurchaseId): Hash {.borrow.}
proc `==`*(x, y: PurchaseId): bool {.borrow.}
proc toHex*(x: PurchaseId): string = array[32, byte](x).toHex
proc toHex*(x: PurchaseId): string =
array[32, byte](x).toHex

View File

@ -14,5 +14,6 @@ type
clock*: Clock
requestId*: RequestId
request*: ?StorageRequest
PurchaseState* = ref object of State
PurchaseError* = object of CodexError

View File

@ -18,7 +18,7 @@ method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.
codex_purchases_cancelled.inc()
let purchase = Purchase(machine)
warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId
warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
let error = newException(Timeout, "Purchase cancelled due to timeout")

View File

@ -18,6 +18,7 @@ method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.}
codex_purchases_error.inc()
let purchase = Purchase(machine)
error "Purchasing error", error=state.error.msgDetail, requestId = purchase.requestId
error "Purchasing error",
error = state.error.msgDetail, requestId = purchase.requestId
purchase.future.fail(state.error)

View File

@ -2,8 +2,7 @@ import pkg/questionable
import ../statemachine
import ./error
type
ErrorHandlingState* = ref object of PurchaseState
type ErrorHandlingState* = ref object of PurchaseState
method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
some State(PurchaseErrored(error: error))

View File

@ -5,8 +5,7 @@ import ./error
declareCounter(codex_purchases_failed, "codex purchases failed")
type
PurchaseFailed* = ref object of PurchaseState
type PurchaseFailed* = ref object of PurchaseState
method `$`*(state: PurchaseFailed): string =
"failed"
@ -14,7 +13,7 @@ method `$`*(state: PurchaseFailed): string =
method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} =
codex_purchases_failed.inc()
let purchase = Purchase(machine)
warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
let error = newException(PurchaseError, "Purchase failed")

View File

@ -27,6 +27,7 @@ method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.}
let failed = newFuture[void]()
proc callback(_: RequestId) =
failed.complete()
let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)
# Ensure that we're past the request end by waiting an additional second

View File

@ -23,12 +23,14 @@ method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.
let market = purchase.market
let clock = purchase.clock
info "Request submitted, waiting for slots to be filled", requestId = purchase.requestId
info "Request submitted, waiting for slots to be filled",
requestId = purchase.requestId
proc wait {.async.} =
proc wait() {.async.} =
let done = newFuture[void]()
proc callback(_: RequestId) =
done.complete()
let subscription = await market.subscribeFulfillment(request.id, callback)
await done
await subscription.unsubscribe()

View File

@ -19,7 +19,6 @@ method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.}
let purchase = Purchase(machine)
if (request =? await purchase.market.getRequest(purchase.requestId)) and
(requestState =? await purchase.market.requestState(purchase.requestId)):
purchase.request = some request
case requestState

File diff suppressed because it is too large

View File

@ -25,9 +25,7 @@ proc encodeString*(cid: type Cid): Result[string, cstring] =
ok($cid)
proc decodeString*(T: type Cid, value: string): Result[Cid, cstring] =
Cid
.init(value)
.mapErr do(e: CidError) -> cstring:
Cid.init(value).mapErr do(e: CidError) -> cstring:
case e
of CidError.Incorrect: "Incorrect Cid".cstring
of CidError.Unsupported: "Unsupported Cid".cstring
@ -44,9 +42,8 @@ proc encodeString*(address: MultiAddress): Result[string, cstring] =
ok($address)
proc decodeString*(T: type MultiAddress, value: string): Result[MultiAddress, cstring] =
MultiAddress
.init(value)
.mapErr do(e: string) -> cstring: cstring(e)
MultiAddress.init(value).mapErr do(e: string) -> cstring:
cstring(e)
proc decodeString*(T: type SomeUnsignedInt, value: string): Result[T, cstring] =
Base10.decode(T, value)
@ -55,7 +52,7 @@ proc encodeString*(value: SomeUnsignedInt): Result[string, cstring] =
ok(Base10.toString(value))
proc decodeString*(T: type Duration, value: string): Result[T, cstring] =
let v = ? Base10.decode(uint32, value)
let v = ?Base10.decode(uint32, value)
ok(v.minutes)
proc encodeString*(value: Duration): Result[string, cstring] =
@ -77,19 +74,20 @@ proc decodeString*(_: type UInt256, value: string): Result[UInt256, cstring] =
except ValueError as e:
err e.msg.cstring
proc decodeString*(_: type array[32, byte],
value: string): Result[array[32, byte], cstring] =
proc decodeString*(
_: type array[32, byte], value: string
): Result[array[32, byte], cstring] =
try:
ok array[32, byte].fromHex(value)
except ValueError as e:
err e.msg.cstring
proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](_: type T,
value: string): Result[T, cstring] =
proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](
_: type T, value: string
): Result[T, cstring] =
array[32, byte].decodeString(value).map(id => T(id))
proc decodeString*(t: typedesc[string],
value: string): Result[string, cstring] =
proc decodeString*(t: typedesc[string], value: string): Result[string, cstring] =
ok(value)
proc encodeString*(value: string): RestResult[string] =

View File

@ -74,15 +74,10 @@ type
quotaReservedBytes* {.serialize.}: NBytes
proc init*(_: type RestContentList, content: seq[RestContent]): RestContentList =
RestContentList(
content: content
)
RestContentList(content: content)
proc init*(_: type RestContent, cid: Cid, manifest: Manifest): RestContent =
RestContent(
cid: cid,
manifest: manifest
)
RestContent(cid: cid, manifest: manifest)
proc init*(_: type RestNode, node: dn.Node): RestNode =
RestNode(
@ -90,7 +85,7 @@ proc init*(_: type RestNode, node: dn.Node): RestNode =
peerId: node.record.data.peerId,
record: node.record,
address: node.address,
seen: node.seen > 0.5
seen: node.seen > 0.5,
)
proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRoutingTable =
@ -99,28 +94,23 @@ proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRouting
for node in bucket.nodes:
nodes.add(RestNode.init(node))
RestRoutingTable(
localNode: RestNode.init(routingTable.localNode),
nodes: nodes
)
RestRoutingTable(localNode: RestNode.init(routingTable.localNode), nodes: nodes)
proc init*(_: type RestPeerRecord, peerRecord: PeerRecord): RestPeerRecord =
RestPeerRecord(
peerId: peerRecord.peerId,
seqNo: peerRecord.seqNo,
addresses: peerRecord.addresses
peerId: peerRecord.peerId, seqNo: peerRecord.seqNo, addresses: peerRecord.addresses
)
proc init*(_: type RestNodeId, id: NodeId): RestNodeId =
RestNodeId(
id: id
)
RestNodeId(id: id)
proc `%`*(obj: StorageRequest | Slot): JsonNode =
let jsonObj = newJObject()
for k, v in obj.fieldPairs: jsonObj[k] = %v
for k, v in obj.fieldPairs:
jsonObj[k] = %v
jsonObj["id"] = %(obj.id)
return jsonObj
proc `%`*(obj: RestNodeId): JsonNode = % $obj.id
proc `%`*(obj: RestNodeId): JsonNode =
% $obj.id

View File

@ -9,7 +9,8 @@
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import pkg/libp2p/crypto/crypto
import pkg/bearssl/rand
@ -30,7 +31,8 @@ proc instance*(t: type Rng): Rng =
const randMax = 18_446_744_073_709_551_615'u64
proc rand*(rng: Rng, max: Natural): int =
if max == 0: return 0
if max == 0:
return 0
while true:
let x = rng[].generate(uint64)
@ -41,8 +43,8 @@ proc sample*[T](rng: Rng, a: openArray[T]): T =
result = a[rng.rand(a.high)]
proc sample*[T](
rng: Rng, sample, exclude: openArray[T]): T
{.raises: [Defect, RngSampleError].} =
rng: Rng, sample, exclude: openArray[T]
): T {.raises: [Defect, RngSampleError].} =
if sample == exclude:
raise newException(RngSampleError, "Sample and exclude arrays are the same!")
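
A sketch of the exclusion-aware sampler above. Note this is hypothetical: the visible hunk only shows the guard against identical arrays; the re-draw behaviour for individual excluded values is assumed from context.

  # Hypothetical: pick a candidate, avoiding values already tried.
  let rng = Rng.instance
  let candidates = [1, 2, 3, 4]
  let tried = [2, 4]
  let pick = rng.sample(candidates, tried)  # raises RngSampleError if both arrays match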

View File

@ -45,13 +45,12 @@ export salescontext
logScope:
topics = "sales marketplace"
type
Sales* = ref object
context*: SalesContext
agents*: seq[SalesAgent]
running: bool
subscriptions: seq[market.Subscription]
trackedFutures: TrackedFutures
type Sales* = ref object
context*: SalesContext
agents*: seq[SalesAgent]
running: bool
subscriptions: seq[market.Subscription]
trackedFutures: TrackedFutures
proc `onStore=`*(sales: Sales, onStore: OnStore) =
sales.context.onStore = some onStore
@ -68,28 +67,31 @@ proc `onProve=`*(sales: Sales, callback: OnProve) =
proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) =
sales.context.onExpiryUpdate = some callback
proc onStore*(sales: Sales): ?OnStore = sales.context.onStore
proc onStore*(sales: Sales): ?OnStore =
sales.context.onStore
proc onClear*(sales: Sales): ?OnClear = sales.context.onClear
proc onClear*(sales: Sales): ?OnClear =
sales.context.onClear
proc onSale*(sales: Sales): ?OnSale = sales.context.onSale
proc onSale*(sales: Sales): ?OnSale =
sales.context.onSale
proc onProve*(sales: Sales): ?OnProve = sales.context.onProve
proc onProve*(sales: Sales): ?OnProve =
sales.context.onProve
proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate = sales.context.onExpiryUpdate
proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate =
sales.context.onExpiryUpdate
proc new*(_: type Sales,
market: Market,
clock: Clock,
repo: RepoStore): Sales =
proc new*(_: type Sales, market: Market, clock: Clock, repo: RepoStore): Sales =
Sales.new(market, clock, repo, 0)
proc new*(_: type Sales,
market: Market,
clock: Clock,
repo: RepoStore,
simulateProofFailures: int): Sales =
proc new*(
_: type Sales,
market: Market,
clock: Clock,
repo: RepoStore,
simulateProofFailures: int,
): Sales =
let reservations = Reservations.new(repo)
Sales(
context: SalesContext(
@ -97,10 +99,10 @@ proc new*(_: type Sales,
clock: clock,
reservations: reservations,
slotQueue: SlotQueue.new(),
simulateProofFailures: simulateProofFailures
simulateProofFailures: simulateProofFailures,
),
trackedFutures: TrackedFutures.new(),
subscriptions: @[]
subscriptions: @[],
)
proc remove(sales: Sales, agent: SalesAgent) {.async.} =
@ -108,20 +110,21 @@ proc remove(sales: Sales, agent: SalesAgent) {.async.} =
if sales.running:
sales.agents.keepItIf(it != agent)
proc cleanUp(sales: Sales,
agent: SalesAgent,
returnBytes: bool,
reprocessSlot: bool,
processing: Future[void]) {.async.} =
proc cleanUp(
sales: Sales,
agent: SalesAgent,
returnBytes: bool,
reprocessSlot: bool,
processing: Future[void],
) {.async.} =
let data = agent.data
logScope:
topics = "sales cleanUp"
requestId = data.requestId
slotIndex = data.slotIndex
reservationId = data.reservation.?id |? ReservationId.default
availabilityId = data.reservation.?availabilityId |? AvailabilityId.default
reservationId = data.reservation .? id |? ReservationId.default
availabilityId = data.reservation .? availabilityId |? AvailabilityId.default
trace "cleaning up sales agent"
@ -129,36 +132,37 @@ proc cleanUp(sales: Sales,
# that the cleanUp was called before the sales process really started, so
# there are not really any bytes to be returned
if returnBytes and request =? data.request and reservation =? data.reservation:
if returnErr =? (await sales.context.reservations.returnBytesToAvailability(
reservation.availabilityId,
reservation.id,
request.ask.slotSize
)).errorOption:
error "failure returning bytes",
error = returnErr.msg,
bytes = request.ask.slotSize
if returnErr =? (
await sales.context.reservations.returnBytesToAvailability(
reservation.availabilityId, reservation.id, request.ask.slotSize
)
).errorOption:
error "failure returning bytes",
error = returnErr.msg, bytes = request.ask.slotSize
# delete reservation and return reservation bytes back to the availability
if reservation =? data.reservation and
deleteErr =? (await sales.context.reservations.deleteReservation(
reservation.id,
reservation.availabilityId
)).errorOption:
error "failure deleting reservation", error = deleteErr.msg
deleteErr =? (
await sales.context.reservations.deleteReservation(
reservation.id, reservation.availabilityId
)
).errorOption:
error "failure deleting reservation", error = deleteErr.msg
# Re-add items to the queue to prevent small availabilities from
# draining it. Seen items will be ordered last.
if reprocessSlot and request =? data.request:
let queue = sales.context.slotQueue
var seenItem = SlotQueueItem.init(data.requestId,
data.slotIndex.truncate(uint16),
data.ask,
request.expiry,
seen = true)
var seenItem = SlotQueueItem.init(
data.requestId,
data.slotIndex.truncate(uint16),
data.ask,
request.expiry,
seen = true,
)
trace "pushing ignored item to queue, marked as seen"
if err =? queue.push(seenItem).errorOption:
error "failed to readd slot to queue",
errorType = $(type err), error = err.msg
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
await sales.remove(agent)
@ -167,11 +171,8 @@ proc cleanUp(sales: Sales,
processing.complete()
proc filled(
sales: Sales,
request: StorageRequest,
slotIndex: UInt256,
processing: Future[void]) =
sales: Sales, request: StorageRequest, slotIndex: UInt256, processing: Future[void]
) =
if onSale =? sales.context.onSale:
onSale(request, slotIndex)
@ -180,17 +181,13 @@ proc filled(
processing.complete()
proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
debug "Processing slot from queue", requestId = item.requestId,
slot = item.slotIndex
debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex
let agent = newSalesAgent(
sales.context,
item.requestId,
item.slotIndex.u256,
none StorageRequest
sales.context, item.requestId, item.slotIndex.u256, none StorageRequest
)
agent.onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} =
agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} =
await sales.cleanUp(agent, returnBytes, reprocessSlot, done)
agent.onFilled = some proc(request: StorageRequest, slotIndex: UInt256) =
@ -204,10 +201,12 @@ proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.}
without reservs =? await reservations.all(Reservation):
return
let unused = reservs.filter(r => (
let slotId = slotId(r.requestId, r.slotIndex)
not activeSlots.any(slot => slot.id == slotId)
))
let unused = reservs.filter(
r => (
let slotId = slotId(r.requestId, r.slotIndex)
not activeSlots.any(slot => slot.id == slotId)
)
)
if unused.len == 0:
return
@ -215,14 +214,13 @@ proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.}
info "Found unused reservations for deletion", unused = unused.len
for reservation in unused:
logScope:
reservationId = reservation.id
availabilityId = reservation.availabilityId
if err =? (await reservations.deleteReservation(
reservation.id, reservation.availabilityId
)).errorOption:
if err =? (
await reservations.deleteReservation(reservation.id, reservation.availabilityId)
).errorOption:
error "Failed to delete unused reservation", error = err.msg
else:
trace "Deleted unused reservation"
@ -252,11 +250,8 @@ proc load*(sales: Sales) {.async.} =
await sales.deleteInactiveReservations(activeSlots)
for slot in activeSlots:
let agent = newSalesAgent(
sales.context,
slot.request.id,
slot.slotIndex,
some slot.request)
let agent =
newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)
agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} =
# since workers are not being dispatched, this future has not been created
@ -282,11 +277,9 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
trace "unpausing queue after new availability added"
queue.unpause()
proc onStorageRequested(sales: Sales,
requestId: RequestId,
ask: StorageAsk,
expiry: UInt256) =
proc onStorageRequested(
sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: UInt256
) =
logScope:
topics = "marketplace sales onStorageRequested"
requestId
@ -314,10 +307,7 @@ proc onStorageRequested(sales: Sales,
else:
warn "Error adding request to SlotQueue", error = err.msg
proc onSlotFreed(sales: Sales,
requestId: RequestId,
slotIndex: UInt256) =
proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) =
logScope:
topics = "marketplace sales onSlotFreed"
requestId
@ -331,8 +321,7 @@ proc onSlotFreed(sales: Sales,
let queue = context.slotQueue
# first attempt to populate request using existing slot metadata in queue
without var found =? queue.populateItem(requestId,
slotIndex.truncate(uint16)):
without var found =? queue.populateItem(requestId, slotIndex.truncate(uint16)):
trace "no existing request metadata, getting request info from contract"
# if there's no existing slot for that request, retrieve the request
# from the contract.
@ -359,9 +348,7 @@ proc subscribeRequested(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
proc onStorageRequested(requestId: RequestId,
ask: StorageAsk,
expiry: UInt256) =
proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: UInt256) =
sales.onStorageRequested(requestId, ask, expiry)
try:
@ -485,10 +472,9 @@ proc startSlotQueue(sales: Sales) =
let slotQueue = sales.context.slotQueue
let reservations = sales.context.reservations
slotQueue.onProcessSlot =
proc(item: SlotQueueItem, done: Future[void]) {.async.} =
trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
sales.processSlot(item, done)
slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
sales.processSlot(item, done)
slotQueue.start()

View File

@ -26,7 +26,8 @@
## +----------------------------------------+
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
import std/sequtils
import std/sugar
@ -54,7 +55,6 @@ export logutils
logScope:
topics = "sales reservations"
type
AvailabilityId* = distinct array[32, byte]
ReservationId* = distinct array[32, byte]
@ -65,25 +65,32 @@ type
totalSize* {.serialize.}: UInt256
freeSize* {.serialize.}: UInt256
duration* {.serialize.}: UInt256
minPrice* {.serialize.}: UInt256 # minimal price paid for the whole hosted slot for the request's duration
minPrice* {.serialize.}: UInt256
# minimal price paid for the whole hosted slot for the request's duration
maxCollateral* {.serialize.}: UInt256
Reservation* = ref object
id* {.serialize.}: ReservationId
availabilityId* {.serialize.}: AvailabilityId
size* {.serialize.}: UInt256
requestId* {.serialize.}: RequestId
slotIndex* {.serialize.}: UInt256
Reservations* = ref object of RootObj
availabilityLock: AsyncLock # Lock for protecting assertions of availability's sizes when searching for matching availability
availabilityLock: AsyncLock
# Lock for protecting assertions of availability's sizes when searching for matching availability
repo: RepoStore
onAvailabilityAdded: ?OnAvailabilityAdded
GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
OnAvailabilityAdded* = proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
OnAvailabilityAdded* =
proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
StorableIter* = ref object
finished*: bool
next*: GetNext
dispose*: IterDispose
ReservationsError* = object of CodexError
ReserveFailedError* = object of ReservationsError
ReleaseFailedError* = object of ReservationsError
@ -109,35 +116,44 @@ template withLock(lock, body) =
if lock.locked:
lock.release()
proc new*(T: type Reservations,
repo: RepoStore): Reservations =
T(availabilityLock: newAsyncLock(),repo: repo)
proc new*(T: type Reservations, repo: RepoStore): Reservations =
T(availabilityLock: newAsyncLock(), repo: repo)
proc init*(
_: type Availability,
totalSize: UInt256,
freeSize: UInt256,
duration: UInt256,
minPrice: UInt256,
maxCollateral: UInt256): Availability =
_: type Availability,
totalSize: UInt256,
freeSize: UInt256,
duration: UInt256,
minPrice: UInt256,
maxCollateral: UInt256,
): Availability =
var id: array[32, byte]
doAssert randomBytes(id) == 32
Availability(id: AvailabilityId(id), totalSize:totalSize, freeSize: freeSize, duration: duration, minPrice: minPrice, maxCollateral: maxCollateral)
Availability(
id: AvailabilityId(id),
totalSize: totalSize,
freeSize: freeSize,
duration: duration,
minPrice: minPrice,
maxCollateral: maxCollateral,
)
proc init*(
_: type Reservation,
availabilityId: AvailabilityId,
size: UInt256,
requestId: RequestId,
slotIndex: UInt256
_: type Reservation,
availabilityId: AvailabilityId,
size: UInt256,
requestId: RequestId,
slotIndex: UInt256,
): Reservation =
var id: array[32, byte]
doAssert randomBytes(id) == 32
Reservation(id: ReservationId(id), availabilityId: availabilityId, size: size, requestId: requestId, slotIndex: slotIndex)
Reservation(
id: ReservationId(id),
availabilityId: availabilityId,
size: size,
requestId: requestId,
slotIndex: slotIndex,
)
func toArray(id: SomeStorableId): array[32, byte] =
array[32, byte](id)
@ -146,23 +162,26 @@ proc `==`*(x, y: AvailabilityId): bool {.borrow.}
proc `==`*(x, y: ReservationId): bool {.borrow.}
proc `==`*(x, y: Reservation): bool =
x.id == y.id
proc `==`*(x, y: Availability): bool =
x.id == y.id
proc `$`*(id: SomeStorableId): string = id.toArray.toHex
proc `$`*(id: SomeStorableId): string =
id.toArray.toHex
proc toErr[E1: ref CatchableError, E2: ReservationsError](
e1: E1,
_: type E2,
msg: string = e1.msg): ref E2 =
e1: E1, _: type E2, msg: string = e1.msg
): ref E2 =
return newException(E2, msg, e1)
logutils.formatIt(LogFormat.textLines, SomeStorableId): it.short0xHexLog
logutils.formatIt(LogFormat.json, SomeStorableId): it.to0xHexLog
logutils.formatIt(LogFormat.textLines, SomeStorableId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, SomeStorableId):
it.to0xHexLog
proc `onAvailabilityAdded=`*(self: Reservations,
onAvailabilityAdded: OnAvailabilityAdded) =
proc `onAvailabilityAdded=`*(
self: Reservations, onAvailabilityAdded: OnAvailabilityAdded
) =
self.onAvailabilityAdded = some onAvailabilityAdded
func key*(id: AvailabilityId): ?!Key =
@ -179,24 +198,20 @@ func key*(availability: Availability): ?!Key =
func key*(reservation: Reservation): ?!Key =
return key(reservation.id, reservation.availabilityId)
func available*(self: Reservations): uint = self.repo.available.uint
func available*(self: Reservations): uint =
self.repo.available.uint
func hasAvailable*(self: Reservations, bytes: uint): bool =
self.repo.available(bytes.NBytes)
proc exists*(
self: Reservations,
key: Key): Future[bool] {.async.} =
proc exists*(self: Reservations, key: Key): Future[bool] {.async.} =
let exists = await self.repo.metaDs.ds.contains(key)
return exists
proc getImpl(
self: Reservations,
key: Key): Future[?!seq[byte]] {.async.} =
proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} =
if not await self.exists(key):
let err = newException(NotExistsError, "object with key " & $key & " does not exist")
let err =
newException(NotExistsError, "object with key " & $key & " does not exist")
return failure(err)
without serialized =? await self.repo.metaDs.ds.get(key), error:
@ -205,10 +220,8 @@ proc getImpl(
return success serialized
proc get*(
self: Reservations,
key: Key,
T: type SomeStorableObject): Future[?!T] {.async.} =
self: Reservations, key: Key, T: type SomeStorableObject
): Future[?!T] {.async.} =
without serialized =? await self.getImpl(key), error:
return failure(error)
@ -217,27 +230,20 @@ proc get*(
return success obj
proc updateImpl(
self: Reservations,
obj: SomeStorableObject): Future[?!void] {.async.} =
proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.async.} =
trace "updating " & $(obj.type), id = obj.id
without key =? obj.key, error:
return failure(error)
if err =? (await self.repo.metaDs.ds.put(
key,
@(obj.toJson.toBytes)
)).errorOption:
if err =? (await self.repo.metaDs.ds.put(key, @(obj.toJson.toBytes))).errorOption:
return failure(err.toErr(UpdateFailedError))
return success()
proc updateAvailability(
self: Reservations,
obj: Availability): Future[?!void] {.async.} =
self: Reservations, obj: Availability
): Future[?!void] {.async.} =
logScope:
availabilityId = obj.id
@ -269,11 +275,18 @@ proc updateAvailability(
if oldAvailability.totalSize != obj.totalSize:
trace "totalSize changed, updating repo reservation"
if oldAvailability.totalSize < obj.totalSize: # storage added
if reserveErr =? (await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes)).errorOption:
if reserveErr =? (
await self.repo.reserve(
(obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes
)
).errorOption:
return failure(reserveErr.toErr(ReserveFailedError))
elif oldAvailability.totalSize > obj.totalSize: # storage removed
if reserveErr =? (await self.repo.release((oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes)).errorOption:
if reserveErr =? (
await self.repo.release(
(oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes
)
).errorOption:
return failure(reserveErr.toErr(ReleaseFailedError))
let res = await self.updateImpl(obj)
@ -296,21 +309,14 @@ proc updateAvailability(
return res
proc update*(
self: Reservations,
obj: Reservation): Future[?!void] {.async.} =
proc update*(self: Reservations, obj: Reservation): Future[?!void] {.async.} =
return await self.updateImpl(obj)
proc update*(
self: Reservations,
obj: Availability): Future[?!void] {.async.} =
proc update*(self: Reservations, obj: Availability): Future[?!void] {.async.} =
withLock(self.availabilityLock):
return await self.updateAvailability(obj)
proc delete(
self: Reservations,
key: Key): Future[?!void] {.async.} =
proc delete(self: Reservations, key: Key): Future[?!void] {.async.} =
trace "deleting object", key
if not await self.exists(key):
@ -322,10 +328,8 @@ proc delete(
return success()
proc deleteReservation*(
self: Reservations,
reservationId: ReservationId,
availabilityId: AvailabilityId): Future[?!void] {.async.} =
self: Reservations, reservationId: ReservationId, availabilityId: AvailabilityId
): Future[?!void] {.async.} =
logScope:
reservationId
availabilityId
@ -365,24 +369,21 @@ proc deleteReservation*(
# To delete, must not have any active sales.
proc createAvailability*(
self: Reservations,
size: UInt256,
duration: UInt256,
minPrice: UInt256,
maxCollateral: UInt256): Future[?!Availability] {.async.} =
self: Reservations,
size: UInt256,
duration: UInt256,
minPrice: UInt256,
maxCollateral: UInt256,
): Future[?!Availability] {.async.} =
trace "creating availability", size, duration, minPrice, maxCollateral
let availability = Availability.init(
size, size, duration, minPrice, maxCollateral
)
let availability = Availability.init(size, size, duration, minPrice, maxCollateral)
let bytes = availability.freeSize.truncate(uint)
if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
return failure(reserveErr.toErr(ReserveFailedError))
if updateErr =? (await self.update(availability)).errorOption:
# rollback the reserve
trace "rolling back reserve"
if rollbackErr =? (await self.repo.release(bytes.NBytes)).errorOption:
@ -394,13 +395,12 @@ proc createAvailability*(
return success(availability)
method createReservation*(
self: Reservations,
availabilityId: AvailabilityId,
slotSize: UInt256,
requestId: RequestId,
slotIndex: UInt256
self: Reservations,
availabilityId: AvailabilityId,
slotSize: UInt256,
requestId: RequestId,
slotIndex: UInt256,
): Future[?!Reservation] {.async, base.} =
withLock(self.availabilityLock):
without availabilityKey =? availabilityId.key, error:
return failure(error)
@ -412,7 +412,8 @@ method createReservation*(
if availability.freeSize < slotSize:
let error = newException(
BytesOutOfBoundsError,
"trying to reserve an amount of bytes that is greater than the total size of the Availability")
"trying to reserve an amount of bytes that is greater than the total size of the Availability",
)
return failure(error)
trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex
@ -446,11 +447,11 @@ method createReservation*(
return success(reservation)
proc returnBytesToAvailability*(
self: Reservations,
availabilityId: AvailabilityId,
reservationId: ReservationId,
bytes: UInt256): Future[?!void] {.async.} =
self: Reservations,
availabilityId: AvailabilityId,
reservationId: ReservationId,
bytes: UInt256,
): Future[?!void] {.async.} =
logScope:
reservationId
availabilityId
@ -467,14 +468,17 @@ proc returnBytesToAvailability*(
let bytesToBeReturned = bytes - reservation.size
if bytesToBeReturned == 0:
trace "No bytes are returned", requestSizeBytes = bytes, returningBytes = bytesToBeReturned
trace "No bytes are returned",
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
return success()
trace "Returning bytes", requestSizeBytes = bytes, returningBytes = bytesToBeReturned
trace "Returning bytes",
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
# First, let's see if we can re-reserve the bytes; if the Repo's quota
# is depleted then we fail fast, as there is nothing to be done at the moment.
if reserveErr =? (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
if reserveErr =?
(await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
return failure(reserveErr.toErr(ReserveFailedError))
without availabilityKey =? availabilityId.key, error:
@ -487,9 +491,9 @@ proc returnBytesToAvailability*(
# Update availability with returned size
if updateErr =? (await self.updateAvailability(availability)).errorOption:
trace "Rolling back returning bytes"
if rollbackErr =? (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
if rollbackErr =?
(await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
rollbackErr.parent = updateErr
return failure(rollbackErr)
@ -498,11 +502,11 @@ proc returnBytesToAvailability*(
return success()
proc release*(
self: Reservations,
reservationId: ReservationId,
availabilityId: AvailabilityId,
bytes: uint): Future[?!void] {.async.} =
self: Reservations,
reservationId: ReservationId,
availabilityId: AvailabilityId,
bytes: uint,
): Future[?!void] {.async.} =
logScope:
topics = "release"
bytes
@ -520,7 +524,8 @@ proc release*(
if reservation.size < bytes.u256:
let error = newException(
BytesOutOfBoundsError,
"trying to release an amount of bytes that is greater than the total size of the Reservation")
"trying to release an amount of bytes that is greater than the total size of the Reservation",
)
return failure(error)
if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption:
@ -530,7 +535,6 @@ proc release*(
# persist partially used Reservation with updated size
if err =? (await self.update(reservation)).errorOption:
# rollback release if an update error encountered
trace "rolling back release"
if rollbackErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
@ -545,11 +549,8 @@ iterator items(self: StorableIter): Future[?seq[byte]] =
yield self.next()
proc storables(
self: Reservations,
T: type SomeStorableObject,
queryKey: Key = ReservationsKey
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!StorableIter] {.async.} =
var iter = StorableIter()
let query = Query.init(queryKey)
when T is Availability:
@ -570,12 +571,8 @@ proc storables(
proc next(): Future[?seq[byte]] {.async.} =
await idleAsync()
iter.finished = results.finished
if not results.finished and
res =? (await results.next()) and
res.data.len > 0 and
key =? res.key and
key.namespaces.len == defaultKey.namespaces.len:
if not results.finished and res =? (await results.next()) and res.data.len > 0 and
key =? res.key and key.namespaces.len == defaultKey.namespaces.len:
return some res.data
return none seq[byte]
@ -588,11 +585,8 @@ proc storables(
return success iter
proc allImpl(
self: Reservations,
T: type SomeStorableObject,
queryKey: Key = ReservationsKey
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!seq[T]] {.async.} =
var ret: seq[T] = @[]
without storables =? (await self.storables(T, queryKey)), error:
@ -604,24 +598,18 @@ proc allImpl(
without obj =? T.fromJson(bytes), error:
error "json deserialization error",
json = string.fromBytes(bytes),
error = error.msg
json = string.fromBytes(bytes), error = error.msg
continue
ret.add obj
return success(ret)
proc all*(
self: Reservations,
T: type SomeStorableObject
): Future[?!seq[T]] {.async.} =
proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} =
return await self.allImpl(T)
proc all*(
self: Reservations,
T: type SomeStorableObject,
availabilityId: AvailabilityId
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async.} =
without key =? (ReservationsKey / $availabilityId):
return failure("no key")
@ -629,29 +617,26 @@ proc all*(
return await self.allImpl(T, key)
proc findAvailability*(
self: Reservations,
size, duration, minPrice, collateral: UInt256
self: Reservations, size, duration, minPrice, collateral: UInt256
): Future[?Availability] {.async.} =
without storables =? (await self.storables(Availability)), e:
error "failed to get all storables", error = e.msg
return none Availability
for item in storables.items:
if bytes =? (await item) and
availability =? Availability.fromJson(bytes):
if size <= availability.freeSize and
duration <= availability.duration and
collateral <= availability.maxCollateral and
minPrice >= availability.minPrice:
if bytes =? (await item) and availability =? Availability.fromJson(bytes):
if size <= availability.freeSize and duration <= availability.duration and
collateral <= availability.maxCollateral and minPrice >= availability.minPrice:
trace "availability matched",
id = availability.id,
size, availFreeSize = availability.freeSize,
duration, availDuration = availability.duration,
minPrice, availMinPrice = availability.minPrice,
collateral, availMaxCollateral = availability.maxCollateral
size,
availFreeSize = availability.freeSize,
duration,
availDuration = availability.duration,
minPrice,
availMinPrice = availability.minPrice,
collateral,
availMaxCollateral = availability.maxCollateral
# TODO: As soon as we're on ARC-ORC, we can use destructors
# to automatically dispose our iterators when they fall out of scope.
@ -663,7 +648,11 @@ proc findAvailability*(
trace "availability did not match",
id = availability.id,
size, availFreeSize = availability.freeSize,
duration, availDuration = availability.duration,
minPrice, availMinPrice = availability.minPrice,
collateral, availMaxCollateral = availability.maxCollateral
size,
availFreeSize = availability.freeSize,
duration,
availDuration = availability.duration,
minPrice,
availMinPrice = availability.minPrice,
collateral,
availMaxCollateral = availability.maxCollateral
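
To tie the reservations API together, a sketch of the host-side flow (hypothetical fragment inside an async proc; `repo` and `requestId` are assumed, and all sizes and prices are illustrative):

  # Hypothetical: advertise capacity, then carve a reservation out of it.
  let reservations = Reservations.new(repo)
  without avail =? (await reservations.createAvailability(
      size = 1024.u256, duration = 3600.u256,
      minPrice = 100.u256, maxCollateral = 200.u256)), err:
    error "Cannot create availability", err = err.msg
    return
  without resv =? (await reservations.createReservation(
      availabilityId = avail.id, slotSize = 256.u256,
      requestId = requestId, slotIndex = 0.u256)), err:
    error "Cannot reserve", err = err.msg
    return
  # avail.freeSize shrank by slotSize; deleteReservation or
  # returnBytesToAvailability later gives the bytes back.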

View File

@ -25,27 +25,26 @@ type
onCleanUp*: OnCleanUp
onFilled*: ?OnFilled
OnCleanUp* = proc (returnBytes = false, reprocessSlot = false): Future[void] {.gcsafe, upraises: [].}
OnFilled* = proc(request: StorageRequest,
slotIndex: UInt256) {.gcsafe, upraises: [].}
OnCleanUp* = proc(returnBytes = false, reprocessSlot = false): Future[void] {.
gcsafe, upraises: []
.}
OnFilled* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].}
SalesAgentError = object of CodexError
AllSlotsFilledError* = object of SalesAgentError
func `==`*(a, b: SalesAgent): bool =
a.data.requestId == b.data.requestId and
a.data.slotIndex == b.data.slotIndex
a.data.requestId == b.data.requestId and a.data.slotIndex == b.data.slotIndex
proc newSalesAgent*(context: SalesContext,
requestId: RequestId,
slotIndex: UInt256,
request: ?StorageRequest): SalesAgent =
proc newSalesAgent*(
context: SalesContext,
requestId: RequestId,
slotIndex: UInt256,
request: ?StorageRequest,
): SalesAgent =
var agent = SalesAgent.new()
agent.context = context
agent.data = SalesData(
requestId: requestId,
slotIndex: slotIndex,
request: request)
agent.data = SalesData(requestId: requestId, slotIndex: slotIndex, request: request)
return agent
proc retrieveRequest*(agent: SalesAgent) {.async.} =
@ -62,6 +61,7 @@ proc retrieveRequestState*(agent: SalesAgent): Future[?RequestState] {.async.} =
func state*(agent: SalesAgent): ?string =
proc description(state: State): string =
$state
agent.query(description)
proc subscribeCancellation(agent: SalesAgent) {.async.} =
@ -77,7 +77,7 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
while true:
let deadline = max(clock.now, expiry) + 1
trace "Waiting for request to be cancelled", now=clock.now, expiry=deadline
trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline
await clock.waitUntil(deadline)
without state =? await agent.retrieveRequestState():
@ -93,27 +93,29 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
of RequestState.Started, RequestState.Finished, RequestState.Failed:
break
debug "The request is not yet canceled, even though it should be. Waiting for some more time.", currentState = state, now=clock.now
debug "The request is not yet canceled, even though it should be. Waiting for some more time.",
currentState = state, now = clock.now
data.cancelled = onCancelled()
method onFulfilled*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} =
if agent.data.requestId == requestId and
not agent.data.cancelled.isNil:
method onFulfilled*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
if agent.data.requestId == requestId and not agent.data.cancelled.isNil:
agent.data.cancelled.cancelSoon()
method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} =
method onFailed*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
without request =? agent.data.request:
return
if agent.data.requestId == requestId:
agent.schedule(failedEvent(request))
method onSlotFilled*(agent: SalesAgent,
requestId: RequestId,
slotIndex: UInt256) {.base, gcsafe, upraises: [].} =
if agent.data.requestId == requestId and
agent.data.slotIndex == slotIndex:
method onSlotFilled*(
agent: SalesAgent, requestId: RequestId, slotIndex: UInt256
) {.base, gcsafe, upraises: [].} =
if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
agent.schedule(slotFilledEvent(requestId, slotIndex))
proc subscribe*(agent: SalesAgent) {.async.} =

View File

@ -24,12 +24,14 @@ type
simulateProofFailures*: int
BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
OnStore* = proc(request: StorageRequest,
slot: UInt256,
blocksCb: BlocksCb): Future[?!void] {.gcsafe, upraises: [].}
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.gcsafe, upraises: [].}
OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.gcsafe, upraises: [].}
OnClear* = proc(request: StorageRequest,
slotIndex: UInt256) {.gcsafe, upraises: [].}
OnSale* = proc(request: StorageRequest,
slotIndex: UInt256) {.gcsafe, upraises: [].}
OnStore* = proc(
request: StorageRequest, slot: UInt256, blocksCb: BlocksCb
): Future[?!void] {.gcsafe, upraises: [].}
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
gcsafe, upraises: []
.}
OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.
gcsafe, upraises: []
.}
OnClear* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].}
OnSale* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].}

View File

@ -3,11 +3,10 @@ import ../contracts/requests
import ../market
import ./reservations
type
SalesData* = ref object
requestId*: RequestId
ask*: StorageAsk
request*: ?StorageRequest
slotIndex*: UInt256
cancelled*: Future[void]
reservation*: ?Reservation
type SalesData* = ref object
requestId*: RequestId
ask*: StorageAsk
request*: ?StorageRequest
slotIndex*: UInt256
cancelled*: Future[void]
reservation*: ?Reservation

View File

@ -17,7 +17,7 @@ logScope:
type
OnProcessSlot* =
proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises:[].}
proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].}
# Non-ref obj copies value when assigned, preventing accidental modification
# of values which could cause an incorrect order (eg
@ -39,7 +39,7 @@ type
# don't need to -1 to prevent overflow when adding 1 (to always allow push)
# because AsyncHeapQueue size is of type `int`, which is larger than `uint16`
SlotQueueSize = range[1'u16..uint16.high]
SlotQueueSize = range[1'u16 .. uint16.high]
SlotQueue* = ref object
maxWorkers: int
@ -69,10 +69,12 @@ const DefaultMaxWorkers = 3
const DefaultMaxSize = 128'u16
proc profitability(item: SlotQueueItem): UInt256 =
StorageAsk(collateral: item.collateral,
duration: item.duration,
reward: item.reward,
slotSize: item.slotSize).pricePerSlot
StorageAsk(
collateral: item.collateral,
duration: item.duration,
reward: item.reward,
slotSize: item.slotSize,
).pricePerSlot
proc `<`*(a, b: SlotQueueItem): bool =
# for A to have a higher priority than B (in a min queue), A must be less than
@ -102,13 +104,13 @@ proc `<`*(a, b: SlotQueueItem): bool =
return scoreA > scoreB
proc `==`*(a, b: SlotQueueItem): bool =
a.requestId == b.requestId and
a.slotIndex == b.slotIndex
proc new*(_: type SlotQueue,
maxWorkers = DefaultMaxWorkers,
maxSize: SlotQueueSize = DefaultMaxSize): SlotQueue =
a.requestId == b.requestId and a.slotIndex == b.slotIndex
proc new*(
_: type SlotQueue,
maxWorkers = DefaultMaxWorkers,
maxSize: SlotQueueSize = DefaultMaxSize,
): SlotQueue =
if maxWorkers <= 0:
raise newException(ValueError, "maxWorkers must be positive")
if maxWorkers.uint16 > maxSize:
@ -121,23 +123,22 @@ proc new*(_: type SlotQueue,
queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1),
running: false,
trackedFutures: TrackedFutures.new(),
unpaused: newAsyncEvent()
unpaused: newAsyncEvent(),
)
# avoid instantiating `workers` in constructor to avoid side effects in
# `newAsyncQueue` procedure
proc init(_: type SlotQueueWorker): SlotQueueWorker =
SlotQueueWorker(
doneProcessing: newFuture[void]("slotqueue.worker.processing")
)
proc init*(_: type SlotQueueItem,
requestId: RequestId,
slotIndex: uint16,
ask: StorageAsk,
expiry: UInt256,
seen = false): SlotQueueItem =
SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing"))
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
slotIndex: uint16,
ask: StorageAsk,
expiry: UInt256,
seen = false,
): SlotQueueItem =
SlotQueueItem(
requestId: requestId,
slotIndex: slotIndex,
@ -146,28 +147,22 @@ proc init*(_: type SlotQueueItem,
reward: ask.reward,
collateral: ask.collateral,
expiry: expiry,
seen: seen
seen: seen,
)
proc init*(_: type SlotQueueItem,
request: StorageRequest,
slotIndex: uint16): SlotQueueItem =
SlotQueueItem.init(request.id,
slotIndex,
request.ask,
request.expiry)
proc init*(_: type SlotQueueItem,
requestId: RequestId,
ask: StorageAsk,
expiry: UInt256): seq[SlotQueueItem] =
proc init*(
_: type SlotQueueItem, request: StorageRequest, slotIndex: uint16
): SlotQueueItem =
SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry)
proc init*(
_: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: UInt256
): seq[SlotQueueItem] =
if not ask.slots.inRange:
raise newException(SlotsOutOfRangeError, "Too many slots")
var i = 0'u16
proc initSlotQueueItem: SlotQueueItem =
proc initSlotQueueItem(): SlotQueueItem =
let item = SlotQueueItem.init(requestId, i, ask, expiry)
inc i
return item
@ -176,37 +171,54 @@ proc init*(_: type SlotQueueItem,
Rng.instance.shuffle(items)
return items
proc init*(_: type SlotQueueItem,
request: StorageRequest): seq[SlotQueueItem] =
proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] =
return SlotQueueItem.init(request.id, request.ask, request.expiry)
proc inRange*(val: SomeUnsignedInt): bool =
val.uint16 in SlotQueueSize.low..SlotQueueSize.high
val.uint16 in SlotQueueSize.low .. SlotQueueSize.high
proc requestId*(self: SlotQueueItem): RequestId = self.requestId
proc slotIndex*(self: SlotQueueItem): uint16 = self.slotIndex
proc slotSize*(self: SlotQueueItem): UInt256 = self.slotSize
proc duration*(self: SlotQueueItem): UInt256 = self.duration
proc reward*(self: SlotQueueItem): UInt256 = self.reward
proc collateral*(self: SlotQueueItem): UInt256 = self.collateral
proc seen*(self: SlotQueueItem): bool = self.seen
proc requestId*(self: SlotQueueItem): RequestId =
self.requestId
proc running*(self: SlotQueue): bool = self.running
proc slotIndex*(self: SlotQueueItem): uint16 =
self.slotIndex
proc len*(self: SlotQueue): int = self.queue.len
proc slotSize*(self: SlotQueueItem): UInt256 =
self.slotSize
proc size*(self: SlotQueue): int = self.queue.size - 1
proc duration*(self: SlotQueueItem): UInt256 =
self.duration
proc paused*(self: SlotQueue): bool = not self.unpaused.isSet
proc reward*(self: SlotQueueItem): UInt256 =
self.reward
proc `$`*(self: SlotQueue): string = $self.queue
proc collateral*(self: SlotQueueItem): UInt256 =
self.collateral
proc seen*(self: SlotQueueItem): bool =
self.seen
proc running*(self: SlotQueue): bool =
self.running
proc len*(self: SlotQueue): int =
self.queue.len
proc size*(self: SlotQueue): int =
self.queue.size - 1
proc paused*(self: SlotQueue): bool =
not self.unpaused.isSet
proc `$`*(self: SlotQueue): string =
$self.queue
proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) =
self.onProcessSlot = some onProcessSlot
proc activeWorkers*(self: SlotQueue): int =
if not self.running: return 0
if not self.running:
return 0
# active = capacity - available
self.maxWorkers - self.workers.len
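Editor's aside, worked example (illustrative numbers): with `maxWorkers = 3` and one idle worker waiting in `self.workers`, `activeWorkers` reports `3 - 1 = 2`.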
@ -222,10 +234,9 @@ proc unpause*(self: SlotQueue) =
# set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
self.unpaused.fire()
proc populateItem*(self: SlotQueue,
requestId: RequestId,
slotIndex: uint16): ?SlotQueueItem =
proc populateItem*(
self: SlotQueue, requestId: RequestId, slotIndex: uint16
): ?SlotQueueItem =
trace "populate item, items in queue", len = self.queue.len
for item in self.queue.items:
trace "populate item search", itemRequestId = item.requestId, requestId
@ -237,12 +248,11 @@ proc populateItem*(self: SlotQueue,
duration: item.duration,
reward: item.reward,
collateral: item.collateral,
expiry: item.expiry
expiry: item.expiry,
)
return none SlotQueueItem
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void =
logScope:
requestId = item.requestId
slotIndex = item.slotIndex
@ -330,9 +340,9 @@ proc addWorker(self: SlotQueue): ?!void =
return success()
proc dispatch(self: SlotQueue,
worker: SlotQueueWorker,
item: SlotQueueItem) {.async: (raises: []).} =
proc dispatch(
self: SlotQueue, worker: SlotQueueWorker, item: SlotQueueItem
) {.async: (raises: []).} =
logScope:
requestId = item.requestId
slotIndex = item.slotIndex
@ -349,10 +359,8 @@ proc dispatch(self: SlotQueue,
if err =? self.addWorker().errorOption:
raise err # catch below
except QueueNotRunningError as e:
info "could not re-add worker to worker queue, queue not running",
error = e.msg
info "could not re-add worker to worker queue, queue not running", error = e.msg
except CancelledError:
# do not bubble exception up as it is called with `asyncSpawn` which would
# convert the exception into a `FutureDefect`
@ -380,7 +388,6 @@ proc clearSeenFlags*(self: SlotQueue) =
trace "all 'seen' flags cleared"
proc run(self: SlotQueue) {.async: (raises: []).} =
while self.running:
try:
if self.paused:
@ -389,7 +396,8 @@ proc run(self: SlotQueue) {.async: (raises: []).} =
# block until unpaused is true/fired, ie wait for queue to be unpaused
await self.unpaused.wait()
let worker = await self.workers.popFirst() # if workers saturated, wait here for new workers
let worker =
await self.workers.popFirst() # if workers saturated, wait here for new workers
let item = await self.queue.pop() # if queue empty, wait here for new items
logScope:
@ -442,7 +450,7 @@ proc start*(self: SlotQueue) =
# Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its
# task, a new worker will be pushed to the queue
for i in 0..<self.maxWorkers:
for i in 0 ..< self.maxWorkers:
if err =? self.addWorker().errorOption:
error "start: error adding new worker", error = err.msg

View File

@ -14,24 +14,29 @@ type
SaleState* = ref object of State
SaleError* = ref object of CodexError
method onCancelled*(state: SaleState, request: StorageRequest): ?State {.base, upraises:[].} =
method onCancelled*(
state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} =
discard
method onFailed*(state: SaleState, request: StorageRequest): ?State {.base, upraises:[].} =
method onFailed*(
state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} =
discard
method onSlotFilled*(state: SaleState, requestId: RequestId,
slotIndex: UInt256): ?State {.base, upraises:[].} =
method onSlotFilled*(
state: SaleState, requestId: RequestId, slotIndex: UInt256
): ?State {.base, upraises: [].} =
discard
proc cancelledEvent*(request: StorageRequest): Event =
return proc (state: State): ?State =
return proc(state: State): ?State =
SaleState(state).onCancelled(request)
proc failedEvent*(request: StorageRequest): Event =
return proc (state: State): ?State =
return proc(state: State): ?State =
SaleState(state).onFailed(request)
proc slotFilledEvent*(requestId: RequestId, slotIndex: UInt256): Event =
return proc (state: State): ?State =
return proc(state: State): ?State =
SaleState(state).onSlotFilled(requestId, slotIndex)

View File

@ -6,10 +6,10 @@ import ./errorhandling
logScope:
topics = "marketplace sales cancelled"
type
SaleCancelled* = ref object of ErrorHandlingState
type SaleCancelled* = ref object of ErrorHandlingState
method `$`*(state: SaleCancelled): string = "SaleCancelled"
method `$`*(state: SaleCancelled): string =
"SaleCancelled"
method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} =
let agent = SalesAgent(machine)
@ -20,14 +20,15 @@ method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} =
raiseAssert "no sale request"
let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Collecting collateral and partial payout", requestId = data.requestId, slotIndex = data.slotIndex
debug "Collecting collateral and partial payout",
requestId = data.requestId, slotIndex = data.slotIndex
await market.freeSlot(slot.id)
if onClear =? agent.context.onClear and
request =? data.request:
if onClear =? agent.context.onClear and request =? data.request:
onClear(request, data.slotIndex)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(returnBytes = true, reprocessSlot = false)
warn "Sale cancelled due to timeout", requestId = data.requestId, slotIndex = data.slotIndex
warn "Sale cancelled due to timeout",
requestId = data.requestId, slotIndex = data.slotIndex

View File

@ -13,13 +13,13 @@ import ./filled
import ./initialproving
import ./errored
type
SaleDownloading* = ref object of ErrorHandlingState
type SaleDownloading* = ref object of ErrorHandlingState
logScope:
topics = "marketplace sales downloading"
method `$`*(state: SaleDownloading): string = "SaleDownloading"
method `$`*(state: SaleDownloading): string =
"SaleDownloading"
method onCancelled*(state: SaleDownloading, request: StorageRequest): ?State =
return some State(SaleCancelled())
@ -27,8 +27,9 @@ method onCancelled*(state: SaleDownloading, request: StorageRequest): ?State =
method onFailed*(state: SaleDownloading, request: StorageRequest): ?State =
return some State(SaleFailed())
method onSlotFilled*(state: SaleDownloading, requestId: RequestId,
slotIndex: UInt256): ?State =
method onSlotFilled*(
state: SaleDownloading, requestId: RequestId, slotIndex: UInt256
): ?State =
return some State(SaleFilled())
method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.} =
@ -61,14 +62,10 @@ method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.}
bytes += blk.data.len.uint
trace "Releasing batch of bytes written to disk", bytes
return await reservations.release(reservation.id,
reservation.availabilityId,
bytes)
return await reservations.release(reservation.id, reservation.availabilityId, bytes)
trace "Starting download"
if err =? (await onStore(request,
data.slotIndex,
onBlocks)).errorOption:
if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption:
return some State(SaleErrored(error: err, reprocessSlot: false))
trace "Download complete"

View File

@ -14,9 +14,10 @@ type SaleErrored* = ref object of SaleState
error*: ref CatchableError
reprocessSlot*: bool
method `$`*(state: SaleErrored): string = "SaleErrored"
method `$`*(state: SaleErrored): string =
"SaleErrored"
method onError*(state: SaleState, err: ref CatchableError): ?State {.upraises:[].} =
method onError*(state: SaleState, err: ref CatchableError): ?State {.upraises: [].} =
error "error during SaleErrored run", error = err.msg
method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} =
@ -24,12 +25,13 @@ method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} =
let data = agent.data
let context = agent.context
error "Sale error", error=state.error.msgDetail, requestId = data.requestId, slotIndex = data.slotIndex
error "Sale error",
error = state.error.msgDetail,
requestId = data.requestId,
slotIndex = data.slotIndex
if onClear =? context.onClear and
request =? data.request:
if onClear =? context.onClear and request =? data.request:
onClear(request, data.slotIndex)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot)

View File

@ -2,8 +2,7 @@ import pkg/questionable
import ../statemachine
import ./errored
type
ErrorHandlingState* = ref object of SaleState
type ErrorHandlingState* = ref object of SaleState
method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
some State(SaleErrored(error: error))

View File

@ -11,7 +11,8 @@ type
SaleFailed* = ref object of ErrorHandlingState
SaleFailedError* = object of SaleError
method `$`*(state: SaleFailed): string = "SaleFailed"
method `$`*(state: SaleFailed): string =
"SaleFailed"
method run*(state: SaleFailed, machine: Machine): Future[?State] {.async.} =
let data = SalesAgent(machine).data
@ -21,7 +22,8 @@ method run*(state: SaleFailed, machine: Machine): Future[?State] {.async.} =
raiseAssert "no sale request"
let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Removing slot from mySlots", requestId = data.requestId, slotIndex = data.slotIndex
debug "Removing slot from mySlots",
requestId = data.requestId, slotIndex = data.slotIndex
await market.freeSlot(slot.id)
let error = newException(SaleFailedError, "Sale failed")

View File

@ -27,7 +27,8 @@ method onCancelled*(state: SaleFilled, request: StorageRequest): ?State =
method onFailed*(state: SaleFilled, request: StorageRequest): ?State =
return some State(SaleFailed())
method `$`*(state: SaleFilled): string = "SaleFilled"
method `$`*(state: SaleFilled): string =
"SaleFilled"
method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} =
let agent = SalesAgent(machine)
@ -39,7 +40,8 @@ method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} =
let me = await market.getSigner()
if host == me.some:
info "Slot succesfully filled", requestId = data.requestId, slotIndex = data.slotIndex
info "Slot succesfully filled",
requestId = data.requestId, slotIndex = data.slotIndex
without request =? data.request:
raiseAssert "no sale request"
@ -57,10 +59,11 @@ method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} =
when codex_enable_proof_failures:
if context.simulateProofFailures > 0:
info "Proving with failure rate", rate = context.simulateProofFailures
return some State(SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures))
return some State(
SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures)
)
return some State(SaleProving())
else:
let error = newException(HostMismatchError, "Slot filled by other host")
return some State(SaleErrored(error: error))

View File

@ -13,11 +13,11 @@ import ./errored
logScope:
topics = "marketplace sales filling"
type
SaleFilling* = ref object of ErrorHandlingState
proof*: Groth16Proof
type SaleFilling* = ref object of ErrorHandlingState
proof*: Groth16Proof
method `$`*(state: SaleFilling): string = "SaleFilling"
method `$`*(state: SaleFilling): string =
"SaleFilling"
method onCancelled*(state: SaleFilling, request: StorageRequest): ?State =
return some State(SaleCancelled())
@ -28,7 +28,7 @@ method onFailed*(state: SaleFilling, request: StorageRequest): ?State =
method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market
without (fullCollateral =? data.request.?ask.?collateral):
without (fullCollateral =? data.request .? ask .? collateral):
raiseAssert "Request not set"
logScope:
@ -41,7 +41,8 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} =
if slotState == SlotState.Repair:
# When repairing the node gets "discount" on the collateral that it needs to
let repairRewardPercentage = (await market.repairRewardPercentage).u256
collateral = fullCollateral - ((fullCollateral * repairRewardPercentage)).div(100.u256)
collateral =
fullCollateral - ((fullCollateral * repairRewardPercentage)).div(100.u256)
else:
collateral = fullCollateral
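Editor's aside, worked example (illustrative numbers): with `fullCollateral = 1000` and `repairRewardPercentage = 10`, the repairing host posts `1000 - (1000 * 10) div 100 = 900`.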
@ -51,9 +52,9 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} =
except MarketError as e:
if e.msg.contains "Slot is not free":
debug "Slot is already filled, ignoring slot"
return some State( SaleIgnored(reprocessSlot: false, returnBytes: true) )
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State( SaleErrored(error: e) )
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the ErrorHandlingState
return some State(SaleFilled())

View File

@ -10,10 +10,10 @@ import ./failed
logScope:
topics = "marketplace sales finished"
type
SaleFinished* = ref object of ErrorHandlingState
type SaleFinished* = ref object of ErrorHandlingState
method `$`*(state: SaleFinished): string = "SaleFinished"
method `$`*(state: SaleFinished): string =
"SaleFinished"
method onCancelled*(state: SaleFinished, request: StorageRequest): ?State =
return some State(SaleCancelled())
@ -28,7 +28,8 @@ method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} =
without request =? data.request:
raiseAssert "no sale request"
info "Slot finished and paid out", requestId = data.requestId, slotIndex = data.slotIndex
info "Slot finished and paid out",
requestId = data.requestId, slotIndex = data.slotIndex
if onCleanUp =? agent.onCleanUp:
await onCleanUp()

View File

@ -11,16 +11,17 @@ logScope:
# Ignored slots could mean there was no availability or that the slot could
# not be reserved.
type
SaleIgnored* = ref object of ErrorHandlingState
reprocessSlot*: bool # re-add slot to queue with `seen` flag
returnBytes*: bool # return unreleased bytes from Reservation to Availability
type SaleIgnored* = ref object of ErrorHandlingState
reprocessSlot*: bool # re-add slot to queue with `seen` flag
returnBytes*: bool # return unreleased bytes from Reservation to Availability
method `$`*(state: SaleIgnored): string = "SaleIgnored"
method `$`*(state: SaleIgnored): string =
"SaleIgnored"
method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} =
let agent = SalesAgent(machine)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(reprocessSlot = state.reprocessSlot,
returnBytes = state.returnBytes)
await onCleanUp(
reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes
)

View File

@ -12,10 +12,10 @@ import ./failed
logScope:
topics = "marketplace sales initial-proving"
type
SaleInitialProving* = ref object of ErrorHandlingState
type SaleInitialProving* = ref object of ErrorHandlingState
method `$`*(state: SaleInitialProving): string = "SaleInitialProving"
method `$`*(state: SaleInitialProving): string =
"SaleInitialProving"
method onCancelled*(state: SaleInitialProving, request: StorageRequest): ?State =
return some State(SaleCancelled())

View File

@ -10,10 +10,10 @@ import ./finished
logScope:
topics = "marketplace sales payout"
type
SalePayout* = ref object of ErrorHandlingState
type SalePayout* = ref object of ErrorHandlingState
method `$`*(state: SalePayout): string = "SalePayout"
method `$`*(state: SalePayout): string =
"SalePayout"
method onCancelled*(state: SalePayout, request: StorageRequest): ?State =
return some State(SaleCancelled())
@ -29,7 +29,8 @@ method run(state: SalePayout, machine: Machine): Future[?State] {.async.} =
raiseAssert "no sale request"
let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Collecting finished slot's reward", requestId = data.requestId, slotIndex = data.slotIndex
debug "Collecting finished slot's reward",
requestId = data.requestId, slotIndex = data.slotIndex
await market.freeSlot(slot.id)
return some State(SaleFinished())

View File

@ -14,15 +14,17 @@ import ./ignored
import ./slotreserving
import ./errored
declareCounter(codex_reservations_availability_mismatch, "codex reservations availability_mismatch")
declareCounter(
codex_reservations_availability_mismatch, "codex reservations availability_mismatch"
)
type
SalePreparing* = ref object of ErrorHandlingState
type SalePreparing* = ref object of ErrorHandlingState
logScope:
topics = "marketplace sales preparing"
method `$`*(state: SalePreparing): string = "SalePreparing"
method `$`*(state: SalePreparing): string =
"SalePreparing"
method onCancelled*(state: SalePreparing, request: StorageRequest): ?State =
return some State(SaleCancelled())
@ -30,8 +32,9 @@ method onCancelled*(state: SalePreparing, request: StorageRequest): ?State =
method onFailed*(state: SalePreparing, request: StorageRequest): ?State =
return some State(SaleFailed())
method onSlotFilled*(state: SalePreparing, requestId: RequestId,
slotIndex: UInt256): ?State =
method onSlotFilled*(
state: SalePreparing, requestId: RequestId, slotIndex: UInt256
): ?State =
return some State(SaleFilled())
method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} =
@ -64,29 +67,27 @@ method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} =
# availability was checked for this slot when it entered the queue; however,
# check to ensure that there is still availability, as it may have
# changed since being added (other slots may have been processed in that time)
without availability =? await reservations.findAvailability(
request.ask.slotSize,
request.ask.duration,
request.ask.pricePerSlot,
request.ask.collateral):
without availability =?
await reservations.findAvailability(
request.ask.slotSize, request.ask.duration, request.ask.pricePerSlot,
request.ask.collateral,
):
debug "No availability found for request, ignoring"
return some State(SaleIgnored(reprocessSlot: true))
info "Availability found for request, creating reservation"
without reservation =? await reservations.createReservation(
availability.id,
request.ask.slotSize,
request.id,
data.slotIndex
), error:
without reservation =?
await reservations.createReservation(
availability.id, request.ask.slotSize, request.id, data.slotIndex
), error:
trace "Creation of reservation failed"
# Race condition:
# reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it.
# Should createReservation fail because there's no space, we proceed to SaleIgnored.
if error of BytesOutOfBoundsError:
# Let's monitor how often this happens and, if it is often, we can make it more intelligent to handle it
# Let's monitor how often this happens and, if it is often, we can make it more intelligent to handle it
codex_reservations_availability_mismatch.inc()
return some State(SaleIgnored(reprocessSlot: true))
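Editor's aside: the counter incremented above follows the standard nim-metrics `declareCounter`/`inc` pattern also visible earlier in this file's diff. A minimal self-contained sketch with a hypothetical counter name:

```nim
import pkg/metrics

# Hypothetical counter, mirroring the declareCounter/inc pattern above.
declareCounter(example_mismatch_counter, "how often availability mismatched")

proc recordMismatch() =
  example_mismatch_counter.inc()
```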

View File

@ -22,12 +22,12 @@ type
loop: Future[void]
method prove*(
state: SaleProving,
slot: Slot,
challenge: ProofChallenge,
onProve: OnProve,
market: Market,
currentPeriod: Period
state: SaleProving,
slot: Slot,
challenge: ProofChallenge,
onProve: OnProve,
market: Market,
currentPeriod: Period,
) {.base, async.} =
try:
without proof =? (await onProve(slot, challenge)), err:
@ -43,14 +43,13 @@ method prove*(
error "Submitting proof failed", msg = e.msgDetail
proc proveLoop(
state: SaleProving,
market: Market,
clock: Clock,
request: StorageRequest,
slotIndex: UInt256,
onProve: OnProve
state: SaleProving,
market: Market,
clock: Clock,
request: StorageRequest,
slotIndex: UInt256,
onProve: OnProve,
) {.async.} =
let slot = Slot(request: request, slotIndex: slotIndex)
let slotId = slot.id
@ -76,7 +75,8 @@ proc proveLoop(
case slotState
of SlotState.Filled:
debug "Proving for new period", period = currentPeriod
if (await market.isProofRequired(slotId)) or (await market.willProofBeRequired(slotId)):
if (await market.isProofRequired(slotId)) or
(await market.willProofBeRequired(slotId)):
let challenge = await market.getChallenge(slotId)
debug "Proof is required", period = currentPeriod, challenge = challenge
await state.prove(slot, challenge, onProve, market, currentPeriod)
@ -100,7 +100,8 @@ proc proveLoop(
debug "waiting until next period"
await waitUntilPeriod(currentPeriod + 1)
method `$`*(state: SaleProving): string = "SaleProving"
method `$`*(state: SaleProving): string =
"SaleProving"
method onCancelled*(state: SaleProving, request: StorageRequest): ?State =
# state.loop cancellation happens automatically when run is cancelled due to

View File

@ -12,21 +12,26 @@ when codex_enable_proof_failures:
import ./proving
logScope:
topics = "marketplace sales simulated-proving"
topics = "marketplace sales simulated-proving"
type
SaleProvingSimulated* = ref object of SaleProving
failEveryNProofs*: int
proofCount: int
type SaleProvingSimulated* = ref object of SaleProving
failEveryNProofs*: int
proofCount: int
proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) =
error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail
method prove*(state: SaleProvingSimulated, slot: Slot, challenge: ProofChallenge, onProve: OnProve, market: Market, currentPeriod: Period) {.async.} =
method prove*(
state: SaleProvingSimulated,
slot: Slot,
challenge: ProofChallenge,
onProve: OnProve,
market: Market,
currentPeriod: Period,
) {.async.} =
trace "Processing proving in simulated mode"
state.proofCount += 1
if state.failEveryNProofs > 0 and
state.proofCount mod state.failEveryNProofs == 0:
if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0:
state.proofCount = 0
try:
@ -40,4 +45,6 @@ when codex_enable_proof_failures:
except CatchableError as e:
onSubmitProofError(e, currentPeriod, slot.id)
else:
await procCall SaleProving(state).prove(slot, challenge, onProve, market, currentPeriod)
await procCall SaleProving(state).prove(
slot, challenge, onProve, market, currentPeriod
)

View File

@ -12,13 +12,13 @@ import ./ignored
import ./downloading
import ./errored
type
SaleSlotReserving* = ref object of ErrorHandlingState
type SaleSlotReserving* = ref object of ErrorHandlingState
logScope:
topics = "marketplace sales reserving"
method `$`*(state: SaleSlotReserving): string = "SaleSlotReserving"
method `$`*(state: SaleSlotReserving): string =
"SaleSlotReserving"
method onCancelled*(state: SaleSlotReserving, request: StorageRequest): ?State =
return some State(SaleCancelled())
@ -44,17 +44,15 @@ method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async.
except MarketError as e:
if e.msg.contains "Reservation not allowed":
debug "Slot cannot be reserved, ignoring", error = e.msg
return some State( SaleIgnored(reprocessSlot: false, returnBytes: true) )
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State( SaleErrored(error: e) )
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the ErrorHandlingState
trace "Slot successfully reserved"
return some State( SaleDownloading() )
return some State(SaleDownloading())
else:
# do not re-add this slot to the queue, and return bytes from Reservation to
# the Availability
debug "Slot cannot be reserved, ignoring"
return some State( SaleIgnored(reprocessSlot: false, returnBytes: true) )
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))

View File

@ -17,7 +17,8 @@ type
SaleUnknownError* = object of CatchableError
UnexpectedSlotError* = object of SaleUnknownError
method `$`*(state: SaleUnknown): string = "SaleUnknown"
method `$`*(state: SaleUnknown): string =
"SaleUnknown"
method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State =
return some State(SaleCancelled())
@ -38,8 +39,8 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} =
case slotState
of SlotState.Free:
let error = newException(UnexpectedSlotError,
"Slot state on chain should not be 'free'")
let error =
newException(UnexpectedSlotError, "Slot state on chain should not be 'free'")
return some State(SaleErrored(error: error))
of SlotState.Filled:
return some State(SaleFilled())
@ -52,6 +53,7 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} =
of SlotState.Cancelled:
return some State(SaleCancelled())
of SlotState.Repair:
let error = newException(SlotFreedError,
"Slot was forcibly freed and host was removed from its hosting")
let error = newException(
SlotFreedError, "Slot was forcibly freed and host was removed from its hosting"
)
return some State(SaleErrored(error: error))

View File

@ -5,5 +5,4 @@ import ../merkletree
export builder, converters
type
Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash]
type Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash]

View File

@ -34,17 +34,17 @@ export converters, asynciter
logScope:
topics = "codex slotsbuilder"
type
SlotsBuilder*[T, H] = ref object of RootObj
store: BlockStore
manifest: Manifest # current manifest
strategy: IndexingStrategy # indexing strategy
cellSize: NBytes # cell size
numSlotBlocks: Natural # number of blocks per slot (should yield a power of two number of cells)
slotRoots: seq[H] # roots of the slots
emptyBlock: seq[byte] # empty block
verifiableTree: ?T # verification tree (dataset tree)
emptyDigestTree: T # empty digest tree for empty blocks
type SlotsBuilder*[T, H] = ref object of RootObj
store: BlockStore
manifest: Manifest # current manifest
strategy: IndexingStrategy # indexing strategy
cellSize: NBytes # cell size
numSlotBlocks: Natural
# number of blocks per slot (should yield a power of two number of cells)
slotRoots: seq[H] # roots of the slots
emptyBlock: seq[byte] # empty block
verifiableTree: ?T # verification tree (dataset tree)
emptyDigestTree: T # empty digest tree for empty blocks
func verifiable*[T, H](self: SlotsBuilder[T, H]): bool {.inline.} =
## Returns true if the slots are verifiable.
@ -133,9 +133,8 @@ func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest =
self.manifest
proc buildBlockTree*[T, H](
self: SlotsBuilder[T, H],
blkIdx: Natural,
slotPos: Natural): Future[?!(seq[byte], T)] {.async.} =
self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural
): Future[?!(seq[byte], T)] {.async.} =
## Build the block digest tree and return a tuple with the
## block data and the tree.
##
@ -160,16 +159,15 @@ proc buildBlockTree*[T, H](
if blk.isEmpty:
success (self.emptyBlock, self.emptyDigestTree)
else:
without tree =?
T.digestTree(blk.data, self.cellSize.int), err:
without tree =? T.digestTree(blk.data, self.cellSize.int), err:
error "Failed to create digest for block", err = err.msg
return failure(err)
success (blk.data, tree)
proc getCellHashes*[T, H](
self: SlotsBuilder[T, H],
slotIndex: Natural): Future[?!seq[H]] {.async.} =
self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!seq[H]] {.async.} =
## Collect all the cells from a block and return
## their hashes.
##
@ -192,8 +190,8 @@ proc getCellHashes*[T, H](
pos = i
trace "Getting block CID for tree at index"
without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and
digest =? tree.root, err:
without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and digest =? tree.root,
err:
error "Failed to get block CID for tree at index", err = err.msg
return failure(err)
@ -203,8 +201,8 @@ proc getCellHashes*[T, H](
success hashes
proc buildSlotTree*[T, H](
self: SlotsBuilder[T, H],
slotIndex: Natural): Future[?!T] {.async.} =
self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!T] {.async.} =
## Build the slot tree from the block digest hashes
## and return the tree.
@ -215,20 +213,20 @@ proc buildSlotTree*[T, H](
T.init(cellHashes)
proc buildSlot*[T, H](
self: SlotsBuilder[T, H],
slotIndex: Natural): Future[?!H] {.async.} =
self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!H] {.async.} =
## Build a slot tree and store the proofs in
## the block store.
##
logScope:
cid = self.manifest.treeCid
slotIndex = slotIndex
cid = self.manifest.treeCid
slotIndex = slotIndex
trace "Building slot tree"
without tree =? (await self.buildSlotTree(slotIndex)) and
treeCid =? tree.root.?toSlotCid, err:
treeCid =? tree.root .? toSlotCid, err:
error "Failed to build slot tree", err = err.msg
return failure(err)
@ -238,13 +236,12 @@ proc buildSlot*[T, H](
error "Failed to get CID for slot cell", err = err.msg
return failure(err)
without proof =? tree.getProof(i) and
encodableProof =? proof.toEncodableProof, err:
without proof =? tree.getProof(i) and encodableProof =? proof.toEncodableProof, err:
error "Failed to get proof for slot tree", err = err.msg
return failure(err)
if err =? (await self.store.putCidAndProof(
treeCid, i, cellCid, encodableProof)).errorOption:
if err =?
(await self.store.putCidAndProof(treeCid, i, cellCid, encodableProof)).errorOption:
error "Failed to store slot tree", err = err.msg
return failure(err)
@ -258,14 +255,14 @@ proc buildSlots*[T, H](self: SlotsBuilder[T, H]): Future[?!void] {.async.} =
##
logScope:
cid = self.manifest.treeCid
blockCount = self.manifest.blocksCount
cid = self.manifest.treeCid
blockCount = self.manifest.blocksCount
trace "Building slots"
if self.slotRoots.len == 0:
self.slotRoots = collect(newSeq):
for i in 0..<self.manifest.numSlots:
for i in 0 ..< self.manifest.numSlots:
without slotRoot =? (await self.buildSlot(i)), err:
error "Failed to build slot", err = err.msg, index = i
return failure(err)
@ -277,7 +274,7 @@ proc buildSlots*[T, H](self: SlotsBuilder[T, H]): Future[?!void] {.async.} =
if verifyTree =? self.verifyTree and verifyRoot =? verifyTree.root:
if not bool(verifyRoot == root): # TODO: `!=` doesn't work for SecretBool
return failure "Existing slots root doesn't match reconstructed root."
return failure "Existing slots root doesn't match reconstructed root."
self.verifiableTree = some tree
@ -292,25 +289,22 @@ proc buildManifest*[T, H](self: SlotsBuilder[T, H]): Future[?!Manifest] {.async.
error "Failed to map slot roots to CIDs", err = err.msg
return failure(err)
without rootProvingCidRes =? self.verifyRoot.?toVerifyCid() and
rootProvingCid =? rootProvingCidRes, err: # TODO: why doesn't `.?` unpack the result?
without rootProvingCidRes =? self.verifyRoot .? toVerifyCid() and
rootProvingCid =? rootProvingCidRes, err:
error "Failed to map slot roots to CIDs", err = err.msg
return failure(err)
Manifest.new(
self.manifest,
rootProvingCid,
rootCids,
self.cellSize,
self.strategy.strategyType)
self.manifest, rootProvingCid, rootCids, self.cellSize, self.strategy.strategyType
)
proc new*[T, H](
_: type SlotsBuilder[T, H],
store: BlockStore,
manifest: Manifest,
strategy = SteppedStrategy,
cellSize = DefaultCellSize): ?!SlotsBuilder[T, H] =
_: type SlotsBuilder[T, H],
store: BlockStore,
manifest: Manifest,
strategy = SteppedStrategy,
cellSize = DefaultCellSize,
): ?!SlotsBuilder[T, H] =
if not manifest.protected:
trace "Manifest is not protected."
return failure("Manifest is not protected.")
@ -330,60 +324,59 @@ proc new*[T, H](
return failure("Block size must be divisable by cell size.")
let
numSlotBlocks = manifest.numSlotBlocks
numBlockCells = (manifest.blockSize div cellSize).int # number of cells per block
numSlotCells = manifest.numSlotBlocks * numBlockCells # number of uncorrected slot cells
pow2SlotCells = nextPowerOfTwo(numSlotCells) # pow2 cells per slot
numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks # pow2 blocks per slot
numSlotBlocks = manifest.numSlotBlocks
numBlockCells = (manifest.blockSize div cellSize).int # number of cells per block
numSlotCells = manifest.numSlotBlocks * numBlockCells
# number of uncorrected slot cells
pow2SlotCells = nextPowerOfTwo(numSlotCells) # pow2 cells per slot
numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks
# pow2 blocks per slot
numSlotBlocksTotal = # pad blocks per slot
numSlotBlocksTotal =
# pad blocks per slot
if numPadSlotBlocks > 0:
numPadSlotBlocks + numSlotBlocks
else:
numSlotBlocks
numPadSlotBlocks + numSlotBlocks
else:
numSlotBlocks
numBlocksTotal = numSlotBlocksTotal * manifest.numSlots # number of blocks per slot
numBlocksTotal = numSlotBlocksTotal * manifest.numSlots # number of blocks per slot
emptyBlock = newSeq[byte](manifest.blockSize.int)
emptyDigestTree = ? T.digestTree(emptyBlock, cellSize.int)
emptyBlock = newSeq[byte](manifest.blockSize.int)
emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int)
strategy = ? strategy.init(
0,
numBlocksTotal - 1,
manifest.numSlots).catch
strategy = ?strategy.init(0, numBlocksTotal - 1, manifest.numSlots).catch
logScope:
numSlotBlocks = numSlotBlocks
numBlockCells = numBlockCells
numSlotCells = numSlotCells
pow2SlotCells = pow2SlotCells
numPadSlotBlocks = numPadSlotBlocks
numBlocksTotal = numBlocksTotal
numSlotBlocksTotal = numSlotBlocksTotal
strategy = strategy.strategyType
numSlotBlocks = numSlotBlocks
numBlockCells = numBlockCells
numSlotCells = numSlotCells
pow2SlotCells = pow2SlotCells
numPadSlotBlocks = numPadSlotBlocks
numBlocksTotal = numBlocksTotal
numSlotBlocksTotal = numSlotBlocksTotal
strategy = strategy.strategyType
trace "Creating slots builder"
var
self = SlotsBuilder[T, H](
store: store,
manifest: manifest,
strategy: strategy,
cellSize: cellSize,
emptyBlock: emptyBlock,
numSlotBlocks: numSlotBlocksTotal,
emptyDigestTree: emptyDigestTree)
var self = SlotsBuilder[T, H](
store: store,
manifest: manifest,
strategy: strategy,
cellSize: cellSize,
emptyBlock: emptyBlock,
numSlotBlocks: numSlotBlocksTotal,
emptyDigestTree: emptyDigestTree,
)
if manifest.verifiable:
if manifest.slotRoots.len == 0 or
manifest.slotRoots.len != manifest.numSlots:
if manifest.slotRoots.len == 0 or manifest.slotRoots.len != manifest.numSlots:
return failure "Manifest is verifiable but slot roots are missing or invalid."
let
slotRoots = manifest.slotRoots.mapIt( (? it.fromSlotCid() ))
tree = ? self.buildVerifyTree(slotRoots)
expectedRoot = ? manifest.verifyRoot.fromVerifyCid()
verifyRoot = ? tree.root
slotRoots = manifest.slotRoots.mapIt((?it.fromSlotCid()))
tree = ?self.buildVerifyTree(slotRoots)
expectedRoot = ?manifest.verifyRoot.fromVerifyCid()
verifyRoot = ?tree.root
if verifyRoot != expectedRoot:
return failure "Existing slots root doesn't match reconstructed root."

View File

@ -23,21 +23,25 @@ import ../utils/digest
func toCid(hash: Poseidon2Hash, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Cid =
let
mhash = ? MultiHash.init(mcodec, hash.toBytes).mapFailure
treeCid = ? Cid.init(CIDv1, cidCodec, mhash).mapFailure
mhash = ?MultiHash.init(mcodec, hash.toBytes).mapFailure
treeCid = ?Cid.init(CIDv1, cidCodec, mhash).mapFailure
success treeCid
proc toPoseidon2Hash(cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Poseidon2Hash =
proc toPoseidon2Hash(
cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec
): ?!Poseidon2Hash =
if cid.cidver != CIDv1:
return failure("Unexpected CID version")
if cid.mcodec != cidCodec:
return failure("Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec)
return failure(
"Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec
)
let
mhash = ? cid.mhash.mapFailure
mhash = ?cid.mhash.mapFailure
bytes: array[32, byte] = array[32, byte].initCopyFrom(mhash.digestBytes())
hash = ? Poseidon2Hash.fromBytes(bytes).toFailure
hash = ?Poseidon2Hash.fromBytes(bytes).toFailure
success hash
@ -51,7 +55,7 @@ func toSlotCid*(hash: Poseidon2Hash): ?!Cid =
toCid(hash, multiCodec("identity"), SlotRootCodec)
func toSlotCids*(slotRoots: openArray[Poseidon2Hash]): ?!seq[Cid] =
success slotRoots.mapIt( ? it.toSlotCid )
success slotRoots.mapIt(?it.toSlotCid)
func fromSlotCid*(cid: Cid): ?!Poseidon2Hash =
toPoseidon2Hash(cid, multiCodec("identity"), SlotRootCodec)
@ -62,27 +66,17 @@ func toVerifyCid*(hash: Poseidon2Hash): ?!Cid =
func fromVerifyCid*(cid: Cid): ?!Poseidon2Hash =
toPoseidon2Hash(cid, multiCodec("identity"), SlotProvingRootCodec)
func toEncodableProof*(
proof: Poseidon2Proof): ?!CodexProof =
let
encodableProof = CodexProof(
mcodec: multiCodec("identity"),
index: proof.index,
nleaves: proof.nleaves,
path: proof.path.mapIt( @( it.toBytes ) ))
func toEncodableProof*(proof: Poseidon2Proof): ?!CodexProof =
let encodableProof = CodexProof(
mcodec: multiCodec("identity"),
index: proof.index,
nleaves: proof.nleaves,
path: proof.path.mapIt(@(it.toBytes)),
)
success encodableProof
func toVerifiableProof*(
proof: CodexProof): ?!Poseidon2Proof =
func toVerifiableProof*(proof: CodexProof): ?!Poseidon2Proof =
let nodes = proof.path.mapIt(?Poseidon2Hash.fromBytes(it.toArray32).toFailure)
let
nodes = proof.path.mapIt(
? Poseidon2Hash.fromBytes(it.toArray32).toFailure
)
Poseidon2Proof.init(
index = proof.index,
nleaves = proof.nleaves,
nodes = nodes)
Poseidon2Proof.init(index = proof.index, nleaves = proof.nleaves, nodes = nodes)

View File

@ -11,26 +11,25 @@ import ../../conf
import ./backends
import ./backendutils
proc initializeFromConfig(
config: CodexConf,
utils: BackendUtils): ?!AnyBackend =
proc initializeFromConfig(config: CodexConf, utils: BackendUtils): ?!AnyBackend =
if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) or
not endsWith($config.circomR1cs, ".r1cs"):
not endsWith($config.circomR1cs, ".r1cs"):
return failure("Circom R1CS file not accessible")
if not fileAccessible($config.circomWasm, {AccessFlags.Read}) or
not endsWith($config.circomWasm, ".wasm"):
not endsWith($config.circomWasm, ".wasm"):
return failure("Circom wasm file not accessible")
if not fileAccessible($config.circomZkey, {AccessFlags.Read}) or
not endsWith($config.circomZkey, ".zkey"):
not endsWith($config.circomZkey, ".zkey"):
return failure("Circom zkey file not accessible")
trace "Initialized prover backend from cli config"
success(utils.initializeCircomBackend(
$config.circomR1cs,
$config.circomWasm,
$config.circomZkey))
success(
utils.initializeCircomBackend(
$config.circomR1cs, $config.circomWasm, $config.circomZkey
)
)
proc r1csFilePath(config: CodexConf): string =
config.circuitDir / "proof_main.r1cs"
@ -42,42 +41,40 @@ proc zkeyFilePath(config: CodexConf): string =
config.circuitDir / "proof_main.zkey"
proc initializeFromCircuitDirFiles(
config: CodexConf,
utils: BackendUtils): ?!AnyBackend {.gcsafe.} =
if fileExists(config.r1csFilePath) and
fileExists(config.wasmFilePath) and
fileExists(config.zkeyFilePath):
config: CodexConf, utils: BackendUtils
): ?!AnyBackend {.gcsafe.} =
if fileExists(config.r1csFilePath) and fileExists(config.wasmFilePath) and
fileExists(config.zkeyFilePath):
trace "Initialized prover backend from local files"
return success(utils.initializeCircomBackend(
config.r1csFilePath,
config.wasmFilePath,
config.zkeyFilePath))
return success(
utils.initializeCircomBackend(
config.r1csFilePath, config.wasmFilePath, config.zkeyFilePath
)
)
failure("Circuit files not found")
proc suggestDownloadTool(config: CodexConf) =
without address =? config.marketplaceAddress:
raise (ref Defect)(msg: "Proving backend initializing while marketplace address not set.")
raise (ref Defect)(
msg: "Proving backend initializing while marketplace address not set."
)
let
tokens = [
"cirdl",
"\"" & $config.circuitDir & "\"",
config.ethProvider,
$address
]
tokens = ["cirdl", "\"" & $config.circuitDir & "\"", config.ethProvider, $address]
instructions = "'./" & tokens.join(" ") & "'"
warn "Proving circuit files are not found. Please run the following to download them:", instructions
warn "Proving circuit files are not found. Please run the following to download them:",
instructions
proc initializeBackend*(
config: CodexConf,
utils: BackendUtils = BackendUtils()): ?!AnyBackend =
config: CodexConf, utils: BackendUtils = BackendUtils()
): ?!AnyBackend =
without backend =? initializeFromConfig(config, utils), cliErr:
info "Could not initialize prover backend from CLI options...", msg = cliErr.msg
without backend =? initializeFromCircuitDirFiles(config, utils), localErr:
info "Could not initialize prover backend from circuit dir files...", msg = localErr.msg
info "Could not initialize prover backend from circuit dir files...",
msg = localErr.msg
suggestDownloadTool(config)
return failure("CircuitFilesNotFound")
# Unexpected: value of backend does not survive leaving each scope. (definition does though...)

View File

@ -2,5 +2,4 @@ import ./backends/circomcompat
export circomcompat
type
AnyBackend* = CircomCompat
type AnyBackend* = CircomCompat

View File

@ -25,21 +25,22 @@ export circomcompat, converters
type
CircomCompat* = object
slotDepth : int # max depth of the slot tree
datasetDepth : int # max depth of dataset tree
blkDepth : int # depth of the block merkle tree (pow2 for now)
cellElms : int # number of field elements per cell
numSamples : int # number of samples per slot
r1csPath : string # path to the r1cs file
wasmPath : string # path to the wasm file
zkeyPath : string # path to the zkey file
backendCfg : ptr CircomBn254Cfg
vkp* : ptr CircomKey
slotDepth: int # max depth of the slot tree
datasetDepth: int # max depth of dataset tree
blkDepth: int # depth of the block merkle tree (pow2 for now)
cellElms: int # number of field elements per cell
numSamples: int # number of samples per slot
r1csPath: string # path to the r1cs file
wasmPath: string # path to the wasm file
zkeyPath: string # path to the zkey file
backendCfg: ptr CircomBn254Cfg
vkp*: ptr CircomKey
NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H]
func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]):
NormalizedProofInputs[H] =
func normalizeInput*[H](
self: CircomCompat, input: ProofInputs[H]
): NormalizedProofInputs[H] =
## Parameters in CIRCOM circuits are statically sized and must be properly
## padded before they can be passed onto the circuit. This function takes
## variable length parameters and performs that padding.
@ -52,10 +53,7 @@ func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]):
for sample in input.samples:
var merklePaths = sample.merklePaths
merklePaths.setLen(self.slotDepth)
Sample[H](
cellData: sample.cellData,
merklePaths: merklePaths
)
Sample[H](cellData: sample.cellData, merklePaths: merklePaths)
var normSlotProof = input.slotProof
normSlotProof.setLen(self.datasetDepth)
@ -68,7 +66,7 @@ func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]):
nCellsPerSlot: input.nCellsPerSlot,
nSlotsPerDataSet: input.nSlotsPerDataSet,
slotProof: normSlotProof,
samples: normSamples
samples: normSamples,
)
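Editor's aside: the padding step above relies on `setLen` growing a seq with default-initialised (zero) elements. A minimal sketch with illustrative sizes:

```nim
# seqs grow with zero-initialised elements, which is exactly the
# padding the circuit's static parameter sizes require.
var merklePaths = @[10, 20, 30] # three real path elements
merklePaths.setLen(5)           # pad to an assumed slotDepth of 5
assert merklePaths == @[10, 20, 30, 0, 0]
```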
proc release*(self: CircomCompat) =
@ -81,32 +79,28 @@ proc release*(self: CircomCompat) =
if not isNil(self.vkp):
self.vkp.unsafeAddr.release_key()
proc prove[H](
self: CircomCompat,
input: NormalizedProofInputs[H]): ?!CircomProof =
doAssert input.samples.len == self.numSamples,
"Number of samples does not match"
proc prove[H](self: CircomCompat, input: NormalizedProofInputs[H]): ?!CircomProof =
doAssert input.samples.len == self.numSamples, "Number of samples does not match"
doAssert input.slotProof.len <= self.datasetDepth,
"Slot proof is too deep - dataset has more slots than what we can handle?"
doAssert input.samples.allIt(
block:
(it.merklePaths.len <= self.slotDepth + self.blkDepth and
it.cellData.len == self.cellElms)), "Merkle paths too deep or cells too big for circuit"
(
it.merklePaths.len <= self.slotDepth + self.blkDepth and
it.cellData.len == self.cellElms
)
), "Merkle paths too deep or cells too big for circuit"
# TODO: All parameters should match circom's static parameters
var
ctx: ptr CircomCompatCtx
var ctx: ptr CircomCompatCtx
defer:
if ctx != nil:
ctx.addr.release_circom_compat()
if init_circom_compat(
self.backendCfg,
addr ctx) != ERR_OK or ctx == nil:
if init_circom_compat(self.backendCfg, addr ctx) != ERR_OK or ctx == nil:
raiseAssert("failed to initialize CircomCompat ctx")
var
@ -114,67 +108,61 @@ proc prove[H](
dataSetRoot = input.datasetRoot.toBytes
slotRoot = input.slotRoot.toBytes
if ctx.push_input_u256_array(
"entropy".cstring, entropy[0].addr, entropy.len.uint32) != ERR_OK:
if ctx.push_input_u256_array("entropy".cstring, entropy[0].addr, entropy.len.uint32) !=
ERR_OK:
return failure("Failed to push entropy")
if ctx.push_input_u256_array(
"dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32) != ERR_OK:
"dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32
) != ERR_OK:
return failure("Failed to push data set root")
if ctx.push_input_u256_array(
"slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32) != ERR_OK:
"slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32
) != ERR_OK:
return failure("Failed to push data set root")
if ctx.push_input_u32(
"nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK:
if ctx.push_input_u32("nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK:
return failure("Failed to push nCellsPerSlot")
if ctx.push_input_u32(
"nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) != ERR_OK:
if ctx.push_input_u32("nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) !=
ERR_OK:
return failure("Failed to push nSlotsPerDataSet")
if ctx.push_input_u32(
"slotIndex".cstring, input.slotIndex.uint32) != ERR_OK:
if ctx.push_input_u32("slotIndex".cstring, input.slotIndex.uint32) != ERR_OK:
return failure("Failed to push slotIndex")
var
slotProof = input.slotProof.mapIt( it.toBytes ).concat
var slotProof = input.slotProof.mapIt(it.toBytes).concat
doAssert(slotProof.len == self.datasetDepth)
# arrays are always flattened
if ctx.push_input_u256_array(
"slotProof".cstring,
slotProof[0].addr,
uint (slotProof[0].len * slotProof.len)) != ERR_OK:
return failure("Failed to push slot proof")
"slotProof".cstring, slotProof[0].addr, uint (slotProof[0].len * slotProof.len)
) != ERR_OK:
return failure("Failed to push slot proof")
for s in input.samples:
var
merklePaths = s.merklePaths.mapIt( it.toBytes )
data = s.cellData.mapIt( @(it.toBytes) ).concat
merklePaths = s.merklePaths.mapIt(it.toBytes)
data = s.cellData.mapIt(@(it.toBytes)).concat
if ctx.push_input_u256_array(
"merklePaths".cstring,
merklePaths[0].addr,
uint (merklePaths[0].len * merklePaths.len)) != ERR_OK:
return failure("Failed to push merkle paths")
uint (merklePaths[0].len * merklePaths.len),
) != ERR_OK:
return failure("Failed to push merkle paths")
if ctx.push_input_u256_array(
"cellData".cstring,
data[0].addr,
data.len.uint) != ERR_OK:
return failure("Failed to push cell data")
if ctx.push_input_u256_array("cellData".cstring, data[0].addr, data.len.uint) !=
ERR_OK:
return failure("Failed to push cell data")
var
proofPtr: ptr Proof = nil
var proofPtr: ptr Proof = nil
let proof =
try:
if (
let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr);
res != ERR_OK) or
proofPtr == nil:
if (let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr); res != ERR_OK) or
proofPtr == nil:
return failure("Failed to prove - err code: " & $res)
proofPtr[]
@ -184,16 +172,12 @@ proc prove[H](
success proof
proc prove*[H](
self: CircomCompat,
input: ProofInputs[H]): ?!CircomProof =
proc prove*[H](self: CircomCompat, input: ProofInputs[H]): ?!CircomProof =
self.prove(self.normalizeInput(input))
proc verify*[H](
self: CircomCompat,
proof: CircomProof,
inputs: ProofInputs[H]): ?!bool =
self: CircomCompat, proof: CircomProof, inputs: ProofInputs[H]
): ?!bool =
## Verify a proof using a ctx
##
@ -213,43 +197,44 @@ proc verify*[H](
inputs.releaseCircomInputs()
proc init*(
_: type CircomCompat,
r1csPath : string,
wasmPath : string,
zkeyPath : string = "",
slotDepth = DefaultMaxSlotDepth,
datasetDepth = DefaultMaxDatasetDepth,
blkDepth = DefaultBlockDepth,
cellElms = DefaultCellElms,
numSamples = DefaultSamplesNum): CircomCompat =
_: type CircomCompat,
r1csPath: string,
wasmPath: string,
zkeyPath: string = "",
slotDepth = DefaultMaxSlotDepth,
datasetDepth = DefaultMaxDatasetDepth,
blkDepth = DefaultBlockDepth,
cellElms = DefaultCellElms,
numSamples = DefaultSamplesNum,
): CircomCompat =
## Create a new ctx
##
var cfg: ptr CircomBn254Cfg
var zkey = if zkeyPath.len > 0: zkeyPath.cstring else: nil
if init_circom_config(
r1csPath.cstring,
wasmPath.cstring,
zkey, cfg.addr) != ERR_OK or cfg == nil:
if cfg != nil: cfg.addr.release_cfg()
raiseAssert("failed to initialize circom compat config")
if init_circom_config(r1csPath.cstring, wasmPath.cstring, zkey, cfg.addr) != ERR_OK or
cfg == nil:
if cfg != nil:
cfg.addr.release_cfg()
raiseAssert("failed to initialize circom compat config")
var
vkpPtr: ptr VerifyingKey = nil
var vkpPtr: ptr VerifyingKey = nil
if cfg.get_verifying_key(vkpPtr.addr) != ERR_OK or vkpPtr == nil:
if vkpPtr != nil: vkpPtr.addr.release_key()
if vkpPtr != nil:
vkpPtr.addr.release_key()
raiseAssert("Failed to get verifying key")
CircomCompat(
r1csPath : r1csPath,
wasmPath : wasmPath,
zkeyPath : zkeyPath,
slotDepth : slotDepth,
r1csPath: r1csPath,
wasmPath: wasmPath,
zkeyPath: zkeyPath,
slotDepth: slotDepth,
datasetDepth: datasetDepth,
blkDepth : blkDepth,
cellElms : cellElms,
numSamples : numSamples,
backendCfg : cfg,
vkp : vkpPtr)
blkDepth: blkDepth,
cellElms: cellElms,
numSamples: numSamples,
backendCfg: cfg,
vkp: vkpPtr,
)

View File

@ -19,8 +19,8 @@ type
CircomG1* = G1
CircomG2* = G2
CircomProof* = Proof
CircomKey* = VerifyingKey
CircomProof* = Proof
CircomKey* = VerifyingKey
CircomInputs* = Inputs
proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs =
@ -29,18 +29,12 @@ proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs =
datasetRoot = inputs.datasetRoot.toBytes.toArray32
entropy = inputs.entropy.toBytes.toArray32
elms = [
entropy,
datasetRoot,
slotIndex
]
elms = [entropy, datasetRoot, slotIndex]
let inputsPtr = allocShared0(32 * elms.len)
copyMem(inputsPtr, addr elms[0], elms.len * 32)
CircomInputs(
elms: cast[ptr array[32, byte]](inputsPtr),
len: elms.len.uint)
CircomInputs(elms: cast[ptr array[32, byte]](inputsPtr), len: elms.len.uint)
proc releaseCircomInputs*(inputs: var CircomInputs) =
if not inputs.elms.isNil:
@ -48,23 +42,13 @@ proc releaseCircomInputs*(inputs: var CircomInputs) =
inputs.elms = nil
func toG1*(g: CircomG1): G1Point =
G1Point(
x: UInt256.fromBytesLE(g.x),
y: UInt256.fromBytesLE(g.y))
G1Point(x: UInt256.fromBytesLE(g.x), y: UInt256.fromBytesLE(g.y))
func toG2*(g: CircomG2): G2Point =
G2Point(
x: Fp2Element(
real: UInt256.fromBytesLE(g.x[0]),
imag: UInt256.fromBytesLE(g.x[1])
),
y: Fp2Element(
real: UInt256.fromBytesLE(g.y[0]),
imag: UInt256.fromBytesLE(g.y[1])
))
x: Fp2Element(real: UInt256.fromBytesLE(g.x[0]), imag: UInt256.fromBytesLE(g.x[1])),
y: Fp2Element(real: UInt256.fromBytesLE(g.y[0]), imag: UInt256.fromBytesLE(g.y[1])),
)
func toGroth16Proof*(proof: CircomProof): Groth16Proof =
Groth16Proof(
a: proof.a.toG1,
b: proof.b.toG2,
c: proof.c.toG1)
Groth16Proof(a: proof.a.toG1, b: proof.b.toG2, c: proof.c.toG1)

Some files were not shown because too many files have changed in this diff