mirror of https://github.com/status-im/nim-codex.git (synced 2025-02-23 07:58:39 +00:00)

commit e5df8c50d3 (parent d114e6e942)
style: nph formatting (#1067)

* style: nph setup
* chore: formats codex/ and tests/ folders with nph 0.6.1

.github/workflows/ci.yml (13 lines changed)
@@ -47,6 +47,19 @@ jobs:
       matrix: ${{ needs.matrix.outputs.matrix }}
       cache_nonce: ${{ needs.matrix.outputs.cache_nonce }}
 
+  linting:
+    runs-on: ubuntu-latest
+    if: github.event_name == 'pull_request'
+    steps:
+      - uses: actions/checkout@v4
+      - name: Check `nph` formatting
+        uses: arnetheduck/nph-action@v1
+        with:
+          version: 0.6.1
+          options: "codex/ tests/"
+          fail: true
+          suggest: true
+
   coverage:
     # Force to stick to ubuntu 20.04 for coverage because
    # lcov was updated to 2.x version in ubuntu-latest
.gitmodules (3 lines changed)
@@ -218,3 +218,6 @@
 [submodule "vendor/nim-zippy"]
   path = vendor/nim-zippy
   url = https://github.com/status-im/nim-zippy.git
+[submodule "vendor/nph"]
+  path = vendor/nph
+  url = https://github.com/arnetheduck/nph.git
Makefile (39 lines changed)
@@ -17,6 +17,7 @@
 # version pinned by nimbus-build-system.
 #PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
 PINNED_NIM_VERSION := v2.0.14
+
 ifeq ($(NIM_COMMIT),)
 NIM_COMMIT := $(PINNED_NIM_VERSION)
 else ifeq ($(NIM_COMMIT),pinned)
@@ -199,4 +200,42 @@ ifneq ($(USE_LIBBACKTRACE), 0)
 	+ $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT)
 endif
+
+############
+## Format ##
+############
+.PHONY: build-nph install-nph-hook clean-nph print-nph-path
+
+# Default location for nph binary shall be next to nim binary to make it available on the path.
+NPH:=$(shell dirname $(NIM_BINARY))/nph
+
+build-nph:
+ifeq ("$(wildcard $(NPH))","")
+	$(ENV_SCRIPT) nim c vendor/nph/src/nph.nim && \
+		mv vendor/nph/src/nph $(shell dirname $(NPH))
+	echo "nph utility is available at " $(NPH)
+endif
+
+GIT_PRE_COMMIT_HOOK := .git/hooks/pre-commit
+
+install-nph-hook: build-nph
+ifeq ("$(wildcard $(GIT_PRE_COMMIT_HOOK))","")
+	cp ./tools/scripts/git_pre_commit_format.sh $(GIT_PRE_COMMIT_HOOK)
+else
+	echo "$(GIT_PRE_COMMIT_HOOK) already present, will NOT override"
+	exit 1
+endif
+
+nph/%: build-nph
+	echo -e $(FORMAT_MSG) "nph/$*" && \
+		$(NPH) $*
+
+clean-nph:
+	rm -f $(NPH)
+
+# To avoid hardcoding nph binary location in several places
+print-nph-path:
+	echo "$(NPH)"
+
+clean: | clean-nph
+
 endif # "variables.mk" was not included
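Taken together, the new Makefile targets wire `nph` into the local workflow. A minimal usage sketch (assuming the repository is checked out with its submodules and the usual nimbus-build-system bootstrap has already produced the pinned `nim` binary; the exact invocation order is an assumption, not part of the commit):

```bash
# build the vendored nph formatter next to the pinned nim binary
make build-nph
make print-nph-path          # prints where the binary was placed

# format a folder (or a single file) through the nph/% pattern rule
make nph/codex/
make nph/tests/

# optionally install the Git pre-commit hook that formats modified files
make install-nph-hook
```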
README.md (13 lines changed)
@@ -31,6 +31,7 @@ Run the client with:
 ```bash
 build/codex
 ```
+
 ## Configuration
 
 It is possible to configure a Codex node in several ways:
@@ -51,3 +52,15 @@ To get acquainted with Codex, consider:
 ## API
 
 The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage).
+
+## Contributing and development
+
+Feel free to dive in, contributions are welcomed! Open an issue or submit PRs.
+
+### Linting and formatting
+
+`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code, and it is required to adhere to its styling.
+If you are setting up a fresh environment, run `make build-nph` to get `nph`.
+To format files, run `make nph/<file or folder you want to format>`.
+If you want, you can install a Git pre-commit hook using `make install-nph-hook`, which will format modified files prior to committing them.
+If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension, you can enable "Format On Save" (i.e. the `nim.formatOnSave` property), which will format the files using `nph`.
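The CI `linting` job added above runs the same formatter over the same folders, so a branch can be checked locally before pushing. A hedged sketch of that check (the combined target invocation and using `git diff` as the pass/fail signal are assumptions, not part of the commit):

```bash
make build-nph
make nph/codex/ nph/tests/   # format exactly what the nph-action job checks
git diff --stat              # an empty diff afterwards means the CI check should pass
```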
@@ -1,10 +1,5 @@
-import ./blockexchange/[
-  network,
-  engine,
-  peers]
+import ./blockexchange/[network, engine, peers]
 
-import ./blockexchange/protobuf/[
-  blockexc,
-  presence]
+import ./blockexchange/protobuf/[blockexc, presence]
 
 export network, engine, blockexc, presence, peers
@@ -34,8 +34,7 @@ const
   DefaultConcurrentAdvertRequests = 10
   DefaultAdvertiseLoopSleep = 30.minutes
 
-type
-  Advertiser* = ref object of RootObj
+type Advertiser* = ref object of RootObj
   localStore*: BlockStore # Local block store for this instance
   discovery*: Discovery # Discovery interface
 
@@ -83,7 +82,6 @@ proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
       trace "Advertiser iterating blocks finished."
 
       await sleepAsync(b.advertiseLocalStoreLoopSleep)
-
     except CancelledError:
       break # do not propagate as advertiseLocalStoreLoop was asyncSpawned
     except CatchableError as e:
@@ -94,20 +92,17 @@ proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
 proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
   while b.advertiserRunning:
     try:
-      let
-        cid = await b.advertiseQueue.get()
+      let cid = await b.advertiseQueue.get()
 
       if cid in b.inFlightAdvReqs:
         continue
 
      try:
-        let
-          request = b.discovery.provide(cid)
+        let request = b.discovery.provide(cid)
 
        b.inFlightAdvReqs[cid] = request
        codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
        await request
-
      finally:
        b.inFlightAdvReqs.del(cid)
        codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
@@ -166,7 +161,7 @@ proc new*(
     localStore: BlockStore,
     discovery: Discovery,
     concurrentAdvReqs = DefaultConcurrentAdvertRequests,
-    advertiseLocalStoreLoopSleep = DefaultAdvertiseLoopSleep
+    advertiseLocalStoreLoopSleep = DefaultAdvertiseLoopSleep,
 ): Advertiser =
   ## Create a advertiser instance
   ##
@@ -177,4 +172,5 @@ proc new*(
     advertiseQueue: newAsyncQueue[Cid](concurrentAdvReqs),
     trackedFutures: TrackedFutures.new(),
     inFlightAdvReqs: initTable[Cid, Future[void]](),
-    advertiseLocalStoreLoopSleep: advertiseLocalStoreLoopSleep)
+    advertiseLocalStoreLoopSleep: advertiseLocalStoreLoopSleep,
+  )
@@ -40,8 +40,7 @@ const
   DefaultMinPeersPerBlock = 3
   DefaultDiscoveryLoopSleep = 3.seconds
 
-type
-  DiscoveryEngine* = ref object of RootObj
+type DiscoveryEngine* = ref object of RootObj
   localStore*: BlockStore # Local block store for this instance
   peers*: PeerCtxStore # Peer context store
   network*: BlockExcNetwork # Network interface
@@ -54,7 +53,8 @@ type
   trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
   minPeersPerBlock*: int # Max number of peers with block
   discoveryLoopSleep: Duration # Discovery loop sleep
-  inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]] # Inflight discovery requests
+  inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
+    # Inflight discovery requests
 
 proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
   while b.discEngineRunning:
@@ -81,36 +81,27 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
 
   while b.discEngineRunning:
     try:
-      let
-        cid = await b.discoveryQueue.get()
+      let cid = await b.discoveryQueue.get()
 
       if cid in b.inFlightDiscReqs:
         trace "Discovery request already in progress", cid
         continue
 
-      let
-        haves = b.peers.peersHave(cid)
+      let haves = b.peers.peersHave(cid)
 
       if haves.len < b.minPeersPerBlock:
         try:
-          let
-            request = b.discovery
-              .find(cid)
-              .wait(DefaultDiscoveryTimeout)
+          let request = b.discovery.find(cid).wait(DefaultDiscoveryTimeout)
 
           b.inFlightDiscReqs[cid] = request
           codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
-          let
-            peers = await request
+          let peers = await request
 
-          let
-            dialed = await allFinished(
-              peers.mapIt( b.network.dialPeer(it.data) ))
+          let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
 
           for i, f in dialed:
             if f.failed:
               await b.discovery.removeProvider(peers[i].data.peerId)
-
         finally:
           b.inFlightDiscReqs.del(cid)
           codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
@@ -180,7 +171,7 @@ proc new*(
     pendingBlocks: PendingBlocksManager,
     concurrentDiscReqs = DefaultConcurrentDiscRequests,
     discoveryLoopSleep = DefaultDiscoveryLoopSleep,
-    minPeersPerBlock = DefaultMinPeersPerBlock
+    minPeersPerBlock = DefaultMinPeersPerBlock,
 ): DiscoveryEngine =
   ## Create a discovery engine instance for advertising services
   ##
@@ -195,4 +186,5 @@ proc new*(
     trackedFutures: TrackedFutures.new(),
     inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
     discoveryLoopSleep: discoveryLoopSleep,
-    minPeersPerBlock: minPeersPerBlock)
+    minPeersPerBlock: minPeersPerBlock,
+  )
@@ -44,12 +44,24 @@ export peers, pendingblocks, payments, discovery
 logScope:
   topics = "codex blockexcengine"
 
-declareCounter(codex_block_exchange_want_have_lists_sent, "codex blockexchange wantHave lists sent")
-declareCounter(codex_block_exchange_want_have_lists_received, "codex blockexchange wantHave lists received")
-declareCounter(codex_block_exchange_want_block_lists_sent, "codex blockexchange wantBlock lists sent")
-declareCounter(codex_block_exchange_want_block_lists_received, "codex blockexchange wantBlock lists received")
+declareCounter(
+  codex_block_exchange_want_have_lists_sent, "codex blockexchange wantHave lists sent"
+)
+declareCounter(
+  codex_block_exchange_want_have_lists_received,
+  "codex blockexchange wantHave lists received",
+)
+declareCounter(
+  codex_block_exchange_want_block_lists_sent, "codex blockexchange wantBlock lists sent"
+)
+declareCounter(
+  codex_block_exchange_want_block_lists_received,
+  "codex blockexchange wantBlock lists received",
+)
 declareCounter(codex_block_exchange_blocks_sent, "codex blockexchange blocks sent")
-declareCounter(codex_block_exchange_blocks_received, "codex blockexchange blocks received")
+declareCounter(
+  codex_block_exchange_blocks_received, "codex blockexchange blocks received"
+)
 
 const
   DefaultMaxPeersPerRequest* = 10
@@ -70,7 +82,8 @@ type
     localStore*: BlockStore # Local block store for this instance
     network*: BlockExcNetwork # Petwork interface
     peers*: PeerCtxStore # Peers we're currently actively exchanging with
-    taskQueue*: AsyncHeapQueue[BlockExcPeerCtx] # Peers we're currently processing tasks for
+    taskQueue*: AsyncHeapQueue[BlockExcPeerCtx]
+      # Peers we're currently processing tasks for
    concurrentTasks: int # Number of concurrent peers we're serving at any given time
    trackedFutures: TrackedFutures # Tracks futures of blockexc tasks
    blockexcRunning: bool # Indicates if the blockexc task is running
@@ -87,7 +100,7 @@ type
     price*: UInt256
 
 # attach task scheduler to engine
-proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe} =
+proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe.} =
   b.taskQueue.pushOrUpdateNoWait(task).isOk()
 
 proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).}
@@ -128,35 +141,26 @@ proc stop*(b: BlockExcEngine) {.async.} =
   trace "NetworkStore stopped"
 
 proc sendWantHave(
-  b: BlockExcEngine,
-  addresses: seq[BlockAddress],
-  peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
+    b: BlockExcEngine, addresses: seq[BlockAddress], peers: seq[BlockExcPeerCtx]
+): Future[void] {.async.} =
   for p in peers:
     let toAsk = addresses.filterIt(it notin p.peerHave)
     trace "Sending wantHave request", toAsk, peer = p.id
-    await b.network.request.sendWantList(
-      p.id,
-      toAsk,
-      wantType = WantType.WantHave)
+    await b.network.request.sendWantList(p.id, toAsk, wantType = WantType.WantHave)
     codex_block_exchange_want_have_lists_sent.inc()
 
 proc sendWantBlock(
-  b: BlockExcEngine,
-  addresses: seq[BlockAddress],
-  blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
+    b: BlockExcEngine, addresses: seq[BlockAddress], blockPeer: BlockExcPeerCtx
+): Future[void] {.async.} =
   trace "Sending wantBlock request to", addresses, peer = blockPeer.id
   await b.network.request.sendWantList(
-    blockPeer.id,
-    addresses,
-    wantType = WantType.WantBlock) # we want this remote to send us a block
+    blockPeer.id, addresses, wantType = WantType.WantBlock
+  ) # we want this remote to send us a block
   codex_block_exchange_want_block_lists_sent.inc()
 
 proc monitorBlockHandle(
-  b: BlockExcEngine,
-  handle: Future[Block],
-  address: BlockAddress,
-  peerId: PeerId) {.async.} =
-
+    b: BlockExcEngine, handle: Future[Block], address: BlockAddress, peerId: PeerId
+) {.async.} =
   try:
     discard await handle
   except CancelledError as exc:
@@ -175,12 +179,13 @@ proc monitorBlockHandle(
     await b.network.switch.disconnect(peerId)
     b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
 
-proc pickPseudoRandom(address: BlockAddress, peers: seq[BlockExcPeerCtx]): BlockExcPeerCtx =
+proc pickPseudoRandom(
+    address: BlockAddress, peers: seq[BlockExcPeerCtx]
+): BlockExcPeerCtx =
   return peers[hash(address) mod peers.len]
 
 proc requestBlock*(
-  b: BlockExcEngine,
-  address: BlockAddress,
+    b: BlockExcEngine, address: BlockAddress
 ): Future[?!Block] {.async.} =
   let blockFuture = b.pendingBlocks.getWantHandle(address, b.blockFetchTimeout)
 
@@ -204,16 +209,12 @@ proc requestBlock*(
   except AsyncTimeoutError as err:
     failure err
 
-proc requestBlock*(
-  b: BlockExcEngine,
-  cid: Cid
-): Future[?!Block] =
+proc requestBlock*(b: BlockExcEngine, cid: Cid): Future[?!Block] =
   b.requestBlock(BlockAddress.init(cid))
 
 proc blockPresenceHandler*(
-  b: BlockExcEngine,
-  peer: PeerId,
-  blocks: seq[BlockPresence]) {.async.} =
+    b: BlockExcEngine, peer: PeerId, blocks: seq[BlockPresence]
+) {.async.} =
   let
     peerCtx = b.peers.get(peer)
     wantList = toSeq(b.pendingBlocks.wantList)
@@ -227,17 +228,12 @@ proc blockPresenceHandler*(
 
   let
     peerHave = peerCtx.peerHave
-    dontWantCids = peerHave.filterIt(
-      it notin wantList
-    )
+    dontWantCids = peerHave.filterIt(it notin wantList)
 
   if dontWantCids.len > 0:
     peerCtx.cleanPresence(dontWantCids)
 
-  let
-    wantCids = wantList.filterIt(
-      it in peerHave
-    )
+  let wantCids = wantList.filterIt(it in peerHave)
 
   if wantCids.len > 0:
     trace "Peer has blocks in our wantList", peer, wants = wantCids
@@ -246,13 +242,12 @@ proc blockPresenceHandler*(
     # if none of the connected peers report our wants in their have list,
     # fire up discovery
     b.discovery.queueFindBlocksReq(
-      toSeq(b.pendingBlocks.wantListCids)
-      .filter do(cid: Cid) -> bool:
-        not b.peers.anyIt( cid in it.peerHaveCids ))
+      toSeq(b.pendingBlocks.wantListCids).filter do(cid: Cid) -> bool:
+        not b.peers.anyIt(cid in it.peerHaveCids)
+    )
 
 proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
-  let
-    cids = blocksDelivery.mapIt( it.blk.cid )
+  let cids = blocksDelivery.mapIt(it.blk.cid)
 
   # schedule any new peers to provide blocks to
   for p in b.peers:
@@ -270,14 +265,16 @@ proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.asyn
 
 proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} =
   ## Tells neighboring peers that we're no longer interested in a block.
-  trace "Sending block request cancellations to peers", addrs, peers = b.peers.mapIt($it.id)
+  trace "Sending block request cancellations to peers",
+    addrs, peers = b.peers.mapIt($it.id)
 
-  let failed = (await allFinished(
-    b.peers.mapIt(
-      b.network.request.sendWantCancellations(
-        peer = it.id,
-        addresses = addrs))))
-    .filterIt(it.failed)
+  let failed = (
+    await allFinished(
+      b.peers.mapIt(
+        b.network.request.sendWantCancellations(peer = it.id, addresses = addrs)
+      )
+    )
+  ).filterIt(it.failed)
 
   if failed.len > 0:
     warn "Failed to send block request cancellations to peers", peers = failed.len
@@ -290,12 +287,13 @@ proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.asy
 proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} =
   await b.resolveBlocks(
     blocks.mapIt(
-      BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)
-    )))
+      BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid))
+    )
+  )
 
-proc payForBlocks(engine: BlockExcEngine,
-                  peer: BlockExcPeerCtx,
-                  blocksDelivery: seq[BlockDelivery]) {.async.} =
+proc payForBlocks(
+    engine: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery]
+) {.async.} =
   let
     sendPayment = engine.network.request.sendPayment
     price = peer.price(blocksDelivery.mapIt(it.address))
@@ -304,9 +302,7 @@ proc payForBlocks(engine: BlockExcEngine,
     trace "Sending payment for blocks", price, len = blocksDelivery.len
     await sendPayment(peer.id, payment)
 
-proc validateBlockDelivery(
-  b: BlockExcEngine,
-  bd: BlockDelivery): ?!void =
+proc validateBlockDelivery(b: BlockExcEngine, bd: BlockDelivery): ?!void =
   if bd.address notin b.pendingBlocks:
     return failure("Received block is not currently a pending block")
 
@@ -315,27 +311,30 @@ proc validateBlockDelivery(
     return failure("Missing proof")
 
     if proof.index != bd.address.index:
-      return failure("Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index)
+      return failure(
+        "Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index
+      )
 
     without leaf =? bd.blk.cid.mhash.mapFailure, err:
       return failure("Unable to get mhash from cid for block, nested err: " & err.msg)
 
    without treeRoot =? bd.address.treeCid.mhash.mapFailure, err:
-      return failure("Unable to get mhash from treeCid for block, nested err: " & err.msg)
+      return
+        failure("Unable to get mhash from treeCid for block, nested err: " & err.msg)
 
    if err =? proof.verify(leaf, treeRoot).errorOption:
      return failure("Unable to verify proof for block, nested err: " & err.msg)
 
  else: # not leaf
    if bd.address.cid != bd.blk.cid:
-      return failure("Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid)
+      return failure(
+        "Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid
+      )
 
   return success()
 
 proc blocksDeliveryHandler*(
-  b: BlockExcEngine,
-  peer: PeerId,
-  blocksDelivery: seq[BlockDelivery]) {.async.} =
+    b: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery]
+) {.async.} =
   trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address))
 
   var validatedBlocksDelivery: seq[BlockDelivery]
@@ -356,12 +355,11 @@ proc blocksDeliveryHandler*(
       without proof =? bd.proof:
         error "Proof expected for a leaf block delivery"
         continue
-      if err =? (await b.localStore.putCidAndProof(
-        bd.address.treeCid,
-        bd.address.index,
-        bd.blk.cid,
-        proof)).errorOption:
-
+      if err =? (
+        await b.localStore.putCidAndProof(
+          bd.address.treeCid, bd.address.index, bd.blk.cid, proof
+        )
+      ).errorOption:
        error "Unable to store proof and cid for a block"
        continue
 
@@ -370,20 +368,15 @@ proc blocksDeliveryHandler*(
   await b.resolveBlocks(validatedBlocksDelivery)
   codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
 
-  let
-    peerCtx = b.peers.get(peer)
+  let peerCtx = b.peers.get(peer)
 
   if peerCtx != nil:
     await b.payForBlocks(peerCtx, blocksDelivery)
     ## shouldn't we remove them from the want-list instead of this:
     peerCtx.cleanPresence(blocksDelivery.mapIt(it.address))
 
-proc wantListHandler*(
-  b: BlockExcEngine,
-  peer: PeerId,
-  wantList: WantList) {.async.} =
-  let
-    peerCtx = b.peers.get(peer)
+proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.async.} =
+  let peerCtx = b.peers.get(peer)
 
   if peerCtx.isNil:
     return
@@ -393,8 +386,7 @@ proc wantListHandler*(
     schedulePeer = false
 
   for e in wantList.entries:
-    let
-      idx = peerCtx.peerWants.findIt(it.address == e.address)
+    let idx = peerCtx.peerWants.findIt(it.address == e.address)
 
     logScope:
       peer = peerCtx.id
@@ -404,24 +396,22 @@ proc wantListHandler*(
     if idx < 0: # Adding new entry to peer wants
       let
         have = await e.address in b.localStore
-        price = @(
-          b.pricing.get(Pricing(price: 0.u256))
-          .price.toBytesBE)
+        price = @(b.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
 
       if e.wantType == WantType.WantHave:
         if have:
           presence.add(
            BlockPresence(
-              address: e.address,
-              `type`: BlockPresenceType.Have,
-              price: price))
+              address: e.address, `type`: BlockPresenceType.Have, price: price
+            )
+          )
        else:
          if e.sendDontHave:
            presence.add(
              BlockPresence(
-                address: e.address,
-                `type`: BlockPresenceType.DontHave,
-                price: price))
+                address: e.address, `type`: BlockPresenceType.DontHave, price: price
+              )
+            )
        peerCtx.peerWants.add(e)
 
       codex_block_exchange_want_have_lists_received.inc()
@@ -446,31 +436,24 @@ proc wantListHandler*(
   if not b.scheduleTask(peerCtx):
     warn "Unable to schedule task for peer", peer
 
-proc accountHandler*(
-  engine: BlockExcEngine,
-  peer: PeerId,
-  account: Account) {.async.} =
-  let
-    context = engine.peers.get(peer)
+proc accountHandler*(engine: BlockExcEngine, peer: PeerId, account: Account) {.async.} =
+  let context = engine.peers.get(peer)
   if context.isNil:
     return
 
   context.account = account.some
 
 proc paymentHandler*(
-  engine: BlockExcEngine,
-  peer: PeerId,
-  payment: SignedState) {.async.} =
+    engine: BlockExcEngine, peer: PeerId, payment: SignedState
+) {.async.} =
   trace "Handling payments", peer
 
-  without context =? engine.peers.get(peer).option and
-          account =? context.account:
+  without context =? engine.peers.get(peer).option and account =? context.account:
     trace "No context or account for peer", peer
    return
 
  if channel =? context.paymentChannel:
-    let
-      sender = account.address
+    let sender = account.address
    discard engine.wallet.acceptPayment(channel, Asset, sender, payment)
  else:
    context.paymentChannel = engine.wallet.acceptChannel(payment).option
@@ -484,17 +467,14 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
 
   if peer notin b.peers:
     trace "Setting up new peer", peer
-    b.peers.add(BlockExcPeerCtx(
-      id: peer
-    ))
+    b.peers.add(BlockExcPeerCtx(id: peer))
     trace "Added peer", peers = b.peers.len
 
   # broadcast our want list, the other peer will do the same
   if b.pendingBlocks.wantListLen > 0:
     trace "Sending our want list to a peer", peer
     let cids = toSeq(b.pendingBlocks.wantList)
-    await b.network.request.sendWantList(
-      peer, cids, full = true)
+    await b.network.request.sendWantList(peer, cids, full = true)
 
   if address =? b.pricing .? address:
     await b.network.request.sendAccount(peer, Account(address: address))
@@ -515,10 +495,8 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
   # TODO: There should be all sorts of accounting of
   # bytes sent/received here
 
-  var
-    wantsBlocks = task.peerWants.filterIt(
-      it.wantType == WantType.WantBlock and not it.inFlight
-    )
+  var wantsBlocks =
+    task.peerWants.filterIt(it.wantType == WantType.WantBlock and not it.inFlight)
 
   proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) =
     for peerWant in task.peerWants.mitems:
@@ -535,18 +513,20 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
       if e.address.leaf:
         (await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
           (blkAndProof: (Block, CodexProof)) =>
-            BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some)
+            BlockDelivery(
+              address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some
+            )
        )
      else:
        (await b.localStore.getBlock(e.address)).map(
-          (blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
+          (blk: Block) =>
+            BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
        )
 
   let
     blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
-    blocksDelivery = blocksDeliveryFut
-      .filterIt(it.completed and it.read.isOk)
-      .mapIt(it.read.get)
+    blocksDelivery =
+      blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get)
 
   # All the wants that failed local lookup must be set to not-in-flight again.
   let
@@ -555,11 +535,9 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
   updateInFlight(failedAddresses, false)
 
   if blocksDelivery.len > 0:
-    trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
-    await b.network.request.sendBlocksDelivery(
-      task.id,
-      blocksDelivery
-    )
+    trace "Sending blocks to peer",
+      peer = task.id, blocks = (blocksDelivery.mapIt(it.address))
+    await b.network.request.sendBlocksDelivery(task.id, blocksDelivery)
 
     codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
 
@@ -572,8 +550,7 @@ proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).} =
   trace "Starting blockexc task runner"
   while b.blockexcRunning:
     try:
-      let
-        peerCtx = await b.taskQueue.pop()
+      let peerCtx = await b.taskQueue.pop()
 
       await b.taskHandler(peerCtx)
     except CancelledError:
@@ -599,8 +576,7 @@ proc new*(
   ## Create new block exchange engine instance
   ##
 
-  let
-    engine = BlockExcEngine(
+  let engine = BlockExcEngine(
     localStore: localStore,
     peers: peerStore,
     pendingBlocks: pendingBlocks,
@@ -612,7 +588,8 @@ proc new*(
     taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize),
     discovery: discovery,
     advertiser: advertiser,
-    blockFetchTimeout: blockFetchTimeout)
+    blockFetchTimeout: blockFetchTimeout,
+  )
 
   proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
     if event.kind == PeerEventKind.Joined:
@@ -624,19 +601,17 @@ proc new*(
   network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
   network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
 
-  proc blockWantListHandler(
-    peer: PeerId,
-    wantList: WantList): Future[void] {.gcsafe.} =
+  proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} =
     engine.wantListHandler(peer, wantList)
 
   proc blockPresenceHandler(
-    peer: PeerId,
-    presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
+      peer: PeerId, presence: seq[BlockPresence]
+  ): Future[void] {.gcsafe.} =
    engine.blockPresenceHandler(peer, presence)
 
  proc blocksDeliveryHandler(
-    peer: PeerId,
-    blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
+      peer: PeerId, blocksDelivery: seq[BlockDelivery]
+  ): Future[void] {.gcsafe.} =
    engine.blocksDeliveryHandler(peer, blocksDelivery)
 
  proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} =
@@ -650,6 +625,7 @@ proc new*(
     onBlocksDelivery: blocksDeliveryHandler,
     onPresence: blockPresenceHandler,
     onAccount: accountHandler,
-    onPayment: paymentHandler)
+    onPayment: paymentHandler,
+  )
 
   return engine
@@ -15,15 +15,16 @@ import ../peers
 export nitro
 export results
 
-push: {.upraises: [].}
+push:
+  {.upraises: [].}
 
 const ChainId* = 0.u256 # invalid chain id for now
 const Asset* = EthAddress.zero # invalid ERC20 asset address for now
 const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals
 
-func openLedgerChannel*(wallet: WalletRef,
-                        hub: EthAddress,
-                        asset: EthAddress): ?!ChannelId =
+func openLedgerChannel*(
+    wallet: WalletRef, hub: EthAddress, asset: EthAddress
+): ?!ChannelId =
   wallet.openLedgerChannel(hub, ChainId, asset, AmountPerChannel)
 
 func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
@@ -36,9 +37,7 @@ func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
   else:
     failure "no account set for peer"
 
-func pay*(wallet: WalletRef,
-          peer: BlockExcPeerCtx,
-          amount: UInt256): ?!SignedState =
+func pay*(wallet: WalletRef, peer: BlockExcPeerCtx, amount: UInt256): ?!SignedState =
   if account =? peer.account:
     let asset = Asset
     let receiver = account.address
@@ -12,7 +12,8 @@ import std/monotimes
 
 import pkg/upraises
 
-push: {.upraises: [].}
+push:
+  {.upraises: [].}
 
 import pkg/chronos
 import pkg/libp2p
@@ -25,11 +26,15 @@ import ../../logutils
 logScope:
   topics = "codex pendingblocks"
 
-declareGauge(codex_block_exchange_pending_block_requests, "codex blockexchange pending block requests")
-declareGauge(codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us")
+declareGauge(
+  codex_block_exchange_pending_block_requests,
+  "codex blockexchange pending block requests",
+)
+declareGauge(
+  codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us"
+)
 
-const
-  DefaultBlockTimeout* = 10.minutes
+const DefaultBlockTimeout* = 10.minutes
 
 type
   BlockReq* = object
@@ -47,7 +52,8 @@ proc getWantHandle*(
     p: PendingBlocksManager,
     address: BlockAddress,
     timeout = DefaultBlockTimeout,
-    inFlight = false): Future[Block] {.async.} =
+    inFlight = false,
+): Future[Block] {.async.} =
   ## Add an event for a block
   ##
 
@@ -56,7 +62,8 @@ proc getWantHandle*(
     p.blocks[address] = BlockReq(
       handle: newFuture[Block]("pendingBlocks.getWantHandle"),
       inFlight: inFlight,
-      startTime: getMonoTime().ticks)
+      startTime: getMonoTime().ticks,
+    )
 
     p.updatePendingBlockGauge()
     return await p.blocks[address].handle.wait(timeout)
@@ -72,15 +79,13 @@ proc getWantHandle*(
   p.updatePendingBlockGauge()
 
 proc getWantHandle*(
-  p: PendingBlocksManager,
-  cid: Cid,
-  timeout = DefaultBlockTimeout,
-  inFlight = false): Future[Block] =
+    p: PendingBlocksManager, cid: Cid, timeout = DefaultBlockTimeout, inFlight = false
+): Future[Block] =
   p.getWantHandle(BlockAddress.init(cid), timeout, inFlight)
 
 proc resolve*(
-  p: PendingBlocksManager,
-  blocksDelivery: seq[BlockDelivery]) {.gcsafe, raises: [].} =
+    p: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
+) {.gcsafe, raises: [].} =
   ## Resolve pending blocks
   ##
 
@@ -101,19 +106,14 @@ proc resolve*(
     else:
       trace "Block handle already finished", address = bd.address
 
-proc setInFlight*(
-  p: PendingBlocksManager,
-  address: BlockAddress,
-  inFlight = true) =
+proc setInFlight*(p: PendingBlocksManager, address: BlockAddress, inFlight = true) =
   ## Set inflight status for a block
   ##
 
   p.blocks.withValue(address, pending):
     pending[].inFlight = inFlight
 
-proc isInFlight*(
-  p: PendingBlocksManager,
-  address: BlockAddress): bool =
+proc isInFlight*(p: PendingBlocksManager, address: BlockAddress): bool =
   ## Check if a block is in flight
   ##
 
@ -35,8 +35,10 @@ const
|
|||||||
|
|
||||||
type
|
type
|
||||||
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
|
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
|
||||||
BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
|
BlocksDeliveryHandler* =
|
||||||
BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
|
proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
|
||||||
|
BlockPresenceHandler* =
|
||||||
|
proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
|
||||||
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
|
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
|
||||||
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
|
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
|
||||||
|
|
||||||
@ -54,10 +56,14 @@ type
|
|||||||
cancel: bool = false,
|
cancel: bool = false,
|
||||||
wantType: WantType = WantType.WantHave,
|
wantType: WantType = WantType.WantHave,
|
||||||
full: bool = false,
|
full: bool = false,
|
||||||
sendDontHave: bool = false): Future[void] {.gcsafe.}
|
sendDontHave: bool = false,
|
||||||
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
|
): Future[void] {.gcsafe.}
|
||||||
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
|
WantCancellationSender* =
|
||||||
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
|
proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
|
||||||
|
BlocksDeliverySender* =
|
||||||
|
proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
|
||||||
|
PresenceSender* =
|
||||||
|
proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
|
||||||
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
|
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
|
||||||
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
|
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
|
||||||
|
|
||||||
@ -108,10 +114,7 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
|
|||||||
finally:
|
finally:
|
||||||
b.inflightSema.release()
|
b.inflightSema.release()
|
||||||
|
|
||||||
proc handleWantList(
|
proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} =
|
||||||
b: BlockExcNetwork,
|
|
||||||
peer: NetworkPeer,
|
|
||||||
list: WantList) {.async.} =
|
|
||||||
## Handle incoming want list
|
## Handle incoming want list
|
||||||
##
|
##
|
||||||
|
|
||||||
@ -126,7 +129,8 @@ proc sendWantList*(
|
|||||||
cancel: bool = false,
|
cancel: bool = false,
|
||||||
wantType: WantType = WantType.WantHave,
|
wantType: WantType = WantType.WantHave,
|
||||||
full: bool = false,
|
full: bool = false,
|
||||||
sendDontHave: bool = false): Future[void] =
|
sendDontHave: bool = false,
|
||||||
|
): Future[void] =
|
||||||
## Send a want message to peer
|
## Send a want message to peer
|
||||||
##
|
##
|
||||||
|
|
||||||
@ -137,43 +141,41 @@ proc sendWantList*(
|
|||||||
priority: priority,
|
priority: priority,
|
||||||
cancel: cancel,
|
cancel: cancel,
|
||||||
wantType: wantType,
|
wantType: wantType,
|
||||||
sendDontHave: sendDontHave) ),
|
sendDontHave: sendDontHave,
|
||||||
full: full)
|
)
|
||||||
|
),
|
||||||
|
full: full,
|
||||||
|
)
|
||||||
|
|
||||||
b.send(id, Message(wantlist: msg))
|
b.send(id, Message(wantlist: msg))
|
||||||
|
|
||||||
proc sendWantCancellations*(
|
proc sendWantCancellations*(
|
||||||
b: BlockExcNetwork,
|
b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
|
||||||
id: PeerId,
|
): Future[void] {.async.} =
|
||||||
addresses: seq[BlockAddress]): Future[void] {.async.} =
|
|
||||||
## Informs a remote peer that we're no longer interested in a set of blocks
|
## Informs a remote peer that we're no longer interested in a set of blocks
|
||||||
##
|
##
|
||||||
await b.sendWantList(id = id, addresses = addresses, cancel = true)
|
await b.sendWantList(id = id, addresses = addresses, cancel = true)
|
||||||
|
|
||||||
proc handleBlocksDelivery(
|
proc handleBlocksDelivery(
|
||||||
b: BlockExcNetwork,
|
b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
|
||||||
peer: NetworkPeer,
|
) {.async.} =
|
||||||
blocksDelivery: seq[BlockDelivery]) {.async.} =
|
|
||||||
## Handle incoming blocks
|
## Handle incoming blocks
|
||||||
##
|
##
|
||||||
|
|
||||||
if not b.handlers.onBlocksDelivery.isNil:
|
if not b.handlers.onBlocksDelivery.isNil:
|
||||||
await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)
|
await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)
|
||||||
|
|
||||||
|
|
||||||
proc sendBlocksDelivery*(
|
proc sendBlocksDelivery*(
|
||||||
b: BlockExcNetwork,
|
-    id: PeerId,
-    blocksDelivery: seq[BlockDelivery]): Future[void] =
+    b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
+): Future[void] =
   ## Send blocks to remote
   ##

   b.send(id, pb.Message(payload: blocksDelivery))

 proc handleBlockPresence(
-    b: BlockExcNetwork,
-    peer: NetworkPeer,
-    presence: seq[BlockPresence]) {.async.} =
+    b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
+) {.async.} =
   ## Handle block presence
   ##

@@ -181,56 +183,44 @@ proc handleBlockPresence(
   await b.handlers.onPresence(peer.id, presence)

 proc sendBlockPresence*(
-    b: BlockExcNetwork,
-    id: PeerId,
-    presence: seq[BlockPresence]): Future[void] =
+    b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence]
+): Future[void] =
   ## Send presence to remote
   ##

   b.send(id, Message(blockPresences: @presence))

 proc handleAccount(
-    network: BlockExcNetwork,
-    peer: NetworkPeer,
-    account: Account) {.async.} =
+    network: BlockExcNetwork, peer: NetworkPeer, account: Account
+) {.async.} =
   ## Handle account info
   ##

   if not network.handlers.onAccount.isNil:
     await network.handlers.onAccount(peer.id, account)

-proc sendAccount*(
-    b: BlockExcNetwork,
-    id: PeerId,
-    account: Account): Future[void] =
+proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] =
   ## Send account info to remote
   ##

   b.send(id, Message(account: AccountMessage.init(account)))

-proc sendPayment*(
-    b: BlockExcNetwork,
-    id: PeerId,
-    payment: SignedState): Future[void] =
+proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] =
   ## Send payment to remote
   ##

   b.send(id, Message(payment: StateChannelUpdate.init(payment)))

 proc handlePayment(
-    network: BlockExcNetwork,
-    peer: NetworkPeer,
-    payment: SignedState) {.async.} =
+    network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
+) {.async.} =
   ## Handle payment
   ##

   if not network.handlers.onPayment.isNil:
     await network.handlers.onPayment(peer.id, payment)

-proc rpcHandler(
-    b: BlockExcNetwork,
-    peer: NetworkPeer,
-    msg: Message) {.raises: [].} =
+proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.raises: [].} =
   ## handle rpc messages
   ##
   if msg.wantList.entries.len > 0:
@@ -325,15 +315,14 @@ proc new*(
     T: type BlockExcNetwork,
     switch: Switch,
     connProvider: ConnProvider = nil,
-    maxInflight = MaxInflight): BlockExcNetwork =
+    maxInflight = MaxInflight,
+): BlockExcNetwork =
   ## Create a new BlockExcNetwork instance
   ##

-  let
-    self = BlockExcNetwork(
-      switch: switch,
-      getConn: connProvider,
-      inflightSema: newAsyncSemaphore(maxInflight))
+  let self = BlockExcNetwork(
+    switch: switch, getConn: connProvider, inflightSema: newAsyncSemaphore(maxInflight)
+  )

   proc sendWantList(
       id: PeerId,
@@ -342,15 +331,18 @@ proc new*(
       cancel: bool = false,
       wantType: WantType = WantType.WantHave,
       full: bool = false,
-      sendDontHave: bool = false): Future[void] {.gcsafe.} =
-    self.sendWantList(
-      id, cids, priority, cancel,
-      wantType, full, sendDontHave)
+      sendDontHave: bool = false,
+  ): Future[void] {.gcsafe.} =
+    self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)

-  proc sendWantCancellations(id: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} =
+  proc sendWantCancellations(
+      id: PeerId, addresses: seq[BlockAddress]
+  ): Future[void] {.gcsafe.} =
     self.sendWantCancellations(id, addresses)

-  proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
+  proc sendBlocksDelivery(
+      id: PeerId, blocksDelivery: seq[BlockDelivery]
+  ): Future[void] {.gcsafe.} =
     self.sendBlocksDelivery(id, blocksDelivery)

   proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
@@ -368,7 +360,8 @@ proc new*(
     sendBlocksDelivery: sendBlocksDelivery,
     sendPresence: sendPresence,
     sendAccount: sendAccount,
-    sendPayment: sendPayment)
+    sendPayment: sendPayment,
+  )

   self.init()
   return self
@@ -8,7 +8,8 @@
 ## those terms.

 import pkg/upraises
-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import pkg/chronos
 import pkg/libp2p
@@ -33,8 +34,7 @@ type
     getConn: ConnProvider

 proc connected*(b: NetworkPeer): bool =
-  not(isNil(b.sendConn)) and
-    not(b.sendConn.closed or b.sendConn.atEof)
+  not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof)

 proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
   if isNil(conn):
@@ -83,12 +83,8 @@ func new*(
     T: type NetworkPeer,
     peer: PeerId,
     connProvider: ConnProvider,
-    rpcHandler: RPCHandler): NetworkPeer =
-
-  doAssert(not isNil(connProvider),
-    "should supply connection provider")
-
-  NetworkPeer(
-    id: peer,
-    getConn: connProvider,
-    handler: rpcHandler)
+    rpcHandler: RPCHandler,
+): NetworkPeer =
+  doAssert(not isNil(connProvider), "should supply connection provider")
+
+  NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler)
@@ -25,8 +25,7 @@ import ../../logutils

 export payments, nitro

-type
-  BlockExcPeerCtx* = ref object of RootObj
+type BlockExcPeerCtx* = ref object of RootObj
   id*: PeerId
   blocks*: Table[BlockAddress, Presence] # remote peer have list including price
   peerWants*: seq[WantListEntry] # remote peers want lists
@@ -13,7 +13,8 @@ import std/algorithm

 import pkg/upraises

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import pkg/chronos
 import pkg/libp2p
@@ -22,7 +23,6 @@ import ../protobuf/blockexc
 import ../../blocktype
 import ../../logutils

-
 import ./peercontext
 export peercontext

@@ -32,6 +32,7 @@ logScope:
 type
   PeerCtxStore* = ref object of RootObj
     peers*: OrderedTable[PeerId, BlockExcPeerCtx]

   PeersForBlock* = object of RootObj
     with*: seq[BlockExcPeerCtx]
     without*: seq[BlockExcPeerCtx]
@@ -42,7 +42,6 @@ proc `==`*(a: WantListEntry, b: BlockAddress): bool =
 proc `<`*(a, b: WantListEntry): bool =
   a.priority < b.priority

-
 proc `==`*(a: BlockPresence, b: BlockAddress): bool =
   return a.address == b

@@ -20,7 +20,7 @@ const

 type
   WantType* = enum
-    WantBlock = 0,
+    WantBlock = 0
     WantHave = 1

   WantListEntry* = object
@@ -41,7 +41,7 @@ type
     proof*: ?CodexProof # Present only if `address.leaf` is true

   BlockPresenceType* = enum
-    Have = 0,
+    Have = 0
     DontHave = 1

   BlockPresence* = object
@@ -140,7 +140,6 @@ proc protobufEncode*(value: Message): seq[byte] =
   ipb.finish()
   ipb.buffer

-
 #
 # Decoding Message from seq[byte] in Protobuf format
 #
@@ -211,7 +210,8 @@ proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery]
   if ?pb.getField(1, cidBuf):
     cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
   if ?pb.getField(2, dataBuf):
-    value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
+    value.blk =
+      ?Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
   if ?pb.getField(3, ipb):
     value.address = ?BlockAddress.decode(ipb)

@@ -240,14 +240,14 @@ proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence]
   ok(value)

 proc decode*(_: type AccountMessage, pb: ProtoBuffer): ProtoResult[AccountMessage] =
-  var
-    value = AccountMessage()
+  var value = AccountMessage()
   discard ?pb.getField(1, value.address)
   ok(value)

-proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChannelUpdate] =
-  var
-    value = StateChannelUpdate()
+proc decode*(
+    _: type StateChannelUpdate, pb: ProtoBuffer
+): ProtoResult[StateChannelUpdate] =
+  var value = StateChannelUpdate()
   discard ?pb.getField(1, value.update)
   ok(value)

@@ -261,7 +261,9 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
     value.wantList = ?WantList.decode(ipb)
   if ?pb.getRepeatedField(3, sublist):
     for item in sublist:
-      value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)))
+      value.payload.add(
+        ?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))
+      )
   if ?pb.getRepeatedField(4, sublist):
     for item in sublist:
       value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
@@ -11,10 +11,10 @@ export StateChannelUpdate
 export stint
 export nitro

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

-type
-  Account* = object
+type Account* = object
   address*: EthAddress

 func init*(_: type AccountMessage, account: Account): AccountMessage =
@@ -11,7 +11,8 @@ export questionable
 export stint
 export BlockPresenceType

-upraises.push: {.upraises: [].}
+upraises.push:
+  {.upraises: [].}

 type
   PresenceMessage* = blockexc.BlockPresence
@@ -32,15 +33,12 @@ func init*(_: type Presence, message: PresenceMessage): ?Presence =
   some Presence(
     address: message.address,
     have: message.`type` == BlockPresenceType.Have,
-    price: price
+    price: price,
   )

 func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
   PresenceMessage(
     address: presence.address,
-    `type`: if presence.have:
-      BlockPresenceType.Have
-    else:
-      BlockPresenceType.DontHave,
-    price: @(presence.price.toBytesBE)
+    `type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave,
+    price: @(presence.price.toBytesBE),
   )
@@ -14,7 +14,8 @@ export tables

 import pkg/upraises

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import pkg/libp2p/[cid, multicodec, multihash]
 import pkg/stew/byteutils
@@ -49,11 +50,11 @@ logutils.formatIt(LogFormat.textLines, BlockAddress):
   else:
     "cid: " & shortLog($it.cid)

-logutils.formatIt(LogFormat.json, BlockAddress): %it
+logutils.formatIt(LogFormat.json, BlockAddress):
+  %it

 proc `==`*(a, b: BlockAddress): bool =
-  a.leaf == b.leaf and
-    (
+  a.leaf == b.leaf and (
     if a.leaf:
       a.treeCid == b.treeCid and a.index == b.index
     else:
@@ -67,10 +68,7 @@ proc `$`*(a: BlockAddress): string =
     "cid: " & $a.cid

 proc cidOrTreeCid*(a: BlockAddress): Cid =
-  if a.leaf:
-    a.treeCid
-  else:
-    a.cid
+  if a.leaf: a.treeCid else: a.cid

 proc address*(b: Block): BlockAddress =
   BlockAddress(leaf: false, cid: b.cid)
@@ -90,7 +88,8 @@ func new*(
     data: openArray[byte] = [],
     version = CIDv1,
     mcodec = Sha256HashCodec,
-    codec = BlockCodec): ?!Block =
+    codec = BlockCodec,
+): ?!Block =
   ## creates a new block for both storage and network IO
   ##

@@ -100,15 +99,11 @@ func new*(

   # TODO: If the hash is `>=` to the data,
   # use the Cid as a container!
-  Block(
-    cid: cid,
-    data: @data).success
+  Block(cid: cid, data: @data).success

 proc new*(
-    T: type Block,
-    cid: Cid,
-    data: openArray[byte],
-    verify: bool = true
+    T: type Block, cid: Cid, data: openArray[byte], verify: bool = true
 ): ?!Block =
   ## creates a new block for both storage and network IO
   ##
@@ -121,22 +116,23 @@ proc new*(
   if computedCid != cid:
     return "Cid doesn't match the data".failure

-  return Block(
-    cid: cid,
-    data: @data
-  ).success
+  return Block(cid: cid, data: @data).success

 proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block =
-  emptyCid(version, hcodec, BlockCodec)
-    .flatMap((cid: Cid) => Block.new(cid = cid, data = @[]))
+  emptyCid(version, hcodec, BlockCodec).flatMap(
+    (cid: Cid) => Block.new(cid = cid, data = @[])
+  )

 proc emptyBlock*(cid: Cid): ?!Block =
-  cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
-    emptyBlock(cid.cidver, mhash.mcodec))
+  cid.mhash.mapFailure.flatMap(
+    (mhash: MultiHash) => emptyBlock(cid.cidver, mhash.mcodec)
+  )

 proc isEmpty*(cid: Cid): bool =
-  success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
-    emptyCid(cid.cidver, mhash.mcodec, cid.mcodec))
+  success(cid) ==
+    cid.mhash.mapFailure.flatMap(
+      (mhash: MultiHash) => emptyCid(cid.cidver, mhash.mcodec, cid.mcodec)
+    )

 proc isEmpty*(blk: Block): bool =
   blk.cid.isEmpty
@@ -11,7 +11,8 @@

 import pkg/upraises

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import pkg/questionable
 import pkg/questionable/results
@@ -23,8 +24,7 @@ import ./logutils

 export blocktype

-const
-  DefaultChunkSize* = DefaultBlockSize
+const DefaultChunkSize* = DefaultBlockSize

 type
   # default reader type
@@ -60,30 +60,21 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} =
   return move buff

 proc new*(
-    T: type Chunker,
-    reader: Reader,
-    chunkSize = DefaultChunkSize,
-    pad = true
+    T: type Chunker, reader: Reader, chunkSize = DefaultChunkSize, pad = true
 ): Chunker =
   ## create a new Chunker instance
   ##
-  Chunker(
-    reader: reader,
-    offset: 0,
-    chunkSize: chunkSize,
-    pad: pad)
+  Chunker(reader: reader, offset: 0, chunkSize: chunkSize, pad: pad)

 proc new*(
-    T: type LPStreamChunker,
-    stream: LPStream,
-    chunkSize = DefaultChunkSize,
-    pad = true
+    T: type LPStreamChunker, stream: LPStream, chunkSize = DefaultChunkSize, pad = true
 ): LPStreamChunker =
   ## create the default File chunker
   ##

-  proc reader(data: ChunkBuffer, len: int): Future[int]
-      {.gcsafe, async, raises: [Defect].} =
+  proc reader(
+      data: ChunkBuffer, len: int
+  ): Future[int] {.gcsafe, async, raises: [Defect].} =
     var res = 0
     try:
       while res < len:
@@ -101,22 +92,17 @@ proc new*(

     return res

-  LPStreamChunker.new(
-    reader = reader,
-    chunkSize = chunkSize,
-    pad = pad)
+  LPStreamChunker.new(reader = reader, chunkSize = chunkSize, pad = pad)

 proc new*(
-    T: type FileChunker,
-    file: File,
-    chunkSize = DefaultChunkSize,
-    pad = true
+    T: type FileChunker, file: File, chunkSize = DefaultChunkSize, pad = true
 ): FileChunker =
   ## create the default File chunker
   ##

-  proc reader(data: ChunkBuffer, len: int): Future[int]
-      {.gcsafe, async, raises: [Defect].} =
+  proc reader(
+      data: ChunkBuffer, len: int
+  ): Future[int] {.gcsafe, async, raises: [Defect].} =
     var total = 0
     try:
       while total < len:
@@ -135,7 +121,4 @@ proc new*(

     return total

-  FileChunker.new(
-    reader = reader,
-    chunkSize = chunkSize,
-    pad = pad)
+  FileChunker.new(reader = reader, chunkSize = chunkSize, pad = pad)
@@ -20,9 +20,9 @@ method start*(clock: Clock) {.base, async.} =
 method stop*(clock: Clock) {.base, async.} =
   discard

-proc withTimeout*(future: Future[void],
-    clock: Clock,
-    expiry: SecondsSince1970) {.async.} =
+proc withTimeout*(
+    future: Future[void], clock: Clock, expiry: SecondsSince1970
+) {.async.} =
   let timeout = clock.waitUntil(expiry)
   try:
     await future or timeout

111 codex/codex.nim
@@ -68,8 +68,7 @@ proc waitForSync(provider: Provider): Future[void] {.async.} =
     inc sleepTime
   trace "Ethereum provider is synced."

-proc bootstrapInteractions(
-    s: CodexServer): Future[void] {.async.} =
+proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
   ## bootstrap interactions and return contracts
   ## using clients, hosts, validators pairings
   ##
@@ -137,10 +136,10 @@ proc bootstrapInteractions(
       host = some HostInteractions.new(clock, sales)

     if config.validator:
-      without validationConfig =? ValidationConfig.init(
-        config.validatorMaxSlots,
-        config.validatorGroups,
-        config.validatorGroupIndex), err:
+      without validationConfig =?
+        ValidationConfig.init(
+          config.validatorMaxSlots, config.validatorGroups, config.validatorGroupIndex
+        ), err:
         error "Invalid validation parameters", err = err.msg
         quit QuitFailure
       let validation = Validation.new(clock, market, validationConfig)
@@ -157,9 +156,8 @@ proc start*(s: CodexServer) {.async.} =
   await s.codexNode.switch.start()

   let (announceAddrs, discoveryAddrs) = nattedAddress(
-    s.config.nat,
-    s.codexNode.switch.peerInfo.addrs,
-    s.config.discoveryPort)
+    s.config.nat, s.codexNode.switch.peerInfo.addrs, s.config.discoveryPort
+  )

   s.codexNode.discovery.updateAnnounceRecord(announceAddrs)
   s.codexNode.discovery.updateDhtRecord(discoveryAddrs)
@@ -176,15 +174,14 @@ proc stop*(s: CodexServer) {.async.} =
       s.codexNode.switch.stop(),
       s.codexNode.stop(),
       s.repoStore.stop(),
-      s.maintenance.stop())
+      s.maintenance.stop(),
+    )

 proc new*(
-    T: type CodexServer,
-    config: CodexConf,
-    privateKey: CodexPrivateKey): CodexServer =
+    T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
+): CodexServer =
   ## create CodexServer including setting up datastore, repostore, etc
-  let
-    switch = SwitchBuilder
+  let switch = SwitchBuilder
     .new()
     .withPrivateKey(privateKey)
     .withAddresses(config.listenAddrs)
@@ -197,64 +194,88 @@ proc new*(
     .withTcpTransport({ServerFlags.ReuseAddr})
     .build()

-  var
-    cache: CacheStore = nil
+  var cache: CacheStore = nil

   if config.cacheSize > 0'nb:
     cache = CacheStore.new(cacheSize = config.cacheSize)
     ## Is unused?

-  let
-    discoveryDir = config.dataDir / CodexDhtNamespace
+  let discoveryDir = config.dataDir / CodexDhtNamespace

   if io2.createPath(discoveryDir).isErr:
-    trace "Unable to create discovery directory for block store", discoveryDir = discoveryDir
+    trace "Unable to create discovery directory for block store",
+      discoveryDir = discoveryDir
     raise (ref Defect)(
-      msg: "Unable to create discovery directory for block store: " & discoveryDir)
+      msg: "Unable to create discovery directory for block store: " & discoveryDir
+    )

   let
     discoveryStore = Datastore(
-      LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace)
-        .expect("Should create discovery datastore!"))
+      LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace).expect(
+        "Should create discovery datastore!"
+      )
+    )

     discovery = Discovery.new(
       switch.peerInfo.privateKey,
       announceAddrs = config.listenAddrs,
       bindPort = config.discoveryPort,
       bootstrapNodes = config.bootstrapNodes,
-      store = discoveryStore)
+      store = discoveryStore,
+    )

     wallet = WalletRef.new(EthPrivateKey.random())
     network = BlockExcNetwork.new(switch)

-    repoData = case config.repoKind
-      of repoFS: Datastore(FSDatastore.new($config.dataDir, depth = 5)
-        .expect("Should create repo file data store!"))
-      of repoSQLite: Datastore(SQLiteDatastore.new($config.dataDir)
-        .expect("Should create repo SQLite data store!"))
-      of repoLevelDb: Datastore(LevelDbDatastore.new($config.dataDir)
-        .expect("Should create repo LevelDB data store!"))
+    repoData =
+      case config.repoKind
+      of repoFS:
+        Datastore(
+          FSDatastore.new($config.dataDir, depth = 5).expect(
+            "Should create repo file data store!"
+          )
+        )
+      of repoSQLite:
+        Datastore(
+          SQLiteDatastore.new($config.dataDir).expect(
+            "Should create repo SQLite data store!"
+          )
+        )
+      of repoLevelDb:
+        Datastore(
+          LevelDbDatastore.new($config.dataDir).expect(
+            "Should create repo LevelDB data store!"
+          )
+        )

     repoStore = RepoStore.new(
       repoDs = repoData,
-      metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace)
-        .expect("Should create metadata store!"),
+      metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace).expect(
+        "Should create metadata store!"
+      ),
       quotaMaxBytes = config.storageQuota,
-      blockTtl = config.blockTtl)
+      blockTtl = config.blockTtl,
+    )

     maintenance = BlockMaintainer.new(
       repoStore,
       interval = config.blockMaintenanceInterval,
-      numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks)
+      numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks,
+    )

     peerStore = PeerCtxStore.new()
     pendingBlocks = PendingBlocksManager.new()
     advertiser = Advertiser.new(repoStore, discovery)
-    blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
-    engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks)
+    blockDiscovery =
+      DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
+    engine = BlockExcEngine.new(
+      repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks
+    )
     store = NetworkStore.new(engine, repoStore)
-    prover = if config.prover:
-      let backend = config.initializeBackend().expect("Unable to create prover backend.")
+    prover =
+      if config.prover:
+        let backend =
+          config.initializeBackend().expect("Unable to create prover backend.")
         some Prover.new(store, backend, config.numProofSamples)
       else:
         none Prover
@@ -264,13 +285,16 @@ proc new*(
       networkStore = store,
       engine = engine,
       discovery = discovery,
-      prover = prover)
+      prover = prover,
+    )

-    restServer = RestServerRef.new(
+    restServer = RestServerRef
+    .new(
       codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
       initTAddress(config.apiBindAddress, config.apiPort),
       bufferSize = (1024 * 64),
-      maxRequestBodySize = int.high)
+      maxRequestBodySize = int.high,
+    )
     .expect("Should start rest server!")

   switch.mount(network)
@@ -280,4 +304,5 @@ proc new*(
     codexNode: codexNode,
     restServer: restServer,
     repoStore: repoStore,
-    maintenance: maintenance)
+    maintenance: maintenance,
+  )
@@ -48,18 +48,10 @@ const
   SlotProvingRootCodec* = multiCodec("codex-proving-root")
   CodexSlotCellCodec* = multiCodec("codex-slot-cell")

-  CodexHashesCodecs* = [
-    Sha256HashCodec,
-    Pos2Bn128SpngCodec,
-    Pos2Bn128MrklCodec
-  ]
+  CodexHashesCodecs* = [Sha256HashCodec, Pos2Bn128SpngCodec, Pos2Bn128MrklCodec]

   CodexPrimitivesCodecs* = [
-    ManifestCodec,
-    DatasetRootCodec,
-    BlockCodec,
-    SlotRootCodec,
-    SlotProvingRootCodec,
+    ManifestCodec, DatasetRootCodec, BlockCodec, SlotRootCodec, SlotProvingRootCodec,
     CodexSlotCellCodec,
   ]

@@ -78,24 +70,19 @@ proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
     Sha512HashCodec: ?MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
   }.toTable

-  var
-    table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()
+  var table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()

   for hcodec, mhash in PadHashes.pairs:
     table[(CIDv1, hcodec, BlockCodec)] = ?Cid.init(CIDv1, BlockCodec, mhash).mapFailure

   success table

-proc emptyCid*(
-    version: CidVersion,
-    hcodec: MultiCodec,
-    dcodec: MultiCodec): ?!Cid =
+proc emptyCid*(version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec): ?!Cid =
   ## Returns cid representing empty content,
   ## given cid version, hash codec and data codec
   ##

-  var
-    table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]
+  var table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]

   once:
     table = ?initEmptyCidTable()
@@ -103,11 +90,10 @@ proc emptyCid*(
   table[(version, hcodec, dcodec)].catch

 proc emptyDigest*(
-    version: CidVersion,
-    hcodec: MultiCodec,
-    dcodec: MultiCodec): ?!MultiHash =
+    version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec
+): ?!MultiHash =
   ## Returns hash representing empty content,
   ## given cid version, hash codec and data codec
   ##
-  emptyCid(version, hcodec, dcodec)
-    .flatMap((cid: Cid) => cid.mhash.mapFailure)
+  emptyCid(version, hcodec, dcodec).flatMap((cid: Cid) => cid.mhash.mapFailure)

484 codex/conf.nim
@@ -50,13 +50,12 @@ export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
 export ValidationGroups, MaxSlots

 export
-  DefaultQuotaBytes,
-  DefaultBlockTtl,
-  DefaultBlockMaintenanceInterval,
+  DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval,
   DefaultNumberOfBlocksToMaintainPerInterval

 proc defaultDataDir*(): string =
-  let dataDir = when defined(windows):
+  let dataDir =
+    when defined(windows):
       "AppData" / "Roaming" / "Codex"
     elif defined(macosx):
       "Library" / "Application Support" / "Codex"
@@ -96,320 +95,341 @@ type

   CodexConf* = object
     configFile* {.
-      desc: "Loads the configuration from a TOML file"
-      defaultValueDesc: "none"
-      defaultValue: InputFile.none
-      name: "config-file" }: Option[InputFile]
+      desc: "Loads the configuration from a TOML file",
+      defaultValueDesc: "none",
+      defaultValue: InputFile.none,
+      name: "config-file"
+    .}: Option[InputFile]

-    logLevel* {.
-      defaultValue: "info"
-      desc: "Sets the log level",
-      name: "log-level" }: string
+    logLevel* {.defaultValue: "info", desc: "Sets the log level", name: "log-level".}:
+      string

     logFormat* {.
-      desc: "Specifies what kind of logs should be written to stdout (auto, " &
-        "colors, nocolors, json)"
-      defaultValueDesc: "auto"
-      defaultValue: LogKind.Auto
-      name: "log-format" }: LogKind
+      desc:
+        "Specifies what kind of logs should be written to stdout (auto, " &
+        "colors, nocolors, json)",
+      defaultValueDesc: "auto",
+      defaultValue: LogKind.Auto,
+      name: "log-format"
+    .}: LogKind

     metricsEnabled* {.
-      desc: "Enable the metrics server"
-      defaultValue: false
-      name: "metrics" }: bool
+      desc: "Enable the metrics server", defaultValue: false, name: "metrics"
+    .}: bool

     metricsAddress* {.
-      desc: "Listening address of the metrics server"
-      defaultValue: defaultAddress(config)
-      defaultValueDesc: "127.0.0.1"
-      name: "metrics-address" }: IpAddress
+      desc: "Listening address of the metrics server",
+      defaultValue: defaultAddress(config),
+      defaultValueDesc: "127.0.0.1",
+      name: "metrics-address"
+    .}: IpAddress

     metricsPort* {.
-      desc: "Listening HTTP port of the metrics server"
-      defaultValue: 8008
-      name: "metrics-port" }: Port
+      desc: "Listening HTTP port of the metrics server",
+      defaultValue: 8008,
+      name: "metrics-port"
+    .}: Port

     dataDir* {.
-      desc: "The directory where codex will store configuration and data"
-      defaultValue: DefaultDataDir
-      defaultValueDesc: $DefaultDataDir
-      abbr: "d"
-      name: "data-dir" }: OutDir
+      desc: "The directory where codex will store configuration and data",
+      defaultValue: DefaultDataDir,
+      defaultValueDesc: $DefaultDataDir,
+      abbr: "d",
+      name: "data-dir"
+    .}: OutDir

     listenAddrs* {.
-      desc: "Multi Addresses to listen on"
-      defaultValue: @[
-        MultiAddress.init("/ip4/0.0.0.0/tcp/0")
-        .expect("Should init multiaddress")]
-      defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
-      abbr: "i"
-      name: "listen-addrs" }: seq[MultiAddress]
+      desc: "Multi Addresses to listen on",
+      defaultValue:
+        @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").expect("Should init multiaddress")],
+      defaultValueDesc: "/ip4/0.0.0.0/tcp/0",
+      abbr: "i",
+      name: "listen-addrs"
+    .}: seq[MultiAddress]

     nat* {.
-      desc: "Specify method to use for determining public address. " &
-        "Must be one of: any, none, upnp, pmp, extip:<IP>"
-      defaultValue: defaultNatConfig()
-      defaultValueDesc: "any"
-      name: "nat" }: NatConfig
+      desc:
+        "Specify method to use for determining public address. " &
+        "Must be one of: any, none, upnp, pmp, extip:<IP>",
+      defaultValue: defaultNatConfig(),
+      defaultValueDesc: "any",
+      name: "nat"
+    .}: NatConfig

     discoveryPort* {.
-      desc: "Discovery (UDP) port"
-      defaultValue: 8090.Port
-      defaultValueDesc: "8090"
-      abbr: "u"
-      name: "disc-port" }: Port
+      desc: "Discovery (UDP) port",
+      defaultValue: 8090.Port,
+      defaultValueDesc: "8090",
+      abbr: "u",
+      name: "disc-port"
+    .}: Port

     netPrivKeyFile* {.
-      desc: "Source of network (secp256k1) private key file path or name"
-      defaultValue: "key"
-      name: "net-privkey" }: string
+      desc: "Source of network (secp256k1) private key file path or name",
+      defaultValue: "key",
+      name: "net-privkey"
+    .}: string

     bootstrapNodes* {.
-      desc: "Specifies one or more bootstrap nodes to use when " &
-        "connecting to the network"
-      abbr: "b"
-      name: "bootstrap-node" }: seq[SignedPeerRecord]
+      desc:
+        "Specifies one or more bootstrap nodes to use when " &
+        "connecting to the network",
+      abbr: "b",
+      name: "bootstrap-node"
+    .}: seq[SignedPeerRecord]

     maxPeers* {.
-      desc: "The maximum number of peers to connect to"
-      defaultValue: 160
-      name: "max-peers" }: int
+      desc: "The maximum number of peers to connect to",
+      defaultValue: 160,
+      name: "max-peers"
+    .}: int

     agentString* {.
-      defaultValue: "Codex"
-      desc: "Node agent string which is used as identifier in network"
-      name: "agent-string" }: string
+      defaultValue: "Codex",
+      desc: "Node agent string which is used as identifier in network",
+      name: "agent-string"
+    .}: string

     apiBindAddress* {.
-      desc: "The REST API bind address"
-      defaultValue: "127.0.0.1"
-      name: "api-bindaddr"
-    }: string
+      desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr"
+    .}: string

     apiPort* {.
       desc: "The REST Api port",
-      defaultValue: 8080.Port
-      defaultValueDesc: "8080"
-      name: "api-port"
-      abbr: "p" }: Port
+      defaultValue: 8080.Port,
+      defaultValueDesc: "8080",
+      name: "api-port",
+      abbr: "p"
+    .}: Port

     apiCorsAllowedOrigin* {.
-      desc: "The REST Api CORS allowed origin for downloading data. " &
+      desc:
+        "The REST Api CORS allowed origin for downloading data. " &
         "'*' will allow all origins, '' will allow none.",
-      defaultValue: string.none
-      defaultValueDesc: "Disallow all cross origin requests to download data"
-      name: "api-cors-origin" }: Option[string]
-
-    repoKind* {.
-      desc: "Backend for main repo store (fs, sqlite, leveldb)"
-      defaultValueDesc: "fs"
-      defaultValue: repoFS
-      name: "repo-kind" }: RepoKind
-
-    storageQuota* {.
-      desc: "The size of the total storage quota dedicated to the node"
-      defaultValue: DefaultQuotaBytes
-      defaultValueDesc: $DefaultQuotaBytes
-      name: "storage-quota"
-      abbr: "q" }: NBytes
-
-    blockTtl* {.
-      desc: "Default block timeout in seconds - 0 disables the ttl"
-      defaultValue: DefaultBlockTtl
-      defaultValueDesc: $DefaultBlockTtl
-      name: "block-ttl"
-      abbr: "t" }: Duration
-
-    blockMaintenanceInterval* {.
-      desc: "Time interval in seconds - determines frequency of block " &
-        "maintenance cycle: how often blocks are checked " &
-        "for expiration and cleanup"
-      defaultValue: DefaultBlockMaintenanceInterval
-      defaultValueDesc: $DefaultBlockMaintenanceInterval
-      name: "block-mi" }: Duration
-
-    blockMaintenanceNumberOfBlocks* {.
-      desc: "Number of blocks to check every maintenance cycle"
-      defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
-      defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
-      name: "block-mn" }: int
-
-    cacheSize* {.
-      desc: "The size of the block cache, 0 disables the cache - " &
-        "might help on slow hardrives"
-      defaultValue: 0
-      defaultValueDesc: "0"
-      name: "cache-size"
-      abbr: "c" }: NBytes
-
-    logFile* {.
-      desc: "Logs to file"
-      defaultValue: string.none
-      name: "log-file"
-      hidden
+      defaultValue: string.none,
+      defaultValueDesc: "Disallow all cross origin requests to download data",
+      name: "api-cors-origin"
     .}: Option[string]

-    case cmd* {.
-      defaultValue: noCmd
-      command }: StartUpCmd
+    repoKind* {.
+      desc: "Backend for main repo store (fs, sqlite, leveldb)",
+      defaultValueDesc: "fs",
+      defaultValue: repoFS,
+      name: "repo-kind"
+    .}: RepoKind
+
+    storageQuota* {.
+      desc: "The size of the total storage quota dedicated to the node",
+      defaultValue: DefaultQuotaBytes,
+      defaultValueDesc: $DefaultQuotaBytes,
+      name: "storage-quota",
+      abbr: "q"
+    .}: NBytes
+
+    blockTtl* {.
+      desc: "Default block timeout in seconds - 0 disables the ttl",
+      defaultValue: DefaultBlockTtl,
+      defaultValueDesc: $DefaultBlockTtl,
+      name: "block-ttl",
+      abbr: "t"
+    .}: Duration
+
+    blockMaintenanceInterval* {.
+      desc:
+        "Time interval in seconds - determines frequency of block " &
+        "maintenance cycle: how often blocks are checked " & "for expiration and cleanup",
+      defaultValue: DefaultBlockMaintenanceInterval,
+      defaultValueDesc: $DefaultBlockMaintenanceInterval,
+      name: "block-mi"
+    .}: Duration
+
+    blockMaintenanceNumberOfBlocks* {.
+      desc: "Number of blocks to check every maintenance cycle",
+      defaultValue: DefaultNumberOfBlocksToMaintainPerInterval,
+      defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval,
+      name: "block-mn"
+    .}: int
+
+    cacheSize* {.
+      desc:
+        "The size of the block cache, 0 disables the cache - " &
+        "might help on slow hardrives",
+      defaultValue: 0,
+      defaultValueDesc: "0",
+      name: "cache-size",
+      abbr: "c"
+    .}: NBytes
+
+    logFile* {.
+      desc: "Logs to file", defaultValue: string.none, name: "log-file", hidden
+    .}: Option[string]
+
+    case cmd* {.defaultValue: noCmd, command.}: StartUpCmd
     of persistence:
       ethProvider* {.
-        desc: "The URL of the JSON-RPC API of the Ethereum node"
-        defaultValue: "ws://localhost:8545"
+        desc: "The URL of the JSON-RPC API of the Ethereum node",
+        defaultValue: "ws://localhost:8545",
         name: "eth-provider"
       .}: string

       ethAccount* {.
-        desc: "The Ethereum account that is used for storage contracts"
-        defaultValue: EthAddress.none
-        defaultValueDesc: ""
+        desc: "The Ethereum account that is used for storage contracts",
+        defaultValue: EthAddress.none,
+        defaultValueDesc: "",
        name: "eth-account"
       .}: Option[EthAddress]

       ethPrivateKey* {.
-        desc: "File containing Ethereum private key for storage contracts"
-        defaultValue: string.none
-        defaultValueDesc: ""
+        desc: "File containing Ethereum private key for storage contracts",
+        defaultValue: string.none,
+        defaultValueDesc: "",
         name: "eth-private-key"
       .}: Option[string]

       marketplaceAddress* {.
-        desc: "Address of deployed Marketplace contract"
-        defaultValue: EthAddress.none
-        defaultValueDesc: ""
+        desc: "Address of deployed Marketplace contract",
+        defaultValue: EthAddress.none,
+        defaultValueDesc: "",
         name: "marketplace-address"
       .}: Option[EthAddress]

       # TODO: should go behind a feature flag
       simulateProofFailures* {.
-        desc: "Simulates proof failures once every N proofs. 0 = disabled."
-        defaultValue: 0
-        name: "simulate-proof-failures"
+        desc: "Simulates proof failures once every N proofs. 0 = disabled.",
+        defaultValue: 0,
+        name: "simulate-proof-failures",
         hidden
       .}: int

       validator* {.
-        desc: "Enables validator, requires an Ethereum node"
-        defaultValue: false
+        desc: "Enables validator, requires an Ethereum node",
+        defaultValue: false,
         name: "validator"
       .}: bool

       validatorMaxSlots* {.
-        desc: "Maximum number of slots that the validator monitors"
-        longDesc: "If set to 0, the validator will not limit " &
-          "the maximum number of slots it monitors"
-        defaultValue: 1000
+        desc: "Maximum number of slots that the validator monitors",
+        longDesc:
+          "If set to 0, the validator will not limit " &
+          "the maximum number of slots it monitors",
+        defaultValue: 1000,
         name: "validator-max-slots"
       .}: MaxSlots

       validatorGroups* {.
-        desc: "Slot validation groups"
-        longDesc: "A number indicating total number of groups into " &
+        desc: "Slot validation groups",
+        longDesc:
+          "A number indicating total number of groups into " &
           "which the whole slot id space will be divided. " &
           "The value must be in the range [2, 65535]. " &
           "If not provided, the validator will observe " &
           "the whole slot id space and the value of " &
           "the --validator-group-index parameter will be ignored. " &
-          "Powers of twos are advised for even distribution"
-        defaultValue: ValidationGroups.none
+          "Powers of twos are advised for even distribution",
+        defaultValue: ValidationGroups.none,
         name: "validator-groups"
       .}: Option[ValidationGroups]

       validatorGroupIndex* {.
-        desc: "Slot validation group index"
-        longDesc: "The value provided must be in the range " &
+        desc: "Slot validation group index",
+        longDesc:
+          "The value provided must be in the range " &
           "[0, validatorGroups). Ignored when --validator-groups " &
           "is not provided. Only slot ids satisfying condition " &
           "[(slotId mod validationGroups) == groupIndex] will be " &
-          "observed by the validator"
-        defaultValue: 0
+          "observed by the validator",
+        defaultValue: 0,
         name: "validator-group-index"
       .}: uint16

       rewardRecipient* {.
-        desc: "Address to send payouts to (eg rewards and refunds)"
+        desc: "Address to send payouts to (eg rewards and refunds)",
         name: "reward-recipient"
       .}: Option[EthAddress]

-      case persistenceCmd* {.
-        defaultValue: noCmd
-        command }: PersistenceCmd
+      case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd

       of PersistenceCmd.prover:
         circuitDir* {.
-          desc: "Directory where Codex will store proof circuit data"
-          defaultValue: DefaultCircuitDir
-          defaultValueDesc: $DefaultCircuitDir
-          abbr: "cd"
-          name: "circuit-dir" }: OutDir
+          desc: "Directory where Codex will store proof circuit data",
+          defaultValue: DefaultCircuitDir,
+          defaultValueDesc: $DefaultCircuitDir,
+          abbr: "cd",
+          name: "circuit-dir"
+        .}: OutDir

         circomR1cs* {.
-          desc: "The r1cs file for the storage circuit"
-          defaultValue: $DefaultCircuitDir / "proof_main.r1cs"
-          defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs"
+          desc: "The r1cs file for the storage circuit",
+          defaultValue: $DefaultCircuitDir / "proof_main.r1cs",
+          defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs",
           name: "circom-r1cs"
         .}: InputFile

         circomWasm* {.
-          desc: "The wasm file for the storage circuit"
-          defaultValue: $DefaultCircuitDir / "proof_main.wasm"
-          defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm"
+          desc: "The wasm file for the storage circuit",
+          defaultValue: $DefaultCircuitDir / "proof_main.wasm",
+          defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm",
           name: "circom-wasm"
         .}: InputFile

         circomZkey* {.
-          desc: "The zkey file for the storage circuit"
-          defaultValue: $DefaultCircuitDir / "proof_main.zkey"
-          defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey"
+          desc: "The zkey file for the storage circuit",
+          defaultValue: $DefaultCircuitDir / "proof_main.zkey",
+          defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey",
           name: "circom-zkey"
         .}: InputFile

         # TODO: should probably be hidden and behind a feature flag
         circomNoZkey* {.
-          desc: "Ignore the zkey file - use only for testing!"
-          defaultValue: false
+          desc: "Ignore the zkey file - use only for testing!",
+          defaultValue: false,
           name: "circom-no-zkey"
         .}: bool

         numProofSamples* {.
-          desc: "Number of samples to prove"
-          defaultValue: DefaultSamplesNum
-          defaultValueDesc: $DefaultSamplesNum
-          name: "proof-samples" }: int
+          desc: "Number of samples to prove",
+          defaultValue: DefaultSamplesNum,
+          defaultValueDesc: $DefaultSamplesNum,
+          name: "proof-samples"
+        .}: int

         maxSlotDepth* {.
-          desc: "The maximum depth of the slot tree"
-          defaultValue: DefaultMaxSlotDepth
-          defaultValueDesc: $DefaultMaxSlotDepth
-          name: "max-slot-depth" }: int
+          desc: "The maximum depth of the slot tree",
+          defaultValue: DefaultMaxSlotDepth,
+          defaultValueDesc: $DefaultMaxSlotDepth,
+          name: "max-slot-depth"
+        .}: int

         maxDatasetDepth* {.
-          desc: "The maximum depth of the dataset tree"
-          defaultValue: DefaultMaxDatasetDepth
-          defaultValueDesc: $DefaultMaxDatasetDepth
-          name: "max-dataset-depth" }: int
+          desc: "The maximum depth of the dataset tree",
+          defaultValue: DefaultMaxDatasetDepth,
+          defaultValueDesc: $DefaultMaxDatasetDepth,
+          name: "max-dataset-depth"
+        .}: int

         maxBlockDepth* {.
-          desc: "The maximum depth of the network block merkle tree"
-          defaultValue: DefaultBlockDepth
-          defaultValueDesc: $DefaultBlockDepth
-          name: "max-block-depth" }: int
+          desc: "The maximum depth of the network block merkle tree",
+          defaultValue: DefaultBlockDepth,
+          defaultValueDesc: $DefaultBlockDepth,
+          name: "max-block-depth"
+        .}: int

         maxCellElms* {.
-          desc: "The maximum number of elements in a cell"
-          defaultValue: DefaultCellElms
-          defaultValueDesc: $DefaultCellElms
-          name: "max-cell-elements" }: int
+          desc: "The maximum number of elements in a cell",
+          defaultValue: DefaultCellElms,
+          defaultValueDesc: $DefaultCellElms,
+          name: "max-cell-elements"
+        .}: int
       of PersistenceCmd.noCmd:
         discard

     of StartUpCmd.noCmd:
       discard # end of persistence

   EthAddress* = ethers.Address

-logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
-logutils.formatIt(LogFormat.json, EthAddress): %it
+logutils.formatIt(LogFormat.textLines, EthAddress):
+  it.short0xHexLog
+logutils.formatIt(LogFormat.json, EthAddress):
+  %it

 func defaultAddress*(conf: CodexConf): IpAddress =
   result = static parseIpAddress("127.0.0.1")
@ -443,13 +463,12 @@ const
|
|||||||
nimBanner* = getNimBanner()
|
nimBanner* = getNimBanner()
|
||||||
|
|
||||||
codexFullVersion* =
|
codexFullVersion* =
|
||||||
"Codex version: " & codexVersion & "\p" &
|
"Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" &
|
||||||
"Codex revision: " & codexRevision & "\p" &
|
|
||||||
nimBanner
|
nimBanner
|
||||||
|
|
||||||
proc parseCmdArg*(T: typedesc[MultiAddress],
|
proc parseCmdArg*(
|
||||||
input: string): MultiAddress
|
T: typedesc[MultiAddress], input: string
|
||||||
{.upraises: [ValueError] .} =
|
): MultiAddress {.upraises: [ValueError].} =
|
||||||
var ma: MultiAddress
|
var ma: MultiAddress
|
||||||
try:
|
try:
|
||||||
let res = MultiAddress.init(input)
|
let res = MultiAddress.init(input)
|
||||||
@ -478,7 +497,7 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
  res

func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} =
-  case p.toLowerAscii:
+  case p.toLowerAscii
  of "any":
    NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
  of "none":
@ -499,7 +518,7 @@ func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} =
    let error = "Not a valid NAT option: " & p
    raise newException(ValueError, error)

-proc completeCmdArg*(T: type NatConfig; val: string): seq[string] =
+proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
  return @[]

proc parseCmdArg*(T: type EthAddress, address: string): T =
@ -521,8 +540,9 @@ proc parseCmdArg*(T: type Duration, val: string): T =
    quit QuitFailure
  dur

-proc readValue*(r: var TomlReader, val: var EthAddress)
-    {.upraises: [SerializationError, IOError].} =
+proc readValue*(
+    r: var TomlReader, val: var EthAddress
+) {.upraises: [SerializationError, IOError].} =
  val = EthAddress.init(r.readValue(string)).get()

proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
@ -548,8 +568,9 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) =
    warn "Invalid MultiAddress", input = input, error = res.error()
    quit QuitFailure

-proc readValue*(r: var TomlReader, val: var NBytes)
-    {.upraises: [SerializationError, IOError].} =
+proc readValue*(
+    r: var TomlReader, val: var NBytes
+) {.upraises: [SerializationError, IOError].} =
  var value = 0'i64
  var str = r.readValue(string)
  let count = parseSize(str, value, alwaysBin = true)
@ -558,8 +579,9 @@ proc readValue*(r: var TomlReader, val: var NBytes)
    quit QuitFailure
  val = NBytes(value)

-proc readValue*(r: var TomlReader, val: var Duration)
-    {.upraises: [SerializationError, IOError].} =
+proc readValue*(
+    r: var TomlReader, val: var Duration
+) {.upraises: [SerializationError, IOError].} =
  var str = r.readValue(string)
  var dur: Duration
  let count = parseDuration(str, dur)
@ -568,20 +590,23 @@ proc readValue*(r: var TomlReader, val: var Duration)
    quit QuitFailure
  val = dur

-proc readValue*(r: var TomlReader, val: var NatConfig)
-    {.raises: [SerializationError].} =
-  val = try: parseCmdArg(NatConfig, r.readValue(string))
+proc readValue*(
+    r: var TomlReader, val: var NatConfig
+) {.raises: [SerializationError].} =
+  val =
+    try:
+      parseCmdArg(NatConfig, r.readValue(string))
    except CatchableError as err:
      raise newException(SerializationError, err.msg)

# no idea why confutils needs this:
-proc completeCmdArg*(T: type EthAddress; val: string): seq[string] =
+proc completeCmdArg*(T: type EthAddress, val: string): seq[string] =
  discard

-proc completeCmdArg*(T: type NBytes; val: string): seq[string] =
+proc completeCmdArg*(T: type NBytes, val: string): seq[string] =
  discard

-proc completeCmdArg*(T: type Duration; val: string): seq[string] =
+proc completeCmdArg*(T: type Duration, val: string): seq[string] =
  discard

# silly chronicles, colors is a compile-time property
@ -627,8 +652,8 @@ proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
    setLogLevel(parseEnum[LogLevel](directives[0].toUpperAscii))
  except ValueError:
    raise (ref ValueError)(
-      msg: "Please specify one of: trace, debug, " &
-        "info, notice, warn, error or fatal"
+      msg:
+        "Please specify one of: trace, debug, " & "info, notice, warn, error or fatal"
    )

  if directives.len > 1:
@ -641,7 +666,9 @@ proc setupLogging*(conf: CodexConf) =
    warn "Logging configuration options not enabled in the current build"
  else:
    var logFile: ?IoHandle
-    proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard
+    proc noOutput(logLevel: LogLevel, msg: LogOutputStr) =
+      discard

    proc writeAndFlush(f: File, msg: LogOutputStr) =
      try:
        f.write(msg)
@ -662,14 +689,11 @@ proc setupLogging*(conf: CodexConf) =

      defaultChroniclesStream.outputs[2].writer = noOutput
      if logFilePath =? conf.logFile and logFilePath.len > 0:
-        let logFileHandle = openFile(
-          logFilePath,
-          {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}
-        )
+        let logFileHandle =
+          openFile(logFilePath, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate})
        if logFileHandle.isErr:
          error "failed to open log file",
-            path = logFilePath,
-            errorCode = $logFileHandle.error
+            path = logFilePath, errorCode = $logFileHandle.error
        else:
          logFile = logFileHandle.option
          defaultChroniclesStream.outputs[2].writer = fileFlush
@ -677,14 +701,13 @@ proc setupLogging*(conf: CodexConf) =
      defaultChroniclesStream.outputs[1].writer = noOutput

    let writer =
-      case conf.logFormat:
+      case conf.logFormat
      of LogKind.Auto:
-        if isatty(stdout):
-          stdoutFlush
-        else:
-          noColorsFlush
-      of LogKind.Colors: stdoutFlush
-      of LogKind.NoColors: noColorsFlush
+        if isatty(stdout): stdoutFlush else: noColorsFlush
+      of LogKind.Colors:
+        stdoutFlush
+      of LogKind.NoColors:
+        noColorsFlush
      of LogKind.Json:
        defaultChroniclesStream.outputs[1].writer = stdoutFlush
        noOutput
@ -697,6 +720,7 @@ proc setupLogging*(conf: CodexConf) =
        inc(counter)
        let withoutNewLine = msg[0 ..^ 2]
        writer(logLevel, withoutNewLine & " count=" & $counter & "\n")

      defaultChroniclesStream.outputs[0].writer = numberedWriter
    else:
      defaultChroniclesStream.outputs[0].writer = writer
@ -11,8 +11,7 @@ export clock
logScope:
  topics = "contracts clock"

-type
-  OnChainClock* = ref object of Clock
+type OnChainClock* = ref object of Clock
  provider: Provider
  subscription: Subscription
  offset: times.Duration
@ -29,7 +28,8 @@ proc update(clock: OnChainClock, blck: Block) =
  let computerTime = getTime()
  clock.offset = blockTime - computerTime
  clock.blockNumber = number
-  trace "updated clock", blockTime=blck.timestamp, blockNumber=number, offset=clock.offset
+  trace "updated clock",
+    blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
  clock.newBlock.fire()

proc update(clock: OnChainClock) {.async.} =
@ -8,11 +8,14 @@ type
  MarketplaceConfig* = object
    collateral*: CollateralConfig
    proofs*: ProofConfig

  CollateralConfig* = object
-    repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed
+    repairRewardPercentage*: uint8
+      # percentage of remaining collateral slot has after it has been freed
    maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
    slashCriterion*: uint16 # amount of proofs missed that lead to slashing
    slashPercentage*: uint8 # percentage of the collateral that is slashed

  ProofConfig* = object
    period*: UInt256 # proofs requirements are calculated per period (in seconds)
    timeout*: UInt256 # mark proofs as missing before the timeout (in seconds)
@ -23,14 +26,13 @@ type
    # blocks. Should be a prime number to ensure there are no cycles.
    downtimeProduct*: uint8

func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
  ProofConfig(
    period: tupl[0],
    timeout: tupl[1],
    downtime: tupl[2],
    zkeyHash: tupl[3],
-    downtimeProduct: tupl[4]
+    downtimeProduct: tupl[4],
  )

func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
@ -38,14 +40,11 @@ func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
    repairRewardPercentage: tupl[0],
    maxNumberOfSlashes: tupl[1],
    slashCriterion: tupl[2],
-    slashPercentage: tupl[3]
+    slashPercentage: tupl[3],
  )

func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
-  MarketplaceConfig(
-    collateral: tupl[0],
-    proofs: tupl[1]
-  )
+  MarketplaceConfig(collateral: tupl[0], proofs: tupl[1])

func solidityType*(_: type ProofConfig): string =
  solidityType(ProofConfig.fieldTypes)
@ -13,17 +13,14 @@ type Deployment* = ref object

const knownAddresses = {
  # Hardhat localhost network
-  "31337": {
-    "Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44"),
-  }.toTable,
+  "31337":
+    {"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44")}.toTable,
  # Taiko Alpha-3 Testnet
-  "167005": {
-    "Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")
-  }.toTable,
+  "167005":
+    {"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
  # Codex Testnet - Nov 25 2024 18:41:29 PM (+00:00 UTC)
-  "789987": {
-    "Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648")
-  }.toTable
+  "789987":
+    {"Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648")}.toTable,
}.toTable

proc getKnownAddress(T: type, chainId: UInt256): ?Address =
@ -9,13 +9,12 @@ import ./interactions
export purchasing
export logutils

-type
-  ClientInteractions* = ref object of ContractInteractions
+type ClientInteractions* = ref object of ContractInteractions
  purchasing*: Purchasing

-proc new*(_: type ClientInteractions,
-          clock: OnChainClock,
-          purchasing: Purchasing): ClientInteractions =
+proc new*(
+    _: type ClientInteractions, clock: OnChainClock, purchasing: Purchasing
+): ClientInteractions =
  ClientInteractions(clock: clock, purchasing: purchasing)

proc start*(self: ClientInteractions) {.async.} =
@ -7,15 +7,10 @@ import ./interactions
export sales
export logutils

-type
-  HostInteractions* = ref object of ContractInteractions
+type HostInteractions* = ref object of ContractInteractions
  sales*: Sales

-proc new*(
-    _: type HostInteractions,
-    clock: Clock,
-    sales: Sales
-): HostInteractions =
+proc new*(_: type HostInteractions, clock: Clock, sales: Sales): HostInteractions =
  ## Create a new HostInteractions instance
  ##
  HostInteractions(clock: clock, sales: sales)
@ -5,8 +5,7 @@ import ../market

export clock

-type
-  ContractInteractions* = ref object of RootObj
+type ContractInteractions* = ref object of RootObj
  clock*: Clock

method start*(self: ContractInteractions) {.async, base.} =
@ -3,13 +3,12 @@ import ../../validation

export validation

-type
-  ValidatorInteractions* = ref object of ContractInteractions
+type ValidatorInteractions* = ref object of ContractInteractions
  validation: Validation

-proc new*(_: type ValidatorInteractions,
-          clock: OnChainClock,
-          validation: Validation): ValidatorInteractions =
+proc new*(
+    _: type ValidatorInteractions, clock: OnChainClock, validation: Validation
+): ValidatorInteractions =
  ValidatorInteractions(clock: clock, validation: validation)

proc start*(self: ValidatorInteractions) {.async.} =
@ -27,18 +27,12 @@ type
    eventSubscription: EventSubscription

func new*(
-    _: type OnChainMarket,
-    contract: Marketplace,
-    rewardRecipient = Address.none): OnChainMarket =
-
+    _: type OnChainMarket, contract: Marketplace, rewardRecipient = Address.none
+): OnChainMarket =
  without signer =? contract.signer:
    raiseAssert("Marketplace contract should have a signer")

-  OnChainMarket(
-    contract: contract,
-    signer: signer,
-    rewardRecipient: rewardRecipient
-  )
+  OnChainMarket(contract: contract, signer: signer, rewardRecipient: rewardRecipient)

proc raiseMarketError(message: string) {.raises: [MarketError].} =
  raise newException(MarketError, message)
|
|||||||
await market.approveFunds(request.price())
|
await market.approveFunds(request.price())
|
||||||
discard await market.contract.requestStorage(request).confirm(1)
|
discard await market.contract.requestStorage(request).confirm(1)
|
||||||
|
|
||||||
method getRequest*(market: OnChainMarket,
|
method getRequest*(
|
||||||
id: RequestId): Future[?StorageRequest] {.async.} =
|
market: OnChainMarket, id: RequestId
|
||||||
|
): Future[?StorageRequest] {.async.} =
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
try:
|
try:
|
||||||
return some await market.contract.getRequest(id)
|
return some await market.contract.getRequest(id)
|
||||||
except Marketplace_UnknownRequest:
|
except Marketplace_UnknownRequest:
|
||||||
return none StorageRequest
|
return none StorageRequest
|
||||||
|
|
||||||
method requestState*(market: OnChainMarket,
|
method requestState*(
|
||||||
requestId: RequestId): Future[?RequestState] {.async.} =
|
market: OnChainMarket, requestId: RequestId
|
||||||
|
): Future[?RequestState] {.async.} =
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
try:
|
try:
|
||||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||||
@ -132,25 +128,26 @@ method requestState*(market: OnChainMarket,
|
|||||||
except Marketplace_UnknownRequest:
|
except Marketplace_UnknownRequest:
|
||||||
return none RequestState
|
return none RequestState
|
||||||
|
|
||||||
method slotState*(market: OnChainMarket,
|
method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} =
|
||||||
slotId: SlotId): Future[SlotState] {.async.} =
|
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||||
return await market.contract.slotState(slotId, overrides)
|
return await market.contract.slotState(slotId, overrides)
|
||||||
|
|
||||||
method getRequestEnd*(market: OnChainMarket,
|
method getRequestEnd*(
|
||||||
id: RequestId): Future[SecondsSince1970] {.async.} =
|
market: OnChainMarket, id: RequestId
|
||||||
|
): Future[SecondsSince1970] {.async.} =
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
return await market.contract.requestEnd(id)
|
return await market.contract.requestEnd(id)
|
||||||
|
|
||||||
method requestExpiresAt*(market: OnChainMarket,
|
method requestExpiresAt*(
|
||||||
id: RequestId): Future[SecondsSince1970] {.async.} =
|
market: OnChainMarket, id: RequestId
|
||||||
|
): Future[SecondsSince1970] {.async.} =
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
return await market.contract.requestExpiry(id)
|
return await market.contract.requestExpiry(id)
|
||||||
|
|
||||||
method getHost(market: OnChainMarket,
|
method getHost(
|
||||||
requestId: RequestId,
|
market: OnChainMarket, requestId: RequestId, slotIndex: UInt256
|
||||||
slotIndex: UInt256): Future[?Address] {.async.} =
|
): Future[?Address] {.async.} =
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
let slotId = slotId(requestId, slotIndex)
|
let slotId = slotId(requestId, slotIndex)
|
||||||
let address = await market.contract.getHost(slotId)
|
let address = await market.contract.getHost(slotId)
|
||||||
@ -159,19 +156,20 @@ method getHost(market: OnChainMarket,
|
|||||||
else:
|
else:
|
||||||
return none Address
|
return none Address
|
||||||
|
|
||||||
method getActiveSlot*(market: OnChainMarket,
|
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
|
||||||
slotId: SlotId): Future[?Slot] {.async.} =
|
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
try:
|
try:
|
||||||
return some await market.contract.getActiveSlot(slotId)
|
return some await market.contract.getActiveSlot(slotId)
|
||||||
except Marketplace_SlotIsFree:
|
except Marketplace_SlotIsFree:
|
||||||
return none Slot
|
return none Slot
|
||||||
|
|
||||||
method fillSlot(market: OnChainMarket,
|
method fillSlot(
|
||||||
|
market: OnChainMarket,
|
||||||
requestId: RequestId,
|
requestId: RequestId,
|
||||||
slotIndex: UInt256,
|
slotIndex: UInt256,
|
||||||
proof: Groth16Proof,
|
proof: Groth16Proof,
|
||||||
collateral: UInt256) {.async.} =
|
collateral: UInt256,
|
||||||
|
) {.async.} =
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
logScope:
|
logScope:
|
||||||
requestId
|
requestId
|
||||||
@ -192,8 +190,8 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
|
|||||||
freeSlot = market.contract.freeSlot(
|
freeSlot = market.contract.freeSlot(
|
||||||
slotId,
|
slotId,
|
||||||
rewardRecipient, # --reward-recipient
|
rewardRecipient, # --reward-recipient
|
||||||
collateralRecipient) # SP's address
|
collateralRecipient,
|
||||||
|
) # SP's address
|
||||||
else:
|
else:
|
||||||
# Otherwise, use the SP's address as both the reward and collateral
|
# Otherwise, use the SP's address as both the reward and collateral
|
||||||
# recipient (the contract will use msg.sender for both)
|
# recipient (the contract will use msg.sender for both)
|
||||||
@ -201,14 +199,11 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
|
|||||||
|
|
||||||
discard await freeSlot.confirm(1)
|
discard await freeSlot.confirm(1)
|
||||||
|
|
||||||
|
method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} =
|
||||||
method withdrawFunds(market: OnChainMarket,
|
|
||||||
requestId: RequestId) {.async.} =
|
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
discard await market.contract.withdrawFunds(requestId).confirm(1)
|
discard await market.contract.withdrawFunds(requestId).confirm(1)
|
||||||
|
|
||||||
method isProofRequired*(market: OnChainMarket,
|
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
|
||||||
id: SlotId): Future[bool] {.async.} =
|
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
try:
|
try:
|
||||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||||
@ -216,8 +211,7 @@ method isProofRequired*(market: OnChainMarket,
|
|||||||
except Marketplace_SlotIsFree:
|
except Marketplace_SlotIsFree:
|
||||||
return false
|
return false
|
||||||
|
|
||||||
method willProofBeRequired*(market: OnChainMarket,
|
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
|
||||||
id: SlotId): Future[bool] {.async.} =
|
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
try:
|
try:
|
||||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||||
@ -225,27 +219,25 @@ method willProofBeRequired*(market: OnChainMarket,
|
|||||||
except Marketplace_SlotIsFree:
|
except Marketplace_SlotIsFree:
|
||||||
return false
|
return false
|
||||||
|
|
||||||
method getChallenge*(market: OnChainMarket, id: SlotId): Future[ProofChallenge] {.async.} =
|
method getChallenge*(
|
||||||
|
market: OnChainMarket, id: SlotId
|
||||||
|
): Future[ProofChallenge] {.async.} =
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||||
return await market.contract.getChallenge(id, overrides)
|
return await market.contract.getChallenge(id, overrides)
|
||||||
|
|
||||||
method submitProof*(market: OnChainMarket,
|
method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} =
|
||||||
id: SlotId,
|
|
||||||
proof: Groth16Proof) {.async.} =
|
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
discard await market.contract.submitProof(id, proof).confirm(1)
|
discard await market.contract.submitProof(id, proof).confirm(1)
|
||||||
|
|
||||||
method markProofAsMissing*(market: OnChainMarket,
|
method markProofAsMissing*(
|
||||||
id: SlotId,
|
market: OnChainMarket, id: SlotId, period: Period
|
||||||
period: Period) {.async.} =
|
) {.async.} =
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
discard await market.contract.markProofAsMissing(id, period).confirm(1)
|
discard await market.contract.markProofAsMissing(id, period).confirm(1)
|
||||||
|
|
||||||
method canProofBeMarkedAsMissing*(
|
method canProofBeMarkedAsMissing*(
|
||||||
market: OnChainMarket,
|
market: OnChainMarket, id: SlotId, period: Period
|
||||||
id: SlotId,
|
|
||||||
period: Period
|
|
||||||
): Future[bool] {.async.} =
|
): Future[bool] {.async.} =
|
||||||
let provider = market.contract.provider
|
let provider = market.contract.provider
|
||||||
let contractWithoutSigner = market.contract.connect(provider)
|
let contractWithoutSigner = market.contract.connect(provider)
|
||||||
@ -258,45 +250,41 @@ method canProofBeMarkedAsMissing*(
|
|||||||
return false
|
return false
|
||||||
|
|
||||||
method reserveSlot*(
|
method reserveSlot*(
|
||||||
market: OnChainMarket,
|
market: OnChainMarket, requestId: RequestId, slotIndex: UInt256
|
||||||
requestId: RequestId,
|
) {.async.} =
|
||||||
slotIndex: UInt256) {.async.} =
|
|
||||||
|
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
discard await market.contract.reserveSlot(
|
discard await market.contract
|
||||||
|
.reserveSlot(
|
||||||
requestId,
|
requestId,
|
||||||
slotIndex,
|
slotIndex,
|
||||||
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it
|
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it
|
||||||
TransactionOverrides(gasLimit: some 100000.u256)
|
TransactionOverrides(gasLimit: some 100000.u256),
|
||||||
).confirm(1)
|
)
|
||||||
|
.confirm(1)
|
||||||
|
|
||||||
method canReserveSlot*(
|
method canReserveSlot*(
|
||||||
market: OnChainMarket,
|
market: OnChainMarket, requestId: RequestId, slotIndex: UInt256
|
||||||
requestId: RequestId,
|
): Future[bool] {.async.} =
|
||||||
slotIndex: UInt256): Future[bool] {.async.} =
|
|
||||||
|
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
return await market.contract.canReserveSlot(requestId, slotIndex)
|
return await market.contract.canReserveSlot(requestId, slotIndex)
|
||||||
|
|
||||||
method subscribeRequests*(market: OnChainMarket,
|
method subscribeRequests*(
|
||||||
callback: OnRequest):
|
market: OnChainMarket, callback: OnRequest
|
||||||
Future[MarketSubscription] {.async.} =
|
): Future[MarketSubscription] {.async.} =
|
||||||
proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} =
|
proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} =
|
||||||
without event =? eventResult, eventErr:
|
without event =? eventResult, eventErr:
|
||||||
error "There was an error in Request subscription", msg = eventErr.msg
|
error "There was an error in Request subscription", msg = eventErr.msg
|
||||||
return
|
return
|
||||||
|
|
||||||
callback(event.requestId,
|
callback(event.requestId, event.ask, event.expiry)
|
||||||
event.ask,
|
|
||||||
event.expiry)
|
|
||||||
|
|
||||||
convertEthersError:
|
convertEthersError:
|
||||||
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
|
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
|
||||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||||
|
|
||||||
method subscribeSlotFilled*(market: OnChainMarket,
|
method subscribeSlotFilled*(
|
||||||
callback: OnSlotFilled):
|
market: OnChainMarket, callback: OnSlotFilled
|
||||||
Future[MarketSubscription] {.async.} =
|
): Future[MarketSubscription] {.async.} =
|
||||||
proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} =
|
proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} =
|
||||||
without event =? eventResult, eventErr:
|
without event =? eventResult, eventErr:
|
||||||
error "There was an error in SlotFilled subscription", msg = eventErr.msg
|
error "There was an error in SlotFilled subscription", msg = eventErr.msg
|
||||||
@ -308,11 +296,12 @@ method subscribeSlotFilled*(market: OnChainMarket,
    let subscription = await market.contract.subscribe(SlotFilled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

-method subscribeSlotFilled*(market: OnChainMarket,
+method subscribeSlotFilled*(
+    market: OnChainMarket,
    requestId: RequestId,
    slotIndex: UInt256,
-    callback: OnSlotFilled):
-    Future[MarketSubscription] {.async.} =
+    callback: OnSlotFilled,
+): Future[MarketSubscription] {.async.} =
  proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) =
    if eventRequestId == requestId and eventSlotIndex == slotIndex:
      callback(requestId, slotIndex)
|
|||||||
convertEthersError:
|
convertEthersError:
|
||||||
return await market.subscribeSlotFilled(onSlotFilled)
|
return await market.subscribeSlotFilled(onSlotFilled)
|
||||||
|
|
||||||
method subscribeSlotFreed*(market: OnChainMarket,
|
method subscribeSlotFreed*(
|
||||||
callback: OnSlotFreed):
|
market: OnChainMarket, callback: OnSlotFreed
|
||||||
Future[MarketSubscription] {.async.} =
|
): Future[MarketSubscription] {.async.} =
|
||||||
proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} =
|
proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} =
|
||||||
without event =? eventResult, eventErr:
|
without event =? eventResult, eventErr:
|
||||||
error "There was an error in SlotFreed subscription", msg = eventErr.msg
|
error "There was an error in SlotFreed subscription", msg = eventErr.msg
|
||||||
@ -335,12 +324,12 @@ method subscribeSlotFreed*(market: OnChainMarket,
|
|||||||
return OnChainMarketSubscription(eventSubscription: subscription)
|
return OnChainMarketSubscription(eventSubscription: subscription)
|
||||||
|
|
||||||
method subscribeSlotReservationsFull*(
|
method subscribeSlotReservationsFull*(
|
||||||
market: OnChainMarket,
|
market: OnChainMarket, callback: OnSlotReservationsFull
|
||||||
callback: OnSlotReservationsFull): Future[MarketSubscription] {.async.} =
|
): Future[MarketSubscription] {.async.} =
|
||||||
|
|
||||||
proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} =
|
proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} =
|
||||||
without event =? eventResult, eventErr:
|
without event =? eventResult, eventErr:
|
||||||
error "There was an error in SlotReservationsFull subscription", msg = eventErr.msg
|
error "There was an error in SlotReservationsFull subscription",
|
||||||
|
msg = eventErr.msg
|
||||||
return
|
return
|
||||||
|
|
||||||
callback(event.requestId, event.slotIndex)
|
callback(event.requestId, event.slotIndex)
|
||||||
@ -349,9 +338,9 @@ method subscribeSlotReservationsFull*(
    let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

-method subscribeFulfillment(market: OnChainMarket,
-                            callback: OnFulfillment):
-                           Future[MarketSubscription] {.async.} =
+method subscribeFulfillment(
+    market: OnChainMarket, callback: OnFulfillment
+): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
@ -363,10 +352,9 @@ method subscribeFulfillment(market: OnChainMarket,
    let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

-method subscribeFulfillment(market: OnChainMarket,
-                            requestId: RequestId,
-                            callback: OnFulfillment):
-                           Future[MarketSubscription] {.async.} =
+method subscribeFulfillment(
+    market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
+): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
@ -379,9 +367,9 @@ method subscribeFulfillment(market: OnChainMarket,
    let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

-method subscribeRequestCancelled*(market: OnChainMarket,
-                                  callback: OnRequestCancelled):
-                                 Future[MarketSubscription] {.async.} =
+method subscribeRequestCancelled*(
+    market: OnChainMarket, callback: OnRequestCancelled
+): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestCancelled subscription", msg = eventErr.msg
@ -393,10 +381,9 @@ method subscribeRequestCancelled*(market: OnChainMarket,
    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

-method subscribeRequestCancelled*(market: OnChainMarket,
-                                  requestId: RequestId,
-                                  callback: OnRequestCancelled):
-                                 Future[MarketSubscription] {.async.} =
+method subscribeRequestCancelled*(
+    market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
+): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestCancelled subscription", msg = eventErr.msg
@ -409,10 +396,10 @@ method subscribeRequestCancelled*(market: OnChainMarket,
    let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

-method subscribeRequestFailed*(market: OnChainMarket,
-                               callback: OnRequestFailed):
-                              Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!RequestFailed) {.upraises:[]} =
+method subscribeRequestFailed*(
+    market: OnChainMarket, callback: OnRequestFailed
+): Future[MarketSubscription] {.async.} =
+  proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFailed subscription", msg = eventErr.msg
      return
@ -423,11 +410,10 @@ method subscribeRequestFailed*(market: OnChainMarket,
    let subscription = await market.contract.subscribe(RequestFailed, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

-method subscribeRequestFailed*(market: OnChainMarket,
-                               requestId: RequestId,
-                               callback: OnRequestFailed):
-                              Future[MarketSubscription] {.async.} =
-  proc onEvent(eventResult: ?!RequestFailed) {.upraises:[]} =
+method subscribeRequestFailed*(
+    market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
+): Future[MarketSubscription] {.async.} =
+  proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in RequestFailed subscription", msg = eventErr.msg
      return
@ -439,9 +425,9 @@ method subscribeRequestFailed*(market: OnChainMarket,
    let subscription = await market.contract.subscribe(RequestFailed, onEvent)
    return OnChainMarketSubscription(eventSubscription: subscription)

-method subscribeProofSubmission*(market: OnChainMarket,
-                                 callback: OnProofSubmitted):
-                                Future[MarketSubscription] {.async.} =
+method subscribeProofSubmission*(
+    market: OnChainMarket, callback: OnProofSubmitted
+): Future[MarketSubscription] {.async.} =
  proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} =
    without event =? eventResult, eventErr:
      error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
@ -457,48 +443,37 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
  await subscription.eventSubscription.unsubscribe()

method queryPastSlotFilledEvents*(
-    market: OnChainMarket,
-    fromBlock: BlockTag): Future[seq[SlotFilled]] {.async.} =
-
+    market: OnChainMarket, fromBlock: BlockTag
+): Future[seq[SlotFilled]] {.async.} =
  convertEthersError:
-    return await market.contract.queryFilter(SlotFilled,
-                                              fromBlock,
-                                              BlockTag.latest)
+    return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)

method queryPastSlotFilledEvents*(
-    market: OnChainMarket,
-    blocksAgo: int): Future[seq[SlotFilled]] {.async.} =
-
+    market: OnChainMarket, blocksAgo: int
+): Future[seq[SlotFilled]] {.async.} =
  convertEthersError:
-    let fromBlock =
-      await market.contract.provider.pastBlockTag(blocksAgo)
-
+    let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
    return await market.queryPastSlotFilledEvents(fromBlock)

method queryPastSlotFilledEvents*(
-    market: OnChainMarket,
-    fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.async.} =
-
+    market: OnChainMarket, fromTime: SecondsSince1970
+): Future[seq[SlotFilled]] {.async.} =
  convertEthersError:
-    let fromBlock =
-      await market.contract.provider.blockNumberForEpoch(fromTime)
+    let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
    return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))

method queryPastStorageRequestedEvents*(
-    market: OnChainMarket,
-    fromBlock: BlockTag): Future[seq[StorageRequested]] {.async.} =
-
+    market: OnChainMarket, fromBlock: BlockTag
+): Future[seq[StorageRequested]] {.async.} =
  convertEthersError:
-    return await market.contract.queryFilter(StorageRequested,
-                                              fromBlock,
-                                              BlockTag.latest)
+    return
+      await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)

method queryPastStorageRequestedEvents*(
-    market: OnChainMarket,
-    blocksAgo: int): Future[seq[StorageRequested]] {.async.} =
-
+    market: OnChainMarket, blocksAgo: int
+): Future[seq[StorageRequested]] {.async.} =
  convertEthersError:
-    let fromBlock =
-      await market.contract.provider.pastBlockTag(blocksAgo)
-
+    let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
    return await market.queryPastStorageRequestedEvents(fromBlock)
@ -52,22 +52,96 @@ proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.}
proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.}
proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.}

-proc requestStorage*(marketplace: Marketplace, request: StorageRequest): Confirmable {.contract, errors:[Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists, Marketplace_InvalidExpiry, Marketplace_InsufficientSlots, Marketplace_InvalidMaxSlotLoss].}
-proc fillSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256, proof: Groth16Proof): Confirmable {.contract, errors:[Marketplace_InvalidSlot, Marketplace_ReservationRequired, Marketplace_SlotNotFree, Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest].}
-proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId): Confirmable {.contract, errors:[Marketplace_InvalidClientAddress, Marketplace_InvalidState, Marketplace_NothingToWithdraw, Marketplace_UnknownRequest].}
-proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address): Confirmable {.contract, errors:[Marketplace_InvalidClientAddress, Marketplace_InvalidState, Marketplace_NothingToWithdraw, Marketplace_UnknownRequest].}
-proc freeSlot*(marketplace: Marketplace, id: SlotId): Confirmable {.contract, errors:[Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid, Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree].}
-proc freeSlot*(marketplace: Marketplace, id: SlotId, rewardRecipient: Address, collateralRecipient: Address): Confirmable {.contract, errors:[Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid, Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree].}
-proc getRequest*(marketplace: Marketplace, id: RequestId): StorageRequest {.contract, view, errors:[Marketplace_UnknownRequest].}
+proc requestStorage*(
+  marketplace: Marketplace, request: StorageRequest
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
+    Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
+    Marketplace_InvalidMaxSlotLoss,
+  ]
+.}
+
+proc fillSlot*(
+  marketplace: Marketplace,
+  requestId: RequestId,
+  slotIndex: UInt256,
+  proof: Groth16Proof,
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidSlot, Marketplace_ReservationRequired, Marketplace_SlotNotFree,
+    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest,
+  ]
+.}
+
+proc withdrawFunds*(
+  marketplace: Marketplace, requestId: RequestId
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidClientAddress, Marketplace_InvalidState,
+    Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
+  ]
+.}
+
+proc withdrawFunds*(
+  marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidClientAddress, Marketplace_InvalidState,
+    Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
+  ]
+.}
+
+proc freeSlot*(
+  marketplace: Marketplace, id: SlotId
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
+    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
+  ]
+.}
+
+proc freeSlot*(
+  marketplace: Marketplace,
+  id: SlotId,
+  rewardRecipient: Address,
+  collateralRecipient: Address,
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
+    Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
+  ]
+.}
+
+proc getRequest*(
+  marketplace: Marketplace, id: RequestId
+): StorageRequest {.contract, view, errors: [Marketplace_UnknownRequest].}

proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
-proc getActiveSlot*(marketplace: Marketplace, id: SlotId): Slot {.contract, view, errors:[Marketplace_SlotIsFree].}
+proc getActiveSlot*(
+  marketplace: Marketplace, id: SlotId
+): Slot {.contract, view, errors: [Marketplace_SlotIsFree].}

proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
-proc requestState*(marketplace: Marketplace, requestId: RequestId): RequestState {.contract, view, errors:[Marketplace_UnknownRequest].}
+proc requestState*(
+  marketplace: Marketplace, requestId: RequestId
+): RequestState {.contract, view, errors: [Marketplace_UnknownRequest].}

proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
-proc requestEnd*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
-proc requestExpiry*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
+proc requestEnd*(
+  marketplace: Marketplace, requestId: RequestId
+): SecondsSince1970 {.contract, view.}
+
+proc requestExpiry*(
+  marketplace: Marketplace, requestId: RequestId
+): SecondsSince1970 {.contract, view.}

proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.}

@ -75,11 +149,35 @@ proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
-proc getChallenge*(marketplace: Marketplace, id: SlotId): array[32, byte] {.contract, view.}
+proc getChallenge*(
+  marketplace: Marketplace, id: SlotId
+): array[32, byte] {.contract, view.}

proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}

-proc submitProof*(marketplace: Marketplace, id: SlotId, proof: Groth16Proof): Confirmable {.contract, errors:[Proofs_ProofAlreadySubmitted, Proofs_InvalidProof, Marketplace_UnknownRequest].}
-proc markProofAsMissing*(marketplace: Marketplace, id: SlotId, period: UInt256): Confirmable {.contract, errors:[Marketplace_SlotNotAcceptingProofs, Marketplace_StartNotBeforeExpiry, Proofs_PeriodNotEnded, Proofs_ValidationTimedOut, Proofs_ProofNotMissing, Proofs_ProofNotRequired, Proofs_ProofAlreadyMarkedMissing].}
-proc reserveSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256): Confirmable {.contract.}
-proc canReserveSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256): bool {.contract, view.}
+proc submitProof*(
+  marketplace: Marketplace, id: SlotId, proof: Groth16Proof
+): Confirmable {.
+  contract,
+  errors:
+    [Proofs_ProofAlreadySubmitted, Proofs_InvalidProof, Marketplace_UnknownRequest]
+.}
+
+proc markProofAsMissing*(
+  marketplace: Marketplace, id: SlotId, period: UInt256
+): Confirmable {.
+  contract,
+  errors: [
+    Marketplace_SlotNotAcceptingProofs, Marketplace_StartNotBeforeExpiry,
+    Proofs_PeriodNotEnded, Proofs_ValidationTimedOut, Proofs_ProofNotMissing,
+    Proofs_ProofNotRequired, Proofs_ProofAlreadyMarkedMissing,
+  ]
+.}
+
+proc reserveSlot*(
+  marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256
+): Confirmable {.contract.}
+
+proc canReserveSlot*(
+  marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256
+): bool {.contract, view.}
@ -7,13 +7,16 @@ type
    a*: G1Point
    b*: G2Point
    c*: G1Point

  G1Point* = object
    x*: UInt256
    y*: UInt256

  # A field element F_{p^2} encoded as `real + i * imag`
  Fp2Element* = object
    real*: UInt256
    imag*: UInt256

  G2Point* = object
    x*: Fp2Element
    y*: Fp2Element
@ -12,8 +12,9 @@ logScope:
proc raiseProviderError(message: string) {.raises: [ProviderError].} =
  raise newException(ProviderError, message)

-proc blockNumberAndTimestamp*(provider: Provider, blockTag: BlockTag):
-    Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} =
+proc blockNumberAndTimestamp*(
+    provider: Provider, blockTag: BlockTag
+): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} =
  without latestBlock =? await provider.getBlock(blockTag):
    raiseProviderError("Could not get latest block")

@ -23,14 +24,10 @@ proc blockNumberAndTimestamp*(provider: Provider, blockTag: BlockTag):
  return (latestBlockNumber, latestBlock.timestamp)

proc binarySearchFindClosestBlock(
-    provider: Provider,
-    epochTime: int,
-    low: UInt256,
-    high: UInt256): Future[UInt256] {.async: (raises: [ProviderError]).} =
-  let (_, lowTimestamp) =
-    await provider.blockNumberAndTimestamp(BlockTag.init(low))
-  let (_, highTimestamp) =
-    await provider.blockNumberAndTimestamp(BlockTag.init(high))
+    provider: Provider, epochTime: int, low: UInt256, high: UInt256
+): Future[UInt256] {.async: (raises: [ProviderError]).} =
+  let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low))
+  let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high))
  if abs(lowTimestamp.truncate(int) - epochTime) <
      abs(highTimestamp.truncate(int) - epochTime):
    return low
@ -41,8 +38,8 @@ proc binarySearchBlockNumberForEpoch(
    provider: Provider,
    epochTime: UInt256,
    latestBlockNumber: UInt256,
-    earliestBlockNumber: UInt256): Future[UInt256]
-    {.async: (raises: [ProviderError]).} =
+    earliestBlockNumber: UInt256,
+): Future[UInt256] {.async: (raises: [ProviderError]).} =
  var low = earliestBlockNumber
  var high = latestBlockNumber

@ -63,12 +60,12 @@ proc binarySearchBlockNumberForEpoch(
  # low is always greater than high - this is why we use high, where
  # intuitively we would use low:
  await provider.binarySearchFindClosestBlock(
-    epochTime.truncate(int), low=high, high=low)
+    epochTime.truncate(int), low = high, high = low
+  )

proc blockNumberForEpoch*(
-    provider: Provider,
-    epochTime: SecondsSince1970): Future[UInt256]
-    {.async: (raises: [ProviderError]).} =
+    provider: Provider, epochTime: SecondsSince1970
+): Future[UInt256] {.async: (raises: [ProviderError]).} =
  let epochTimeUInt256 = epochTime.u256
  let (latestBlockNumber, latestBlockTimestamp) =
    await provider.blockNumberAndTimestamp(BlockTag.latest)
@ -110,17 +107,17 @@ proc blockNumberForEpoch*(

  if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
    let availableHistoryInDays =
-      (latestBlockTimestamp - earliestBlockTimestamp) div
-      1.days.secs.u256
-    warn "Short block history detected.", earliestBlockTimestamp =
-      earliestBlockTimestamp, days = availableHistoryInDays
+      (latestBlockTimestamp - earliestBlockTimestamp) div 1.days.secs.u256
+    warn "Short block history detected.",
+      earliestBlockTimestamp = earliestBlockTimestamp, days = availableHistoryInDays
    return earliestBlockNumber

  return await provider.binarySearchBlockNumberForEpoch(
-    epochTimeUInt256, latestBlockNumber, earliestBlockNumber)
+    epochTimeUInt256, latestBlockNumber, earliestBlockNumber
+  )

-proc pastBlockTag*(provider: Provider,
-                   blocksAgo: int):
-                  Future[BlockTag] {.async: (raises: [ProviderError]).} =
+proc pastBlockTag*(
+    provider: Provider, blocksAgo: int
+): Future[BlockTag] {.async: (raises: [ProviderError]).} =
  let head = await provider.getBlockNumber()
  return BlockTag.init(head - blocksAgo.abs.u256)
@@ -18,6 +18,7 @@ type
     content* {.serialize.}: StorageContent
     expiry* {.serialize.}: UInt256
     nonce*: Nonce
+
   StorageAsk* = object
     slots* {.serialize.}: uint64
     slotSize* {.serialize.}: UInt256

@@ -26,12 +27,15 @@ type
     reward* {.serialize.}: UInt256
     collateral* {.serialize.}: UInt256
     maxSlotLoss* {.serialize.}: uint64
+
   StorageContent* = object
     cid* {.serialize.}: string
     merkleRoot*: array[32, byte]
+
   Slot* = object
     request* {.serialize.}: StorageRequest
     slotIndex* {.serialize.}: UInt256
+
   SlotId* = distinct array[32, byte]
   RequestId* = distinct array[32, byte]
   Nonce* = distinct array[32, byte]

@@ -41,6 +45,7 @@ type
     Cancelled
     Finished
     Failed
+
   SlotState* {.pure.} = enum
     Free
     Filled

@@ -80,27 +85,26 @@ proc toHex*[T: distinct](id: T): string =
   type baseType = T.distinctBase
   baseType(id).toHex

-logutils.formatIt(LogFormat.textLines, Nonce): it.short0xHexLog
-logutils.formatIt(LogFormat.textLines, RequestId): it.short0xHexLog
-logutils.formatIt(LogFormat.textLines, SlotId): it.short0xHexLog
-logutils.formatIt(LogFormat.json, Nonce): it.to0xHexLog
-logutils.formatIt(LogFormat.json, RequestId): it.to0xHexLog
-logutils.formatIt(LogFormat.json, SlotId): it.to0xHexLog
+logutils.formatIt(LogFormat.textLines, Nonce):
+  it.short0xHexLog
+logutils.formatIt(LogFormat.textLines, RequestId):
+  it.short0xHexLog
+logutils.formatIt(LogFormat.textLines, SlotId):
+  it.short0xHexLog
+logutils.formatIt(LogFormat.json, Nonce):
+  it.to0xHexLog
+logutils.formatIt(LogFormat.json, RequestId):
+  it.to0xHexLog
+logutils.formatIt(LogFormat.json, SlotId):
+  it.to0xHexLog

 func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
   StorageRequest(
-    client: tupl[0],
-    ask: tupl[1],
-    content: tupl[2],
-    expiry: tupl[3],
-    nonce: tupl[4]
+    client: tupl[0], ask: tupl[1], content: tupl[2], expiry: tupl[3], nonce: tupl[4]
   )

 func fromTuple(_: type Slot, tupl: tuple): Slot =
-  Slot(
-    request: tupl[0],
-    slotIndex: tupl[1]
-  )
+  Slot(request: tupl[0], slotIndex: tupl[1])

 func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
   StorageAsk(

@@ -110,14 +114,11 @@ func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
     proofProbability: tupl[3],
     reward: tupl[4],
     collateral: tupl[5],
-    maxSlotLoss: tupl[6]
+    maxSlotLoss: tupl[6],
   )

 func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
-  StorageContent(
-    cid: tupl[0],
-    merkleRoot: tupl[1]
-  )
+  StorageContent(cid: tupl[0], merkleRoot: tupl[1])

 func solidityType*(_: type StorageContent): string =
   solidityType(StorageContent.fieldTypes)
@@ -32,13 +32,13 @@ export discv5
 logScope:
   topics = "codex discovery"

-type
-  Discovery* = ref object of RootObj
+type Discovery* = ref object of RootObj
   protocol*: discv5.Protocol # dht protocol
   key: PrivateKey # private key
   peerId: PeerId # the peer id of the local node
   announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records
-  providerRecord*: ?SignedPeerRecord # record to advertice node connection information, this carry any
+  providerRecord*: ?SignedPeerRecord
+    # record to advertice node connection information, this carry any
     # address that the node can be connected on
   dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information

@@ -54,14 +54,11 @@ proc toNodeId*(host: ca.Address): NodeId =

   readUintBE[256](keccak256.digest(host.toArray).data)

-proc findPeer*(
-    d: Discovery,
-    peerId: PeerId): Future[?PeerRecord] {.async.} =
+proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
   trace "protocol.resolve..."
   ## Find peer using the given Discovery object
   ##
-  let
-    node = await d.protocol.resolve(toNodeId(peerId))
+  let node = await d.protocol.resolve(toNodeId(peerId))

   return
     if node.isSome():

@@ -69,13 +66,10 @@ proc findPeer*(
     else:
       PeerRecord.none

-method find*(
-    d: Discovery,
-    cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
+method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
   ## Find block providers
   ##
-  without providers =?
-    (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
+  without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
     warn "Error finding providers for block", cid, error = error.msg

   return providers.filterIt(not (it.data.peerId == d.peerId))

@@ -83,23 +77,20 @@ method find*(
 method provide*(d: Discovery, cid: Cid) {.async, base.} =
   ## Provide a block Cid
   ##
-  let
-    nodes = await d.protocol.addProvider(
-      cid.toNodeId(), d.providerRecord.get)
+  let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)

   if nodes.len <= 0:
     warn "Couldn't provide to any nodes!"

-
 method find*(
-    d: Discovery,
-    host: ca.Address): Future[seq[SignedPeerRecord]] {.async, base.} =
+    d: Discovery, host: ca.Address
+): Future[seq[SignedPeerRecord]] {.async, base.} =
   ## Find host providers
   ##

   trace "Finding providers for host", host = $host
-  without var providers =?
-    (await d.protocol.getProviders(host.toNodeId())).mapFailure, error:
+  without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
+    error:
     trace "Error finding providers for host", host = $host, exc = error.msg
     return

@@ -117,15 +108,11 @@ method provide*(d: Discovery, host: ca.Address) {.async, base.} =
   ##

   trace "Providing host", host = $host
-  let
-    nodes = await d.protocol.addProvider(
-      host.toNodeId(), d.providerRecord.get)
+  let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
   if nodes.len > 0:
     trace "Provided to nodes", nodes = nodes.len

-method removeProvider*(
-    d: Discovery,
-    peerId: PeerId): Future[void] {.base, gcsafe.} =
+method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} =
   ## Remove provider from providers table
   ##

@@ -139,26 +126,24 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
   d.announceAddrs = @addrs

   trace "Updating announce record", addrs = d.announceAddrs
-  d.providerRecord = SignedPeerRecord.init(
-    d.key, PeerRecord.init(d.peerId, d.announceAddrs))
+  d.providerRecord = SignedPeerRecord
+    .init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
     .expect("Should construct signed record").some

   if not d.protocol.isNil:
-    d.protocol.updateRecord(d.providerRecord)
-      .expect("Should update SPR")
+    d.protocol.updateRecord(d.providerRecord).expect("Should update SPR")

 proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
   ## Update providers record
   ##

   trace "Updating Dht record", addrs = addrs
-  d.dhtRecord = SignedPeerRecord.init(
-    d.key, PeerRecord.init(d.peerId, @addrs))
+  d.dhtRecord = SignedPeerRecord
+    .init(d.key, PeerRecord.init(d.peerId, @addrs))
     .expect("Should construct signed record").some

   if not d.protocol.isNil:
-    d.protocol.updateRecord(d.dhtRecord)
-      .expect("Should update SPR")
+    d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")

 proc start*(d: Discovery) {.async.} =
   d.protocol.open()

@@ -174,15 +159,13 @@ proc new*(
     bindPort = 0.Port,
     announceAddrs: openArray[MultiAddress],
     bootstrapNodes: openArray[SignedPeerRecord] = [],
-    store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!")
+    store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!"),
 ): Discovery =
   ## Create a new Discovery node instance for the given key and datastore
   ##

-  var
-    self = Discovery(
-      key: key,
-      peerId: PeerId.init(key).expect("Should construct PeerId"))
+  var self =
+    Discovery(key: key, peerId: PeerId.init(key).expect("Should construct PeerId"))

   self.updateAnnounceRecord(announceAddrs)

@@ -190,11 +173,8 @@ proc new*(
   # FIXME disable IP limits temporarily so we can run our workshop. Re-enable
   # and figure out proper solution.
   let discoveryConfig = DiscoveryConfig(
-    tableIpLimits: TableIpLimits(
-      tableIpLimit: high(uint),
-      bucketIpLimit:high(uint)
-    ),
-    bitsPerHop: DefaultBitsPerHop
+    tableIpLimits: TableIpLimits(tableIpLimit: high(uint), bucketIpLimit: high(uint)),
+    bitsPerHop: DefaultBitsPerHop,
   )
   # --------------------------------------------------------------------------

@@ -206,6 +186,7 @@ proc new*(
     bootstrapRecords = bootstrapNodes,
     rng = Rng.instance(),
     providers = ProvidersManager.new(store),
-    config = discoveryConfig)
+    config = discoveryConfig,
+  )

   self
@@ -9,7 +9,8 @@

 import pkg/upraises

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import ../stores

@@ -28,19 +29,14 @@ method release*(self: ErasureBackend) {.base, gcsafe.} =
   raiseAssert("not implemented!")

 method encode*(
-    self: EncoderBackend,
-    buffers,
-    parity: var openArray[seq[byte]]
+    self: EncoderBackend, buffers, parity: var openArray[seq[byte]]
 ): Result[void, cstring] {.base, gcsafe.} =
   ## encode buffers using a backend
   ##
   raiseAssert("not implemented!")

 method decode*(
-    self: DecoderBackend,
-    buffers,
-    parity,
-    recovered: var openArray[seq[byte]]
+    self: DecoderBackend, buffers, parity, recovered: var openArray[seq[byte]]
 ): Result[void, cstring] {.base, gcsafe.} =
   ## decode buffers using a backend
   ##
@@ -22,19 +22,16 @@ type
     decoder*: Option[LeoDecoder]

 method encode*(
-    self: LeoEncoderBackend,
-    data,
-    parity: var openArray[seq[byte]]): Result[void, cstring] =
+    self: LeoEncoderBackend, data, parity: var openArray[seq[byte]]
+): Result[void, cstring] =
   ## Encode data using Leopard backend

   if parity.len == 0:
     return ok()

-  var encoder = if self.encoder.isNone:
-    self.encoder = (? LeoEncoder.init(
-      self.blockSize,
-      self.buffers,
-      self.parity)).some
+  var encoder =
+    if self.encoder.isNone:
+      self.encoder = (?LeoEncoder.init(self.blockSize, self.buffers, self.parity)).some
       self.encoder.get()
     else:
       self.encoder.get()

@@ -42,18 +39,13 @@ method encode*(
   encoder.encode(data, parity)

 method decode*(
-    self: LeoDecoderBackend,
-    data,
-    parity,
-    recovered: var openArray[seq[byte]]): Result[void, cstring] =
+    self: LeoDecoderBackend, data, parity, recovered: var openArray[seq[byte]]
+): Result[void, cstring] =
   ## Decode data using given Leopard backend

   var decoder =
     if self.decoder.isNone:
-      self.decoder = (? LeoDecoder.init(
-        self.blockSize,
-        self.buffers,
-        self.parity)).some
+      self.decoder = (?LeoDecoder.init(self.blockSize, self.buffers, self.parity)).some
       self.decoder.get()
     else:
       self.decoder.get()

@@ -69,25 +61,15 @@ method release*(self: LeoDecoderBackend) =
     self.decoder.get().free()

 proc new*(
-    T: type LeoEncoderBackend,
-    blockSize,
-    buffers,
-    parity: int): LeoEncoderBackend =
+    T: type LeoEncoderBackend, blockSize, buffers, parity: int
+): LeoEncoderBackend =
   ## Create an instance of an Leopard Encoder backend
   ##
-  LeoEncoderBackend(
-    blockSize: blockSize,
-    buffers: buffers,
-    parity: parity)
+  LeoEncoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)

 proc new*(
-    T: type LeoDecoderBackend,
-    blockSize,
-    buffers,
-    parity: int): LeoDecoderBackend =
+    T: type LeoDecoderBackend, blockSize, buffers, parity: int
+): LeoDecoderBackend =
   ## Create an instance of an Leopard Decoder backend
   ##
-  LeoDecoderBackend(
-    blockSize: blockSize,
-    buffers: buffers,
-    parity: parity)
+  LeoDecoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)
@@ -9,7 +9,8 @@

 import pkg/upraises

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import std/sequtils
 import std/sugar

@@ -60,12 +61,11 @@ type
   ## columns (with up to M blocks missing per column),
   ## or any combination there of.
   ##
-
-  EncoderProvider* = proc(size, blocks, parity: int): EncoderBackend
-    {.raises: [Defect], noSideEffect.}
+  EncoderProvider* =
+    proc(size, blocks, parity: int): EncoderBackend {.raises: [Defect], noSideEffect.}

-  DecoderProvider* = proc(size, blocks, parity: int): DecoderBackend
-    {.raises: [Defect], noSideEffect.}
+  DecoderProvider* =
+    proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.}

   Erasure* = ref object
     encoderProvider*: EncoderProvider

@@ -98,21 +98,22 @@ func indexToPos(steps, idx, step: int): int {.inline.} =
   (idx - step) div steps

 proc getPendingBlocks(
-    self: Erasure,
-    manifest: Manifest,
-    indicies: seq[int]): AsyncIter[(?!bt.Block, int)] =
+    self: Erasure, manifest: Manifest, indicies: seq[int]
+): AsyncIter[(?!bt.Block, int)] =
   ## Get pending blocks iterator
   ##

   var
     # request blocks from the store
-    pendingBlocks = indicies.map( (i: int) =>
-      self.store.getBlock(
-        BlockAddress.init(manifest.treeCid, i)
-      ).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
+    pendingBlocks = indicies.map(
+      (i: int) =>
+        self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map(
+          (r: ?!bt.Block) => (r, i)
+        ) # Get the data blocks (first K)
     )

-  proc isFinished(): bool = pendingBlocks.len == 0
+  proc isFinished(): bool =
+    pendingBlocks.len == 0

   proc genNext(): Future[(?!bt.Block, int)] {.async.} =
     let completedFut = await one(pendingBlocks)

@@ -123,7 +124,9 @@ proc getPendingBlocks(
       let (_, index) = await completedFut
       raise newException(
         CatchableError,
-        "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index)
+        "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " &
+          $index,
+      )

   AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)

@@ -134,18 +137,18 @@ proc prepareEncodingData(
     step: Natural,
     data: ref seq[seq[byte]],
     cids: ref seq[Cid],
-    emptyBlock: seq[byte]): Future[?!Natural] {.async.} =
+    emptyBlock: seq[byte],
+): Future[?!Natural] {.async.} =
   ## Prepare data for encoding
   ##

   let
     strategy = params.strategy.init(
-      firstIndex = 0,
-      lastIndex = params.rounded - 1,
-      iterations = params.steps
+      firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
     )
     indicies = toSeq(strategy.getIndicies(step))
-    pendingBlocksIter = self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
+    pendingBlocksIter =
+      self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))

   var resolved = 0
   for fut in pendingBlocksIter:

@@ -164,7 +167,8 @@ proc prepareEncodingData(
       let pos = indexToPos(params.steps, idx, step)
       trace "Padding with empty block", idx
       shallowCopy(data[pos], emptyBlock)
-      without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err:
+      without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec),
+        err:
         return failure(err)
       cids[idx] = emptyBlockCid

@@ -177,7 +181,8 @@ proc prepareDecodingData(
     data: ref seq[seq[byte]],
     parityData: ref seq[seq[byte]],
     cids: ref seq[Cid],
-    emptyBlock: seq[byte]): Future[?!(Natural, Natural)] {.async.} =
+    emptyBlock: seq[byte],
+): Future[?!(Natural, Natural)] {.async.} =
   ## Prepare data for decoding
   ## `encoded` - the encoded manifest
   ## `step` - the current step

@@ -189,9 +194,7 @@ proc prepareDecodingData(

   let
     strategy = encoded.protectedStrategy.init(
-      firstIndex = 0,
-      lastIndex = encoded.blocksCount - 1,
-      iterations = encoded.steps
+      firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
     )
     indicies = toSeq(strategy.getIndicies(step))
     pendingBlocksIter = self.getPendingBlocks(encoded, indicies)

@@ -211,8 +214,7 @@ proc prepareDecodingData(
       trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
       continue

-    let
-      pos = indexToPos(encoded.steps, idx, step)
+    let pos = indexToPos(encoded.steps, idx, step)

     logScope:
       cid = blk.cid

@@ -224,7 +226,9 @@ proc prepareDecodingData(
     cids[idx] = blk.cid
     if idx >= encoded.rounded:
       trace "Retrieved parity block"
-      shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
+      shallowCopy(
+        parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data
+      )
       parityPieces.inc
     else:
       trace "Retrieved data block"

@@ -238,15 +242,17 @@ proc prepareDecodingData(
 proc init*(
     _: type EncodingParams,
     manifest: Manifest,
-    ecK: Natural, ecM: Natural,
-    strategy: StrategyType): ?!EncodingParams =
+    ecK: Natural,
+    ecM: Natural,
+    strategy: StrategyType,
+): ?!EncodingParams =
   if ecK > manifest.blocksCount:
     let exc = (ref InsufficientBlocksError)(
-      msg: "Unable to encode manifest, not enough blocks, ecK = " &
-        $ecK &
-        ", blocksCount = " &
-        $manifest.blocksCount,
-      minSize: ecK.NBytes * manifest.blockSize)
+      msg:
+        "Unable to encode manifest, not enough blocks, ecK = " & $ecK &
+        ", blocksCount = " & $manifest.blocksCount,
+      minSize: ecK.NBytes * manifest.blockSize,
+    )
     return failure(exc)

   let

@@ -260,13 +266,11 @@ proc init*(
     rounded: rounded,
     steps: steps,
     blocksCount: blocksCount,
-    strategy: strategy
+    strategy: strategy,
   )

 proc encodeData(
-    self: Erasure,
-    manifest: Manifest,
-    params: EncodingParams
+    self: Erasure, manifest: Manifest, params: EncodingParams
 ): Future[?!Manifest] {.async.} =
   ## Encode blocks pointed to by the protected manifest
   ##

@@ -292,7 +296,8 @@ proc encodeData(
     # TODO: Don't allocate a new seq every time, allocate once and zero out
     var
       data = seq[seq[byte]].new() # number of blocks to encode
-      parityData = newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int))
+      parityData =
+        newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int))

     data[].setLen(params.ecK)
     # TODO: this is a tight blocking loop so we sleep here to allow

@@ -301,15 +306,14 @@ proc encodeData(
     await sleepAsync(10.millis)

     without resolved =?
-      (await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err:
+      (await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)),
+      err:
       trace "Unable to prepare data", error = err.msg
       return failure(err)

     trace "Erasure coding data", data = data[].len, parity = parityData.len

-    if (
-      let res = encoder.encode(data[], parityData);
-      res.isErr):
+    if (let res = encoder.encode(data[], parityData); res.isErr):
       trace "Unable to encode manifest!", error = $res.error
       return failure($res.error)

@@ -341,7 +345,7 @@ proc encodeData(
     datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
     ecK = params.ecK,
     ecM = params.ecM,
-    strategy = params.strategy
+    strategy = params.strategy,
   )

   trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount

@@ -360,7 +364,8 @@ proc encode*(
     manifest: Manifest,
     blocks: Natural,
     parity: Natural,
-    strategy = SteppedStrategy): Future[?!Manifest] {.async.} =
+    strategy = SteppedStrategy,
+): Future[?!Manifest] {.async.} =
   ## Encode a manifest into one that is erasure protected.
   ##
   ## `manifest` - the original manifest to be encoded

@@ -376,9 +381,7 @@ proc encode*(

   return success encodedManifest

-proc decode*(
-    self: Erasure,
-    encoded: Manifest): Future[?!Manifest] {.async.} =
+proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
   ## Decode a protected manifest into it's original
   ## manifest
   ##

@@ -408,13 +411,17 @@ proc decode*(
     var
       data = seq[seq[byte]].new()
       parityData = seq[seq[byte]].new()
-      recovered = newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int))
+      recovered =
+        newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int))

     data[].setLen(encoded.ecK) # set len to K
     parityData[].setLen(encoded.ecM) # set len to M

-    without (dataPieces, _) =?
-      (await self.prepareDecodingData(encoded, step, data, parityData, cids, emptyBlock)), err:
+    without (dataPieces, _) =? (
+      await self.prepareDecodingData(
+        encoded, step, data, parityData, cids, emptyBlock
+      )
+    ), err:
       trace "Unable to prepare data", error = err.msg
       return failure(err)

@@ -424,9 +431,7 @@ proc decode*(

     trace "Erasure decoding data"

-    if (
-      let err = decoder.decode(data[], parityData[], recovered);
-      err.isErr):
+    if (let err = decoder.decode(data[], parityData[], recovered); err.isErr):
       trace "Unable to decode data!", err = $err.error
       return failure($err.error)

@@ -460,10 +465,12 @@ proc decode*(
     return failure(err)

   if treeCid != encoded.originalTreeCid:
-    return failure("Original tree root differs from the tree root computed out of recovered data")
+    return failure(
+      "Original tree root differs from the tree root computed out of recovered data"
+    )

-  let idxIter = Iter[Natural].new(recoveredIndices)
-    .filter((i: Natural) => i < tree.leavesCount)
+  let idxIter =
+    Iter[Natural].new(recoveredIndices).filter((i: Natural) => i < tree.leavesCount)

   if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
     return failure(err)

@@ -482,11 +489,11 @@ proc new*(
     T: type Erasure,
     store: BlockStore,
     encoderProvider: EncoderProvider,
-    decoderProvider: DecoderProvider): Erasure =
+    decoderProvider: DecoderProvider,
+): Erasure =
   ## Create a new Erasure instance for encoding and decoding manifests
   ##

   Erasure(
-    store: store,
-    encoderProvider: encoderProvider,
-    decoderProvider: decoderProvider)
+    store: store, encoderProvider: encoderProvider, decoderProvider: decoderProvider
+  )
@@ -20,13 +20,15 @@ type
 CodexResult*[T] = Result[T, ref CodexError]

 template mapFailure*[T, V, E](
-    exp: Result[T, V],
-    exc: typedesc[E],
+    exp: Result[T, V], exc: typedesc[E]
 ): Result[T, ref CatchableError] =
   ## Convert `Result[T, E]` to `Result[E, ref CatchableError]`
   ##

-  exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e))
+  exp.mapErr(
+    proc(e: V): ref CatchableError =
+      (ref exc)(msg: $e)
+  )

 template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] =
   mapFailure(exp, CodexError)
@@ -10,7 +10,7 @@ type
     # 0 => 0, 1, 2
     # 1 => 3, 4, 5
     # 2 => 6, 7, 8
-    LinearStrategy,
+    LinearStrategy

     # Stepped indexing:
     # 0 => 0, 3, 6

@@ -21,7 +21,6 @@ type
   # Representing a strategy for grouping indices (of blocks usually)
   # Given an interation-count as input, will produce a seq of
   # selected indices.
-
   IndexingError* = object of CodexError
   IndexingWrongIndexError* = object of IndexingError
   IndexingWrongIterationsError* = object of IndexingError

@@ -33,19 +32,21 @@ type
     iterations*: int # getIndices(iteration) will run from 0 ..< iterations
     step*: int

-func checkIteration(self: IndexingStrategy, iteration: int): void {.raises: [IndexingError].} =
+func checkIteration(
+    self: IndexingStrategy, iteration: int
+): void {.raises: [IndexingError].} =
   if iteration >= self.iterations:
     raise newException(
-      IndexingError,
-      "Indexing iteration can't be greater than or equal to iterations.")
+      IndexingError, "Indexing iteration can't be greater than or equal to iterations."
+    )

 func getIter(first, last, step: int): Iter[int] =
   {.cast(noSideEffect).}:
     Iter[int].new(first, last, step)

 func getLinearIndicies(
-    self: IndexingStrategy,
-    iteration: int): Iter[int] {.raises: [IndexingError].} =
+    self: IndexingStrategy, iteration: int
+): Iter[int] {.raises: [IndexingError].} =
   self.checkIteration(iteration)

   let

@@ -55,8 +56,8 @@ func getLinearIndicies(
   getIter(first, last, 1)

 func getSteppedIndicies(
-    self: IndexingStrategy,
-    iteration: int): Iter[int] {.raises: [IndexingError].} =
+    self: IndexingStrategy, iteration: int
+): Iter[int] {.raises: [IndexingError].} =
   self.checkIteration(iteration)

   let

@@ -66,9 +67,8 @@ func getSteppedIndicies(
   getIter(first, last, self.iterations)

 func getIndicies*(
-    self: IndexingStrategy,
-    iteration: int): Iter[int] {.raises: [IndexingError].} =
-
+    self: IndexingStrategy, iteration: int
+): Iter[int] {.raises: [IndexingError].} =
   case self.strategyType
   of StrategyType.LinearStrategy:
     self.getLinearIndicies(iteration)

@@ -76,22 +76,25 @@ func getIndicies*(
     self.getSteppedIndicies(iteration)

 func init*(
-    strategy: StrategyType,
-    firstIndex, lastIndex, iterations: int): IndexingStrategy {.raises: [IndexingError].} =
-
+    strategy: StrategyType, firstIndex, lastIndex, iterations: int
+): IndexingStrategy {.raises: [IndexingError].} =
   if firstIndex > lastIndex:
     raise newException(
       IndexingWrongIndexError,
-      "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex & ")")
+      "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex &
+        ")",
+    )

   if iterations <= 0:
     raise newException(
       IndexingWrongIterationsError,
-      "iterations (" & $iterations & ") must be greater than zero.")
+      "iterations (" & $iterations & ") must be greater than zero.",
+    )

   IndexingStrategy(
     strategyType: strategy,
     firstIndex: firstIndex,
     lastIndex: lastIndex,
     iterations: iterations,
-    step: divUp((lastIndex - firstIndex + 1), iterations))
+    step: divUp((lastIndex - firstIndex + 1), iterations),
+  )
@@ -123,7 +123,8 @@ func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string =
   short

 func shortHexLog*(long: string): string =
-  if long[0..1] == "0x": result &= "0x"
+  if long[0 .. 1] == "0x":
+    result &= "0x"
   result &= long[2 .. long.high].shortLog("..", 4, 4)

 func short0xHexLog*[N: static[int], T: array[N, byte]](v: T): string =

@@ -182,12 +183,16 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
       let v = opts.map(opt => opt.formatJsonOption)
       setProperty(r, key, json.`%`(v))

-    proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) {.raises:[ValueError, IOError].} =
+    proc setProperty*(
+        r: var JsonRecord, key: string, val: seq[T]
+    ) {.raises: [ValueError, IOError].} =
       var it {.inject, used.}: T
       let v = val.map(it => body)
       setProperty(r, key, json.`%`(v))

-    proc setProperty*(r: var JsonRecord, key: string, val: T) {.raises:[ValueError, IOError].} =
+    proc setProperty*(
+        r: var JsonRecord, key: string, val: T
+    ) {.raises: [ValueError, IOError].} =
       var it {.inject, used.}: T = val
       let v = body
       setProperty(r, key, json.`%`(v))

@@ -218,23 +223,35 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
       let v = opts.map(opt => opt.formatTextLineOption)
       setProperty(r, key, v.formatTextLineSeq)

-    proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) {.raises:[ValueError, IOError].} =
+    proc setProperty*(
+        r: var TextLineRecord, key: string, val: seq[T]
+    ) {.raises: [ValueError, IOError].} =
       var it {.inject, used.}: T
       let v = val.map(it => body)
       setProperty(r, key, v.formatTextLineSeq)

-    proc setProperty*(r: var TextLineRecord, key: string, val: T) {.raises:[ValueError, IOError].} =
+    proc setProperty*(
+        r: var TextLineRecord, key: string, val: T
+    ) {.raises: [ValueError, IOError].} =
       var it {.inject, used.}: T = val
       let v = body
       setProperty(r, key, v)

 template formatIt*(T: type, body: untyped) {.dirty.} =
-  formatIt(LogFormat.textLines, T): body
-  formatIt(LogFormat.json, T): body
+  formatIt(LogFormat.textLines, T):
+    body
+  formatIt(LogFormat.json, T):
+    body

-formatIt(LogFormat.textLines, Cid): shortLog($it)
-formatIt(LogFormat.json, Cid): $it
-formatIt(UInt256): $it
-formatIt(MultiAddress): $it
-formatIt(LogFormat.textLines, array[32, byte]): it.short0xHexLog
-formatIt(LogFormat.json, array[32, byte]): it.to0xHex
+formatIt(LogFormat.textLines, Cid):
+  shortLog($it)
+formatIt(LogFormat.json, Cid):
+  $it
+formatIt(UInt256):
+  $it
+formatIt(MultiAddress):
+  $it
+formatIt(LogFormat.textLines, array[32, byte]):
+  it.short0xHexLog
+formatIt(LogFormat.json, array[32, byte]):
+  it.to0xHex
@@ -12,7 +12,8 @@
 import pkg/upraises
 import times

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import std/tables
 import std/sequtils

@@ -206,15 +207,14 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
   if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
     return failure("Unable to decode `verifiableStrategy` from manifest!")

-  let
-    treeCid = ? Cid.init(treeCidBuf).mapFailure
+  let treeCid = ?Cid.init(treeCidBuf).mapFailure

   var filenameOption = if filename.len == 0: string.none else: filename.some
   var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some
   var uploadedAtOption = if uploadedAt == 0: int64.none else: uploadedAt.int64.some

-  let
-    self = if protected:
+  let self =
+    if protected:
       Manifest.new(
         treeCid = treeCid,
         datasetSize = datasetSize.NBytes,

@@ -229,7 +229,8 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
         strategy = StrategyType(protectedStrategy),
         filename = filenameOption,
         mimetype = mimetypeOption,
-        uploadedAt = uploadedAtOption)
+        uploadedAt = uploadedAtOption,
+      )
     else:
       Manifest.new(
         treeCid = treeCid,

@@ -240,7 +241,8 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
         codec = codec.MultiCodec,
         filename = filenameOption,
         mimetype = mimetypeOption,
-        uploadedAt = uploadedAtOption)
+        uploadedAt = uploadedAtOption,
+      )

   ?self.verify()

@@ -254,7 +256,7 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
       verifyRoot = verifyRootCid,
       slotRoots = slotRootCids,
      cellSize = cellSize.NBytes,
-      strategy = StrategyType(verifiableStrategy)
+      strategy = StrategyType(verifiableStrategy),
     )

   self.success
@@ -11,7 +11,8 @@

 import pkg/upraises

-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 import pkg/libp2p/protobuf/minprotobuf
 import pkg/libp2p/[cid, multihash, multicodec]

@@ -25,14 +26,13 @@ import ../blocktype
 import ../indexingstrategy
 import ../logutils

-
 # TODO: Manifest should be reworked to more concrete types,
 # perhaps using inheritance
-type
-  Manifest* = ref object of RootObj
+type Manifest* = ref object of RootObj
   treeCid {.serialize.}: Cid # Root of the merkle tree
   datasetSize {.serialize.}: NBytes # Total size of all blocks
-  blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
+  blockSize {.serialize.}: NBytes
+    # Size of each contained block (might not be needed if blocks are len-prefixed)
   codec: MultiCodec # Dataset codec
   hcodec: MultiCodec # Multihash codec
   version: CidVersion # Cid version

@@ -46,7 +46,8 @@ type
       originalTreeCid: Cid # The original root of the dataset being erasure coded
       originalDatasetSize: NBytes
      protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
-      case verifiable {.serialize.}: bool # Verifiable datasets can be used to generate storage proofs
+      case verifiable {.serialize.}: bool
+        # Verifiable datasets can be used to generate storage proofs
      of true:
        verifyRoot: Cid # Root of the top level merkle tree built from slot roots
        slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks

@@ -159,7 +160,8 @@ func verify*(self: Manifest): ?!void =
   ##

   if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
-    return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
+    return
+      failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")

   return success()

@@ -167,41 +169,32 @@ func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} =
   self.treeCid.success

 func `==`*(a, b: Manifest): bool =
-  (a.treeCid == b.treeCid) and
-  (a.datasetSize == b.datasetSize) and
-  (a.blockSize == b.blockSize) and
-  (a.version == b.version) and
-  (a.hcodec == b.hcodec) and
-  (a.codec == b.codec) and
-  (a.protected == b.protected) and
-  (a.filename == b.filename) and
-  (a.mimetype == b.mimetype) and
-  (a.uploadedAt == b.uploadedAt) and
-  (if a.protected:
-    (a.ecK == b.ecK) and
-    (a.ecM == b.ecM) and
-    (a.originalTreeCid == b.originalTreeCid) and
+  (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
+  (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
+  (a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and
+  (a.mimetype == b.mimetype) and (a.uploadedAt == b.uploadedAt) and (
+    if a.protected:
+      (a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and
       (a.originalDatasetSize == b.originalDatasetSize) and
-    (a.protectedStrategy == b.protectedStrategy) and
-    (a.verifiable == b.verifiable) and
-    (if a.verifiable:
-      (a.verifyRoot == b.verifyRoot) and
-      (a.slotRoots == b.slotRoots) and
-      (a.cellSize == b.cellSize) and
-      (a.verifiableStrategy == b.verifiableStrategy)
+      (a.protectedStrategy == b.protectedStrategy) and (a.verifiable == b.verifiable) and
+      (
+        if a.verifiable:
+          (a.verifyRoot == b.verifyRoot) and (a.slotRoots == b.slotRoots) and
+          (a.cellSize == b.cellSize) and (
+            a.verifiableStrategy == b.verifiableStrategy
+          )
         else:
-      true)
+          true
+      )
     else:
-      true)
+      true
+  )

 func `$`*(self: Manifest): string =
-  result = "treeCid: " & $self.treeCid &
-    ", datasetSize: " & $self.datasetSize &
-    ", blockSize: " & $self.blockSize &
-    ", version: " & $self.version &
-    ", hcodec: " & $self.hcodec &
-    ", codec: " & $self.codec &
-    ", protected: " & $self.protected
+  result =
+    "treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " &
+    $self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec &
+    ", codec: " & $self.codec & ", protected: " & $self.protected

   if self.filename.isSome:
     result &= ", filename: " & $self.filename

@@ -212,19 +205,19 @@ func `$`*(self: Manifest): string =
   if self.uploadedAt.isSome:
     result &= ", uploadedAt: " & $self.uploadedAt

-  result &= (if self.protected:
-    ", ecK: " & $self.ecK &
-    ", ecM: " & $self.ecM &
-    ", originalTreeCid: " & $self.originalTreeCid &
-    ", originalDatasetSize: " & $self.originalDatasetSize &
-    ", verifiable: " & $self.verifiable &
-    (if self.verifiable:
-      ", verifyRoot: " & $self.verifyRoot &
-      ", slotRoots: " & $self.slotRoots
+  result &= (
+    if self.protected:
+      ", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " &
+      $self.originalTreeCid & ", originalDatasetSize: " & $self.originalDatasetSize &
+      ", verifiable: " & $self.verifiable & (
+        if self.verifiable:
+          ", verifyRoot: " & $self.verifyRoot & ", slotRoots: " & $self.slotRoots
        else:
-      "")
+          ""
+      )
    else:
-    "")
+      ""
+  )

   return result

@@ -243,8 +236,8 @@ func new*(
     protected = false,
     filename: ?string = string.none,
     mimetype: ?string = string.none,
-    uploadedAt: ?int64 = int64.none): Manifest =
+    uploadedAt: ?int64 = int64.none,
+): Manifest =
   T(
     treeCid: treeCid,
     blockSize: blockSize,

@@ -255,7 +248,8 @@ func new*(
     protected: protected,
     filename: filename,
     mimetype: mimetype,
-    uploadedAt: uploadedAt)
+    uploadedAt: uploadedAt,
+  )

 func new*(
     T: type Manifest,

@@ -263,7 +257,8 @@ func new*(
     treeCid: Cid,
     datasetSize: NBytes,
     ecK, ecM: int,
-    strategy = SteppedStrategy): Manifest =
+    strategy = SteppedStrategy,
+): Manifest =
   ## Create an erasure protected dataset from an
   ## unprotected one
   ##

@@ -276,18 +271,17 @@ func new*(
     hcodec: manifest.hcodec,
     blockSize: manifest.blockSize,
     protected: true,
-    ecK: ecK, ecM: ecM,
+    ecK: ecK,
+    ecM: ecM,
     originalTreeCid: manifest.treeCid,
     originalDatasetSize: manifest.datasetSize,
     protectedStrategy: strategy,
     filename: manifest.filename,
     mimetype: manifest.mimetype,
-    uploadedAt: manifest.uploadedAt
+    uploadedAt: manifest.uploadedAt,
   )

-func new*(
-    T: type Manifest,
-    manifest: Manifest): Manifest =
+func new*(T: type Manifest, manifest: Manifest): Manifest =
   ## Create an unprotected dataset from an
   ## erasure protected one
   ##

@@ -302,7 +296,8 @@ func new*(
     protected: false,
     filename: manifest.filename,
     mimetype: manifest.mimetype,
-    uploadedAt: manifest.uploadedAt)
+    uploadedAt: manifest.uploadedAt,
+  )

 func new*(
     T: type Manifest,

@@ -319,8 +314,8 @@ func new*(
     strategy = SteppedStrategy,
     filename: ?string = string.none,
     mimetype: ?string = string.none,
-    uploadedAt: ?int64 = int64.none): Manifest =
+    uploadedAt: ?int64 = int64.none,
+): Manifest =
   Manifest(
     treeCid: treeCid,
     datasetSize: datasetSize,

@@ -336,7 +331,8 @@ func new*(
     protectedStrategy: strategy,
     filename: filename,
     mimetype: mimetype,
-    uploadedAt: uploadedAt)
+    uploadedAt: uploadedAt,
+  )

 func new*(
     T: type Manifest,

@@ -344,18 +340,19 @@ func new*(
     verifyRoot: Cid,
     slotRoots: openArray[Cid],
     cellSize = DefaultCellSize,
-    strategy = LinearStrategy): ?!Manifest =
+    strategy = LinearStrategy,
+): ?!Manifest =
   ## Create a verifiable dataset from an
   ## protected one
   ##

   if not manifest.protected:
     return failure newException(
       CodexError, "Can create verifiable manifest only from protected manifest.")
|
CodexError, "Can create verifiable manifest only from protected manifest."
|
||||||
|
)
|
||||||
|
|
||||||
if slotRoots.len != manifest.numSlots:
|
if slotRoots.len != manifest.numSlots:
|
||||||
return failure newException(
|
return failure newException(CodexError, "Wrong number of slot roots.")
|
||||||
CodexError, "Wrong number of slot roots.")
|
|
||||||
|
|
||||||
success Manifest(
|
success Manifest(
|
||||||
treeCid: manifest.treeCid,
|
treeCid: manifest.treeCid,
|
||||||
@ -377,12 +374,10 @@ func new*(
|
|||||||
verifiableStrategy: strategy,
|
verifiableStrategy: strategy,
|
||||||
filename: manifest.filename,
|
filename: manifest.filename,
|
||||||
mimetype: manifest.mimetype,
|
mimetype: manifest.mimetype,
|
||||||
uploadedAt: manifest.uploadedAt
|
uploadedAt: manifest.uploadedAt,
|
||||||
)
|
)
|
||||||
|
|
||||||
func new*(
|
func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =
|
||||||
T: type Manifest,
|
|
||||||
data: openArray[byte]): ?!Manifest =
|
|
||||||
## Create a manifest instance from given data
|
## Create a manifest instance from given data
|
||||||
##
|
##
|
||||||
|
|
||||||
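The codex/manifest.nim hunks above are formatting-only: nph 0.6.1 collapses a short routine signature onto one line and, when a signature has to wrap (as in the `new*` overloads), keeps the parameters on indented lines, adds a trailing comma after the last parameter, and moves `): ReturnType =` onto its own line. A minimal sketch of the two layouts, using hypothetical names that are not part of codex/manifest.nim:

# Short signatures are collapsed onto a single line by nph.
func scaled(value, factor: int): int =
  value * factor

# Longer signatures keep one parameter per line, end the list with a trailing
# comma, and close with `): ReturnType =` on its own line.
func describeDataset(
    name: string,
    blockSize: int,
    datasetSize: int,
    protected = false,
): string =
  name & ": " & $datasetSize & " bytes in blocks of " & $blockSize &
    (if protected: " (protected)" else: "")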
199 codex/market.nim

@@ -19,13 +19,14 @@ type
Market* = ref object of RootObj
MarketError* = object of CodexError
Subscription* = ref object of RootObj
-OnRequest* = proc(id: RequestId,
-ask: StorageAsk,
-expiry: UInt256) {.gcsafe, upraises:[].}
+OnRequest* =
+proc(id: RequestId, ask: StorageAsk, expiry: UInt256) {.gcsafe, upraises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
-OnSlotFilled* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises:[].}
+OnSlotFilled* =
+proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
-OnSlotReservationsFull* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
+OnSlotReservationsFull* =
+proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
@@ -37,21 +38,28 @@ type
requestId*: RequestId
ask*: StorageAsk
expiry*: UInt256

SlotFilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: UInt256

SlotFreed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: UInt256

SlotReservationsFull* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: UInt256

RequestFulfilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId

RequestCancelled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId

RequestFailed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId

ProofSubmitted* = object of MarketplaceEvent
id*: SlotId

@@ -81,8 +89,7 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
let pntr = await market.getPointer(slotId)
return pntr < downtime

-method requestStorage*(market: Market,
-request: StorageRequest) {.base, async.} =
+method requestStorage*(market: Market, request: StorageRequest) {.base, async.} =
raiseAssert("not implemented")

method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
@@ -91,182 +98,168 @@ method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
raiseAssert("not implemented")

-method getRequest*(market: Market,
-id: RequestId):
-Future[?StorageRequest] {.base, async.} =
+method getRequest*(
+market: Market, id: RequestId
+): Future[?StorageRequest] {.base, async.} =
raiseAssert("not implemented")

-method requestState*(market: Market,
-requestId: RequestId): Future[?RequestState] {.base, async.} =
+method requestState*(
+market: Market, requestId: RequestId
+): Future[?RequestState] {.base, async.} =
raiseAssert("not implemented")

-method slotState*(market: Market,
-slotId: SlotId): Future[SlotState] {.base, async.} =
+method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} =
raiseAssert("not implemented")

-method getRequestEnd*(market: Market,
-id: RequestId): Future[SecondsSince1970] {.base, async.} =
+method getRequestEnd*(
+market: Market, id: RequestId
+): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")

-method requestExpiresAt*(market: Market,
-id: RequestId): Future[SecondsSince1970] {.base, async.} =
+method requestExpiresAt*(
+market: Market, id: RequestId
+): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")

-method getHost*(market: Market,
-requestId: RequestId,
-slotIndex: UInt256): Future[?Address] {.base, async.} =
+method getHost*(
+market: Market, requestId: RequestId, slotIndex: UInt256
+): Future[?Address] {.base, async.} =
raiseAssert("not implemented")

-method getActiveSlot*(
+method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
+raiseAssert("not implemented")

+method fillSlot*(
market: Market,
-slotId: SlotId): Future[?Slot] {.base, async.} =

-raiseAssert("not implemented")

-method fillSlot*(market: Market,
requestId: RequestId,
slotIndex: UInt256,
proof: Groth16Proof,
-collateral: UInt256) {.base, async.} =
+collateral: UInt256,
+) {.base, async.} =
raiseAssert("not implemented")

method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
raiseAssert("not implemented")

-method withdrawFunds*(market: Market,
-requestId: RequestId) {.base, async.} =
+method withdrawFunds*(market: Market, requestId: RequestId) {.base, async.} =
raiseAssert("not implemented")

-method subscribeRequests*(market: Market,
-callback: OnRequest):
-Future[Subscription] {.base, async.} =
+method subscribeRequests*(
+market: Market, callback: OnRequest
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

-method isProofRequired*(market: Market,
-id: SlotId): Future[bool] {.base, async.} =
+method isProofRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")

-method willProofBeRequired*(market: Market,
-id: SlotId): Future[bool] {.base, async.} =
+method willProofBeRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")

-method getChallenge*(market: Market, id: SlotId): Future[ProofChallenge] {.base, async.} =
+method getChallenge*(
+market: Market, id: SlotId
+): Future[ProofChallenge] {.base, async.} =
raiseAssert("not implemented")

-method submitProof*(market: Market,
-id: SlotId,
-proof: Groth16Proof) {.base, async.} =
+method submitProof*(market: Market, id: SlotId, proof: Groth16Proof) {.base, async.} =
raiseAssert("not implemented")

-method markProofAsMissing*(market: Market,
-id: SlotId,
-period: Period) {.base, async.} =
+method markProofAsMissing*(market: Market, id: SlotId, period: Period) {.base, async.} =
raiseAssert("not implemented")

-method canProofBeMarkedAsMissing*(market: Market,
-id: SlotId,
-period: Period): Future[bool] {.base, async.} =
+method canProofBeMarkedAsMissing*(
+market: Market, id: SlotId, period: Period
+): Future[bool] {.base, async.} =
raiseAssert("not implemented")

method reserveSlot*(
-market: Market,
-requestId: RequestId,
-slotIndex: UInt256) {.base, async.} =
+market: Market, requestId: RequestId, slotIndex: UInt256
+) {.base, async.} =

raiseAssert("not implemented")

method canReserveSlot*(
-market: Market,
-requestId: RequestId,
-slotIndex: UInt256): Future[bool] {.base, async.} =
+market: Market, requestId: RequestId, slotIndex: UInt256
+): Future[bool] {.base, async.} =

raiseAssert("not implemented")

-method subscribeFulfillment*(market: Market,
-callback: OnFulfillment):
-Future[Subscription] {.base, async.} =
+method subscribeFulfillment*(
+market: Market, callback: OnFulfillment
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

-method subscribeFulfillment*(market: Market,
-requestId: RequestId,
-callback: OnFulfillment):
-Future[Subscription] {.base, async.} =
+method subscribeFulfillment*(
+market: Market, requestId: RequestId, callback: OnFulfillment
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

-method subscribeSlotFilled*(market: Market,
-callback: OnSlotFilled):
-Future[Subscription] {.base, async.} =
+method subscribeSlotFilled*(
+market: Market, callback: OnSlotFilled
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

-method subscribeSlotFilled*(market: Market,
-requestId: RequestId,
-slotIndex: UInt256,
-callback: OnSlotFilled):
-Future[Subscription] {.base, async.} =
+method subscribeSlotFilled*(
+market: Market, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

-method subscribeSlotFreed*(market: Market,
-callback: OnSlotFreed):
-Future[Subscription] {.base, async.} =
+method subscribeSlotFreed*(
+market: Market, callback: OnSlotFreed
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

method subscribeSlotReservationsFull*(
-market: Market,
-callback: OnSlotReservationsFull): Future[Subscription] {.base, async.} =
+market: Market, callback: OnSlotReservationsFull
+): Future[Subscription] {.base, async.} =

raiseAssert("not implemented")

-method subscribeRequestCancelled*(market: Market,
-callback: OnRequestCancelled):
-Future[Subscription] {.base, async.} =
+method subscribeRequestCancelled*(
+market: Market, callback: OnRequestCancelled
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

-method subscribeRequestCancelled*(market: Market,
-requestId: RequestId,
-callback: OnRequestCancelled):
-Future[Subscription] {.base, async.} =
+method subscribeRequestCancelled*(
+market: Market, requestId: RequestId, callback: OnRequestCancelled
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

-method subscribeRequestFailed*(market: Market,
-callback: OnRequestFailed):
-Future[Subscription] {.base, async.} =
+method subscribeRequestFailed*(
+market: Market, callback: OnRequestFailed
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

-method subscribeRequestFailed*(market: Market,
-requestId: RequestId,
-callback: OnRequestFailed):
-Future[Subscription] {.base, async.} =
+method subscribeRequestFailed*(
+market: Market, requestId: RequestId, callback: OnRequestFailed
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

-method subscribeProofSubmission*(market: Market,
-callback: OnProofSubmitted):
-Future[Subscription] {.base, async.} =
+method subscribeProofSubmission*(
+market: Market, callback: OnProofSubmitted
+): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")

method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} =
raiseAssert("not implemented")

method queryPastSlotFilledEvents*(
-market: Market,
-fromBlock: BlockTag): Future[seq[SlotFilled]] {.base, async.} =
+market: Market, fromBlock: BlockTag
+): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")

method queryPastSlotFilledEvents*(
-market: Market,
-blocksAgo: int): Future[seq[SlotFilled]] {.base, async.} =
+market: Market, blocksAgo: int
+): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")

method queryPastSlotFilledEvents*(
-market: Market,
-fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.base, async.} =
+market: Market, fromTime: SecondsSince1970
+): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")

method queryPastStorageRequestedEvents*(
-market: Market,
-fromBlock: BlockTag): Future[seq[StorageRequested]] {.base, async.} =
+market: Market, fromBlock: BlockTag
+): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")

method queryPastStorageRequestedEvents*(
-market: Market,
-blocksAgo: int): Future[seq[StorageRequested]] {.base, async.} =
+market: Market, blocksAgo: int
+): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
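All of the codex/market.nim hunks are again pure layout changes: a callback type whose `proc` signature overflows the line limit is broken after the `=`, and a base method either collapses onto one line or wraps its parameters onto a single indented line followed by `): ReturnType {.pragmas.} =`. A small self-contained sketch of those two shapes, with hypothetical names that are not taken from market.nim:

type
  ShortHandler* = proc(id: int) {.gcsafe.}
  LongHandler* =
    proc(id: int, description: string, attempt: int, deadlineSeconds: float) {.gcsafe.}

# A signature that fits stays on one line; a longer one wraps like the
# subscribe* methods above.
method describe*(self: RootRef, id: int): string {.base.} =
  raiseAssert("not implemented")

method describeWithCallback*(
    self: RootRef, id: int, description: string, callback: LongHandler
): string {.base.} =
  raiseAssert("not implemented")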
@@ -9,7 +9,8 @@

import pkg/upraises

-push: {.upraises: [].}
+push:
+{.upraises: [].}

import pkg/libp2p
import pkg/questionable
@@ -103,10 +104,7 @@ proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =

CodexProof.init(mcodec, index.int, nleaves.int, nodes)

-proc fromJson*(
-_: type CodexProof,
-json: JsonNode
-): ?!CodexProof =
+proc fromJson*(_: type CodexProof, json: JsonNode): ?!CodexProof =
expectJsonKind(Cid, JString, json)
var bytes: seq[byte]
try:
@@ -116,4 +114,5 @@ proc fromJson*(

CodexProof.decode(bytes)

-func `%`*(proof: CodexProof): JsonNode = % byteutils.toHex(proof.encode())
+func `%`*(proof: CodexProof): JsonNode =
+%byteutils.toHex(proof.encode())

@@ -56,8 +56,7 @@ proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
const CodeHashes = initMultiHashCodeTable()

func mhash*(mcodec: MultiCodec): ?!MHash =
-let
-mhash = CodeHashes.getOrDefault(mcodec)
+let mhash = CodeHashes.getOrDefault(mcodec)

if isNil(mhash.coder):
return failure "Invalid multihash codec"
@@ -71,8 +70,7 @@ func digestSize*(self: (CodexTree or CodexProof)): int =
self.mhash.size

func getProof*(self: CodexTree, index: int): ?!CodexProof =
-var
-proof = CodexProof(mcodec: self.mcodec)
+var proof = CodexProof(mcodec: self.mcodec)

?self.getProof(index, proof)

@@ -86,12 +84,10 @@ func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool =
rootBytes = root.digestBytes
leafBytes = leaf.digestBytes

-if self.mcodec != root.mcodec or
-self.mcodec != leaf.mcodec:
+if self.mcodec != root.mcodec or self.mcodec != leaf.mcodec:
return failure "Hash codec mismatch"

-if rootBytes.len != root.size and
-leafBytes.len != leaf.size:
+if rootBytes.len != root.size and leafBytes.len != leaf.size:
return failure "Invalid hash length"

self.verify(leafBytes, rootBytes)
@@ -99,25 +95,17 @@ func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool =
func verify*(self: CodexProof, leaf: Cid, root: Cid): ?!bool =
self.verify(?leaf.mhash.mapFailure, ?leaf.mhash.mapFailure)

-proc rootCid*(
-self: CodexTree,
-version = CIDv1,
-dataCodec = DatasetRootCodec): ?!Cid =
+proc rootCid*(self: CodexTree, version = CIDv1, dataCodec = DatasetRootCodec): ?!Cid =

if (?self.root).len == 0:
return failure "Empty root"

-let
-mhash = ? MultiHash.init(self.mcodec, ? self.root).mapFailure
+let mhash = ?MultiHash.init(self.mcodec, ?self.root).mapFailure

Cid.init(version, DatasetRootCodec, mhash).mapFailure

func getLeafCid*(
-self: CodexTree,
-i: Natural,
-version = CIDv1,
-dataCodec = BlockCodec): ?!Cid =
+self: CodexTree, i: Natural, version = CIDv1, dataCodec = BlockCodec
+): ?!Cid =

if i >= self.leavesCount:
return failure "Invalid leaf index " & $i

@@ -128,24 +116,19 @@ func getLeafCid*(
Cid.init(version, dataCodec, mhash).mapFailure

proc `$`*(self: CodexTree): string =
-let root = if self.root.isOk: byteutils.toHex(self.root.get) else: "none"
-"CodexTree(" &
-" root: " & root &
-", leavesCount: " & $self.leavesCount &
-", levels: " & $self.levels &
-", mcodec: " & $self.mcodec & " )"
+let root =
+if self.root.isOk:
+byteutils.toHex(self.root.get)
+else:
+"none"
+"CodexTree(" & " root: " & root & ", leavesCount: " & $self.leavesCount & ", levels: " &
+$self.levels & ", mcodec: " & $self.mcodec & " )"

proc `$`*(self: CodexProof): string =
-"CodexProof(" &
-" nleaves: " & $self.nleaves &
-", index: " & $self.index &
-", path: " & $self.path.mapIt( byteutils.toHex(it) ) &
-", mcodec: " & $self.mcodec & " )"
+"CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
+$self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"

-func compress*(
-x, y: openArray[byte],
-key: ByteTreeKey,
-mhash: MHash): ?!ByteHash =
+func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash =
## Compress two hashes
##

@@ -154,10 +137,8 @@ func compress*(
success digest

func init*(
-_: type CodexTree,
-mcodec: MultiCodec = Sha256HashCodec,
-leaves: openArray[ByteHash]): ?!CodexTree =
+_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
+): ?!CodexTree =

if leaves.len == 0:
return failure "Empty leaves"

@@ -170,16 +151,12 @@ func init*(
if mhash.size != leaves[0].len:
return failure "Invalid hash length"

-var
-self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
+var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)

self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self

-func init*(
-_: type CodexTree,
-leaves: openArray[MultiHash]): ?!CodexTree =
+func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree =

if leaves.len == 0:
return failure "Empty leaves"

@@ -189,9 +166,7 @@ func init*(

CodexTree.init(mcodec, leaves)

-func init*(
-_: type CodexTree,
-leaves: openArray[Cid]): ?!CodexTree =
+func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"

@@ -205,8 +180,8 @@ proc fromNodes*(
_: type CodexTree,
mcodec: MultiCodec = Sha256HashCodec,
nodes: openArray[ByteHash],
-nleaves: int): ?!CodexTree =
+nleaves: int,
+): ?!CodexTree =
if nodes.len == 0:
return failure "Empty nodes"

@@ -243,8 +218,8 @@ func init*(
mcodec: MultiCodec = Sha256HashCodec,
index: int,
nleaves: int,
-nodes: openArray[ByteHash]): ?!CodexProof =
+nodes: openArray[ByteHash],
+): ?!CodexProof =
if nodes.len == 0:
return failure "Empty nodes"

@@ -260,4 +235,5 @@ func init*(
mcodec: mcodec,
index: index,
nleaves: nleaves,
-path: @nodes)
+path: @nodes,
+)

@@ -59,9 +59,8 @@ func root*[H, K](self: MerkleTree[H, K]): ?!H =
return success last[0]

func getProof*[H, K](
-self: MerkleTree[H, K],
-index: int,
-proof: MerkleProof[H, K]): ?!void =
+self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K]
+): ?!void =
let depth = self.depth
let nleaves = self.leavesCount

@@ -73,7 +72,11 @@ func getProof*[H, K](
var m = nleaves
for i in 0 ..< depth:
let j = k xor 1
-path[i] = if (j < m): self.layers[i][j] else: self.zero
+path[i] =
+if (j < m):
+self.layers[i][j]
+else:
+self.zero
k = k shr 1
m = (m + 1) shr 1

@@ -85,8 +88,7 @@ func getProof*[H, K](
success()

func getProof*[H, K](self: MerkleTree[H, K], index: int): ?!MerkleProof[H, K] =
-var
-proof = MerkleProof[H, K]()
+var proof = MerkleProof[H, K]()

?self.getProof(index, proof)

@@ -121,10 +123,8 @@ func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
success bool(root == ?proof.reconstructRoot(leaf))

func merkleTreeWorker*[H, K](
-self: MerkleTree[H, K],
-xs: openArray[H],
-isBottomLayer: static bool): ?!seq[seq[H]] =
+self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool
+): ?!seq[seq[H]] =

let a = low(xs)
let b = high(xs)
let m = b - a + 1

@@ -46,64 +46,49 @@ type

proc `$`*(self: Poseidon2Tree): string =
let root = if self.root.isOk: self.root.get.toHex else: "none"
-"Poseidon2Tree(" &
-" root: " & root &
-", leavesCount: " & $self.leavesCount &
+"Poseidon2Tree(" & " root: " & root & ", leavesCount: " & $self.leavesCount &
", levels: " & $self.levels & " )"

proc `$`*(self: Poseidon2Proof): string =
-"Poseidon2Proof(" &
-" nleaves: " & $self.nleaves &
-", index: " & $self.index &
+"Poseidon2Proof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index &
", path: " & $self.path.mapIt(it.toHex) & " )"

func toArray32*(bytes: openArray[byte]): array[32, byte] =
result[0 ..< bytes.len] = bytes[0 ..< bytes.len]

converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
-case key:
+case key
of KeyNone: KeyNoneF
of KeyBottomLayer: KeyBottomLayerF
of KeyOdd: KeyOddF
of KeyOddAndBottomLayer: KeyOddAndBottomLayerF

-func init*(
-_: type Poseidon2Tree,
-leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
+func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =

if leaves.len == 0:
return failure "Empty leaves"

-let
-compressor = proc(
-x, y: Poseidon2Hash,
-key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
+let compressor = proc(
+x, y: Poseidon2Hash, key: PoseidonKeysEnum
+): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)

-var
-self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
+var self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)

self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self

-func init*(
-_: type Poseidon2Tree,
-leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
-Poseidon2Tree.init(
-leaves.mapIt( Poseidon2Hash.fromBytes(it) ))
+func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
+Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it)))

proc fromNodes*(
-_: type Poseidon2Tree,
-nodes: openArray[Poseidon2Hash],
-nleaves: int): ?!Poseidon2Tree =
+_: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int
+): ?!Poseidon2Tree =

if nodes.len == 0:
return failure "Empty nodes"

-let
-compressor = proc(
-x, y: Poseidon2Hash,
-key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
+let compressor = proc(
+x, y: Poseidon2Hash, key: PoseidonKeysEnum
+): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)

var
@@ -126,18 +111,14 @@ proc fromNodes*(
success self

func init*(
-_: type Poseidon2Proof,
-index: int,
-nleaves: int,
-nodes: openArray[Poseidon2Hash]): ?!Poseidon2Proof =
+_: type Poseidon2Proof, index: int, nleaves: int, nodes: openArray[Poseidon2Hash]
+): ?!Poseidon2Proof =

if nodes.len == 0:
return failure "Empty nodes"

-let
-compressor = proc(
-x, y: Poseidon2Hash,
-key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
+let compressor = proc(
+x, y: Poseidon2Hash, key: PoseidonKeysEnum
+): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)

success Poseidon2Proof(
@@ -145,4 +126,5 @@ func init*(
zero: Poseidon2Zero,
index: index,
nleaves: nleaves,
-path: @nodes)
+path: @nodes,
+)

@@ -11,7 +11,8 @@ const
# Namespaces
CodexMetaNamespace* = "meta" # meta info stored here
CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
-CodexBlockTotalNamespace* = CodexMetaNamespace & "/total" # number of blocks in the repo
+CodexBlockTotalNamespace* = CodexMetaNamespace & "/total"
+# number of blocks in the repo
CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace
CodexBlocksTtlNamespace* = # Cid TTL
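Two further rules show up in the merkle tree and namespace hunks above: a `let`/`var` section that holds a single binding is collapsed onto one line, and the optional colon after a `case` selector is dropped (compare the `converter toKey*` hunk). A minimal sketch with a hypothetical proc that is not part of the diff:

proc classify(n: int): string =
  # nph turns `let` plus one indented binding into a single line ...
  let half = n div 2
  # ... and writes `case half` instead of `case half:`.
  case half
  of 0:
    result = "small"
  else:
    result = "large"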
138 codex/nat.nim

@@ -9,8 +9,10 @@
{.push raises: [].}

import
-std/[options, os, strutils, times, net],stew/shims/net as stewNet,
-stew/[objects,results], nat_traversal/[miniupnpc, natpmp],
+std/[options, os, strutils, times, net],
+stew/shims/net as stewNet,
+stew/[objects, results],
+nat_traversal/[miniupnpc, natpmp],
json_serialization/std/net

import pkg/chronos
@@ -38,8 +40,7 @@ var
logScope:
topics = "nat"

-type
-PrefSrcStatus = enum
+type PrefSrcStatus = enum
NoRoutingInfo
PrefSrcIsPublic
PrefSrcIsPrivate
@@ -63,7 +64,7 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress]
var
msg: cstring
canContinue = true
-case upnp.selectIGD():
+case upnp.selectIGD()
of IGDNotFound:
msg = "Internet Gateway Device not found. Giving up."
canContinue = false
@@ -72,9 +73,11 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress]
of IGDNotConnected:
msg = "Internet Gateway Device found but it's not connected. Trying anyway."
of NotAnIGD:
-msg = "Some device found, but it's not recognised as an Internet Gateway Device. Trying anyway."
+msg =
+"Some device found, but it's not recognised as an Internet Gateway Device. Trying anyway."
of IGDIpNotRoutable:
-msg = "Internet Gateway Device found and is connected, but with a reserved or non-routable IP. Trying anyway."
+msg =
+"Internet Gateway Device found and is connected, but with a reserved or non-routable IP. Trying anyway."
if not quiet:
debug "UPnP", msg
if canContinue:
@@ -116,8 +119,7 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress]
# Further more, we check if the bind address (user provided, or a "0.0.0.0"
# default) is a public IP. That's a long shot, because code paths involving a
# user-provided bind address are not supposed to get here.
-proc getRoutePrefSrc(
-bindIp: IpAddress): (Option[IpAddress], PrefSrcStatus) =
+proc getRoutePrefSrc(bindIp: IpAddress): (Option[IpAddress], PrefSrcStatus) =
let bindAddress = initTAddress(bindIp, Port(0))

if bindAddress.isAnyLocal():
@@ -137,10 +139,12 @@ proc getRoutePrefSrc(
return (none(IpAddress), BindAddressIsPrivate)

# Try to detect a public IP assigned to this host, before trying NAT traversal.
-proc getPublicRoutePrefSrcOrExternalIP*(natStrategy: NatStrategy, bindIp: IpAddress, quiet = true): Option[IpAddress] =
+proc getPublicRoutePrefSrcOrExternalIP*(
+natStrategy: NatStrategy, bindIp: IpAddress, quiet = true
+): Option[IpAddress] =
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)

-case prefSrcStatus:
+case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return prefSrcIp
of PrefSrcIsPrivate, BindAddressIsPrivate:
@@ -148,7 +152,9 @@ proc getPublicRoutePrefSrcOrExternalIP*(natStrategy: NatStrategy, bindIp: IpAddr
if extIp.isSome:
return some(extIp.get)

-proc doPortMapping(tcpPort, udpPort: Port, description: string): Option[(Port, Port)] {.gcsafe.} =
+proc doPortMapping(
+tcpPort, udpPort: Port, description: string
+): Option[(Port, Port)] {.gcsafe.} =
var
extTcpPort: Port
extUdpPort: Port
@@ -157,24 +163,28 @@ proc doPortMapping(tcpPort, udpPort: Port, description: string): Option[(Port, P
for t in [(tcpPort, UPNPProtocol.TCP), (udpPort, UPNPProtocol.UDP)]:
let
(port, protocol) = t
-pmres = upnp.addPortMapping(externalPort = $port,
+pmres = upnp.addPortMapping(
+externalPort = $port,
protocol = protocol,
internalHost = upnp.lanAddr,
internalPort = $port,
desc = description,
-leaseDuration = 0)
+leaseDuration = 0,
+)
if pmres.isErr:
error "UPnP port mapping", msg = pmres.error, port
return
else:
# let's check it
-let cres = upnp.getSpecificPortMapping(externalPort = $port,
-protocol = protocol)
+let cres =
+upnp.getSpecificPortMapping(externalPort = $port, protocol = protocol)
if cres.isErr:
-warn "UPnP port mapping check failed. Assuming the check itself is broken and the port mapping was done.", msg = cres.error
+warn "UPnP port mapping check failed. Assuming the check itself is broken and the port mapping was done.",
+msg = cres.error

-info "UPnP: added port mapping", externalPort = port, internalPort = port, protocol = protocol
-case protocol:
+info "UPnP: added port mapping",
+externalPort = port, internalPort = port, protocol = protocol
+case protocol
of UPNPProtocol.TCP:
extTcpPort = port
of UPNPProtocol.UDP:
@@ -183,17 +193,20 @@ proc doPortMapping(tcpPort, udpPort: Port, description: string): Option[(Port, P
for t in [(tcpPort, NatPmpProtocol.TCP), (udpPort, NatPmpProtocol.UDP)]:
let
(port, protocol) = t
-pmres = npmp.addPortMapping(eport = port.cushort,
+pmres = npmp.addPortMapping(
+eport = port.cushort,
iport = port.cushort,
protocol = protocol,
-lifetime = NATPMP_LIFETIME)
+lifetime = NATPMP_LIFETIME,
+)
if pmres.isErr:
error "NAT-PMP port mapping", msg = pmres.error, port
return
else:
let extPort = Port(pmres.value)
-info "NAT-PMP: added port mapping", externalPort = extPort, internalPort = port, protocol = protocol
-case protocol:
+info "NAT-PMP: added port mapping",
+externalPort = extPort, internalPort = port, protocol = protocol
+case protocol
of NatPmpProtocol.TCP:
extTcpPort = extPort
of NatPmpProtocol.UDP:
@@ -223,8 +236,11 @@ proc repeatPortMapping(args: PortMappingArgs) {.thread, raises: [ValueError].} =
while true:
# we're being silly here with this channel polling because we can't
# select on Nim channels like on Go ones
-let (dataAvailable, _) = try: natCloseChan.tryRecv()
-except Exception: (false, false)
+let (dataAvailable, _) =
+try:
+natCloseChan.tryRecv()
+except Exception:
+(false, false)
if dataAvailable:
return
else:
@@ -255,26 +271,33 @@ proc stopNatThread() {.noconv.} =
let ipres = getExternalIP(strategy, quiet = true)
if ipres.isSome:
if strategy == NatStrategy.NatUpnp:
-for t in [(externalTcpPort, internalTcpPort, UPNPProtocol.TCP), (externalUdpPort, internalUdpPort, UPNPProtocol.UDP)]:
+for t in [
+(externalTcpPort, internalTcpPort, UPNPProtocol.TCP),
+(externalUdpPort, internalUdpPort, UPNPProtocol.UDP),
+]:
let
(eport, iport, protocol) = t
-pmres = upnp.deletePortMapping(externalPort = $eport,
-protocol = protocol)
+pmres = upnp.deletePortMapping(externalPort = $eport, protocol = protocol)
if pmres.isErr:
error "UPnP port mapping deletion", msg = pmres.error
else:
-debug "UPnP: deleted port mapping", externalPort = eport, internalPort = iport, protocol = protocol
+debug "UPnP: deleted port mapping",
+externalPort = eport, internalPort = iport, protocol = protocol
elif strategy == NatStrategy.NatPmp:
-for t in [(externalTcpPort, internalTcpPort, NatPmpProtocol.TCP), (externalUdpPort, internalUdpPort, NatPmpProtocol.UDP)]:
+for t in [
+(externalTcpPort, internalTcpPort, NatPmpProtocol.TCP),
+(externalUdpPort, internalUdpPort, NatPmpProtocol.UDP),
+]:
let
(eport, iport, protocol) = t
-pmres = npmp.deletePortMapping(eport = eport.cushort,
-iport = iport.cushort,
-protocol = protocol)
+pmres = npmp.deletePortMapping(
+eport = eport.cushort, iport = iport.cushort, protocol = protocol
+)
if pmres.isErr:
error "NAT-PMP port mapping deletion", msg = pmres.error
else:
-debug "NAT-PMP: deleted port mapping", externalPort = eport, internalPort = iport, protocol = protocol
+debug "NAT-PMP: deleted port mapping",
+externalPort = eport, internalPort = iport, protocol = protocol

proc redirectPorts*(tcpPort, udpPort: Port, description: string): Option[(Port, Port)] =
result = doPortMapping(tcpPort, udpPort, description)
@@ -288,15 +311,17 @@ proc redirectPorts*(tcpPort, udpPort: Port, description: string): Option[(Port,
# these mappings.
natCloseChan.open()
try:
-natThread.createThread(repeatPortMapping, (externalTcpPort, externalUdpPort, description))
+natThread.createThread(
+repeatPortMapping, (externalTcpPort, externalUdpPort, description)
+)
# atexit() in disguise
addQuitProc(stopNatThread)
except Exception as exc:
warn "Failed to create NAT port mapping renewal thread", exc = exc.msg

-proc setupNat*(natStrategy: NatStrategy, tcpPort, udpPort: Port,
-clientId: string):
-tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] =
+proc setupNat*(
+natStrategy: NatStrategy, tcpPort, udpPort: Port, clientId: string
+): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] =
## Setup NAT port mapping and get external IP address.
## If any of this fails, we don't return any IP address but do return the
## original ports as best effort.
@@ -304,10 +329,10 @@ proc setupNat*(natStrategy: NatStrategy, tcpPort, udpPort: Port,
let extIp = getExternalIP(natStrategy)
if extIp.isSome:
let ip = extIp.get
-let extPorts = ({.gcsafe.}:
-redirectPorts(tcpPort = tcpPort,
-udpPort = udpPort,
-description = clientId))
+let extPorts = (
+{.gcsafe.}:
+redirectPorts(tcpPort = tcpPort, udpPort = udpPort, description = clientId)
+)
if extPorts.isSome:
let (extTcpPort, extUdpPort) = extPorts.get()
(ip: some(ip), tcpPort: some(extTcpPort), udpPort: some(extUdpPort))
@@ -318,16 +343,14 @@ proc setupNat*(natStrategy: NatStrategy, tcpPort, udpPort: Port,
warn "UPnP/NAT-PMP not available"
(ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))

-type
-NatConfig* = object
+type NatConfig* = object
case hasExtIp*: bool
of true: extIp*: IpAddress
of false: nat*: NatStrategy

-proc setupAddress*(natConfig: NatConfig, bindIp: IpAddress,
-tcpPort, udpPort: Port, clientId: string):
-tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]]
-{.gcsafe.} =
+proc setupAddress*(
+natConfig: NatConfig, bindIp: IpAddress, tcpPort, udpPort: Port, clientId: string
+): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] {.gcsafe.} =
## Set-up of the external address via any of the ways as configured in
## `NatConfig`. In case all fails an error is logged and the bind ports are
## selected also as external ports, as best effort and in hope that the
@@ -338,11 +361,11 @@ proc setupAddress*(natConfig: NatConfig, bindIp: IpAddress,
# any required port redirection must be done by hand
return (some(natConfig.extIp), some(tcpPort), some(udpPort))

-case natConfig.nat:
+case natConfig.nat
of NatStrategy.NatAny:
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)

-case prefSrcStatus:
+case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return (prefSrcIp, some(tcpPort), some(udpPort))
of PrefSrcIsPrivate, BindAddressIsPrivate:
@@ -350,7 +373,7 @@ proc setupAddress*(natConfig: NatConfig, bindIp: IpAddress,
of NatStrategy.NatNone:
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)

-case prefSrcStatus:
+case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return (prefSrcIp, some(tcpPort), some(udpPort))
of PrefSrcIsPrivate:
@@ -362,20 +385,22 @@ proc setupAddress*(natConfig: NatConfig, bindIp: IpAddress,
of NatStrategy.NatUpnp, NatStrategy.NatPmp:
return setupNat(natConfig.nat, tcpPort, udpPort, clientId)

-proc nattedAddress*(natConfig: NatConfig, addrs: seq[MultiAddress], udpPort: Port): tuple[libp2p, discovery: seq[MultiAddress]] =
+proc nattedAddress*(
+natConfig: NatConfig, addrs: seq[MultiAddress], udpPort: Port
+): tuple[libp2p, discovery: seq[MultiAddress]] =
## Takes a NAT configuration, sequence of multiaddresses and UDP port and returns:
## - Modified multiaddresses with NAT-mapped addresses for libp2p
## - Discovery addresses with NAT-mapped UDP ports

var discoveryAddrs = newSeq[MultiAddress](0)
-let
-newAddrs = addrs.mapIt:
+let newAddrs = addrs.mapIt:
block:
# Extract IP address and port from the multiaddress
let (ipPart, port) = getAddressAndPort(it)
if ipPart.isSome and port.isSome:
# Try to setup NAT mapping for the address
-let (newIP, tcp, udp) = setupAddress(natConfig, ipPart.get, port.get, udpPort, "codex")
+let (newIP, tcp, udp) =
+setupAddress(natConfig, ipPart.get, port.get, udpPort, "codex")
if newIP.isSome:
# NAT mapping successful - add discovery address with mapped UDP port
discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(newIP.get, udp.get))
@@ -390,6 +415,3 @@ proc nattedAddress*(natConfig: NatConfig, addrs: seq[MultiAddress], udpPort: Por
# Invalid multiaddress format - return as is
it
(newAddrs, discoveryAddrs)
|
|
194 codex/node.nim
@ -50,14 +50,15 @@ export logutils
logScope:
topics = "codex node"

-const
-FetchBatch = 200
+const FetchBatch = 200

type
-Contracts* = tuple
-client: ?ClientInteractions
-host: ?HostInteractions
-validator: ?ValidatorInteractions
+Contracts* =
+tuple[
+client: ?ClientInteractions,
+host: ?HostInteractions,
+validator: ?ValidatorInteractions,
+]

CodexNode* = object
switch: Switch
@ -88,8 +89,8 @@ func discovery*(self: CodexNodeRef): Discovery =
return self.discovery

proc storeManifest*(
-self: CodexNodeRef,
-manifest: Manifest): Future[?!bt.Block] {.async.} =
+self: CodexNodeRef, manifest: Manifest
+): Future[?!bt.Block] {.async.} =
without encodedVerifiable =? manifest.encode(), err:
trace "Unable to encode manifest"
return failure(err)
@ -104,9 +105,7 @@ proc storeManifest*(

success blk

-proc fetchManifest*(
-self: CodexNodeRef,
-cid: Cid): Future[?!Manifest] {.async.} =
+proc fetchManifest*(self: CodexNodeRef, cid: Cid): Future[?!Manifest] {.async.} =
## Fetch and decode a manifest block
##

@ -129,33 +128,27 @@ proc fetchManifest*(

return manifest.success

-proc findPeer*(
-self: CodexNodeRef,
-peerId: PeerId): Future[?PeerRecord] {.async.} =
+proc findPeer*(self: CodexNodeRef, peerId: PeerId): Future[?PeerRecord] {.async.} =
## Find peer using the discovery service from the given CodexNode
##
return await self.discovery.findPeer(peerId)

proc connect*(
-self: CodexNodeRef,
-peerId: PeerId,
-addrs: seq[MultiAddress]
+self: CodexNodeRef, peerId: PeerId, addrs: seq[MultiAddress]
): Future[void] =
self.switch.connect(peerId, addrs)

proc updateExpiry*(
-self: CodexNodeRef,
-manifestCid: Cid,
-expiry: SecondsSince1970): Future[?!void] {.async.} =
+self: CodexNodeRef, manifestCid: Cid, expiry: SecondsSince1970
+): Future[?!void] {.async.} =

without manifest =? await self.fetchManifest(manifestCid), error:
trace "Unable to fetch manifest for cid", manifestCid
return failure(error)

try:
-let
-ensuringFutures = Iter[int].new(0..<manifest.blocksCount)
-.mapIt(self.networkStore.localStore.ensureExpiry( manifest.treeCid, it, expiry ))
+let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt(
+self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry)
+)
await allFuturesThrowing(ensuringFutures)
except CancelledError as exc:
raise exc
@ -169,7 +162,8 @@ proc fetchBatched*(
cid: Cid,
iter: Iter[int],
batchSize = FetchBatch,
-onBatch: BatchProc = nil): Future[?!void] {.async, gcsafe.} =
+onBatch: BatchProc = nil,
+): Future[?!void] {.async, gcsafe.} =
## Fetch blocks in batches of `batchSize`
##

@ -198,7 +192,8 @@ proc fetchBatched*(
self: CodexNodeRef,
manifest: Manifest,
batchSize = FetchBatch,
-onBatch: BatchProc = nil): Future[?!void] =
+onBatch: BatchProc = nil,
+): Future[?!void] =
## Fetch manifest in batches of `batchSize`
##

@ -207,16 +202,12 @@ proc fetchBatched*(
let iter = Iter[int].new(0 ..< manifest.blocksCount)
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch)

-proc streamSingleBlock(
-self: CodexNodeRef,
-cid: Cid
-): Future[?!LPStream] {.async.} =
+proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} =
## Streams the contents of a single block.
##
trace "Streaming single block", cid = cid

-let
-stream = BufferStream.new()
+let stream = BufferStream.new()

without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
return failure(err)
@ -234,9 +225,7 @@ proc streamSingleBlock(
LPStream(stream).success

proc streamEntireDataset(
-self: CodexNodeRef,
-manifest: Manifest,
-manifestCid: Cid,
+self: CodexNodeRef, manifest: Manifest, manifestCid: Cid
): Future[?!LPStream] {.async.} =
## Streams the contents of the entire dataset described by the manifest.
##
@ -246,11 +235,8 @@ proc streamEntireDataset(
# Retrieve, decode and save to the local store all EС groups
proc erasureJob(): Future[?!void] {.async.} =
# Spawn an erasure decoding job
-let
-erasure = Erasure.new(
-self.networkStore,
-leoEncoderProvider,
-leoDecoderProvider)
+let erasure =
+Erasure.new(self.networkStore, leoEncoderProvider, leoDecoderProvider)
without _ =? (await erasure.decode(manifest)), error:
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
return failure(error)
@ -265,9 +251,8 @@ proc streamEntireDataset(
LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success

proc retrieve*(
-self: CodexNodeRef,
-cid: Cid,
-local: bool = true): Future[?!LPStream] {.async.} =
+self: CodexNodeRef, cid: Cid, local: bool = true
+): Future[?!LPStream] {.async.} =
## Retrieve by Cid a single block or an entire dataset described by manifest
##

@ -287,7 +272,8 @@ proc store*(
stream: LPStream,
filename: ?string = string.none,
mimetype: ?string = string.none,
-blockSize = DefaultBlockSize): Future[?!Cid] {.async.} =
+blockSize = DefaultBlockSize,
+): Future[?!Cid] {.async.} =
## Save stream contents as dataset with given blockSize
## to nodes's BlockStore, and return Cid of its manifest
##
@ -301,10 +287,7 @@ proc store*(
var cids: seq[Cid]

try:
-while (
-let chunk = await chunker.getBytes();
-chunk.len > 0):
+while (let chunk = await chunker.getBytes(); chunk.len > 0):

without mhash =? MultiHash.digest($hcodec, chunk).mapFailure, err:
return failure(err)

@ -335,7 +318,8 @@ proc store*(
for index, cid in cids:
without proof =? tree.getProof(index), err:
return failure(err)
-if err =? (await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption:
+if err =?
+(await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption:
# TODO add log here
return failure(err)

@ -348,13 +332,15 @@ proc store*(
codec = dataCodec,
filename = filename,
mimetype = mimetype,
-uploadedAt = now().utc.toTime.toUnix.some)
+uploadedAt = now().utc.toTime.toUnix.some,
+)

without manifestBlk =? await self.storeManifest(manifest), err:
error "Unable to store manifest"
return failure(err)

-info "Stored data", manifestCid = manifestBlk.cid,
+info "Stored data",
+manifestCid = manifestBlk.cid,
treeCid = treeCid,
blocks = manifest.blocksCount,
datasetSize = manifest.datasetSize,
@ -389,7 +375,8 @@ proc setupRequest(
tolerance: uint,
reward: UInt256,
collateral: UInt256,
-expiry: UInt256): Future[?!StorageRequest] {.async.} =
+expiry: UInt256,
+): Future[?!StorageRequest] {.async.} =
## Setup slots for a given dataset
##

@ -416,11 +403,8 @@ proc setupRequest(
return failure error

# Erasure code the dataset according to provided parameters
-let
-erasure = Erasure.new(
-self.networkStore.localStore,
-leoEncoderProvider,
-leoDecoderProvider)
+let erasure =
+Erasure.new(self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider)

without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
trace "Unable to erasure code dataset"
@ -453,13 +437,13 @@ proc setupRequest(
proofProbability: proofProbability,
reward: reward,
collateral: collateral,
-maxSlotLoss: tolerance
+maxSlotLoss: tolerance,
),
content: StorageContent(
cid: $manifestBlk.cid, # TODO: why string?
-merkleRoot: verifyRoot
+merkleRoot: verifyRoot,
),
-expiry: expiry
+expiry: expiry,
)

trace "Request created", request = $request
@ -474,7 +458,8 @@ proc requestStorage*(
tolerance: uint,
reward: UInt256,
collateral: UInt256,
-expiry: UInt256): Future[?!PurchaseId] {.async.} =
+expiry: UInt256,
+): Future[?!PurchaseId] {.async.} =
## Initiate a request for storage sequence, this might
## be a multistep procedure.
##
@ -496,16 +481,11 @@ proc requestStorage*(
trace "Purchasing not available"
return failure "Purchasing not available"

-without request =?
-(await self.setupRequest(
-cid,
-duration,
-proofProbability,
-nodes,
-tolerance,
-reward,
-collateral,
-expiry)), err:
+without request =? (
+await self.setupRequest(
+cid, duration, proofProbability, nodes, tolerance, reward, collateral, expiry
+)
+), err:
trace "Unable to setup request"
return failure err

@ -513,10 +493,8 @@ proc requestStorage*(
success purchase.id

proc onStore(
-self: CodexNodeRef,
-request: StorageRequest,
-slotIdx: UInt256,
-blocksCb: BlocksCb): Future[?!void] {.async.} =
+self: CodexNodeRef, request: StorageRequest, slotIdx: UInt256, blocksCb: BlocksCb
+): Future[?!void] {.async.} =
## store data in local storage
##

@ -534,9 +512,8 @@ proc onStore(
trace "Unable to fetch manifest for cid", cid, err = err.msg
return failure(err)

-without builder =? Poseidon2Builder.new(
-self.networkStore, manifest, manifest.verifiableStrategy
-), err:
+without builder =?
+Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy), err:
trace "Unable to create slots builder", err = err.msg
return failure(err)

@ -551,7 +528,8 @@ proc onStore(
proc updateExpiry(blocks: seq[bt.Block]): Future[?!void] {.async.} =
trace "Updating expiry for blocks", blocks = blocks.len

-let ensureExpiryFutures = blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
+let ensureExpiryFutures =
+blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption:
return failure(updateExpiryErr)

@ -561,8 +539,9 @@ proc onStore(

return success()

-without indexer =? manifest.verifiableStrategy.init(
-0, manifest.blocksCount - 1, manifest.numSlots).catch, err:
+without indexer =?
+manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
+err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)

@ -570,10 +549,9 @@ proc onStore(
trace "Unable to get indicies from strategy", err = err.msg
return failure(err)

-if err =? (await self.fetchBatched(
-manifest.treeCid,
-blksIter,
-onBatch = updateExpiry)).errorOption:
+if err =? (
+await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
+).errorOption:
trace "Unable to fetch blocks", err = err.msg
return failure(err)

@ -584,7 +562,8 @@ proc onStore(
trace "Slot successfully retrieved and reconstructed"

if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx.int]:
-trace "Slot root mismatch", manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
+trace "Slot root mismatch",
+manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
return failure(newException(CodexError, "Slot root mismatch"))

trace "Slot successfully retrieved and reconstructed"
@ -592,9 +571,8 @@ proc onStore(
return success()

proc onProve(
-self: CodexNodeRef,
-slot: Slot,
-challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} =
+self: CodexNodeRef, slot: Slot, challenge: ProofChallenge
+): Future[?!Groth16Proof] {.async.} =
## Generats a proof for a given slot and challenge
##

@ -648,9 +626,8 @@ proc onProve(
failure "Prover not enabled"

proc onExpiryUpdate(
-self: CodexNodeRef,
-rootCid: string,
-expiry: SecondsSince1970): Future[?!void] {.async.} =
+self: CodexNodeRef, rootCid: string, expiry: SecondsSince1970
+): Future[?!void] {.async.} =
without cid =? Cid.init(rootCid):
trace "Unable to parse Cid", cid
let error = newException(CodexError, "Unable to parse Cid")
@ -658,10 +635,7 @@ proc onExpiryUpdate(

return await self.updateExpiry(cid, expiry)

-proc onClear(
-self: CodexNodeRef,
-request: StorageRequest,
-slotIndex: UInt256) =
+proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: UInt256) =
# TODO: remove data from local storage
discard

@ -676,23 +650,23 @@ proc start*(self: CodexNodeRef) {.async.} =
await self.clock.start()

if hostContracts =? self.contracts.host:
-hostContracts.sales.onStore =
-proc(
-request: StorageRequest,
-slot: UInt256,
-onBatch: BatchProc): Future[?!void] = self.onStore(request, slot, onBatch)
+hostContracts.sales.onStore = proc(
+request: StorageRequest, slot: UInt256, onBatch: BatchProc
+): Future[?!void] =
+self.onStore(request, slot, onBatch)

-hostContracts.sales.onExpiryUpdate =
-proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] =
+hostContracts.sales.onExpiryUpdate = proc(
+rootCid: string, expiry: SecondsSince1970
+): Future[?!void] =
self.onExpiryUpdate(rootCid, expiry)

-hostContracts.sales.onClear =
-proc(request: StorageRequest, slotIndex: UInt256) =
+hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: UInt256) =
# TODO: remove data from local storage
self.onClear(request, slotIndex)

-hostContracts.sales.onProve =
-proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] =
+hostContracts.sales.onProve = proc(
+slot: Slot, challenge: ProofChallenge
+): Future[?!Groth16Proof] =
# TODO: generate proof
self.onProve(slot, challenge)

@ -756,7 +730,8 @@ proc new*(
engine: BlockExcEngine,
discovery: Discovery,
prover = Prover.none,
-contracts = Contracts.default): CodexNodeRef =
+contracts = Contracts.default,
+): CodexNodeRef =
## Create new instance of a Codex self, call `start` to run it
##

@ -766,4 +741,5 @@ proc new*(
engine: engine,
prover: prover,
discovery: discovery,
-contracts: contracts)
+contracts: contracts,
+)
@ -3,6 +3,7 @@ import pkg/stint
type
Periodicity* = object
seconds*: UInt256

Period* = UInt256
Timestamp* = UInt256
@ -18,16 +18,13 @@ type
clock: Clock
purchases: Table[PurchaseId, Purchase]
proofProbability*: UInt256

PurchaseTimeout* = Timeout

const DefaultProofProbability = 100.u256

proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
-Purchasing(
-market: market,
-clock: clock,
-proofProbability: DefaultProofProbability,
-)
+Purchasing(market: market, clock: clock, proofProbability: DefaultProofProbability)

proc load*(purchasing: Purchasing) {.async.} =
let market = purchasing.market
@ -43,8 +40,8 @@ proc start*(purchasing: Purchasing) {.async.} =
proc stop*(purchasing: Purchasing) {.async.} =
discard

-proc populate*(purchasing: Purchasing,
-request: StorageRequest
+proc populate*(
+purchasing: Purchasing, request: StorageRequest
): Future[StorageRequest] {.async.} =
result = request
if result.ask.proofProbability == 0.u256:
@ -55,8 +52,8 @@ proc populate*(purchasing: Purchasing,
result.nonce = Nonce(id)
result.client = await purchasing.market.getSigner()

-proc purchase*(purchasing: Purchasing,
-request: StorageRequest
+proc purchase*(
+purchasing: Purchasing, request: StorageRequest
): Future[Purchase] {.async.} =
let request = await purchasing.populate(request)
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
@ -75,4 +72,3 @@ func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] =
for key in purchasing.purchases.keys:
pIds.add(key)
return pIds
@ -25,10 +25,7 @@ export purchaseid
export statemachine

func new*(
-_: type Purchase,
-requestId: RequestId,
-market: Market,
-clock: Clock
+_: type Purchase, requestId: RequestId, market: Market, clock: Clock
): Purchase =
## create a new instance of a Purchase
##
@ -42,10 +39,7 @@ func new*(
return purchase

func new*(
-_: type Purchase,
-request: StorageRequest,
-market: Market,
-clock: Clock
+_: type Purchase, request: StorageRequest, market: Market, clock: Clock
): Purchase =
## Create a new purchase using the given market and clock
let purchase = Purchase.new(request.id, market, clock)
@ -76,4 +70,5 @@ func error*(purchase: Purchase): ?(ref CatchableError) =
func state*(purchase: Purchase): ?string =
proc description(state: State): string =
$state

purchase.query(description)
@ -3,9 +3,12 @@ import ../logutils

type PurchaseId* = distinct array[32, byte]

-logutils.formatIt(LogFormat.textLines, PurchaseId): it.short0xHexLog
-logutils.formatIt(LogFormat.json, PurchaseId): it.to0xHexLog
+logutils.formatIt(LogFormat.textLines, PurchaseId):
+it.short0xHexLog
+logutils.formatIt(LogFormat.json, PurchaseId):
+it.to0xHexLog

proc hash*(x: PurchaseId): Hash {.borrow.}
proc `==`*(x, y: PurchaseId): bool {.borrow.}
-proc toHex*(x: PurchaseId): string = array[32, byte](x).toHex
+proc toHex*(x: PurchaseId): string =
+array[32, byte](x).toHex
@ -14,5 +14,6 @@ type
clock*: Clock
requestId*: RequestId
request*: ?StorageRequest

PurchaseState* = ref object of State
PurchaseError* = object of CodexError
@ -18,6 +18,7 @@ method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.}
codex_purchases_error.inc()
let purchase = Purchase(machine)

-error "Purchasing error", error=state.error.msgDetail, requestId = purchase.requestId
+error "Purchasing error",
+error = state.error.msgDetail, requestId = purchase.requestId

purchase.future.fail(state.error)
@ -2,8 +2,7 @@ import pkg/questionable
import ../statemachine
import ./error

-type
-ErrorHandlingState* = ref object of PurchaseState
+type ErrorHandlingState* = ref object of PurchaseState

method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
some State(PurchaseErrored(error: error))
@ -5,8 +5,7 @@ import ./error

declareCounter(codex_purchases_failed, "codex purchases failed")

-type
-PurchaseFailed* = ref object of PurchaseState
+type PurchaseFailed* = ref object of PurchaseState

method `$`*(state: PurchaseFailed): string =
"failed"
@ -27,6 +27,7 @@ method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.}
let failed = newFuture[void]()
proc callback(_: RequestId) =
failed.complete()

let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)

# Ensure that we're past the request end by waiting an additional second
@ -23,12 +23,14 @@ method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.
let market = purchase.market
let clock = purchase.clock

-info "Request submitted, waiting for slots to be filled", requestId = purchase.requestId
+info "Request submitted, waiting for slots to be filled",
+requestId = purchase.requestId

-proc wait {.async.} =
+proc wait() {.async.} =
let done = newFuture[void]()
proc callback(_: RequestId) =
done.complete()

let subscription = await market.subscribeFulfillment(request.id, callback)
await done
await subscription.unsubscribe()
@ -19,7 +19,6 @@ method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.}
let purchase = Purchase(machine)
if (request =? await purchase.market.getRequest(purchase.requestId)) and
(requestState =? await purchase.market.requestState(purchase.requestId)):

purchase.request = some request

case requestState
@ -9,8 +9,8 @@

import pkg/upraises

-push: {.upraises: [].}
+push:
+{.upraises: [].}

import std/sequtils
import mimetypes
@ -49,10 +49,7 @@ logScope:
declareCounter(codex_api_uploads, "codex API uploads")
declareCounter(codex_api_downloads, "codex API downloads")

-proc validate(
-pattern: string,
-value: string): int
-{.gcsafe, raises: [Defect].} =
+proc validate(pattern: string, value: string): int {.gcsafe, raises: [Defect].} =
0

proc formatManifest(cid: Cid, manifest: Manifest): RestContent =
@ -63,21 +60,19 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} =

proc addManifest(cid: Cid, manifest: Manifest) =
content.add(formatManifest(cid, manifest))

await node.iterateManifests(addManifest)

return %RestContentList.init(content)

proc retrieveCid(
-node: CodexNodeRef,
-cid: Cid,
-local: bool = true,
-resp: HttpResponseRef): Future[RestApiResponse] {.async.} =
+node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef
+): Future[RestApiResponse] {.async.} =
## Download a file from the node in a streaming
## manner
##

-var
-stream: LPStream
+var stream: LPStream

var bytes = 0
try:
@ -101,7 +96,10 @@ proc retrieveCid(
resp.addHeader("Content-Type", "application/octet-stream")

if manifest.filename.isSome:
-resp.setHeader("Content-Disposition", "attachment; filename=\"" & manifest.filename.get() & "\"")
+resp.setHeader(
+"Content-Disposition",
+"attachment; filename=\"" & manifest.filename.get() & "\"",
+)
else:
resp.setHeader("Content-Disposition", "attachment")

@ -130,7 +128,9 @@ proc retrieveCid(
if not stream.isNil:
await stream.close()

-proc buildCorsHeaders(httpMethod: string, allowedOrigin: Option[string]): seq[(string, string)] =
+proc buildCorsHeaders(
+httpMethod: string, allowedOrigin: Option[string]
+): seq[(string, string)] =
var headers: seq[(string, string)] = newSeq[(string, string)]()

if corsOrigin =? allowedOrigin:
@ -160,22 +160,19 @@ proc getFilenameFromContentDisposition(contentDisposition: string): ?string =
proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion

-router.api(
-MethodOptions,
-"/api/codex/v1/data") do (
-resp: HttpResponseRef) -> RestApiResponse:
+router.api(MethodOptions, "/api/codex/v1/data") do(
+resp: HttpResponseRef
+) -> RestApiResponse:

if corsOrigin =? allowedOrigin:
resp.setCorsHeaders("POST", corsOrigin)
-resp.setHeader("Access-Control-Allow-Headers", "content-type, content-disposition")
+resp.setHeader(
+"Access-Control-Allow-Headers", "content-type, content-disposition"
+)

resp.status = Http204
await resp.sendBody("")

-router.rawApi(
-MethodPost,
-"/api/codex/v1/data") do (
-) -> RestApiResponse:
+router.rawApi(MethodPost, "/api/codex/v1/data") do() -> RestApiResponse:
## Upload a file in a streaming manner
##

@ -209,12 +206,16 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute

# Here we could check if the extension matches the filename if needed

-let
-reader = bodyReader.get()
+let reader = bodyReader.get()

try:
without cid =? (
-await node.store(AsyncStreamWrapper.new(reader = AsyncStreamReader(reader)), filename = filename, mimetype = mimetype)), error:
+await node.store(
+AsyncStreamWrapper.new(reader = AsyncStreamReader(reader)),
+filename = filename,
+mimetype = mimetype,
+)
+), error:
error "Error uploading file", exc = error.msg
return RestApiResponse.error(Http500, error.msg)
@ -233,26 +234,19 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
trace "Something went wrong error"
return RestApiResponse.error(Http500)

-router.api(
-MethodGet,
-"/api/codex/v1/data") do () -> RestApiResponse:
+router.api(MethodGet, "/api/codex/v1/data") do() -> RestApiResponse:
let json = await formatManifestBlocks(node)
return RestApiResponse.response($json, contentType = "application/json")

-router.api(
-MethodGet,
-"/api/codex/v1/data/{cid}") do (
-cid: Cid, resp: HttpResponseRef) -> RestApiResponse:
+router.api(MethodGet, "/api/codex/v1/data/{cid}") do(
+cid: Cid, resp: HttpResponseRef
+) -> RestApiResponse:

var headers = buildCorsHeaders("GET", allowedOrigin)

## Download a file from the local node in a streaming
## manner
if cid.isErr:
-return RestApiResponse.error(
-Http400,
-$cid.error(),
-headers = headers)
+return RestApiResponse.error(Http400, $cid.error(), headers = headers)

if corsOrigin =? allowedOrigin:
resp.setCorsHeaders("GET", corsOrigin)
@ -260,25 +254,20 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute

await node.retrieveCid(cid.get(), local = true, resp = resp)

-router.api(
-MethodPost,
-"/api/codex/v1/data/{cid}/network") do (
-cid: Cid, resp: HttpResponseRef) -> RestApiResponse:
+router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do(
+cid: Cid, resp: HttpResponseRef
+) -> RestApiResponse:
## Download a file from the network to the local node
##

var headers = buildCorsHeaders("GET", allowedOrigin)

if cid.isErr:
-return RestApiResponse.error(
-Http400,
-$cid.error(), headers = headers)
+return RestApiResponse.error(Http400, $cid.error(), headers = headers)

without manifest =? (await node.fetchManifest(cid.get())), err:
error "Failed to fetch manifest", err = err.msg
-return RestApiResponse.error(
-Http404,
-err.msg, headers = headers)
+return RestApiResponse.error(Http404, err.msg, headers = headers)

proc fetchDatasetAsync(): Future[void] {.async.} =
try:
@ -293,10 +282,9 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json")

-router.api(
-MethodGet,
-"/api/codex/v1/data/{cid}/network/stream") do (
-cid: Cid, resp: HttpResponseRef) -> RestApiResponse:
+router.api(MethodGet, "/api/codex/v1/data/{cid}/network/stream") do(
+cid: Cid, resp: HttpResponseRef
+) -> RestApiResponse:
## Download a file from the network in a streaming
## manner
##
@ -304,9 +292,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
var headers = buildCorsHeaders("GET", allowedOrigin)

if cid.isErr:
-return RestApiResponse.error(
-Http400,
-$cid.error(), headers = headers)
+return RestApiResponse.error(Http400, $cid.error(), headers = headers)

if corsOrigin =? allowedOrigin:
resp.setCorsHeaders("GET", corsOrigin)
@ -314,74 +300,72 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute

await node.retrieveCid(cid.get(), local = false, resp = resp)

-router.api(
-MethodGet,
-"/api/codex/v1/data/{cid}/network/manifest") do (
-cid: Cid, resp: HttpResponseRef) -> RestApiResponse:
+router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do(
+cid: Cid, resp: HttpResponseRef
+) -> RestApiResponse:
## Download only the manifest.
##

var headers = buildCorsHeaders("GET", allowedOrigin)

if cid.isErr:
-return RestApiResponse.error(
-Http400,
-$cid.error(), headers = headers)
+return RestApiResponse.error(Http400, $cid.error(), headers = headers)

without manifest =? (await node.fetchManifest(cid.get())), err:
error "Failed to fetch manifest", err = err.msg
-return RestApiResponse.error(
-Http404,
-err.msg, headers = headers)
+return RestApiResponse.error(Http404, err.msg, headers = headers)

let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json")

-router.api(
-MethodGet,
-"/api/codex/v1/space") do () -> RestApiResponse:
-let json = % RestRepoStore(
+router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse:
+let json =
+%RestRepoStore(
totalBlocks: repoStore.totalBlocks,
quotaMaxBytes: repoStore.quotaMaxBytes,
quotaUsedBytes: repoStore.quotaUsedBytes,
-quotaReservedBytes: repoStore.quotaReservedBytes
+quotaReservedBytes: repoStore.quotaReservedBytes,
)
return RestApiResponse.response($json, contentType = "application/json")

proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin

-router.api(
-MethodGet,
-"/api/codex/v1/sales/slots") do () -> RestApiResponse:
+router.api(MethodGet, "/api/codex/v1/sales/slots") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)

## Returns active slots for the host
try:
without contracts =? node.contracts.host:
-return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
+return RestApiResponse.error(
+Http503, "Persistence is not enabled", headers = headers
+)

let json = %(await contracts.sales.mySlots())
-return RestApiResponse.response($json, contentType="application/json", headers = headers)
+return RestApiResponse.response(
+$json, contentType = "application/json", headers = headers
+)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)

-router.api(
-MethodGet,
-"/api/codex/v1/sales/slots/{slotId}") do (slotId: SlotId) -> RestApiResponse:
+router.api(MethodGet, "/api/codex/v1/sales/slots/{slotId}") do(
+slotId: SlotId
+) -> RestApiResponse:
## Returns active slot with id {slotId} for the host. Returns 404 if the
## slot is not active for the host.
var headers = buildCorsHeaders("GET", allowedOrigin)

without contracts =? node.contracts.host:
-return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
+return
+RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)

without slotId =? slotId.tryGet.catch, error:
return RestApiResponse.error(Http400, error.msg, headers = headers)

without agent =? await contracts.sales.activeSale(slotId):
-return RestApiResponse.error(Http404, "Provider not filling slot", headers = headers)
+return
+RestApiResponse.error(Http404, "Provider not filling slot", headers = headers)

let restAgent = RestSalesAgent(
state: agent.state() |? "none",
@ -391,30 +375,33 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
reservation: agent.data.reservation,
)

-return RestApiResponse.response(restAgent.toJson, contentType="application/json", headers = headers)
+return RestApiResponse.response(
+restAgent.toJson, contentType = "application/json", headers = headers
+)

-router.api(
-MethodGet,
-"/api/codex/v1/sales/availability") do () -> RestApiResponse:
+router.api(MethodGet, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
## Returns storage that is for sale
var headers = buildCorsHeaders("GET", allowedOrigin)

try:
without contracts =? node.contracts.host:
-return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
+return RestApiResponse.error(
+Http503, "Persistence is not enabled", headers = headers
+)

-without avails =? (await contracts.sales.context.reservations.all(Availability)), err:
+without avails =? (await contracts.sales.context.reservations.all(Availability)),
+err:
return RestApiResponse.error(Http500, err.msg, headers = headers)

let json = %avails
-return RestApiResponse.response($json, contentType="application/json", headers = headers)
+return RestApiResponse.response(
+$json, contentType = "application/json", headers = headers
+)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)

-router.rawApi(
-MethodPost,
-"/api/codex/v1/sales/availability") do () -> RestApiResponse:
+router.rawApi(MethodPost, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
## Add available storage to sell.
## Every time Availability's offer finishes, its capacity is returned to the availability.
##
@ -427,7 +414,9 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =

try:
without contracts =? node.contracts.host:
-return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
+return RestApiResponse.error(
+Http503, "Persistence is not enabled", headers = headers
+)

let body = await request.getBody()
@ -437,41 +426,43 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
let reservations = contracts.sales.context.reservations

if restAv.totalSize == 0:
-return RestApiResponse.error(Http400, "Total size must be larger then zero", headers = headers)
+return RestApiResponse.error(
+Http400, "Total size must be larger then zero", headers = headers
+)

if not reservations.hasAvailable(restAv.totalSize.truncate(uint)):
-return RestApiResponse.error(Http422, "Not enough storage quota", headers = headers)
+return
+RestApiResponse.error(Http422, "Not enough storage quota", headers = headers)

without availability =? (
await reservations.createAvailability(
-restAv.totalSize,
-restAv.duration,
-restAv.minPrice,
-restAv.maxCollateral)
+restAv.totalSize, restAv.duration, restAv.minPrice, restAv.maxCollateral
+)
), error:
return RestApiResponse.error(Http500, error.msg, headers = headers)

-return RestApiResponse.response(availability.toJson,
+return RestApiResponse.response(
+availability.toJson,
Http201,
contentType = "application/json",
-headers = headers)
+headers = headers,
+)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)

-router.api(
-MethodOptions,
-"/api/codex/v1/sales/availability/{id}") do (id: AvailabilityId, resp: HttpResponseRef) -> RestApiResponse:
+router.api(MethodOptions, "/api/codex/v1/sales/availability/{id}") do(
+id: AvailabilityId, resp: HttpResponseRef
+) -> RestApiResponse:

if corsOrigin =? allowedOrigin:
resp.setCorsHeaders("PATCH", corsOrigin)

resp.status = Http204
await resp.sendBody("")

-router.rawApi(
-MethodPatch,
-"/api/codex/v1/sales/availability/{id}") do (id: AvailabilityId) -> RestApiResponse:
+router.rawApi(MethodPatch, "/api/codex/v1/sales/availability/{id}") do(
+id: AvailabilityId
+) -> RestApiResponse:
## Updates Availability.
## The new parameters will be only considered for new requests.
## Existing Requests linked to this Availability will continue as is.
@ -509,7 +500,11 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
if size =? restAv.totalSize:
# we don't allow lowering the totalSize bellow currently utilized size
if size < (availability.totalSize - availability.freeSize):
-return RestApiResponse.error(Http400, "New totalSize must be larger then current totalSize - freeSize, which is currently: " & $(availability.totalSize - availability.freeSize))
+return RestApiResponse.error(
+Http400,
+"New totalSize must be larger then current totalSize - freeSize, which is currently: " &
+$(availability.totalSize - availability.freeSize),
+)

availability.freeSize += size - availability.totalSize
availability.totalSize = size
@ -531,15 +526,17 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500)

-router.rawApi(
-MethodGet,
-"/api/codex/v1/sales/availability/{id}/reservations") do (id: AvailabilityId) -> RestApiResponse:
+router.rawApi(MethodGet, "/api/codex/v1/sales/availability/{id}/reservations") do(
+id: AvailabilityId
+) -> RestApiResponse:
## Gets Availability's reservations.
var headers = buildCorsHeaders("GET", allowedOrigin)

try:
without contracts =? node.contracts.host:
-return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
+return RestApiResponse.error(
+Http503, "Persistence is not enabled", headers = headers
+)

without id =? id.tryGet.catch, error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
@ -551,15 +548,21 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
|
|||||||
|
|
||||||
if error =? (await reservations.get(keyId, Availability)).errorOption:
|
if error =? (await reservations.get(keyId, Availability)).errorOption:
|
||||||
if error of NotExistsError:
|
if error of NotExistsError:
|
||||||
return RestApiResponse.error(Http404, "Availability not found", headers = headers)
|
return
|
||||||
|
RestApiResponse.error(Http404, "Availability not found", headers = headers)
|
||||||
else:
|
else:
|
||||||
return RestApiResponse.error(Http500, error.msg, headers = headers)
|
return RestApiResponse.error(Http500, error.msg, headers = headers)
|
||||||
|
|
||||||
without availabilitysReservations =? (await reservations.all(Reservation, id)), err:
|
without availabilitysReservations =? (await reservations.all(Reservation, id)),
|
||||||
|
err:
|
||||||
return RestApiResponse.error(Http500, err.msg, headers = headers)
|
return RestApiResponse.error(Http500, err.msg, headers = headers)
|
||||||
|
|
||||||
# TODO: Expand this structure with information about the linked StorageRequest not only RequestID
|
# TODO: Expand this structure with information about the linked StorageRequest not only RequestID
|
||||||
return RestApiResponse.response(availabilitysReservations.toJson, contentType="application/json", headers = headers)
|
return RestApiResponse.response(
|
||||||
|
availabilitysReservations.toJson,
|
||||||
|
contentType = "application/json",
|
||||||
|
headers = headers,
|
||||||
|
)
|
||||||
except CatchableError as exc:
|
except CatchableError as exc:
|
||||||
trace "Excepting processing request", exc = exc.msg
|
trace "Excepting processing request", exc = exc.msg
|
||||||
return RestApiResponse.error(Http500, headers = headers)
|
return RestApiResponse.error(Http500, headers = headers)
|
||||||
@ -567,9 +570,9 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
|
|||||||
proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
||||||
let allowedOrigin = router.allowedOrigin
|
let allowedOrigin = router.allowedOrigin
|
||||||
|
|
||||||
router.rawApi(
|
router.rawApi(MethodPost, "/api/codex/v1/storage/request/{cid}") do(
|
||||||
MethodPost,
|
cid: Cid
|
||||||
"/api/codex/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse:
|
) -> RestApiResponse:
|
||||||
var headers = buildCorsHeaders("POST", allowedOrigin)
|
var headers = buildCorsHeaders("POST", allowedOrigin)
|
||||||
|
|
||||||
## Create a request for storage
|
## Create a request for storage
|
||||||
@ -584,7 +587,9 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
|||||||
## colateral - requested collateral from hosts when they fill slot
|
## colateral - requested collateral from hosts when they fill slot
|
||||||
try:
|
try:
|
||||||
without contracts =? node.contracts.client:
|
without contracts =? node.contracts.client:
|
||||||
return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
|
return RestApiResponse.error(
|
||||||
|
Http503, "Persistence is not enabled", headers = headers
|
||||||
|
)
|
||||||
|
|
||||||
without cid =? cid.tryGet.catch, error:
|
without cid =? cid.tryGet.catch, error:
|
||||||
return RestApiResponse.error(Http400, error.msg, headers = headers)
|
return RestApiResponse.error(Http400, error.msg, headers = headers)
|
||||||
@ -598,39 +603,51 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
|||||||
let tolerance = params.tolerance |? 1
|
let tolerance = params.tolerance |? 1
|
||||||
|
|
||||||
if tolerance == 0:
|
if tolerance == 0:
|
||||||
return RestApiResponse.error(Http400, "Tolerance needs to be bigger then zero", headers = headers)
|
return RestApiResponse.error(
|
||||||
|
Http400, "Tolerance needs to be bigger then zero", headers = headers
|
||||||
|
)
|
||||||
|
|
||||||
# prevent underflow
|
# prevent underflow
|
||||||
if tolerance > nodes:
|
if tolerance > nodes:
|
||||||
return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`", headers = headers)
|
return RestApiResponse.error(
|
||||||
|
Http400,
|
||||||
|
"Invalid parameters: `tolerance` cannot be greater than `nodes`",
|
||||||
|
headers = headers,
|
||||||
|
)
|
||||||
|
|
||||||
let ecK = nodes - tolerance
|
let ecK = nodes - tolerance
|
||||||
let ecM = tolerance # for readability
|
let ecM = tolerance # for readability
|
||||||
|
|
||||||
# ensure leopard constrainst of 1 < K ≥ M
|
# ensure leopard constrainst of 1 < K ≥ M
|
||||||
if ecK <= 1 or ecK < ecM:
|
if ecK <= 1 or ecK < ecM:
|
||||||
return RestApiResponse.error(Http400, "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`", headers = headers)
|
return RestApiResponse.error(
|
||||||
|
Http400,
|
||||||
|
"Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`",
|
||||||
|
headers = headers,
|
||||||
|
)
|
||||||
|
|
||||||
without expiry =? params.expiry:
|
without expiry =? params.expiry:
|
||||||
return RestApiResponse.error(Http400, "Expiry required", headers = headers)
|
return RestApiResponse.error(Http400, "Expiry required", headers = headers)
|
||||||
|
|
||||||
if expiry <= 0 or expiry >= params.duration:
|
if expiry <= 0 or expiry >= params.duration:
|
||||||
return RestApiResponse.error(Http400, "Expiry needs value bigger then zero and smaller then the request's duration", headers = headers)
|
return RestApiResponse.error(
|
||||||
|
Http400,
|
||||||
without purchaseId =? await node.requestStorage(
|
"Expiry needs value bigger then zero and smaller then the request's duration",
|
||||||
cid,
|
headers = headers,
|
||||||
params.duration,
|
)
|
||||||
params.proofProbability,
|
|
||||||
nodes,
|
|
||||||
tolerance,
|
|
||||||
params.reward,
|
|
||||||
params.collateral,
|
|
||||||
expiry), error:
|
|
||||||
|
|
||||||
|
without purchaseId =?
|
||||||
|
await node.requestStorage(
|
||||||
|
cid, params.duration, params.proofProbability, nodes, tolerance,
|
||||||
|
params.reward, params.collateral, expiry,
|
||||||
|
), error:
|
||||||
if error of InsufficientBlocksError:
|
if error of InsufficientBlocksError:
|
||||||
return RestApiResponse.error(Http400,
|
return RestApiResponse.error(
|
||||||
|
Http400,
|
||||||
"Dataset too small for erasure parameters, need at least " &
|
"Dataset too small for erasure parameters, need at least " &
|
||||||
$(ref InsufficientBlocksError)(error).minSize.int & " bytes", headers = headers)
|
$(ref InsufficientBlocksError)(error).minSize.int & " bytes",
|
||||||
|
headers = headers,
|
||||||
|
)
|
||||||
|
|
||||||
return RestApiResponse.error(Http500, error.msg, headers = headers)
|
return RestApiResponse.error(Http500, error.msg, headers = headers)
|
||||||
|
|
||||||
@ -639,15 +656,16 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
|||||||
trace "Excepting processing request", exc = exc.msg
|
trace "Excepting processing request", exc = exc.msg
|
||||||
return RestApiResponse.error(Http500, headers = headers)
|
return RestApiResponse.error(Http500, headers = headers)
|
||||||
|
|
||||||
router.api(
|
router.api(MethodGet, "/api/codex/v1/storage/purchases/{id}") do(
|
||||||
MethodGet,
|
id: PurchaseId
|
||||||
"/api/codex/v1/storage/purchases/{id}") do (
|
) -> RestApiResponse:
|
||||||
id: PurchaseId) -> RestApiResponse:
|
|
||||||
var headers = buildCorsHeaders("GET", allowedOrigin)
|
var headers = buildCorsHeaders("GET", allowedOrigin)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
without contracts =? node.contracts.client:
|
without contracts =? node.contracts.client:
|
||||||
return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
|
return RestApiResponse.error(
|
||||||
|
Http503, "Persistence is not enabled", headers = headers
|
||||||
|
)
|
||||||
|
|
||||||
without id =? id.tryGet.catch, error:
|
without id =? id.tryGet.catch, error:
|
||||||
return RestApiResponse.error(Http400, error.msg, headers = headers)
|
return RestApiResponse.error(Http400, error.msg, headers = headers)
|
||||||
@ -655,29 +673,34 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
|||||||
without purchase =? contracts.purchasing.getPurchase(id):
|
without purchase =? contracts.purchasing.getPurchase(id):
|
||||||
return RestApiResponse.error(Http404, headers = headers)
|
return RestApiResponse.error(Http404, headers = headers)
|
||||||
|
|
||||||
let json = % RestPurchase(
|
let json =
|
||||||
|
%RestPurchase(
|
||||||
state: purchase.state |? "none",
|
state: purchase.state |? "none",
|
||||||
error: purchase.error .? msg,
|
error: purchase.error .? msg,
|
||||||
request: purchase.request,
|
request: purchase.request,
|
||||||
requestId: purchase.requestId
|
requestId: purchase.requestId,
|
||||||
)
|
)
|
||||||
|
|
||||||
return RestApiResponse.response($json, contentType="application/json", headers = headers)
|
return RestApiResponse.response(
|
||||||
|
$json, contentType = "application/json", headers = headers
|
||||||
|
)
|
||||||
except CatchableError as exc:
|
except CatchableError as exc:
|
||||||
trace "Excepting processing request", exc = exc.msg
|
trace "Excepting processing request", exc = exc.msg
|
||||||
return RestApiResponse.error(Http500, headers = headers)
|
return RestApiResponse.error(Http500, headers = headers)
|
||||||
|
|
||||||
router.api(
|
router.api(MethodGet, "/api/codex/v1/storage/purchases") do() -> RestApiResponse:
|
||||||
MethodGet,
|
|
||||||
"/api/codex/v1/storage/purchases") do () -> RestApiResponse:
|
|
||||||
var headers = buildCorsHeaders("GET", allowedOrigin)
|
var headers = buildCorsHeaders("GET", allowedOrigin)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
without contracts =? node.contracts.client:
|
without contracts =? node.contracts.client:
|
||||||
return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
|
return RestApiResponse.error(
|
||||||
|
Http503, "Persistence is not enabled", headers = headers
|
||||||
|
)
|
||||||
|
|
||||||
let purchaseIds = contracts.purchasing.getPurchaseIds()
|
let purchaseIds = contracts.purchasing.getPurchaseIds()
|
||||||
return RestApiResponse.response($ %purchaseIds, contentType="application/json", headers = headers)
|
return RestApiResponse.response(
|
||||||
|
$ %purchaseIds, contentType = "application/json", headers = headers
|
||||||
|
)
|
||||||
except CatchableError as exc:
|
except CatchableError as exc:
|
||||||
trace "Excepting processing request", exc = exc.msg
|
trace "Excepting processing request", exc = exc.msg
|
||||||
return RestApiResponse.error(Http500, headers = headers)
|
return RestApiResponse.error(Http500, headers = headers)
|
||||||
@ -687,28 +710,30 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
|
|||||||
|
|
||||||
## various node management api's
|
## various node management api's
|
||||||
##
|
##
|
||||||
router.api(
|
router.api(MethodGet, "/api/codex/v1/spr") do() -> RestApiResponse:
|
||||||
MethodGet,
|
|
||||||
"/api/codex/v1/spr") do () -> RestApiResponse:
|
|
||||||
## Returns node SPR in requested format, json or text.
|
## Returns node SPR in requested format, json or text.
|
||||||
##
|
##
|
||||||
var headers = buildCorsHeaders("GET", allowedOrigin)
|
var headers = buildCorsHeaders("GET", allowedOrigin)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
without spr =? node.discovery.dhtRecord:
|
without spr =? node.discovery.dhtRecord:
|
||||||
return RestApiResponse.response("", status=Http503, contentType="application/json", headers = headers)
|
return RestApiResponse.response(
|
||||||
|
"", status = Http503, contentType = "application/json", headers = headers
|
||||||
|
)
|
||||||
|
|
||||||
if $preferredContentType().get() == "text/plain":
|
if $preferredContentType().get() == "text/plain":
|
||||||
return RestApiResponse.response(spr.toURI, contentType="text/plain", headers = headers)
|
return RestApiResponse.response(
|
||||||
|
spr.toURI, contentType = "text/plain", headers = headers
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
return RestApiResponse.response($ %* {"spr": spr.toURI}, contentType="application/json", headers = headers)
|
return RestApiResponse.response(
|
||||||
|
$ %*{"spr": spr.toURI}, contentType = "application/json", headers = headers
|
||||||
|
)
|
||||||
except CatchableError as exc:
|
except CatchableError as exc:
|
||||||
trace "Excepting processing request", exc = exc.msg
|
trace "Excepting processing request", exc = exc.msg
|
||||||
return RestApiResponse.error(Http500, headers = headers)
|
return RestApiResponse.error(Http500, headers = headers)
|
||||||
|
|
||||||
router.api(
|
router.api(MethodGet, "/api/codex/v1/peerid") do() -> RestApiResponse:
|
||||||
MethodGet,
|
|
||||||
"/api/codex/v1/peerid") do () -> RestApiResponse:
|
|
||||||
## Returns node's peerId in requested format, json or text.
|
## Returns node's peerId in requested format, json or text.
|
||||||
##
|
##
|
||||||
var headers = buildCorsHeaders("GET", allowedOrigin)
|
var headers = buildCorsHeaders("GET", allowedOrigin)
|
||||||
@ -717,18 +742,19 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
|
|||||||
let id = $node.switch.peerInfo.peerId
|
let id = $node.switch.peerInfo.peerId
|
||||||
|
|
||||||
if $preferredContentType().get() == "text/plain":
|
if $preferredContentType().get() == "text/plain":
|
||||||
return RestApiResponse.response(id, contentType="text/plain", headers = headers)
|
return
|
||||||
|
RestApiResponse.response(id, contentType = "text/plain", headers = headers)
|
||||||
else:
|
else:
|
||||||
return RestApiResponse.response($ %* {"id": id}, contentType="application/json", headers = headers)
|
return RestApiResponse.response(
|
||||||
|
$ %*{"id": id}, contentType = "application/json", headers = headers
|
||||||
|
)
|
||||||
except CatchableError as exc:
|
except CatchableError as exc:
|
||||||
trace "Excepting processing request", exc = exc.msg
|
trace "Excepting processing request", exc = exc.msg
|
||||||
return RestApiResponse.error(Http500, headers = headers)
|
return RestApiResponse.error(Http500, headers = headers)
|
||||||
|
|
||||||
router.api(
|
router.api(MethodGet, "/api/codex/v1/connect/{peerId}") do(
|
||||||
MethodGet,
|
peerId: PeerId, addrs: seq[MultiAddress]
|
||||||
"/api/codex/v1/connect/{peerId}") do (
|
) -> RestApiResponse:
|
||||||
peerId: PeerId,
|
|
||||||
addrs: seq[MultiAddress]) -> RestApiResponse:
|
|
||||||
## Connect to a peer
|
## Connect to a peer
|
||||||
##
|
##
|
||||||
## If `addrs` param is supplied, it will be used to
|
## If `addrs` param is supplied, it will be used to
|
||||||
@ -741,34 +767,30 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
|
|||||||
var headers = buildCorsHeaders("GET", allowedOrigin)
|
var headers = buildCorsHeaders("GET", allowedOrigin)
|
||||||
|
|
||||||
if peerId.isErr:
|
if peerId.isErr:
|
||||||
return RestApiResponse.error(
|
return RestApiResponse.error(Http400, $peerId.error(), headers = headers)
|
||||||
Http400,
|
|
||||||
$peerId.error(),
|
|
||||||
headers = headers)
|
|
||||||
|
|
||||||
let addresses = if addrs.isOk and addrs.get().len > 0:
|
let addresses =
|
||||||
|
if addrs.isOk and addrs.get().len > 0:
|
||||||
addrs.get()
|
addrs.get()
|
||||||
else:
|
else:
|
||||||
without peerRecord =? (await node.findPeer(peerId.get())):
|
without peerRecord =? (await node.findPeer(peerId.get())):
|
||||||
return RestApiResponse.error(
|
return
|
||||||
Http400,
|
RestApiResponse.error(Http400, "Unable to find Peer!", headers = headers)
|
||||||
"Unable to find Peer!",
|
|
||||||
headers = headers)
|
|
||||||
peerRecord.addresses.mapIt(it.address)
|
peerRecord.addresses.mapIt(it.address)
|
||||||
try:
|
try:
|
||||||
await node.connect(peerId.get(), addresses)
|
await node.connect(peerId.get(), addresses)
|
||||||
return RestApiResponse.response("Successfully connected to peer", headers = headers)
|
return
|
||||||
|
RestApiResponse.response("Successfully connected to peer", headers = headers)
|
||||||
except DialFailedError:
|
except DialFailedError:
|
||||||
return RestApiResponse.error(Http400, "Unable to dial peer", headers = headers)
|
return RestApiResponse.error(Http400, "Unable to dial peer", headers = headers)
|
||||||
except CatchableError:
|
except CatchableError:
|
||||||
return RestApiResponse.error(Http500, "Unknown error dialling peer", headers = headers)
|
return
|
||||||
|
RestApiResponse.error(Http500, "Unknown error dialling peer", headers = headers)
|
||||||
|
|
||||||
proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
|
proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
|
||||||
let allowedOrigin = router.allowedOrigin
|
let allowedOrigin = router.allowedOrigin
|
||||||
|
|
||||||
router.api(
|
router.api(MethodGet, "/api/codex/v1/debug/info") do() -> RestApiResponse:
|
||||||
MethodGet,
|
|
||||||
"/api/codex/v1/debug/info") do () -> RestApiResponse:
|
|
||||||
## Print rudimentary node information
|
## Print rudimentary node information
|
||||||
##
|
##
|
||||||
var headers = buildCorsHeaders("GET", allowedOrigin)
|
var headers = buildCorsHeaders("GET", allowedOrigin)
|
||||||
@ -776,8 +798,8 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
|
|||||||
try:
|
try:
|
||||||
let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
|
let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
|
||||||
|
|
||||||
let
|
let json =
|
||||||
json = %*{
|
%*{
|
||||||
"id": $node.switch.peerInfo.peerId,
|
"id": $node.switch.peerInfo.peerId,
|
||||||
"addrs": node.switch.peerInfo.addrs.mapIt($it),
|
"addrs": node.switch.peerInfo.addrs.mapIt($it),
|
||||||
"repo": $conf.dataDir,
|
"repo": $conf.dataDir,
|
||||||
@ -788,22 +810,20 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
|
|||||||
"",
|
"",
|
||||||
"announceAddresses": node.discovery.announceAddrs,
|
"announceAddresses": node.discovery.announceAddrs,
|
||||||
"table": table,
|
"table": table,
|
||||||
"codex": {
|
"codex": {"version": $codexVersion, "revision": $codexRevision},
|
||||||
"version": $codexVersion,
|
|
||||||
"revision": $codexRevision
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# return pretty json for human readability
|
# return pretty json for human readability
|
||||||
return RestApiResponse.response(json.pretty(), contentType="application/json", headers = headers)
|
return RestApiResponse.response(
|
||||||
|
json.pretty(), contentType = "application/json", headers = headers
|
||||||
|
)
|
||||||
except CatchableError as exc:
|
except CatchableError as exc:
|
||||||
trace "Excepting processing request", exc = exc.msg
|
trace "Excepting processing request", exc = exc.msg
|
||||||
return RestApiResponse.error(Http500, headers = headers)
|
return RestApiResponse.error(Http500, headers = headers)
|
||||||
|
|
||||||
router.api(
|
router.api(MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do(
|
||||||
MethodPost,
|
level: Option[string]
|
||||||
"/api/codex/v1/debug/chronicles/loglevel") do (
|
) -> RestApiResponse:
|
||||||
level: Option[string]) -> RestApiResponse:
|
|
||||||
## Set log level at run time
|
## Set log level at run time
|
||||||
##
|
##
|
||||||
## e.g. `chronicles/loglevel?level=DEBUG`
|
## e.g. `chronicles/loglevel?level=DEBUG`
|
||||||
@ -828,19 +848,17 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
|
|||||||
return RestApiResponse.error(Http500, headers = headers)
|
return RestApiResponse.error(Http500, headers = headers)
|
||||||
|
|
||||||
when codex_enable_api_debug_peers:
|
when codex_enable_api_debug_peers:
|
||||||
router.api(
|
router.api(MethodGet, "/api/codex/v1/debug/peer/{peerId}") do(
|
||||||
MethodGet,
|
peerId: PeerId
|
||||||
"/api/codex/v1/debug/peer/{peerId}") do (peerId: PeerId) -> RestApiResponse:
|
) -> RestApiResponse:
|
||||||
var headers = buildCorsHeaders("GET", allowedOrigin)
|
var headers = buildCorsHeaders("GET", allowedOrigin)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
trace "debug/peer start"
|
trace "debug/peer start"
|
||||||
without peerRecord =? (await node.findPeer(peerId.get())):
|
without peerRecord =? (await node.findPeer(peerId.get())):
|
||||||
trace "debug/peer peer not found!"
|
trace "debug/peer peer not found!"
|
||||||
return RestApiResponse.error(
|
return
|
||||||
Http400,
|
RestApiResponse.error(Http400, "Unable to find Peer!", headers = headers)
|
||||||
"Unable to find Peer!",
|
|
||||||
headers = headers)
|
|
||||||
|
|
||||||
let json = %RestPeerRecord.init(peerRecord)
|
let json = %RestPeerRecord.init(peerRecord)
|
||||||
trace "debug/peer returning peer record"
|
trace "debug/peer returning peer record"
|
||||||
@ -853,8 +871,8 @@ proc initRestApi*(
|
|||||||
node: CodexNodeRef,
|
node: CodexNodeRef,
|
||||||
conf: CodexConf,
|
conf: CodexConf,
|
||||||
repoStore: RepoStore,
|
repoStore: RepoStore,
|
||||||
corsAllowedOrigin: ?string): RestRouter =
|
corsAllowedOrigin: ?string,
|
||||||
|
): RestRouter =
|
||||||
var router = RestRouter.init(validate, corsAllowedOrigin)
|
var router = RestRouter.init(validate, corsAllowedOrigin)
|
||||||
|
|
||||||
initDataApi(node, repoStore, router)
|
initDataApi(node, repoStore, router)
|
||||||
|
@ -25,9 +25,7 @@ proc encodeString*(cid: type Cid): Result[string, cstring] =
|
|||||||
ok($cid)
|
ok($cid)
|
||||||
|
|
||||||
proc decodeString*(T: type Cid, value: string): Result[Cid, cstring] =
|
proc decodeString*(T: type Cid, value: string): Result[Cid, cstring] =
|
||||||
Cid
|
Cid.init(value).mapErr do(e: CidError) -> cstring:
|
||||||
.init(value)
|
|
||||||
.mapErr do(e: CidError) -> cstring:
|
|
||||||
case e
|
case e
|
||||||
of CidError.Incorrect: "Incorrect Cid".cstring
|
of CidError.Incorrect: "Incorrect Cid".cstring
|
||||||
of CidError.Unsupported: "Unsupported Cid".cstring
|
of CidError.Unsupported: "Unsupported Cid".cstring
|
||||||
@ -44,9 +42,8 @@ proc encodeString*(address: MultiAddress): Result[string, cstring] =
|
|||||||
ok($address)
|
ok($address)
|
||||||
|
|
||||||
proc decodeString*(T: type MultiAddress, value: string): Result[MultiAddress, cstring] =
|
proc decodeString*(T: type MultiAddress, value: string): Result[MultiAddress, cstring] =
|
||||||
MultiAddress
|
MultiAddress.init(value).mapErr do(e: string) -> cstring:
|
||||||
.init(value)
|
cstring(e)
|
||||||
.mapErr do(e: string) -> cstring: cstring(e)
|
|
||||||
|
|
||||||
proc decodeString*(T: type SomeUnsignedInt, value: string): Result[T, cstring] =
|
proc decodeString*(T: type SomeUnsignedInt, value: string): Result[T, cstring] =
|
||||||
Base10.decode(T, value)
|
Base10.decode(T, value)
|
||||||
@ -77,19 +74,20 @@ proc decodeString*(_: type UInt256, value: string): Result[UInt256, cstring] =
|
|||||||
except ValueError as e:
|
except ValueError as e:
|
||||||
err e.msg.cstring
|
err e.msg.cstring
|
||||||
|
|
||||||
proc decodeString*(_: type array[32, byte],
|
proc decodeString*(
|
||||||
value: string): Result[array[32, byte], cstring] =
|
_: type array[32, byte], value: string
|
||||||
|
): Result[array[32, byte], cstring] =
|
||||||
try:
|
try:
|
||||||
ok array[32, byte].fromHex(value)
|
ok array[32, byte].fromHex(value)
|
||||||
except ValueError as e:
|
except ValueError as e:
|
||||||
err e.msg.cstring
|
err e.msg.cstring
|
||||||
|
|
||||||
proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](_: type T,
|
proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](
|
||||||
value: string): Result[T, cstring] =
|
_: type T, value: string
|
||||||
|
): Result[T, cstring] =
|
||||||
array[32, byte].decodeString(value).map(id => T(id))
|
array[32, byte].decodeString(value).map(id => T(id))
|
||||||
|
|
||||||
proc decodeString*(t: typedesc[string],
|
proc decodeString*(t: typedesc[string], value: string): Result[string, cstring] =
|
||||||
value: string): Result[string, cstring] =
|
|
||||||
ok(value)
|
ok(value)
|
||||||
|
|
||||||
proc encodeString*(value: string): RestResult[string] =
|
proc encodeString*(value: string): RestResult[string] =
|
||||||
|
@ -74,15 +74,10 @@ type
|
|||||||
quotaReservedBytes* {.serialize.}: NBytes
|
quotaReservedBytes* {.serialize.}: NBytes
|
||||||
|
|
||||||
proc init*(_: type RestContentList, content: seq[RestContent]): RestContentList =
|
proc init*(_: type RestContentList, content: seq[RestContent]): RestContentList =
|
||||||
RestContentList(
|
RestContentList(content: content)
|
||||||
content: content
|
|
||||||
)
|
|
||||||
|
|
||||||
proc init*(_: type RestContent, cid: Cid, manifest: Manifest): RestContent =
|
proc init*(_: type RestContent, cid: Cid, manifest: Manifest): RestContent =
|
||||||
RestContent(
|
RestContent(cid: cid, manifest: manifest)
|
||||||
cid: cid,
|
|
||||||
manifest: manifest
|
|
||||||
)
|
|
||||||
|
|
||||||
proc init*(_: type RestNode, node: dn.Node): RestNode =
|
proc init*(_: type RestNode, node: dn.Node): RestNode =
|
||||||
RestNode(
|
RestNode(
|
||||||
@ -90,7 +85,7 @@ proc init*(_: type RestNode, node: dn.Node): RestNode =
|
|||||||
peerId: node.record.data.peerId,
|
peerId: node.record.data.peerId,
|
||||||
record: node.record,
|
record: node.record,
|
||||||
address: node.address,
|
address: node.address,
|
||||||
seen: node.seen > 0.5
|
seen: node.seen > 0.5,
|
||||||
)
|
)
|
||||||
|
|
||||||
proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRoutingTable =
|
proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRoutingTable =
|
||||||
@ -99,28 +94,23 @@ proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRouting
|
|||||||
for node in bucket.nodes:
|
for node in bucket.nodes:
|
||||||
nodes.add(RestNode.init(node))
|
nodes.add(RestNode.init(node))
|
||||||
|
|
||||||
RestRoutingTable(
|
RestRoutingTable(localNode: RestNode.init(routingTable.localNode), nodes: nodes)
|
||||||
localNode: RestNode.init(routingTable.localNode),
|
|
||||||
nodes: nodes
|
|
||||||
)
|
|
||||||
|
|
||||||
proc init*(_: type RestPeerRecord, peerRecord: PeerRecord): RestPeerRecord =
|
proc init*(_: type RestPeerRecord, peerRecord: PeerRecord): RestPeerRecord =
|
||||||
RestPeerRecord(
|
RestPeerRecord(
|
||||||
peerId: peerRecord.peerId,
|
peerId: peerRecord.peerId, seqNo: peerRecord.seqNo, addresses: peerRecord.addresses
|
||||||
seqNo: peerRecord.seqNo,
|
|
||||||
addresses: peerRecord.addresses
|
|
||||||
)
|
)
|
||||||
|
|
||||||
proc init*(_: type RestNodeId, id: NodeId): RestNodeId =
|
proc init*(_: type RestNodeId, id: NodeId): RestNodeId =
|
||||||
RestNodeId(
|
RestNodeId(id: id)
|
||||||
id: id
|
|
||||||
)
|
|
||||||
|
|
||||||
proc `%`*(obj: StorageRequest | Slot): JsonNode =
|
proc `%`*(obj: StorageRequest | Slot): JsonNode =
|
||||||
let jsonObj = newJObject()
|
let jsonObj = newJObject()
|
||||||
for k, v in obj.fieldPairs: jsonObj[k] = %v
|
for k, v in obj.fieldPairs:
|
||||||
|
jsonObj[k] = %v
|
||||||
jsonObj["id"] = %(obj.id)
|
jsonObj["id"] = %(obj.id)
|
||||||
|
|
||||||
return jsonObj
|
return jsonObj
|
||||||
|
|
||||||
proc `%`*(obj: RestNodeId): JsonNode = % $obj.id
|
proc `%`*(obj: RestNodeId): JsonNode =
|
||||||
|
% $obj.id
|
||||||
|
@ -9,7 +9,8 @@
|
|||||||
|
|
||||||
import pkg/upraises
|
import pkg/upraises
|
||||||
|
|
||||||
push: {.upraises: [].}
|
push:
|
||||||
|
{.upraises: [].}
|
||||||
|
|
||||||
import pkg/libp2p/crypto/crypto
|
import pkg/libp2p/crypto/crypto
|
||||||
import pkg/bearssl/rand
|
import pkg/bearssl/rand
|
||||||
@ -30,7 +31,8 @@ proc instance*(t: type Rng): Rng =
|
|||||||
const randMax = 18_446_744_073_709_551_615'u64
|
const randMax = 18_446_744_073_709_551_615'u64
|
||||||
|
|
||||||
proc rand*(rng: Rng, max: Natural): int =
|
proc rand*(rng: Rng, max: Natural): int =
|
||||||
if max == 0: return 0
|
if max == 0:
|
||||||
|
return 0
|
||||||
|
|
||||||
while true:
|
while true:
|
||||||
let x = rng[].generate(uint64)
|
let x = rng[].generate(uint64)
|
||||||
@ -41,8 +43,8 @@ proc sample*[T](rng: Rng, a: openArray[T]): T =
|
|||||||
result = a[rng.rand(a.high)]
|
result = a[rng.rand(a.high)]
|
||||||
|
|
||||||
proc sample*[T](
|
proc sample*[T](
|
||||||
rng: Rng, sample, exclude: openArray[T]): T
|
rng: Rng, sample, exclude: openArray[T]
|
||||||
{.raises: [Defect, RngSampleError].} =
|
): T {.raises: [Defect, RngSampleError].} =
|
||||||
if sample == exclude:
|
if sample == exclude:
|
||||||
raise newException(RngSampleError, "Sample and exclude arrays are the same!")
|
raise newException(RngSampleError, "Sample and exclude arrays are the same!")
|
||||||
|
|
||||||
|
130
codex/sales.nim
130
codex/sales.nim
@ -45,8 +45,7 @@ export salescontext
|
|||||||
logScope:
|
logScope:
|
||||||
topics = "sales marketplace"
|
topics = "sales marketplace"
|
||||||
|
|
||||||
type
|
type Sales* = ref object
|
||||||
Sales* = ref object
|
|
||||||
context*: SalesContext
|
context*: SalesContext
|
||||||
agents*: seq[SalesAgent]
|
agents*: seq[SalesAgent]
|
||||||
running: bool
|
running: bool
|
||||||
@ -68,28 +67,31 @@ proc `onProve=`*(sales: Sales, callback: OnProve) =
|
|||||||
proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) =
|
proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) =
|
||||||
sales.context.onExpiryUpdate = some callback
|
sales.context.onExpiryUpdate = some callback
|
||||||
|
|
||||||
proc onStore*(sales: Sales): ?OnStore = sales.context.onStore
|
proc onStore*(sales: Sales): ?OnStore =
|
||||||
|
sales.context.onStore
|
||||||
|
|
||||||
proc onClear*(sales: Sales): ?OnClear = sales.context.onClear
|
proc onClear*(sales: Sales): ?OnClear =
|
||||||
|
sales.context.onClear
|
||||||
|
|
||||||
proc onSale*(sales: Sales): ?OnSale = sales.context.onSale
|
proc onSale*(sales: Sales): ?OnSale =
|
||||||
|
sales.context.onSale
|
||||||
|
|
||||||
proc onProve*(sales: Sales): ?OnProve = sales.context.onProve
|
proc onProve*(sales: Sales): ?OnProve =
|
||||||
|
sales.context.onProve
|
||||||
|
|
||||||
proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate = sales.context.onExpiryUpdate
|
proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate =
|
||||||
|
sales.context.onExpiryUpdate
|
||||||
|
|
||||||
proc new*(_: type Sales,
|
proc new*(_: type Sales, market: Market, clock: Clock, repo: RepoStore): Sales =
|
||||||
market: Market,
|
|
||||||
clock: Clock,
|
|
||||||
repo: RepoStore): Sales =
|
|
||||||
Sales.new(market, clock, repo, 0)
|
Sales.new(market, clock, repo, 0)
|
||||||
|
|
||||||
proc new*(_: type Sales,
|
proc new*(
|
||||||
|
_: type Sales,
|
||||||
market: Market,
|
market: Market,
|
||||||
clock: Clock,
|
clock: Clock,
|
||||||
repo: RepoStore,
|
repo: RepoStore,
|
||||||
simulateProofFailures: int): Sales =
|
simulateProofFailures: int,
|
||||||
|
): Sales =
|
||||||
let reservations = Reservations.new(repo)
|
let reservations = Reservations.new(repo)
|
||||||
Sales(
|
Sales(
|
||||||
context: SalesContext(
|
context: SalesContext(
|
||||||
@ -97,10 +99,10 @@ proc new*(_: type Sales,
|
|||||||
clock: clock,
|
clock: clock,
|
||||||
reservations: reservations,
|
reservations: reservations,
|
||||||
slotQueue: SlotQueue.new(),
|
slotQueue: SlotQueue.new(),
|
||||||
simulateProofFailures: simulateProofFailures
|
simulateProofFailures: simulateProofFailures,
|
||||||
),
|
),
|
||||||
trackedFutures: TrackedFutures.new(),
|
trackedFutures: TrackedFutures.new(),
|
||||||
subscriptions: @[]
|
subscriptions: @[],
|
||||||
)
|
)
|
||||||
|
|
||||||
proc remove(sales: Sales, agent: SalesAgent) {.async.} =
|
proc remove(sales: Sales, agent: SalesAgent) {.async.} =
|
||||||
@ -108,12 +110,13 @@ proc remove(sales: Sales, agent: SalesAgent) {.async.} =
|
|||||||
if sales.running:
|
if sales.running:
|
||||||
sales.agents.keepItIf(it != agent)
|
sales.agents.keepItIf(it != agent)
|
||||||
|
|
||||||
proc cleanUp(sales: Sales,
|
proc cleanUp(
|
||||||
|
sales: Sales,
|
||||||
agent: SalesAgent,
|
agent: SalesAgent,
|
||||||
returnBytes: bool,
|
returnBytes: bool,
|
||||||
reprocessSlot: bool,
|
reprocessSlot: bool,
|
||||||
processing: Future[void]) {.async.} =
|
processing: Future[void],
|
||||||
|
) {.async.} =
|
||||||
let data = agent.data
|
let data = agent.data
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
@ -129,36 +132,37 @@ proc cleanUp(sales: Sales,
|
|||||||
# that the cleanUp was called before the sales process really started, so
|
# that the cleanUp was called before the sales process really started, so
|
||||||
# there are not really any bytes to be returned
|
# there are not really any bytes to be returned
|
||||||
if returnBytes and request =? data.request and reservation =? data.reservation:
|
if returnBytes and request =? data.request and reservation =? data.reservation:
|
||||||
if returnErr =? (await sales.context.reservations.returnBytesToAvailability(
|
if returnErr =? (
|
||||||
reservation.availabilityId,
|
await sales.context.reservations.returnBytesToAvailability(
|
||||||
reservation.id,
|
reservation.availabilityId, reservation.id, request.ask.slotSize
|
||||||
request.ask.slotSize
|
)
|
||||||
)).errorOption:
|
).errorOption:
|
||||||
error "failure returning bytes",
|
error "failure returning bytes",
|
||||||
error = returnErr.msg,
|
error = returnErr.msg, bytes = request.ask.slotSize
|
||||||
bytes = request.ask.slotSize
|
|
||||||
|
|
||||||
# delete reservation and return reservation bytes back to the availability
|
# delete reservation and return reservation bytes back to the availability
|
||||||
if reservation =? data.reservation and
|
if reservation =? data.reservation and
|
||||||
deleteErr =? (await sales.context.reservations.deleteReservation(
|
deleteErr =? (
|
||||||
reservation.id,
|
await sales.context.reservations.deleteReservation(
|
||||||
reservation.availabilityId
|
reservation.id, reservation.availabilityId
|
||||||
)).errorOption:
|
)
|
||||||
|
).errorOption:
|
||||||
error "failure deleting reservation", error = deleteErr.msg
|
error "failure deleting reservation", error = deleteErr.msg
|
||||||
|
|
||||||
# Re-add items back into the queue to prevent small availabilities from
|
# Re-add items back into the queue to prevent small availabilities from
|
||||||
# draining the queue. Seen items will be ordered last.
|
# draining the queue. Seen items will be ordered last.
|
||||||
if reprocessSlot and request =? data.request:
|
if reprocessSlot and request =? data.request:
|
||||||
let queue = sales.context.slotQueue
|
let queue = sales.context.slotQueue
|
||||||
var seenItem = SlotQueueItem.init(data.requestId,
|
var seenItem = SlotQueueItem.init(
|
||||||
|
data.requestId,
|
||||||
data.slotIndex.truncate(uint16),
|
data.slotIndex.truncate(uint16),
|
||||||
data.ask,
|
data.ask,
|
||||||
request.expiry,
|
request.expiry,
|
||||||
seen = true)
|
seen = true,
|
||||||
|
)
|
||||||
trace "pushing ignored item to queue, marked as seen"
|
trace "pushing ignored item to queue, marked as seen"
|
||||||
if err =? queue.push(seenItem).errorOption:
|
if err =? queue.push(seenItem).errorOption:
|
||||||
error "failed to readd slot to queue",
|
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
|
||||||
errorType = $(type err), error = err.msg
|
|
||||||
|
|
||||||
await sales.remove(agent)
|
await sales.remove(agent)
|
||||||
|
|
||||||
@ -167,11 +171,8 @@ proc cleanUp(sales: Sales,
|
|||||||
processing.complete()
|
processing.complete()
|
||||||
|
|
||||||
proc filled(
|
proc filled(
|
||||||
sales: Sales,
|
sales: Sales, request: StorageRequest, slotIndex: UInt256, processing: Future[void]
|
||||||
request: StorageRequest,
|
) =
|
||||||
slotIndex: UInt256,
|
|
||||||
processing: Future[void]) =
|
|
||||||
|
|
||||||
if onSale =? sales.context.onSale:
|
if onSale =? sales.context.onSale:
|
||||||
onSale(request, slotIndex)
|
onSale(request, slotIndex)
|
||||||
|
|
||||||
@ -180,14 +181,10 @@ proc filled(
|
|||||||
processing.complete()
|
processing.complete()
|
||||||
|
|
||||||
proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
|
proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
|
||||||
debug "Processing slot from queue", requestId = item.requestId,
|
debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex
|
||||||
slot = item.slotIndex
|
|
||||||
|
|
||||||
let agent = newSalesAgent(
|
let agent = newSalesAgent(
|
||||||
sales.context,
|
sales.context, item.requestId, item.slotIndex.u256, none StorageRequest
|
||||||
item.requestId,
|
|
||||||
item.slotIndex.u256,
|
|
||||||
none StorageRequest
|
|
||||||
)
|
)
|
||||||
|
|
||||||
agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} =
|
agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} =
|
||||||
@ -204,10 +201,12 @@ proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.}
|
|||||||
without reservs =? await reservations.all(Reservation):
|
without reservs =? await reservations.all(Reservation):
|
||||||
return
|
return
|
||||||
|
|
||||||
let unused = reservs.filter(r => (
|
let unused = reservs.filter(
|
||||||
|
r => (
|
||||||
let slotId = slotId(r.requestId, r.slotIndex)
|
let slotId = slotId(r.requestId, r.slotIndex)
|
||||||
not activeSlots.any(slot => slot.id == slotId)
|
not activeSlots.any(slot => slot.id == slotId)
|
||||||
))
|
)
|
||||||
|
)
|
||||||
|
|
||||||
if unused.len == 0:
|
if unused.len == 0:
|
||||||
return
|
return
|
||||||
@ -215,14 +214,13 @@ proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.}
|
|||||||
info "Found unused reservations for deletion", unused = unused.len
|
info "Found unused reservations for deletion", unused = unused.len
|
||||||
|
|
||||||
for reservation in unused:
|
for reservation in unused:
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
reservationId = reservation.id
|
reservationId = reservation.id
|
||||||
availabilityId = reservation.availabilityId
|
availabilityId = reservation.availabilityId
|
||||||
|
|
||||||
if err =? (await reservations.deleteReservation(
|
if err =? (
|
||||||
reservation.id, reservation.availabilityId
|
await reservations.deleteReservation(reservation.id, reservation.availabilityId)
|
||||||
)).errorOption:
|
).errorOption:
|
||||||
error "Failed to delete unused reservation", error = err.msg
|
error "Failed to delete unused reservation", error = err.msg
|
||||||
else:
|
else:
|
||||||
trace "Deleted unused reservation"
|
trace "Deleted unused reservation"
|
||||||
@ -252,11 +250,8 @@ proc load*(sales: Sales) {.async.} =
|
|||||||
await sales.deleteInactiveReservations(activeSlots)
|
await sales.deleteInactiveReservations(activeSlots)
|
||||||
|
|
||||||
for slot in activeSlots:
|
for slot in activeSlots:
|
||||||
let agent = newSalesAgent(
|
let agent =
|
||||||
sales.context,
|
newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)
|
||||||
slot.request.id,
|
|
||||||
slot.slotIndex,
|
|
||||||
some slot.request)
|
|
||||||
|
|
||||||
agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} =
|
agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} =
|
||||||
# since workers are not being dispatched, this future has not been created
|
# since workers are not being dispatched, this future has not been created
|
||||||
@ -282,11 +277,9 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
|
|||||||
trace "unpausing queue after new availability added"
|
trace "unpausing queue after new availability added"
|
||||||
queue.unpause()
|
queue.unpause()
|
||||||
|
|
||||||
proc onStorageRequested(sales: Sales,
|
proc onStorageRequested(
|
||||||
requestId: RequestId,
|
sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: UInt256
|
||||||
ask: StorageAsk,
|
) =
|
||||||
expiry: UInt256) =
|
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "marketplace sales onStorageRequested"
|
topics = "marketplace sales onStorageRequested"
|
||||||
requestId
|
requestId
|
||||||
@ -314,10 +307,7 @@ proc onStorageRequested(sales: Sales,
|
|||||||
else:
|
else:
|
||||||
warn "Error adding request to SlotQueue", error = err.msg
|
warn "Error adding request to SlotQueue", error = err.msg
|
||||||
|
|
||||||
proc onSlotFreed(sales: Sales,
|
proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) =
|
||||||
requestId: RequestId,
|
|
||||||
slotIndex: UInt256) =
|
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "marketplace sales onSlotFreed"
|
topics = "marketplace sales onSlotFreed"
|
||||||
requestId
|
requestId
|
||||||
@ -331,8 +321,7 @@ proc onSlotFreed(sales: Sales,
|
|||||||
let queue = context.slotQueue
|
let queue = context.slotQueue
|
||||||
|
|
||||||
# first attempt to populate request using existing slot metadata in queue
|
# first attempt to populate request using existing slot metadata in queue
|
||||||
without var found =? queue.populateItem(requestId,
|
without var found =? queue.populateItem(requestId, slotIndex.truncate(uint16)):
|
||||||
slotIndex.truncate(uint16)):
|
|
||||||
trace "no existing request metadata, getting request info from contract"
|
trace "no existing request metadata, getting request info from contract"
|
||||||
# if there's no existing slot for that request, retrieve the request
|
# if there's no existing slot for that request, retrieve the request
|
||||||
# from the contract.
|
# from the contract.
|
||||||
@ -359,9 +348,7 @@ proc subscribeRequested(sales: Sales) {.async.} =
|
|||||||
let context = sales.context
|
let context = sales.context
|
||||||
let market = context.market
|
let market = context.market
|
||||||
|
|
||||||
proc onStorageRequested(requestId: RequestId,
|
proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: UInt256) =
|
||||||
ask: StorageAsk,
|
|
||||||
expiry: UInt256) =
|
|
||||||
sales.onStorageRequested(requestId, ask, expiry)
|
sales.onStorageRequested(requestId, ask, expiry)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@ -485,8 +472,7 @@ proc startSlotQueue(sales: Sales) =
|
|||||||
let slotQueue = sales.context.slotQueue
|
let slotQueue = sales.context.slotQueue
|
||||||
let reservations = sales.context.reservations
|
let reservations = sales.context.reservations
|
||||||
|
|
||||||
slotQueue.onProcessSlot =
|
slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
||||||
proc(item: SlotQueueItem, done: Future[void]) {.async.} =
|
|
||||||
trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
|
trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
|
||||||
sales.processSlot(item, done)
|
sales.processSlot(item, done)
|
||||||
|
|
||||||
|
@ -26,7 +26,8 @@
|
|||||||
## +----------------------------------------+
|
## +----------------------------------------+
|
||||||
|
|
||||||
import pkg/upraises
|
import pkg/upraises
|
||||||
push: {.upraises: [].}
|
push:
|
||||||
|
{.upraises: [].}
|
||||||
|
|
||||||
import std/sequtils
|
import std/sequtils
|
||||||
import std/sugar
|
import std/sugar
|
||||||
@ -54,7 +55,6 @@ export logutils
|
|||||||
logScope:
|
logScope:
|
||||||
topics = "sales reservations"
|
topics = "sales reservations"
|
||||||
|
|
||||||
|
|
||||||
type
|
type
|
||||||
AvailabilityId* = distinct array[32, byte]
|
AvailabilityId* = distinct array[32, byte]
|
||||||
ReservationId* = distinct array[32, byte]
|
ReservationId* = distinct array[32, byte]
|
||||||
@ -65,25 +65,32 @@ type
|
|||||||
totalSize* {.serialize.}: UInt256
|
totalSize* {.serialize.}: UInt256
|
||||||
freeSize* {.serialize.}: UInt256
|
freeSize* {.serialize.}: UInt256
|
||||||
duration* {.serialize.}: UInt256
|
duration* {.serialize.}: UInt256
|
||||||
minPrice* {.serialize.}: UInt256 # minimal price paid for the whole hosted slot for the request's duration
|
minPrice* {.serialize.}: UInt256
|
||||||
|
# minimal price paid for the whole hosted slot for the request's duration
|
||||||
maxCollateral* {.serialize.}: UInt256
|
maxCollateral* {.serialize.}: UInt256
|
||||||
|
|
||||||
Reservation* = ref object
|
Reservation* = ref object
|
||||||
id* {.serialize.}: ReservationId
|
id* {.serialize.}: ReservationId
|
||||||
availabilityId* {.serialize.}: AvailabilityId
|
availabilityId* {.serialize.}: AvailabilityId
|
||||||
size* {.serialize.}: UInt256
|
size* {.serialize.}: UInt256
|
||||||
requestId* {.serialize.}: RequestId
|
requestId* {.serialize.}: RequestId
|
||||||
slotIndex* {.serialize.}: UInt256
|
slotIndex* {.serialize.}: UInt256
|
||||||
|
|
||||||
Reservations* = ref object of RootObj
|
Reservations* = ref object of RootObj
|
||||||
availabilityLock: AsyncLock # Lock for protecting assertions of availability's sizes when searching for matching availability
|
availabilityLock: AsyncLock
|
||||||
|
# Lock for protecting assertions of availability's sizes when searching for matching availability
|
||||||
repo: RepoStore
|
repo: RepoStore
|
||||||
onAvailabilityAdded: ?OnAvailabilityAdded
|
onAvailabilityAdded: ?OnAvailabilityAdded
|
||||||
|
|
||||||
GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
|
GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
|
||||||
IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
|
IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
|
||||||
OnAvailabilityAdded* = proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
|
OnAvailabilityAdded* =
|
||||||
|
proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
|
||||||
StorableIter* = ref object
|
StorableIter* = ref object
|
||||||
finished*: bool
|
finished*: bool
|
||||||
next*: GetNext
|
next*: GetNext
|
||||||
dispose*: IterDispose
|
dispose*: IterDispose
|
||||||
|
|
||||||
ReservationsError* = object of CodexError
|
ReservationsError* = object of CodexError
|
||||||
ReserveFailedError* = object of ReservationsError
|
ReserveFailedError* = object of ReservationsError
|
||||||
ReleaseFailedError* = object of ReservationsError
|
ReleaseFailedError* = object of ReservationsError
|
||||||
@ -109,10 +116,7 @@ template withLock(lock, body) =
|
|||||||
if lock.locked:
|
if lock.locked:
|
||||||
lock.release()
|
lock.release()
|
||||||
|
|
||||||
|
proc new*(T: type Reservations, repo: RepoStore): Reservations =
|
||||||
proc new*(T: type Reservations,
|
|
||||||
repo: RepoStore): Reservations =
|
|
||||||
|
|
||||||
T(availabilityLock: newAsyncLock(), repo: repo)
|
T(availabilityLock: newAsyncLock(), repo: repo)
|
||||||
|
|
||||||
proc init*(
|
proc init*(
|
||||||
@ -121,23 +125,35 @@ proc init*(
|
|||||||
freeSize: UInt256,
|
freeSize: UInt256,
|
||||||
duration: UInt256,
|
duration: UInt256,
|
||||||
minPrice: UInt256,
|
minPrice: UInt256,
|
||||||
maxCollateral: UInt256): Availability =
|
maxCollateral: UInt256,
|
||||||
|
): Availability =
|
||||||
var id: array[32, byte]
|
var id: array[32, byte]
|
||||||
doAssert randomBytes(id) == 32
|
doAssert randomBytes(id) == 32
|
||||||
Availability(id: AvailabilityId(id), totalSize:totalSize, freeSize: freeSize, duration: duration, minPrice: minPrice, maxCollateral: maxCollateral)
|
Availability(
|
||||||
|
id: AvailabilityId(id),
|
||||||
|
totalSize: totalSize,
|
||||||
|
freeSize: freeSize,
|
||||||
|
duration: duration,
|
||||||
|
minPrice: minPrice,
|
||||||
|
maxCollateral: maxCollateral,
|
||||||
|
)
|
||||||
|
|
||||||
proc init*(
|
proc init*(
|
||||||
_: type Reservation,
|
_: type Reservation,
|
||||||
availabilityId: AvailabilityId,
|
availabilityId: AvailabilityId,
|
||||||
size: UInt256,
|
size: UInt256,
|
||||||
requestId: RequestId,
|
requestId: RequestId,
|
||||||
slotIndex: UInt256
|
slotIndex: UInt256,
|
||||||
): Reservation =
|
): Reservation =
|
||||||
|
|
||||||
var id: array[32, byte]
|
var id: array[32, byte]
|
||||||
doAssert randomBytes(id) == 32
|
doAssert randomBytes(id) == 32
|
||||||
Reservation(id: ReservationId(id), availabilityId: availabilityId, size: size, requestId: requestId, slotIndex: slotIndex)
|
Reservation(
|
||||||
|
id: ReservationId(id),
|
||||||
|
availabilityId: availabilityId,
|
||||||
|
size: size,
|
||||||
|
requestId: requestId,
|
||||||
|
slotIndex: slotIndex,
|
||||||
|
)
|
||||||
|
|
||||||
func toArray(id: SomeStorableId): array[32, byte] =
|
func toArray(id: SomeStorableId): array[32, byte] =
|
||||||
array[32, byte](id)
|
array[32, byte](id)
|
||||||
@ -146,23 +162,26 @@ proc `==`*(x, y: AvailabilityId): bool {.borrow.}
|
|||||||
proc `==`*(x, y: ReservationId): bool {.borrow.}
|
proc `==`*(x, y: ReservationId): bool {.borrow.}
|
||||||
proc `==`*(x, y: Reservation): bool =
|
proc `==`*(x, y: Reservation): bool =
|
||||||
x.id == y.id
|
x.id == y.id
|
||||||
|
|
||||||
proc `==`*(x, y: Availability): bool =
|
proc `==`*(x, y: Availability): bool =
|
||||||
x.id == y.id
|
x.id == y.id
|
||||||
|
|
||||||
proc `$`*(id: SomeStorableId): string = id.toArray.toHex
|
proc `$`*(id: SomeStorableId): string =
|
||||||
|
id.toArray.toHex
|
||||||
|
|
||||||
proc toErr[E1: ref CatchableError, E2: ReservationsError](
|
proc toErr[E1: ref CatchableError, E2: ReservationsError](
|
||||||
e1: E1,
|
e1: E1, _: type E2, msg: string = e1.msg
|
||||||
_: type E2,
|
): ref E2 =
|
||||||
msg: string = e1.msg): ref E2 =
|
|
||||||
|
|
||||||
return newException(E2, msg, e1)
|
return newException(E2, msg, e1)
|
||||||
|
|
||||||
logutils.formatIt(LogFormat.textLines, SomeStorableId): it.short0xHexLog
|
logutils.formatIt(LogFormat.textLines, SomeStorableId):
|
||||||
logutils.formatIt(LogFormat.json, SomeStorableId): it.to0xHexLog
|
it.short0xHexLog
|
||||||
|
logutils.formatIt(LogFormat.json, SomeStorableId):
|
||||||
|
it.to0xHexLog
|
||||||
|
|
||||||
proc `onAvailabilityAdded=`*(self: Reservations,
|
proc `onAvailabilityAdded=`*(
|
||||||
onAvailabilityAdded: OnAvailabilityAdded) =
|
self: Reservations, onAvailabilityAdded: OnAvailabilityAdded
|
||||||
|
) =
|
||||||
self.onAvailabilityAdded = some onAvailabilityAdded
|
self.onAvailabilityAdded = some onAvailabilityAdded
|
||||||
|
|
||||||
func key*(id: AvailabilityId): ?!Key =
|
func key*(id: AvailabilityId): ?!Key =
|
||||||
@ -179,24 +198,20 @@ func key*(availability: Availability): ?!Key =
|
|||||||
func key*(reservation: Reservation): ?!Key =
|
func key*(reservation: Reservation): ?!Key =
|
||||||
return key(reservation.id, reservation.availabilityId)
|
return key(reservation.id, reservation.availabilityId)
|
||||||
|
|
||||||
func available*(self: Reservations): uint = self.repo.available.uint
|
func available*(self: Reservations): uint =
|
||||||
|
self.repo.available.uint
|
||||||
|
|
||||||
func hasAvailable*(self: Reservations, bytes: uint): bool =
|
func hasAvailable*(self: Reservations, bytes: uint): bool =
|
||||||
self.repo.available(bytes.NBytes)
|
self.repo.available(bytes.NBytes)
|
||||||
|
|
||||||
proc exists*(
|
proc exists*(self: Reservations, key: Key): Future[bool] {.async.} =
|
||||||
self: Reservations,
|
|
||||||
key: Key): Future[bool] {.async.} =
|
|
||||||
|
|
||||||
let exists = await self.repo.metaDs.ds.contains(key)
|
let exists = await self.repo.metaDs.ds.contains(key)
|
||||||
return exists
|
return exists
|
||||||
|
|
||||||
proc getImpl(
|
proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} =
|
||||||
self: Reservations,
|
|
||||||
key: Key): Future[?!seq[byte]] {.async.} =
|
|
||||||
|
|
||||||
if not await self.exists(key):
|
if not await self.exists(key):
|
||||||
let err = newException(NotExistsError, "object with key " & $key & " does not exist")
|
let err =
|
||||||
|
newException(NotExistsError, "object with key " & $key & " does not exist")
|
||||||
return failure(err)
|
return failure(err)
|
||||||
|
|
||||||
without serialized =? await self.repo.metaDs.ds.get(key), error:
|
without serialized =? await self.repo.metaDs.ds.get(key), error:
|
||||||
@ -205,10 +220,8 @@ proc getImpl(
|
|||||||
return success serialized
|
return success serialized
|
||||||
|
|
||||||
proc get*(
|
proc get*(
|
||||||
self: Reservations,
|
self: Reservations, key: Key, T: type SomeStorableObject
|
||||||
key: Key,
|
): Future[?!T] {.async.} =
|
||||||
T: type SomeStorableObject): Future[?!T] {.async.} =
|
|
||||||
|
|
||||||
without serialized =? await self.getImpl(key), error:
|
without serialized =? await self.getImpl(key), error:
|
||||||
return failure(error)
|
return failure(error)
|
||||||
|
|
||||||
@ -217,27 +230,20 @@ proc get*(
|
|||||||
|
|
||||||
return success obj
|
return success obj
|
||||||
|
|
||||||
proc updateImpl(
|
proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.async.} =
|
||||||
self: Reservations,
|
|
||||||
obj: SomeStorableObject): Future[?!void] {.async.} =
|
|
||||||
|
|
||||||
trace "updating " & $(obj.type), id = obj.id
|
trace "updating " & $(obj.type), id = obj.id
|
||||||
|
|
||||||
without key =? obj.key, error:
|
without key =? obj.key, error:
|
||||||
return failure(error)
|
return failure(error)
|
||||||
|
|
||||||
if err =? (await self.repo.metaDs.ds.put(
|
if err =? (await self.repo.metaDs.ds.put(key, @(obj.toJson.toBytes))).errorOption:
|
||||||
key,
|
|
||||||
@(obj.toJson.toBytes)
|
|
||||||
)).errorOption:
|
|
||||||
return failure(err.toErr(UpdateFailedError))
|
return failure(err.toErr(UpdateFailedError))
|
||||||
|
|
||||||
return success()
|
return success()
|
||||||
|
|
||||||
proc updateAvailability(
|
proc updateAvailability(
|
||||||
self: Reservations,
|
self: Reservations, obj: Availability
|
||||||
obj: Availability): Future[?!void] {.async.} =
|
): Future[?!void] {.async.} =
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
availabilityId = obj.id
|
availabilityId = obj.id
|
||||||
|
|
||||||
@ -269,11 +275,18 @@ proc updateAvailability(
|
|||||||
if oldAvailability.totalSize != obj.totalSize:
|
if oldAvailability.totalSize != obj.totalSize:
|
||||||
trace "totalSize changed, updating repo reservation"
|
trace "totalSize changed, updating repo reservation"
|
||||||
if oldAvailability.totalSize < obj.totalSize: # storage added
|
if oldAvailability.totalSize < obj.totalSize: # storage added
|
||||||
if reserveErr =? (await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes)).errorOption:
|
if reserveErr =? (
|
||||||
|
await self.repo.reserve(
|
||||||
|
(obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes
|
||||||
|
)
|
||||||
|
).errorOption:
|
||||||
return failure(reserveErr.toErr(ReserveFailedError))
|
return failure(reserveErr.toErr(ReserveFailedError))
|
||||||
|
|
||||||
elif oldAvailability.totalSize > obj.totalSize: # storage removed
|
elif oldAvailability.totalSize > obj.totalSize: # storage removed
|
||||||
if reserveErr =? (await self.repo.release((oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes)).errorOption:
|
if reserveErr =? (
|
||||||
|
await self.repo.release(
|
||||||
|
(oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes
|
||||||
|
)
|
||||||
|
).errorOption:
|
||||||
return failure(reserveErr.toErr(ReleaseFailedError))
|
return failure(reserveErr.toErr(ReleaseFailedError))
|
||||||
|
|
||||||
let res = await self.updateImpl(obj)
|
let res = await self.updateImpl(obj)
|
||||||
@ -296,21 +309,14 @@ proc updateAvailability(
|
|||||||
|
|
||||||
return res
|
return res
|
||||||
|
|
||||||
proc update*(
|
proc update*(self: Reservations, obj: Reservation): Future[?!void] {.async.} =
|
||||||
self: Reservations,
|
|
||||||
obj: Reservation): Future[?!void] {.async.} =
|
|
||||||
return await self.updateImpl(obj)
|
return await self.updateImpl(obj)
|
||||||
|
|
||||||
proc update*(
|
proc update*(self: Reservations, obj: Availability): Future[?!void] {.async.} =
|
||||||
self: Reservations,
|
|
||||||
obj: Availability): Future[?!void] {.async.} =
|
|
||||||
withLock(self.availabilityLock):
|
withLock(self.availabilityLock):
|
||||||
return await self.updateAvailability(obj)
|
return await self.updateAvailability(obj)
|
||||||
|
|
||||||
proc delete(
|
proc delete(self: Reservations, key: Key): Future[?!void] {.async.} =
|
||||||
self: Reservations,
|
|
||||||
key: Key): Future[?!void] {.async.} =
|
|
||||||
|
|
||||||
trace "deleting object", key
|
trace "deleting object", key
|
||||||
|
|
||||||
if not await self.exists(key):
|
if not await self.exists(key):
|
||||||
@ -322,10 +328,8 @@ proc delete(
|
|||||||
return success()
|
return success()
|
||||||
|
|
||||||
proc deleteReservation*(
|
proc deleteReservation*(
|
||||||
self: Reservations,
|
self: Reservations, reservationId: ReservationId, availabilityId: AvailabilityId
|
||||||
reservationId: ReservationId,
|
): Future[?!void] {.async.} =
|
||||||
availabilityId: AvailabilityId): Future[?!void] {.async.} =
|
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
reservationId
|
reservationId
|
||||||
availabilityId
|
availabilityId
|
||||||
@ -369,20 +373,17 @@ proc createAvailability*(
|
|||||||
size: UInt256,
|
size: UInt256,
|
||||||
duration: UInt256,
|
duration: UInt256,
|
||||||
minPrice: UInt256,
|
minPrice: UInt256,
|
||||||
maxCollateral: UInt256): Future[?!Availability] {.async.} =
|
maxCollateral: UInt256,
|
||||||
|
): Future[?!Availability] {.async.} =
|
||||||
trace "creating availability", size, duration, minPrice, maxCollateral
|
trace "creating availability", size, duration, minPrice, maxCollateral
|
||||||
|
|
||||||
let availability = Availability.init(
|
let availability = Availability.init(size, size, duration, minPrice, maxCollateral)
|
||||||
size, size, duration, minPrice, maxCollateral
|
|
||||||
)
|
|
||||||
let bytes = availability.freeSize.truncate(uint)
|
let bytes = availability.freeSize.truncate(uint)
|
||||||
|
|
||||||
if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
|
if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
|
||||||
return failure(reserveErr.toErr(ReserveFailedError))
|
return failure(reserveErr.toErr(ReserveFailedError))
|
||||||
|
|
||||||
if updateErr =? (await self.update(availability)).errorOption:
|
if updateErr =? (await self.update(availability)).errorOption:
|
||||||
|
|
||||||
# rollback the reserve
|
# rollback the reserve
|
||||||
trace "rolling back reserve"
|
trace "rolling back reserve"
|
||||||
if rollbackErr =? (await self.repo.release(bytes.NBytes)).errorOption:
|
if rollbackErr =? (await self.repo.release(bytes.NBytes)).errorOption:
|
||||||
@ -398,9 +399,8 @@ method createReservation*(
|
|||||||
availabilityId: AvailabilityId,
|
availabilityId: AvailabilityId,
|
||||||
slotSize: UInt256,
|
slotSize: UInt256,
|
||||||
requestId: RequestId,
|
requestId: RequestId,
|
||||||
slotIndex: UInt256
|
slotIndex: UInt256,
|
||||||
): Future[?!Reservation] {.async, base.} =
|
): Future[?!Reservation] {.async, base.} =
|
||||||
|
|
||||||
withLock(self.availabilityLock):
|
withLock(self.availabilityLock):
|
||||||
without availabilityKey =? availabilityId.key, error:
|
without availabilityKey =? availabilityId.key, error:
|
||||||
return failure(error)
|
return failure(error)
|
||||||
@ -412,7 +412,8 @@ method createReservation*(
|
|||||||
if availability.freeSize < slotSize:
|
if availability.freeSize < slotSize:
|
||||||
let error = newException(
|
let error = newException(
|
||||||
BytesOutOfBoundsError,
|
BytesOutOfBoundsError,
|
||||||
"trying to reserve an amount of bytes that is greater than the total size of the Availability")
|
"trying to reserve an amount of bytes that is greater than the total size of the Availability",
|
||||||
|
)
|
||||||
return failure(error)
|
return failure(error)
|
||||||
|
|
||||||
trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex
|
trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex
|
||||||
@ -449,8 +450,8 @@ proc returnBytesToAvailability*(
|
|||||||
self: Reservations,
|
self: Reservations,
|
||||||
availabilityId: AvailabilityId,
|
availabilityId: AvailabilityId,
|
||||||
reservationId: ReservationId,
|
reservationId: ReservationId,
|
||||||
bytes: UInt256): Future[?!void] {.async.} =
|
bytes: UInt256,
|
||||||
|
): Future[?!void] {.async.} =
|
||||||
logScope:
|
logScope:
|
||||||
reservationId
|
reservationId
|
||||||
availabilityId
|
availabilityId
|
||||||
@ -467,14 +468,17 @@ proc returnBytesToAvailability*(
|
|||||||
let bytesToBeReturned = bytes - reservation.size
|
let bytesToBeReturned = bytes - reservation.size
|
||||||
|
|
||||||
if bytesToBeReturned == 0:
|
if bytesToBeReturned == 0:
|
||||||
trace "No bytes are returned", requestSizeBytes = bytes, returningBytes = bytesToBeReturned
|
trace "No bytes are returned",
|
||||||
|
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
|
||||||
return success()
|
return success()
|
||||||
|
|
||||||
trace "Returning bytes", requestSizeBytes = bytes, returningBytes = bytesToBeReturned
|
trace "Returning bytes",
|
||||||
|
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
|
||||||
|
|
||||||
# First, let's see if we can re-reserve the bytes; if the Repo's quota
|
# First, let's see if we can re-reserve the bytes; if the Repo's quota
|
||||||
# is depleted, then we will fail fast as there is nothing to be done at the moment.
|
# is depleted, then we will fail fast as there is nothing to be done at the moment.
|
||||||
if reserveErr =? (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
|
if reserveErr =?
|
||||||
|
(await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
|
||||||
return failure(reserveErr.toErr(ReserveFailedError))
|
return failure(reserveErr.toErr(ReserveFailedError))
|
||||||
|
|
||||||
without availabilityKey =? availabilityId.key, error:
|
without availabilityKey =? availabilityId.key, error:
|
||||||
@ -487,9 +491,9 @@ proc returnBytesToAvailability*(
|
|||||||
|
|
||||||
# Update availability with returned size
|
# Update availability with returned size
|
||||||
if updateErr =? (await self.updateAvailability(availability)).errorOption:
|
if updateErr =? (await self.updateAvailability(availability)).errorOption:
|
||||||
|
|
||||||
trace "Rolling back returning bytes"
|
trace "Rolling back returning bytes"
|
||||||
if rollbackErr =? (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
|
if rollbackErr =?
|
||||||
|
(await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption:
|
||||||
rollbackErr.parent = updateErr
|
rollbackErr.parent = updateErr
|
||||||
return failure(rollbackErr)
|
return failure(rollbackErr)
|
||||||
|
|
||||||
@ -501,8 +505,8 @@ proc release*(
|
|||||||
self: Reservations,
|
self: Reservations,
|
||||||
reservationId: ReservationId,
|
reservationId: ReservationId,
|
||||||
availabilityId: AvailabilityId,
|
availabilityId: AvailabilityId,
|
||||||
bytes: uint): Future[?!void] {.async.} =
|
bytes: uint,
|
||||||
|
): Future[?!void] {.async.} =
|
||||||
logScope:
|
logScope:
|
||||||
topics = "release"
|
topics = "release"
|
||||||
bytes
|
bytes
|
||||||
@ -520,7 +524,8 @@ proc release*(
|
|||||||
if reservation.size < bytes.u256:
|
if reservation.size < bytes.u256:
|
||||||
let error = newException(
|
let error = newException(
|
||||||
BytesOutOfBoundsError,
|
BytesOutOfBoundsError,
|
||||||
"trying to release an amount of bytes that is greater than the total size of the Reservation")
|
"trying to release an amount of bytes that is greater than the total size of the Reservation",
|
||||||
|
)
|
||||||
return failure(error)
|
return failure(error)
|
||||||
|
|
||||||
if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption:
|
if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption:
|
||||||
@ -530,7 +535,6 @@ proc release*(
|
|||||||
|
|
||||||
# persist partially used Reservation with updated size
|
# persist partially used Reservation with updated size
|
||||||
if err =? (await self.update(reservation)).errorOption:
|
if err =? (await self.update(reservation)).errorOption:
|
||||||
|
|
||||||
# rollback the release if an update error is encountered
|
# rollback the release if an update error is encountered
|
||||||
trace "rolling back release"
|
trace "rolling back release"
|
||||||
if rollbackErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
|
if rollbackErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
|
||||||
@ -545,11 +549,8 @@ iterator items(self: StorableIter): Future[?seq[byte]] =
|
|||||||
yield self.next()
|
yield self.next()
|
||||||
|
|
||||||
proc storables(
|
proc storables(
|
||||||
self: Reservations,
|
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
|
||||||
T: type SomeStorableObject,
|
|
||||||
queryKey: Key = ReservationsKey
|
|
||||||
): Future[?!StorableIter] {.async.} =
|
): Future[?!StorableIter] {.async.} =
|
||||||
|
|
||||||
var iter = StorableIter()
|
var iter = StorableIter()
|
||||||
let query = Query.init(queryKey)
|
let query = Query.init(queryKey)
|
||||||
when T is Availability:
|
when T is Availability:
|
||||||
@ -570,12 +571,8 @@ proc storables(
|
|||||||
proc next(): Future[?seq[byte]] {.async.} =
|
proc next(): Future[?seq[byte]] {.async.} =
|
||||||
await idleAsync()
|
await idleAsync()
|
||||||
iter.finished = results.finished
|
iter.finished = results.finished
|
||||||
if not results.finished and
|
if not results.finished and res =? (await results.next()) and res.data.len > 0 and
|
||||||
res =? (await results.next()) and
|
key =? res.key and key.namespaces.len == defaultKey.namespaces.len:
|
||||||
res.data.len > 0 and
|
|
||||||
key =? res.key and
|
|
||||||
key.namespaces.len == defaultKey.namespaces.len:
|
|
||||||
|
|
||||||
return some res.data
|
return some res.data
|
||||||
|
|
||||||
return none seq[byte]
|
return none seq[byte]
|
||||||
@ -588,11 +585,8 @@ proc storables(
|
|||||||
return success iter
|
return success iter
|
||||||
|
|
||||||
proc allImpl(
|
proc allImpl(
|
||||||
self: Reservations,
|
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
|
||||||
T: type SomeStorableObject,
|
|
||||||
queryKey: Key = ReservationsKey
|
|
||||||
): Future[?!seq[T]] {.async.} =
|
): Future[?!seq[T]] {.async.} =
|
||||||
|
|
||||||
var ret: seq[T] = @[]
|
var ret: seq[T] = @[]
|
||||||
|
|
||||||
without storables =? (await self.storables(T, queryKey)), error:
|
without storables =? (await self.storables(T, queryKey)), error:
|
||||||
@ -604,24 +598,18 @@ proc allImpl(
|
|||||||
|
|
||||||
without obj =? T.fromJson(bytes), error:
|
without obj =? T.fromJson(bytes), error:
|
||||||
error "json deserialization error",
|
error "json deserialization error",
|
||||||
json = string.fromBytes(bytes),
|
json = string.fromBytes(bytes), error = error.msg
|
||||||
error = error.msg
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
ret.add obj
|
ret.add obj
|
||||||
|
|
||||||
return success(ret)
|
return success(ret)
|
||||||
|
|
||||||
proc all*(
|
proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} =
|
||||||
self: Reservations,
|
|
||||||
T: type SomeStorableObject
|
|
||||||
): Future[?!seq[T]] {.async.} =
|
|
||||||
return await self.allImpl(T)
|
return await self.allImpl(T)
|
||||||
|
|
||||||
proc all*(
|
proc all*(
|
||||||
self: Reservations,
|
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
|
||||||
T: type SomeStorableObject,
|
|
||||||
availabilityId: AvailabilityId
|
|
||||||
): Future[?!seq[T]] {.async.} =
|
): Future[?!seq[T]] {.async.} =
|
||||||
without key =? (ReservationsKey / $availabilityId):
|
without key =? (ReservationsKey / $availabilityId):
|
||||||
return failure("no key")
|
return failure("no key")
|
||||||
@ -629,29 +617,26 @@ proc all*(
|
|||||||
return await self.allImpl(T, key)
|
return await self.allImpl(T, key)
|
||||||
|
|
||||||
proc findAvailability*(
|
proc findAvailability*(
|
||||||
self: Reservations,
|
self: Reservations, size, duration, minPrice, collateral: UInt256
|
||||||
size, duration, minPrice, collateral: UInt256
|
|
||||||
): Future[?Availability] {.async.} =
|
): Future[?Availability] {.async.} =
|
||||||
|
|
||||||
without storables =? (await self.storables(Availability)), e:
|
without storables =? (await self.storables(Availability)), e:
|
||||||
error "failed to get all storables", error = e.msg
|
error "failed to get all storables", error = e.msg
|
||||||
return none Availability
|
return none Availability
|
||||||
|
|
||||||
for item in storables.items:
|
for item in storables.items:
|
||||||
if bytes =? (await item) and
|
if bytes =? (await item) and availability =? Availability.fromJson(bytes):
|
||||||
availability =? Availability.fromJson(bytes):
|
if size <= availability.freeSize and duration <= availability.duration and
|
||||||
|
collateral <= availability.maxCollateral and minPrice >= availability.minPrice:
|
||||||
if size <= availability.freeSize and
|
|
||||||
duration <= availability.duration and
|
|
||||||
collateral <= availability.maxCollateral and
|
|
||||||
minPrice >= availability.minPrice:
|
|
||||||
|
|
||||||
trace "availability matched",
|
trace "availability matched",
|
||||||
id = availability.id,
|
id = availability.id,
|
||||||
size, availFreeSize = availability.freeSize,
|
size,
|
||||||
duration, availDuration = availability.duration,
|
availFreeSize = availability.freeSize,
|
||||||
minPrice, availMinPrice = availability.minPrice,
|
duration,
|
||||||
collateral, availMaxCollateral = availability.maxCollateral
|
availDuration = availability.duration,
|
||||||
|
minPrice,
|
||||||
|
availMinPrice = availability.minPrice,
|
||||||
|
collateral,
|
||||||
|
availMaxCollateral = availability.maxCollateral
|
||||||
|
|
||||||
# TODO: As soon as we're on ARC-ORC, we can use destructors
|
# TODO: As soon as we're on ARC-ORC, we can use destructors
|
||||||
# to automatically dispose our iterators when they fall out of scope.
|
# to automatically dispose our iterators when they fall out of scope.
|
||||||
@ -663,7 +648,11 @@ proc findAvailability*(
|
|||||||
|
|
||||||
trace "availability did not match",
|
trace "availability did not match",
|
||||||
id = availability.id,
|
id = availability.id,
|
||||||
size, availFreeSize = availability.freeSize,
|
size,
|
||||||
duration, availDuration = availability.duration,
|
availFreeSize = availability.freeSize,
|
||||||
minPrice, availMinPrice = availability.minPrice,
|
duration,
|
||||||
collateral, availMaxCollateral = availability.maxCollateral
|
availDuration = availability.duration,
|
||||||
|
minPrice,
|
||||||
|
availMinPrice = availability.minPrice,
|
||||||
|
collateral,
|
||||||
|
availMaxCollateral = availability.maxCollateral
|
||||||
|
@ -25,27 +25,26 @@ type
|
|||||||
onCleanUp*: OnCleanUp
|
onCleanUp*: OnCleanUp
|
||||||
onFilled*: ?OnFilled
|
onFilled*: ?OnFilled
|
||||||
|
|
||||||
OnCleanUp* = proc (returnBytes = false, reprocessSlot = false): Future[void] {.gcsafe, upraises: [].}
|
OnCleanUp* = proc(returnBytes = false, reprocessSlot = false): Future[void] {.
|
||||||
OnFilled* = proc(request: StorageRequest,
|
gcsafe, upraises: []
|
||||||
slotIndex: UInt256) {.gcsafe, upraises: [].}
|
.}
|
||||||
|
OnFilled* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].}
|
||||||
|
|
||||||
SalesAgentError = object of CodexError
|
SalesAgentError = object of CodexError
|
||||||
AllSlotsFilledError* = object of SalesAgentError
|
AllSlotsFilledError* = object of SalesAgentError
|
||||||
|
|
||||||
func `==`*(a, b: SalesAgent): bool =
|
func `==`*(a, b: SalesAgent): bool =
|
||||||
a.data.requestId == b.data.requestId and
|
a.data.requestId == b.data.requestId and a.data.slotIndex == b.data.slotIndex
|
||||||
a.data.slotIndex == b.data.slotIndex
|
|
||||||
|
|
||||||
proc newSalesAgent*(context: SalesContext,
|
proc newSalesAgent*(
|
||||||
|
context: SalesContext,
|
||||||
requestId: RequestId,
|
requestId: RequestId,
|
||||||
slotIndex: UInt256,
|
slotIndex: UInt256,
|
||||||
request: ?StorageRequest): SalesAgent =
|
request: ?StorageRequest,
|
||||||
|
): SalesAgent =
|
||||||
var agent = SalesAgent.new()
|
var agent = SalesAgent.new()
|
||||||
agent.context = context
|
agent.context = context
|
||||||
agent.data = SalesData(
|
agent.data = SalesData(requestId: requestId, slotIndex: slotIndex, request: request)
|
||||||
requestId: requestId,
|
|
||||||
slotIndex: slotIndex,
|
|
||||||
request: request)
|
|
||||||
return agent
|
return agent
|
||||||
|
|
||||||
proc retrieveRequest*(agent: SalesAgent) {.async.} =
|
proc retrieveRequest*(agent: SalesAgent) {.async.} =
|
||||||
@ -62,6 +61,7 @@ proc retrieveRequestState*(agent: SalesAgent): Future[?RequestState] {.async.} =
|
|||||||
func state*(agent: SalesAgent): ?string =
|
func state*(agent: SalesAgent): ?string =
|
||||||
proc description(state: State): string =
|
proc description(state: State): string =
|
||||||
$state
|
$state
|
||||||
|
|
||||||
agent.query(description)
|
agent.query(description)
|
||||||
|
|
||||||
proc subscribeCancellation(agent: SalesAgent) {.async.} =
|
proc subscribeCancellation(agent: SalesAgent) {.async.} =
|
||||||
@ -93,27 +93,29 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
|
|||||||
of RequestState.Started, RequestState.Finished, RequestState.Failed:
|
of RequestState.Started, RequestState.Finished, RequestState.Failed:
|
||||||
break
|
break
|
||||||
|
|
||||||
debug "The request is not yet canceled, even though it should be. Waiting for some more time.", currentState = state, now=clock.now
|
debug "The request is not yet canceled, even though it should be. Waiting for some more time.",
|
||||||
|
currentState = state, now = clock.now
|
||||||
|
|
||||||
data.cancelled = onCancelled()
|
data.cancelled = onCancelled()
|
||||||
|
|
||||||
method onFulfilled*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} =
|
method onFulfilled*(
|
||||||
if agent.data.requestId == requestId and
|
agent: SalesAgent, requestId: RequestId
|
||||||
not agent.data.cancelled.isNil:
|
) {.base, gcsafe, upraises: [].} =
|
||||||
|
if agent.data.requestId == requestId and not agent.data.cancelled.isNil:
|
||||||
agent.data.cancelled.cancelSoon()
|
agent.data.cancelled.cancelSoon()
|
||||||
|
|
||||||
method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} =
|
method onFailed*(
|
||||||
|
agent: SalesAgent, requestId: RequestId
|
||||||
|
) {.base, gcsafe, upraises: [].} =
|
||||||
without request =? agent.data.request:
|
without request =? agent.data.request:
|
||||||
return
|
return
|
||||||
if agent.data.requestId == requestId:
|
if agent.data.requestId == requestId:
|
||||||
agent.schedule(failedEvent(request))
|
agent.schedule(failedEvent(request))
|
||||||
|
|
||||||
method onSlotFilled*(agent: SalesAgent,
|
method onSlotFilled*(
|
||||||
requestId: RequestId,
|
agent: SalesAgent, requestId: RequestId, slotIndex: UInt256
|
||||||
slotIndex: UInt256) {.base, gcsafe, upraises: [].} =
|
) {.base, gcsafe, upraises: [].} =
|
||||||
|
if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
|
||||||
if agent.data.requestId == requestId and
|
|
||||||
agent.data.slotIndex == slotIndex:
|
|
||||||
agent.schedule(slotFilledEvent(requestId, slotIndex))
|
agent.schedule(slotFilledEvent(requestId, slotIndex))
|
||||||
|
|
||||||
proc subscribe*(agent: SalesAgent) {.async.} =
|
proc subscribe*(agent: SalesAgent) {.async.} =
|
||||||
|
@ -24,12 +24,14 @@ type
|
|||||||
simulateProofFailures*: int
|
simulateProofFailures*: int
|
||||||
|
|
||||||
BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
|
BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
|
||||||
OnStore* = proc(request: StorageRequest,
|
OnStore* = proc(
|
||||||
slot: UInt256,
|
request: StorageRequest, slot: UInt256, blocksCb: BlocksCb
|
||||||
blocksCb: BlocksCb): Future[?!void] {.gcsafe, upraises: [].}
|
): Future[?!void] {.gcsafe, upraises: [].}
|
||||||
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.gcsafe, upraises: [].}
|
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
|
||||||
OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.gcsafe, upraises: [].}
|
gcsafe, upraises: []
|
||||||
OnClear* = proc(request: StorageRequest,
|
.}
|
||||||
slotIndex: UInt256) {.gcsafe, upraises: [].}
|
OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.
|
||||||
OnSale* = proc(request: StorageRequest,
|
gcsafe, upraises: []
|
||||||
slotIndex: UInt256) {.gcsafe, upraises: [].}
|
.}
|
||||||
|
OnClear* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].}
|
||||||
|
OnSale* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].}
|
||||||
|
@ -3,8 +3,7 @@ import ../contracts/requests
|
|||||||
import ../market
|
import ../market
|
||||||
import ./reservations
|
import ./reservations
|
||||||
|
|
||||||
type
|
type SalesData* = ref object
|
||||||
SalesData* = ref object
|
|
||||||
requestId*: RequestId
|
requestId*: RequestId
|
||||||
ask*: StorageAsk
|
ask*: StorageAsk
|
||||||
request*: ?StorageRequest
|
request*: ?StorageRequest
|
||||||
|
@ -69,10 +69,12 @@ const DefaultMaxWorkers = 3
|
|||||||
const DefaultMaxSize = 128'u16
|
const DefaultMaxSize = 128'u16
|
||||||
|
|
||||||
proc profitability(item: SlotQueueItem): UInt256 =
|
proc profitability(item: SlotQueueItem): UInt256 =
|
||||||
StorageAsk(collateral: item.collateral,
|
StorageAsk(
|
||||||
|
collateral: item.collateral,
|
||||||
duration: item.duration,
|
duration: item.duration,
|
||||||
reward: item.reward,
|
reward: item.reward,
|
||||||
slotSize: item.slotSize).pricePerSlot
|
slotSize: item.slotSize,
|
||||||
|
).pricePerSlot
|
||||||
|
|
||||||
proc `<`*(a, b: SlotQueueItem): bool =
|
proc `<`*(a, b: SlotQueueItem): bool =
|
||||||
# for A to have a higher priority than B (in a min queue), A must be less than
|
# for A to have a higher priority than B (in a min queue), A must be less than
|
||||||
@ -102,13 +104,13 @@ proc `<`*(a, b: SlotQueueItem): bool =
|
|||||||
return scoreA > scoreB
|
return scoreA > scoreB
|
||||||
|
|
||||||
proc `==`*(a, b: SlotQueueItem): bool =
|
proc `==`*(a, b: SlotQueueItem): bool =
|
||||||
a.requestId == b.requestId and
|
a.requestId == b.requestId and a.slotIndex == b.slotIndex
|
||||||
a.slotIndex == b.slotIndex
|
|
||||||
|
|
||||||
proc new*(_: type SlotQueue,
|
proc new*(
|
||||||
|
_: type SlotQueue,
|
||||||
maxWorkers = DefaultMaxWorkers,
|
maxWorkers = DefaultMaxWorkers,
|
||||||
maxSize: SlotQueueSize = DefaultMaxSize): SlotQueue =
|
maxSize: SlotQueueSize = DefaultMaxSize,
|
||||||
|
): SlotQueue =
|
||||||
if maxWorkers <= 0:
|
if maxWorkers <= 0:
|
||||||
raise newException(ValueError, "maxWorkers must be positive")
|
raise newException(ValueError, "maxWorkers must be positive")
|
||||||
if maxWorkers.uint16 > maxSize:
|
if maxWorkers.uint16 > maxSize:
|
||||||
@ -121,23 +123,22 @@ proc new*(_: type SlotQueue,
|
|||||||
queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1),
|
queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1),
|
||||||
running: false,
|
running: false,
|
||||||
trackedFutures: TrackedFutures.new(),
|
trackedFutures: TrackedFutures.new(),
|
||||||
unpaused: newAsyncEvent()
|
unpaused: newAsyncEvent(),
|
||||||
)
|
)
|
||||||
# avoid instantiating `workers` in constructor to avoid side effects in
|
# avoid instantiating `workers` in constructor to avoid side effects in
|
||||||
# `newAsyncQueue` procedure
|
# `newAsyncQueue` procedure
|
||||||
|
|
||||||
proc init(_: type SlotQueueWorker): SlotQueueWorker =
|
proc init(_: type SlotQueueWorker): SlotQueueWorker =
|
||||||
SlotQueueWorker(
|
SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing"))
|
||||||
doneProcessing: newFuture[void]("slotqueue.worker.processing")
|
|
||||||
)
|
|
||||||
|
|
||||||
proc init*(_: type SlotQueueItem,
|
proc init*(
|
||||||
|
_: type SlotQueueItem,
|
||||||
requestId: RequestId,
|
requestId: RequestId,
|
||||||
slotIndex: uint16,
|
slotIndex: uint16,
|
||||||
ask: StorageAsk,
|
ask: StorageAsk,
|
||||||
expiry: UInt256,
|
expiry: UInt256,
|
||||||
seen = false): SlotQueueItem =
|
seen = false,
|
||||||
|
): SlotQueueItem =
|
||||||
SlotQueueItem(
|
SlotQueueItem(
|
||||||
requestId: requestId,
|
requestId: requestId,
|
||||||
slotIndex: slotIndex,
|
slotIndex: slotIndex,
|
||||||
@ -146,28 +147,22 @@ proc init*(_: type SlotQueueItem,
|
|||||||
reward: ask.reward,
|
reward: ask.reward,
|
||||||
collateral: ask.collateral,
|
collateral: ask.collateral,
|
||||||
expiry: expiry,
|
expiry: expiry,
|
||||||
seen: seen
|
seen: seen,
|
||||||
)
|
)
|
||||||
|
|
||||||
proc init*(_: type SlotQueueItem,
|
proc init*(
|
||||||
request: StorageRequest,
|
_: type SlotQueueItem, request: StorageRequest, slotIndex: uint16
|
||||||
slotIndex: uint16): SlotQueueItem =
|
): SlotQueueItem =
|
||||||
|
SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry)
|
||||||
SlotQueueItem.init(request.id,
|
|
||||||
slotIndex,
|
|
||||||
request.ask,
|
|
||||||
request.expiry)
|
|
||||||
|
|
||||||
proc init*(_: type SlotQueueItem,
|
|
||||||
requestId: RequestId,
|
|
||||||
ask: StorageAsk,
|
|
||||||
expiry: UInt256): seq[SlotQueueItem] =
|
|
||||||
|
|
||||||
|
proc init*(
|
||||||
|
_: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: UInt256
|
||||||
|
): seq[SlotQueueItem] =
|
||||||
if not ask.slots.inRange:
|
if not ask.slots.inRange:
|
||||||
raise newException(SlotsOutOfRangeError, "Too many slots")
|
raise newException(SlotsOutOfRangeError, "Too many slots")
|
||||||
|
|
||||||
var i = 0'u16
|
var i = 0'u16
|
||||||
proc initSlotQueueItem: SlotQueueItem =
|
proc initSlotQueueItem(): SlotQueueItem =
|
||||||
let item = SlotQueueItem.init(requestId, i, ask, expiry)
|
let item = SlotQueueItem.init(requestId, i, ask, expiry)
|
||||||
inc i
|
inc i
|
||||||
return item
|
return item
|
||||||
@ -176,37 +171,54 @@ proc init*(_: type SlotQueueItem,
|
|||||||
Rng.instance.shuffle(items)
|
Rng.instance.shuffle(items)
|
||||||
return items
|
return items
|
||||||
|
|
||||||
proc init*(_: type SlotQueueItem,
|
proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] =
|
||||||
request: StorageRequest): seq[SlotQueueItem] =
|
|
||||||
|
|
||||||
return SlotQueueItem.init(request.id, request.ask, request.expiry)
|
return SlotQueueItem.init(request.id, request.ask, request.expiry)
|
||||||
|
|
||||||
proc inRange*(val: SomeUnsignedInt): bool =
|
proc inRange*(val: SomeUnsignedInt): bool =
|
||||||
val.uint16 in SlotQueueSize.low .. SlotQueueSize.high
|
val.uint16 in SlotQueueSize.low .. SlotQueueSize.high
|
||||||
|
|
||||||
proc requestId*(self: SlotQueueItem): RequestId = self.requestId
|
proc requestId*(self: SlotQueueItem): RequestId =
|
||||||
proc slotIndex*(self: SlotQueueItem): uint16 = self.slotIndex
|
self.requestId
|
||||||
proc slotSize*(self: SlotQueueItem): UInt256 = self.slotSize
|
|
||||||
proc duration*(self: SlotQueueItem): UInt256 = self.duration
|
|
||||||
proc reward*(self: SlotQueueItem): UInt256 = self.reward
|
|
||||||
proc collateral*(self: SlotQueueItem): UInt256 = self.collateral
|
|
||||||
proc seen*(self: SlotQueueItem): bool = self.seen
|
|
||||||
|
|
||||||
proc running*(self: SlotQueue): bool = self.running
|
proc slotIndex*(self: SlotQueueItem): uint16 =
|
||||||
|
self.slotIndex
|
||||||
|
|
||||||
proc len*(self: SlotQueue): int = self.queue.len
|
proc slotSize*(self: SlotQueueItem): UInt256 =
|
||||||
|
self.slotSize
|
||||||
|
|
||||||
proc size*(self: SlotQueue): int = self.queue.size - 1
|
proc duration*(self: SlotQueueItem): UInt256 =
|
||||||
|
self.duration
|
||||||
|
|
||||||
proc paused*(self: SlotQueue): bool = not self.unpaused.isSet
|
proc reward*(self: SlotQueueItem): UInt256 =
|
||||||
|
self.reward
|
||||||
|
|
||||||
proc `$`*(self: SlotQueue): string = $self.queue
|
proc collateral*(self: SlotQueueItem): UInt256 =
|
||||||
|
self.collateral
|
||||||
|
|
||||||
|
proc seen*(self: SlotQueueItem): bool =
|
||||||
|
self.seen
|
||||||
|
|
||||||
|
proc running*(self: SlotQueue): bool =
|
||||||
|
self.running
|
||||||
|
|
||||||
|
proc len*(self: SlotQueue): int =
|
||||||
|
self.queue.len
|
||||||
|
|
||||||
|
proc size*(self: SlotQueue): int =
|
||||||
|
self.queue.size - 1
|
||||||
|
|
||||||
|
proc paused*(self: SlotQueue): bool =
|
||||||
|
not self.unpaused.isSet
|
||||||
|
|
||||||
|
proc `$`*(self: SlotQueue): string =
|
||||||
|
$self.queue
|
||||||
|
|
||||||
proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) =
|
proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) =
|
||||||
self.onProcessSlot = some onProcessSlot
|
self.onProcessSlot = some onProcessSlot
|
||||||
|
|
||||||
proc activeWorkers*(self: SlotQueue): int =
|
proc activeWorkers*(self: SlotQueue): int =
|
||||||
if not self.running: return 0
|
if not self.running:
|
||||||
|
return 0
|
||||||
|
|
||||||
# active = capacity - available
|
# active = capacity - available
|
||||||
self.maxWorkers - self.workers.len
|
self.maxWorkers - self.workers.len
|
||||||
@ -222,10 +234,9 @@ proc unpause*(self: SlotQueue) =
|
|||||||
# set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
|
# set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
|
||||||
self.unpaused.fire()
|
self.unpaused.fire()
|
||||||
|
|
||||||
proc populateItem*(self: SlotQueue,
|
proc populateItem*(
|
||||||
requestId: RequestId,
|
self: SlotQueue, requestId: RequestId, slotIndex: uint16
|
||||||
slotIndex: uint16): ?SlotQueueItem =
|
): ?SlotQueueItem =
|
||||||
|
|
||||||
trace "populate item, items in queue", len = self.queue.len
|
trace "populate item, items in queue", len = self.queue.len
|
||||||
for item in self.queue.items:
|
for item in self.queue.items:
|
||||||
trace "populate item search", itemRequestId = item.requestId, requestId
|
trace "populate item search", itemRequestId = item.requestId, requestId
|
||||||
@ -237,12 +248,11 @@ proc populateItem*(self: SlotQueue,
|
|||||||
duration: item.duration,
|
duration: item.duration,
|
||||||
reward: item.reward,
|
reward: item.reward,
|
||||||
collateral: item.collateral,
|
collateral: item.collateral,
|
||||||
expiry: item.expiry
|
expiry: item.expiry,
|
||||||
)
|
)
|
||||||
return none SlotQueueItem
|
return none SlotQueueItem
|
||||||
|
|
||||||
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void =
|
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void =
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
requestId = item.requestId
|
requestId = item.requestId
|
||||||
slotIndex = item.slotIndex
|
slotIndex = item.slotIndex
|
||||||
@ -330,9 +340,9 @@ proc addWorker(self: SlotQueue): ?!void =
|
|||||||
|
|
||||||
return success()
|
return success()
|
||||||
|
|
||||||
proc dispatch(self: SlotQueue,
|
proc dispatch(
|
||||||
worker: SlotQueueWorker,
|
self: SlotQueue, worker: SlotQueueWorker, item: SlotQueueItem
|
||||||
item: SlotQueueItem) {.async: (raises: []).} =
|
) {.async: (raises: []).} =
|
||||||
logScope:
|
logScope:
|
||||||
requestId = item.requestId
|
requestId = item.requestId
|
||||||
slotIndex = item.slotIndex
|
slotIndex = item.slotIndex
|
||||||
@ -349,10 +359,8 @@ proc dispatch(self: SlotQueue,
|
|||||||
|
|
||||||
if err =? self.addWorker().errorOption:
|
if err =? self.addWorker().errorOption:
|
||||||
raise err # catch below
|
raise err # catch below
|
||||||
|
|
||||||
except QueueNotRunningError as e:
|
except QueueNotRunningError as e:
|
||||||
info "could not re-add worker to worker queue, queue not running",
|
info "could not re-add worker to worker queue, queue not running", error = e.msg
|
||||||
error = e.msg
|
|
||||||
except CancelledError:
|
except CancelledError:
|
||||||
# do not bubble the exception up, as it is called with `asyncSpawn`, which would
|
# do not bubble the exception up, as it is called with `asyncSpawn`, which would
|
||||||
# convert the exception into a `FutureDefect`
|
# convert the exception into a `FutureDefect`
|
||||||
@ -380,7 +388,6 @@ proc clearSeenFlags*(self: SlotQueue) =
|
|||||||
trace "all 'seen' flags cleared"
|
trace "all 'seen' flags cleared"
|
||||||
|
|
||||||
proc run(self: SlotQueue) {.async: (raises: []).} =
|
proc run(self: SlotQueue) {.async: (raises: []).} =
|
||||||
|
|
||||||
while self.running:
|
while self.running:
|
||||||
try:
|
try:
|
||||||
if self.paused:
|
if self.paused:
|
||||||
@ -389,7 +396,8 @@ proc run(self: SlotQueue) {.async: (raises: []).} =
|
|||||||
# block until unpaused is true/fired, i.e. wait for the queue to be unpaused
|
# block until unpaused is true/fired, i.e. wait for the queue to be unpaused
|
||||||
await self.unpaused.wait()
|
await self.unpaused.wait()
|
||||||
|
|
||||||
let worker = await self.workers.popFirst() # if workers saturated, wait here for new workers
|
let worker =
|
||||||
|
await self.workers.popFirst() # if workers saturated, wait here for new workers
|
||||||
let item = await self.queue.pop() # if queue empty, wait here for new items
|
let item = await self.queue.pop() # if queue empty, wait here for new items
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
|
@ -14,14 +14,19 @@ type
|
|||||||
SaleState* = ref object of State
|
SaleState* = ref object of State
|
||||||
SaleError* = ref object of CodexError
|
SaleError* = ref object of CodexError
|
||||||
|
|
||||||
method onCancelled*(state: SaleState, request: StorageRequest): ?State {.base, upraises:[].} =
|
method onCancelled*(
|
||||||
|
state: SaleState, request: StorageRequest
|
||||||
|
): ?State {.base, upraises: [].} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
method onFailed*(state: SaleState, request: StorageRequest): ?State {.base, upraises:[].} =
|
method onFailed*(
|
||||||
|
state: SaleState, request: StorageRequest
|
||||||
|
): ?State {.base, upraises: [].} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
method onSlotFilled*(state: SaleState, requestId: RequestId,
|
method onSlotFilled*(
|
||||||
slotIndex: UInt256): ?State {.base, upraises:[].} =
|
state: SaleState, requestId: RequestId, slotIndex: UInt256
|
||||||
|
): ?State {.base, upraises: [].} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
proc cancelledEvent*(request: StorageRequest): Event =
|
proc cancelledEvent*(request: StorageRequest): Event =
|
||||||
|
@ -6,10 +6,10 @@ import ./errorhandling
|
|||||||
logScope:
|
logScope:
|
||||||
topics = "marketplace sales cancelled"
|
topics = "marketplace sales cancelled"
|
||||||
|
|
||||||
type
|
type SaleCancelled* = ref object of ErrorHandlingState
|
||||||
SaleCancelled* = ref object of ErrorHandlingState
|
|
||||||
|
|
||||||
method `$`*(state: SaleCancelled): string = "SaleCancelled"
|
method `$`*(state: SaleCancelled): string =
|
||||||
|
"SaleCancelled"
|
||||||
|
|
||||||
method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} =
|
method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} =
|
||||||
let agent = SalesAgent(machine)
|
let agent = SalesAgent(machine)
|
||||||
@ -20,14 +20,15 @@ method run*(state: SaleCancelled, machine: Machine): Future[?State] {.async.} =
|
|||||||
raiseAssert "no sale request"
|
raiseAssert "no sale request"
|
||||||
|
|
||||||
let slot = Slot(request: request, slotIndex: data.slotIndex)
|
let slot = Slot(request: request, slotIndex: data.slotIndex)
|
||||||
debug "Collecting collateral and partial payout", requestId = data.requestId, slotIndex = data.slotIndex
|
debug "Collecting collateral and partial payout",
|
||||||
|
requestId = data.requestId, slotIndex = data.slotIndex
|
||||||
await market.freeSlot(slot.id)
|
await market.freeSlot(slot.id)
|
||||||
|
|
||||||
if onClear =? agent.context.onClear and
|
if onClear =? agent.context.onClear and request =? data.request:
|
||||||
request =? data.request:
|
|
||||||
onClear(request, data.slotIndex)
|
onClear(request, data.slotIndex)
|
||||||
|
|
||||||
if onCleanUp =? agent.onCleanUp:
|
if onCleanUp =? agent.onCleanUp:
|
||||||
await onCleanUp(returnBytes = true, reprocessSlot = false)
|
await onCleanUp(returnBytes = true, reprocessSlot = false)
|
||||||
|
|
||||||
warn "Sale cancelled due to timeout", requestId = data.requestId, slotIndex = data.slotIndex
|
warn "Sale cancelled due to timeout",
|
||||||
|
requestId = data.requestId, slotIndex = data.slotIndex
|
||||||
|
@ -13,13 +13,13 @@ import ./filled
|
|||||||
import ./initialproving
|
import ./initialproving
|
||||||
import ./errored
|
import ./errored
|
||||||
|
|
||||||
type
|
type SaleDownloading* = ref object of ErrorHandlingState
|
||||||
SaleDownloading* = ref object of ErrorHandlingState
|
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "marketplace sales downloading"
|
topics = "marketplace sales downloading"
|
||||||
|
|
||||||
method `$`*(state: SaleDownloading): string = "SaleDownloading"
|
method `$`*(state: SaleDownloading): string =
|
||||||
|
"SaleDownloading"
|
||||||
|
|
||||||
method onCancelled*(state: SaleDownloading, request: StorageRequest): ?State =
|
method onCancelled*(state: SaleDownloading, request: StorageRequest): ?State =
|
||||||
return some State(SaleCancelled())
|
return some State(SaleCancelled())
|
||||||
@ -27,8 +27,9 @@ method onCancelled*(state: SaleDownloading, request: StorageRequest): ?State =
|
|||||||
method onFailed*(state: SaleDownloading, request: StorageRequest): ?State =
|
method onFailed*(state: SaleDownloading, request: StorageRequest): ?State =
|
||||||
return some State(SaleFailed())
|
return some State(SaleFailed())
|
||||||
|
|
||||||
method onSlotFilled*(state: SaleDownloading, requestId: RequestId,
|
method onSlotFilled*(
|
||||||
slotIndex: UInt256): ?State =
|
state: SaleDownloading, requestId: RequestId, slotIndex: UInt256
|
||||||
|
): ?State =
|
||||||
return some State(SaleFilled())
|
return some State(SaleFilled())
|
||||||
|
|
||||||
method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.} =
|
method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.} =
|
||||||
@ -61,14 +62,10 @@ method run*(state: SaleDownloading, machine: Machine): Future[?State] {.async.}
|
|||||||
bytes += blk.data.len.uint
|
bytes += blk.data.len.uint
|
||||||
|
|
||||||
trace "Releasing batch of bytes written to disk", bytes
|
trace "Releasing batch of bytes written to disk", bytes
|
||||||
return await reservations.release(reservation.id,
|
return await reservations.release(reservation.id, reservation.availabilityId, bytes)
|
||||||
reservation.availabilityId,
|
|
||||||
bytes)
|
|
||||||
|
|
||||||
trace "Starting download"
|
trace "Starting download"
|
||||||
if err =? (await onStore(request,
|
if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption:
|
||||||
data.slotIndex,
|
|
||||||
onBlocks)).errorOption:
|
|
||||||
return some State(SaleErrored(error: err, reprocessSlot: false))
|
return some State(SaleErrored(error: err, reprocessSlot: false))
|
||||||
|
|
||||||
trace "Download complete"
|
trace "Download complete"
|
||||||
|
@ -14,7 +14,8 @@ type SaleErrored* = ref object of SaleState
|
|||||||
error*: ref CatchableError
|
error*: ref CatchableError
|
||||||
reprocessSlot*: bool
|
reprocessSlot*: bool
|
||||||
|
|
||||||
method `$`*(state: SaleErrored): string = "SaleErrored"
|
method `$`*(state: SaleErrored): string =
|
||||||
|
"SaleErrored"
|
||||||
|
|
||||||
method onError*(state: SaleState, err: ref CatchableError): ?State {.upraises: [].} =
|
method onError*(state: SaleState, err: ref CatchableError): ?State {.upraises: [].} =
|
||||||
error "error during SaleErrored run", error = err.msg
|
error "error during SaleErrored run", error = err.msg
|
||||||
@ -24,12 +25,13 @@ method run*(state: SaleErrored, machine: Machine): Future[?State] {.async.} =
|
|||||||
let data = agent.data
|
let data = agent.data
|
||||||
let context = agent.context
|
let context = agent.context
|
||||||
|
|
||||||
error "Sale error", error=state.error.msgDetail, requestId = data.requestId, slotIndex = data.slotIndex
|
error "Sale error",
|
||||||
|
error = state.error.msgDetail,
|
||||||
|
requestId = data.requestId,
|
||||||
|
slotIndex = data.slotIndex
|
||||||
|
|
||||||
if onClear =? context.onClear and
|
if onClear =? context.onClear and request =? data.request:
|
||||||
request =? data.request:
|
|
||||||
onClear(request, data.slotIndex)
|
onClear(request, data.slotIndex)
|
||||||
|
|
||||||
if onCleanUp =? agent.onCleanUp:
|
if onCleanUp =? agent.onCleanUp:
|
||||||
await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot)
|
await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot)
|
||||||
|
|
||||||
|
@ -2,8 +2,7 @@ import pkg/questionable
|
|||||||
import ../statemachine
|
import ../statemachine
|
||||||
import ./errored
|
import ./errored
|
||||||
|
|
||||||
type
|
type ErrorHandlingState* = ref object of SaleState
|
||||||
ErrorHandlingState* = ref object of SaleState
|
|
||||||
|
|
||||||
method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
|
method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
|
||||||
some State(SaleErrored(error: error))
|
some State(SaleErrored(error: error))
|
||||||
|
@ -11,7 +11,8 @@ type
|
|||||||
SaleFailed* = ref object of ErrorHandlingState
|
SaleFailed* = ref object of ErrorHandlingState
|
||||||
SaleFailedError* = object of SaleError
|
SaleFailedError* = object of SaleError
|
||||||
|
|
||||||
method `$`*(state: SaleFailed): string = "SaleFailed"
|
method `$`*(state: SaleFailed): string =
|
||||||
|
"SaleFailed"
|
||||||
|
|
||||||
method run*(state: SaleFailed, machine: Machine): Future[?State] {.async.} =
|
method run*(state: SaleFailed, machine: Machine): Future[?State] {.async.} =
|
||||||
let data = SalesAgent(machine).data
|
let data = SalesAgent(machine).data
|
||||||
@ -21,7 +22,8 @@ method run*(state: SaleFailed, machine: Machine): Future[?State] {.async.} =
|
|||||||
raiseAssert "no sale request"
|
raiseAssert "no sale request"
|
||||||
|
|
||||||
let slot = Slot(request: request, slotIndex: data.slotIndex)
|
let slot = Slot(request: request, slotIndex: data.slotIndex)
|
||||||
debug "Removing slot from mySlots", requestId = data.requestId, slotIndex = data.slotIndex
|
debug "Removing slot from mySlots",
|
||||||
|
requestId = data.requestId, slotIndex = data.slotIndex
|
||||||
await market.freeSlot(slot.id)
|
await market.freeSlot(slot.id)
|
||||||
|
|
||||||
let error = newException(SaleFailedError, "Sale failed")
|
let error = newException(SaleFailedError, "Sale failed")
|
||||||
|
@ -27,7 +27,8 @@ method onCancelled*(state: SaleFilled, request: StorageRequest): ?State =
|
|||||||
method onFailed*(state: SaleFilled, request: StorageRequest): ?State =
|
method onFailed*(state: SaleFilled, request: StorageRequest): ?State =
|
||||||
return some State(SaleFailed())
|
return some State(SaleFailed())
|
||||||
|
|
||||||
method `$`*(state: SaleFilled): string = "SaleFilled"
|
method `$`*(state: SaleFilled): string =
|
||||||
|
"SaleFilled"
|
||||||
|
|
||||||
method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} =
|
method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} =
|
||||||
let agent = SalesAgent(machine)
|
let agent = SalesAgent(machine)
|
||||||
@ -39,7 +40,8 @@ method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} =
|
|||||||
let me = await market.getSigner()
|
let me = await market.getSigner()
|
||||||
|
|
||||||
if host == me.some:
|
if host == me.some:
|
||||||
info "Slot succesfully filled", requestId = data.requestId, slotIndex = data.slotIndex
|
info "Slot succesfully filled",
|
||||||
|
requestId = data.requestId, slotIndex = data.slotIndex
|
||||||
|
|
||||||
without request =? data.request:
|
without request =? data.request:
|
||||||
raiseAssert "no sale request"
|
raiseAssert "no sale request"
|
||||||
@ -57,10 +59,11 @@ method run*(state: SaleFilled, machine: Machine): Future[?State] {.async.} =
|
|||||||
when codex_enable_proof_failures:
|
when codex_enable_proof_failures:
|
||||||
if context.simulateProofFailures > 0:
|
if context.simulateProofFailures > 0:
|
||||||
info "Proving with failure rate", rate = context.simulateProofFailures
|
info "Proving with failure rate", rate = context.simulateProofFailures
|
||||||
return some State(SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures))
|
return some State(
|
||||||
|
SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures)
|
||||||
|
)
|
||||||
|
|
||||||
return some State(SaleProving())
|
return some State(SaleProving())
|
||||||
|
|
||||||
else:
|
else:
|
||||||
let error = newException(HostMismatchError, "Slot filled by other host")
|
let error = newException(HostMismatchError, "Slot filled by other host")
|
||||||
return some State(SaleErrored(error: error))
|
return some State(SaleErrored(error: error))
|
||||||
|
@ -13,11 +13,11 @@ import ./errored
|
|||||||
logScope:
|
logScope:
|
||||||
topics = "marketplace sales filling"
|
topics = "marketplace sales filling"
|
||||||
|
|
||||||
type
|
type SaleFilling* = ref object of ErrorHandlingState
|
||||||
SaleFilling* = ref object of ErrorHandlingState
|
|
||||||
proof*: Groth16Proof
|
proof*: Groth16Proof
|
||||||
|
|
||||||
method `$`*(state: SaleFilling): string = "SaleFilling"
|
method `$`*(state: SaleFilling): string =
|
||||||
|
"SaleFilling"
|
||||||
|
|
||||||
method onCancelled*(state: SaleFilling, request: StorageRequest): ?State =
|
method onCancelled*(state: SaleFilling, request: StorageRequest): ?State =
|
||||||
return some State(SaleCancelled())
|
return some State(SaleCancelled())
|
||||||
@ -41,7 +41,8 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} =
|
|||||||
if slotState == SlotState.Repair:
|
if slotState == SlotState.Repair:
|
||||||
# When repairing, the node gets a "discount" on the collateral that it needs to
|
# When repairing, the node gets a "discount" on the collateral that it needs to
|
||||||
let repairRewardPercentage = (await market.repairRewardPercentage).u256
|
let repairRewardPercentage = (await market.repairRewardPercentage).u256
|
||||||
collateral = fullCollateral - ((fullCollateral * repairRewardPercentage)).div(100.u256)
|
collateral =
|
||||||
|
fullCollateral - ((fullCollateral * repairRewardPercentage)).div(100.u256)
|
||||||
else:
|
else:
|
||||||
collateral = fullCollateral
|
collateral = fullCollateral
|
||||||
|
|
||||||
|
@ -10,10 +10,10 @@ import ./failed
|
|||||||
logScope:
|
logScope:
|
||||||
topics = "marketplace sales finished"
|
topics = "marketplace sales finished"
|
||||||
|
|
||||||
type
|
type SaleFinished* = ref object of ErrorHandlingState
|
||||||
SaleFinished* = ref object of ErrorHandlingState
|
|
||||||
|
|
||||||
method `$`*(state: SaleFinished): string = "SaleFinished"
|
method `$`*(state: SaleFinished): string =
|
||||||
|
"SaleFinished"
|
||||||
|
|
||||||
method onCancelled*(state: SaleFinished, request: StorageRequest): ?State =
|
method onCancelled*(state: SaleFinished, request: StorageRequest): ?State =
|
||||||
return some State(SaleCancelled())
|
return some State(SaleCancelled())
|
||||||
@ -28,7 +28,8 @@ method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} =
|
|||||||
without request =? data.request:
|
without request =? data.request:
|
||||||
raiseAssert "no sale request"
|
raiseAssert "no sale request"
|
||||||
|
|
||||||
info "Slot finished and paid out", requestId = data.requestId, slotIndex = data.slotIndex
|
info "Slot finished and paid out",
|
||||||
|
requestId = data.requestId, slotIndex = data.slotIndex
|
||||||
|
|
||||||
if onCleanUp =? agent.onCleanUp:
|
if onCleanUp =? agent.onCleanUp:
|
||||||
await onCleanUp()
|
await onCleanUp()
|
||||||
|
@ -11,16 +11,17 @@ logScope:
|
|||||||
# Ignored slots could mean there was no availability or that the slot could
|
# Ignored slots could mean there was no availability or that the slot could
|
||||||
# not be reserved.
|
# not be reserved.
|
||||||
|
|
||||||
type
|
type SaleIgnored* = ref object of ErrorHandlingState
|
||||||
SaleIgnored* = ref object of ErrorHandlingState
|
|
||||||
reprocessSlot*: bool # re-add slot to queue with `seen` flag
|
reprocessSlot*: bool # re-add slot to queue with `seen` flag
|
||||||
returnBytes*: bool # return unreleased bytes from Reservation to Availability
|
returnBytes*: bool # return unreleased bytes from Reservation to Availability
|
||||||
|
|
||||||
method `$`*(state: SaleIgnored): string = "SaleIgnored"
|
method `$`*(state: SaleIgnored): string =
|
||||||
|
"SaleIgnored"
|
||||||
|
|
||||||
method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} =
|
method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} =
|
||||||
let agent = SalesAgent(machine)
|
let agent = SalesAgent(machine)
|
||||||
|
|
||||||
if onCleanUp =? agent.onCleanUp:
|
if onCleanUp =? agent.onCleanUp:
|
||||||
await onCleanUp(reprocessSlot = state.reprocessSlot,
|
await onCleanUp(
|
||||||
returnBytes = state.returnBytes)
|
reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes
|
||||||
|
)
|
||||||
|
@ -12,10 +12,10 @@ import ./failed
|
|||||||
logScope:
|
logScope:
|
||||||
topics = "marketplace sales initial-proving"
|
topics = "marketplace sales initial-proving"
|
||||||
|
|
||||||
type
|
type SaleInitialProving* = ref object of ErrorHandlingState
|
||||||
SaleInitialProving* = ref object of ErrorHandlingState
|
|
||||||
|
|
||||||
method `$`*(state: SaleInitialProving): string = "SaleInitialProving"
|
method `$`*(state: SaleInitialProving): string =
|
||||||
|
"SaleInitialProving"
|
||||||
|
|
||||||
method onCancelled*(state: SaleInitialProving, request: StorageRequest): ?State =
|
method onCancelled*(state: SaleInitialProving, request: StorageRequest): ?State =
|
||||||
return some State(SaleCancelled())
|
return some State(SaleCancelled())
|
||||||
|
@ -10,10 +10,10 @@ import ./finished
|
|||||||
logScope:
|
logScope:
|
||||||
topics = "marketplace sales payout"
|
topics = "marketplace sales payout"
|
||||||
|
|
||||||
type
|
type SalePayout* = ref object of ErrorHandlingState
|
||||||
SalePayout* = ref object of ErrorHandlingState
|
|
||||||
|
|
||||||
method `$`*(state: SalePayout): string = "SalePayout"
|
method `$`*(state: SalePayout): string =
|
||||||
|
"SalePayout"
|
||||||
|
|
||||||
method onCancelled*(state: SalePayout, request: StorageRequest): ?State =
|
method onCancelled*(state: SalePayout, request: StorageRequest): ?State =
|
||||||
return some State(SaleCancelled())
|
return some State(SaleCancelled())
|
||||||
@ -29,7 +29,8 @@ method run(state: SalePayout, machine: Machine): Future[?State] {.async.} =
|
|||||||
raiseAssert "no sale request"
|
raiseAssert "no sale request"
|
||||||
|
|
||||||
let slot = Slot(request: request, slotIndex: data.slotIndex)
|
let slot = Slot(request: request, slotIndex: data.slotIndex)
|
||||||
debug "Collecting finished slot's reward", requestId = data.requestId, slotIndex = data.slotIndex
|
debug "Collecting finished slot's reward",
|
||||||
|
requestId = data.requestId, slotIndex = data.slotIndex
|
||||||
await market.freeSlot(slot.id)
|
await market.freeSlot(slot.id)
|
||||||
|
|
||||||
return some State(SaleFinished())
|
return some State(SaleFinished())
|
||||||
|
@ -14,15 +14,17 @@ import ./ignored
|
|||||||
import ./slotreserving
|
import ./slotreserving
|
||||||
import ./errored
|
import ./errored
|
||||||
|
|
||||||
declareCounter(codex_reservations_availability_mismatch, "codex reservations availability_mismatch")
|
declareCounter(
|
||||||
|
codex_reservations_availability_mismatch, "codex reservations availability_mismatch"
|
||||||
|
)
|
||||||
|
|
||||||
type
|
type SalePreparing* = ref object of ErrorHandlingState
|
||||||
SalePreparing* = ref object of ErrorHandlingState
|
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "marketplace sales preparing"
|
topics = "marketplace sales preparing"
|
||||||
|
|
||||||
method `$`*(state: SalePreparing): string = "SalePreparing"
|
method `$`*(state: SalePreparing): string =
|
||||||
|
"SalePreparing"
|
||||||
|
|
||||||
method onCancelled*(state: SalePreparing, request: StorageRequest): ?State =
|
method onCancelled*(state: SalePreparing, request: StorageRequest): ?State =
|
||||||
return some State(SaleCancelled())
|
return some State(SaleCancelled())
|
||||||
@ -30,8 +32,9 @@ method onCancelled*(state: SalePreparing, request: StorageRequest): ?State =
|
|||||||
method onFailed*(state: SalePreparing, request: StorageRequest): ?State =
|
method onFailed*(state: SalePreparing, request: StorageRequest): ?State =
|
||||||
return some State(SaleFailed())
|
return some State(SaleFailed())
|
||||||
|
|
||||||
method onSlotFilled*(state: SalePreparing, requestId: RequestId,
|
method onSlotFilled*(
|
||||||
slotIndex: UInt256): ?State =
|
state: SalePreparing, requestId: RequestId, slotIndex: UInt256
|
||||||
|
): ?State =
|
||||||
return some State(SaleFilled())
|
return some State(SaleFilled())
|
||||||
|
|
||||||
method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} =
|
method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} =
|
||||||
@ -64,22 +67,20 @@ method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} =
|
|||||||
# availability was checked for this slot when it entered the queue; however,
|
# availability was checked for this slot when it entered the queue; however,
|
||||||
# check to ensure that there is still availability, as it may have
|
# check to ensure that there is still availability, as it may have
|
||||||
# changed since being added (other slots may have been processed in that time)
|
# changed since being added (other slots may have been processed in that time)
|
||||||
without availability =? await reservations.findAvailability(
|
without availability =?
|
||||||
request.ask.slotSize,
|
await reservations.findAvailability(
|
||||||
request.ask.duration,
|
request.ask.slotSize, request.ask.duration, request.ask.pricePerSlot,
|
||||||
request.ask.pricePerSlot,
|
request.ask.collateral,
|
||||||
request.ask.collateral):
|
):
|
||||||
debug "No availability found for request, ignoring"
|
debug "No availability found for request, ignoring"
|
||||||
|
|
||||||
return some State(SaleIgnored(reprocessSlot: true))
|
return some State(SaleIgnored(reprocessSlot: true))
|
||||||
|
|
||||||
info "Availability found for request, creating reservation"
|
info "Availability found for request, creating reservation"
|
||||||
|
|
||||||
without reservation =? await reservations.createReservation(
|
without reservation =?
|
||||||
availability.id,
|
await reservations.createReservation(
|
||||||
request.ask.slotSize,
|
availability.id, request.ask.slotSize, request.id, data.slotIndex
|
||||||
request.id,
|
|
||||||
data.slotIndex
|
|
||||||
), error:
|
), error:
|
||||||
trace "Creation of reservation failed"
|
trace "Creation of reservation failed"
|
||||||
# Race condition:
|
# Race condition:
|
||||||
|
@@ -27,7 +27,7 @@ method prove*(
challenge: ProofChallenge,
onProve: OnProve,
market: Market,
-currentPeriod: Period
+currentPeriod: Period,
) {.base, async.} =
try:
without proof =? (await onProve(slot, challenge)), err:
@@ -48,9 +48,8 @@ proc proveLoop(
clock: Clock,
request: StorageRequest,
slotIndex: UInt256,
-onProve: OnProve
+onProve: OnProve,
) {.async.} =

let slot = Slot(request: request, slotIndex: slotIndex)
let slotId = slot.id

@@ -76,7 +75,8 @@ proc proveLoop(
case slotState
of SlotState.Filled:
debug "Proving for new period", period = currentPeriod
-if (await market.isProofRequired(slotId)) or (await market.willProofBeRequired(slotId)):
+if (await market.isProofRequired(slotId)) or
+(await market.willProofBeRequired(slotId)):
let challenge = await market.getChallenge(slotId)
debug "Proof is required", period = currentPeriod, challenge = challenge
await state.prove(slot, challenge, onProve, market, currentPeriod)
@@ -100,7 +100,8 @@ proc proveLoop(
debug "waiting until next period"
await waitUntilPeriod(currentPeriod + 1)

-method `$`*(state: SaleProving): string = "SaleProving"
+method `$`*(state: SaleProving): string =
+"SaleProving"

method onCancelled*(state: SaleProving, request: StorageRequest): ?State =
# state.loop cancellation happens automatically when run is cancelled due to
@@ -14,19 +14,24 @@ when codex_enable_proof_failures:
logScope:
topics = "marketplace sales simulated-proving"

-type
-SaleProvingSimulated* = ref object of SaleProving
+type SaleProvingSimulated* = ref object of SaleProving
failEveryNProofs*: int
proofCount: int

proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) =
error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail

-method prove*(state: SaleProvingSimulated, slot: Slot, challenge: ProofChallenge, onProve: OnProve, market: Market, currentPeriod: Period) {.async.} =
+method prove*(
+state: SaleProvingSimulated,
+slot: Slot,
+challenge: ProofChallenge,
+onProve: OnProve,
+market: Market,
+currentPeriod: Period,
+) {.async.} =
trace "Processing proving in simulated mode"
state.proofCount += 1
-if state.failEveryNProofs > 0 and
-state.proofCount mod state.failEveryNProofs == 0:
+if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0:
state.proofCount = 0

try:
@@ -40,4 +45,6 @@ when codex_enable_proof_failures:
except CatchableError as e:
onSubmitProofError(e, currentPeriod, slot.id)
else:
-await procCall SaleProving(state).prove(slot, challenge, onProve, market, currentPeriod)
+await procCall SaleProving(state).prove(
+slot, challenge, onProve, market, currentPeriod
+)
@@ -12,13 +12,13 @@ import ./ignored
import ./downloading
import ./errored

-type
-SaleSlotReserving* = ref object of ErrorHandlingState
+type SaleSlotReserving* = ref object of ErrorHandlingState

logScope:
topics = "marketplace sales reserving"

-method `$`*(state: SaleSlotReserving): string = "SaleSlotReserving"
+method `$`*(state: SaleSlotReserving): string =
+"SaleSlotReserving"

method onCancelled*(state: SaleSlotReserving, request: StorageRequest): ?State =
return some State(SaleCancelled())
@@ -51,10 +51,8 @@ method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async.

trace "Slot successfully reserved"
return some State(SaleDownloading())

else:
# do not re-add this slot to the queue, and return bytes from Reservation to
# the Availability
debug "Slot cannot be reserved, ignoring"
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))

@@ -17,7 +17,8 @@ type
SaleUnknownError* = object of CatchableError
UnexpectedSlotError* = object of SaleUnknownError

-method `$`*(state: SaleUnknown): string = "SaleUnknown"
+method `$`*(state: SaleUnknown): string =
+"SaleUnknown"

method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State =
return some State(SaleCancelled())
@@ -38,8 +39,8 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} =

case slotState
of SlotState.Free:
-let error = newException(UnexpectedSlotError,
-"Slot state on chain should not be 'free'")
+let error =
+newException(UnexpectedSlotError, "Slot state on chain should not be 'free'")
return some State(SaleErrored(error: error))
of SlotState.Filled:
return some State(SaleFilled())
@@ -52,6 +53,7 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} =
of SlotState.Cancelled:
return some State(SaleCancelled())
of SlotState.Repair:
-let error = newException(SlotFreedError,
-"Slot was forcible freed and host was removed from its hosting")
+let error = newException(
+SlotFreedError, "Slot was forcible freed and host was removed from its hosting"
+)
return some State(SaleErrored(error: error))
@@ -5,5 +5,4 @@ import ../merkletree

export builder, converters

-type
-Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash]
+type Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash]
@@ -34,13 +34,13 @@ export converters, asynciter
logScope:
topics = "codex slotsbuilder"

-type
-SlotsBuilder*[T, H] = ref object of RootObj
+type SlotsBuilder*[T, H] = ref object of RootObj
store: BlockStore
manifest: Manifest # current manifest
strategy: IndexingStrategy # indexing strategy
cellSize: NBytes # cell size
-numSlotBlocks: Natural # number of blocks per slot (should yield a power of two number of cells)
+numSlotBlocks: Natural
+# number of blocks per slot (should yield a power of two number of cells)
slotRoots: seq[H] # roots of the slots
emptyBlock: seq[byte] # empty block
verifiableTree: ?T # verification tree (dataset tree)
@@ -133,9 +133,8 @@ func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest =
self.manifest

proc buildBlockTree*[T, H](
-self: SlotsBuilder[T, H],
-blkIdx: Natural,
-slotPos: Natural): Future[?!(seq[byte], T)] {.async.} =
+self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural
+): Future[?!(seq[byte], T)] {.async.} =
## Build the block digest tree and return a tuple with the
## block data and the tree.
##
@@ -160,16 +159,15 @@ proc buildBlockTree*[T, H](
if blk.isEmpty:
success (self.emptyBlock, self.emptyDigestTree)
else:
-without tree =?
-T.digestTree(blk.data, self.cellSize.int), err:
+without tree =? T.digestTree(blk.data, self.cellSize.int), err:
error "Failed to create digest for block", err = err.msg
return failure(err)

success (blk.data, tree)

proc getCellHashes*[T, H](
-self: SlotsBuilder[T, H],
-slotIndex: Natural): Future[?!seq[H]] {.async.} =
+self: SlotsBuilder[T, H], slotIndex: Natural
+): Future[?!seq[H]] {.async.} =
## Collect all the cells from a block and return
## their hashes.
##
@@ -192,8 +190,8 @@ proc getCellHashes*[T, H](
pos = i

trace "Getting block CID for tree at index"
-without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and
-digest =? tree.root, err:
+without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and digest =? tree.root,
+err:
error "Failed to get block CID for tree at index", err = err.msg
return failure(err)

@@ -203,8 +201,8 @@ proc getCellHashes*[T, H](
success hashes

proc buildSlotTree*[T, H](
-self: SlotsBuilder[T, H],
-slotIndex: Natural): Future[?!T] {.async.} =
+self: SlotsBuilder[T, H], slotIndex: Natural
+): Future[?!T] {.async.} =
## Build the slot tree from the block digest hashes
## and return the tree.

@@ -215,8 +213,8 @@ proc buildSlotTree*[T, H](
T.init(cellHashes)

proc buildSlot*[T, H](
-self: SlotsBuilder[T, H],
-slotIndex: Natural): Future[?!H] {.async.} =
+self: SlotsBuilder[T, H], slotIndex: Natural
+): Future[?!H] {.async.} =
## Build a slot tree and store the proofs in
## the block store.
##
@@ -238,13 +236,12 @@ proc buildSlot*[T, H](
error "Failed to get CID for slot cell", err = err.msg
return failure(err)

-without proof =? tree.getProof(i) and
-encodableProof =? proof.toEncodableProof, err:
+without proof =? tree.getProof(i) and encodableProof =? proof.toEncodableProof, err:
error "Failed to get proof for slot tree", err = err.msg
return failure(err)

-if err =? (await self.store.putCidAndProof(
-treeCid, i, cellCid, encodableProof)).errorOption:
+if err =?
+(await self.store.putCidAndProof(treeCid, i, cellCid, encodableProof)).errorOption:
error "Failed to store slot tree", err = err.msg
return failure(err)

@@ -293,24 +290,21 @@ proc buildManifest*[T, H](self: SlotsBuilder[T, H]): Future[?!Manifest] {.async.
return failure(err)

without rootProvingCidRes =? self.verifyRoot .? toVerifyCid() and
-rootProvingCid =? rootProvingCidRes, err: # TODO: why doesn't `.?` unpack the result?
+rootProvingCid =? rootProvingCidRes, err:
error "Failed to map slot roots to CIDs", err = err.msg
return failure(err)

Manifest.new(
-self.manifest,
-rootProvingCid,
-rootCids,
-self.cellSize,
-self.strategy.strategyType)
+self.manifest, rootProvingCid, rootCids, self.cellSize, self.strategy.strategyType
+)

proc new*[T, H](
_: type SlotsBuilder[T, H],
store: BlockStore,
manifest: Manifest,
strategy = SteppedStrategy,
-cellSize = DefaultCellSize): ?!SlotsBuilder[T, H] =
+cellSize = DefaultCellSize,
+): ?!SlotsBuilder[T, H] =
if not manifest.protected:
trace "Manifest is not protected."
return failure("Manifest is not protected.")
@@ -332,11 +326,14 @@ proc new*[T, H](
let
numSlotBlocks = manifest.numSlotBlocks
numBlockCells = (manifest.blockSize div cellSize).int # number of cells per block
-numSlotCells = manifest.numSlotBlocks * numBlockCells # number of uncorrected slot cells
+numSlotCells = manifest.numSlotBlocks * numBlockCells
+# number of uncorrected slot cells
pow2SlotCells = nextPowerOfTwo(numSlotCells) # pow2 cells per slot
-numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks # pow2 blocks per slot
+numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks
+# pow2 blocks per slot

-numSlotBlocksTotal = # pad blocks per slot
+numSlotBlocksTotal =
+# pad blocks per slot
if numPadSlotBlocks > 0:
numPadSlotBlocks + numSlotBlocks
else:
@@ -347,10 +344,7 @@ proc new*[T, H](
emptyBlock = newSeq[byte](manifest.blockSize.int)
emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int)

-strategy = ? strategy.init(
-0,
-numBlocksTotal - 1,
-manifest.numSlots).catch
+strategy = ?strategy.init(0, numBlocksTotal - 1, manifest.numSlots).catch

logScope:
numSlotBlocks = numSlotBlocks
@@ -364,19 +358,18 @@ proc new*[T, H](

trace "Creating slots builder"

-var
-self = SlotsBuilder[T, H](
+var self = SlotsBuilder[T, H](
store: store,
manifest: manifest,
strategy: strategy,
cellSize: cellSize,
emptyBlock: emptyBlock,
numSlotBlocks: numSlotBlocksTotal,
-emptyDigestTree: emptyDigestTree)
+emptyDigestTree: emptyDigestTree,
+)

if manifest.verifiable:
-if manifest.slotRoots.len == 0 or
-manifest.slotRoots.len != manifest.numSlots:
+if manifest.slotRoots.len == 0 or manifest.slotRoots.len != manifest.numSlots:
return failure "Manifest is verifiable but slot roots are missing or invalid."

let
@@ -27,12 +27,16 @@ func toCid(hash: Poseidon2Hash, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Cid
treeCid = ?Cid.init(CIDv1, cidCodec, mhash).mapFailure
success treeCid

-proc toPoseidon2Hash(cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Poseidon2Hash =
+proc toPoseidon2Hash(
+cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec
+): ?!Poseidon2Hash =
if cid.cidver != CIDv1:
return failure("Unexpected CID version")

if cid.mcodec != cidCodec:
-return failure("Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec)
+return failure(
+"Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec
+)

let
mhash = ?cid.mhash.mapFailure
@@ -62,27 +66,17 @@ func toVerifyCid*(hash: Poseidon2Hash): ?!Cid =
func fromVerifyCid*(cid: Cid): ?!Poseidon2Hash =
toPoseidon2Hash(cid, multiCodec("identity"), SlotProvingRootCodec)

-func toEncodableProof*(
-proof: Poseidon2Proof): ?!CodexProof =
-
-let
-encodableProof = CodexProof(
+func toEncodableProof*(proof: Poseidon2Proof): ?!CodexProof =
+let encodableProof = CodexProof(
mcodec: multiCodec("identity"),
index: proof.index,
nleaves: proof.nleaves,
-path: proof.path.mapIt( @( it.toBytes ) ))
+path: proof.path.mapIt(@(it.toBytes)),
+)

success encodableProof

-func toVerifiableProof*(
-proof: CodexProof): ?!Poseidon2Proof =
-
-let
-nodes = proof.path.mapIt(
-? Poseidon2Hash.fromBytes(it.toArray32).toFailure
-)
-
-Poseidon2Proof.init(
-index = proof.index,
-nleaves = proof.nleaves,
-nodes = nodes)
+func toVerifiableProof*(proof: CodexProof): ?!Poseidon2Proof =
+let nodes = proof.path.mapIt(?Poseidon2Hash.fromBytes(it.toArray32).toFailure)
+
+Poseidon2Proof.init(index = proof.index, nleaves = proof.nleaves, nodes = nodes)
@@ -11,9 +11,7 @@ import ../../conf
import ./backends
import ./backendutils

-proc initializeFromConfig(
-config: CodexConf,
-utils: BackendUtils): ?!AnyBackend =
+proc initializeFromConfig(config: CodexConf, utils: BackendUtils): ?!AnyBackend =
if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) or
not endsWith($config.circomR1cs, ".r1cs"):
return failure("Circom R1CS file not accessible")
@@ -27,10 +25,11 @@ proc initializeFromConfig(
return failure("Circom zkey file not accessible")

trace "Initialized prover backend from cli config"
-success(utils.initializeCircomBackend(
-$config.circomR1cs,
-$config.circomWasm,
-$config.circomZkey))
+success(
+utils.initializeCircomBackend(
+$config.circomR1cs, $config.circomWasm, $config.circomZkey
+)
+)

proc r1csFilePath(config: CodexConf): string =
config.circuitDir / "proof_main.r1cs"
@@ -42,42 +41,40 @@ proc zkeyFilePath(config: CodexConf): string =
config.circuitDir / "proof_main.zkey"

proc initializeFromCircuitDirFiles(
-config: CodexConf,
-utils: BackendUtils): ?!AnyBackend {.gcsafe.} =
+config: CodexConf, utils: BackendUtils
+): ?!AnyBackend {.gcsafe.} =
-if fileExists(config.r1csFilePath) and
-fileExists(config.wasmFilePath) and
+if fileExists(config.r1csFilePath) and fileExists(config.wasmFilePath) and
fileExists(config.zkeyFilePath):
trace "Initialized prover backend from local files"
-return success(utils.initializeCircomBackend(
-config.r1csFilePath,
-config.wasmFilePath,
-config.zkeyFilePath))
+return success(
+utils.initializeCircomBackend(
+config.r1csFilePath, config.wasmFilePath, config.zkeyFilePath
+)
+)

failure("Circuit files not found")

proc suggestDownloadTool(config: CodexConf) =
without address =? config.marketplaceAddress:
-raise (ref Defect)(msg: "Proving backend initializing while marketplace address not set.")
+raise (ref Defect)(
+msg: "Proving backend initializing while marketplace address not set."
+)

let
-tokens = [
-"cirdl",
-"\"" & $config.circuitDir & "\"",
-config.ethProvider,
-$address
-]
+tokens = ["cirdl", "\"" & $config.circuitDir & "\"", config.ethProvider, $address]
instructions = "'./" & tokens.join(" ") & "'"

-warn "Proving circuit files are not found. Please run the following to download them:", instructions
+warn "Proving circuit files are not found. Please run the following to download them:",
+instructions

proc initializeBackend*(
-config: CodexConf,
-utils: BackendUtils = BackendUtils()): ?!AnyBackend =
+config: CodexConf, utils: BackendUtils = BackendUtils()
+): ?!AnyBackend =

without backend =? initializeFromConfig(config, utils), cliErr:
info "Could not initialize prover backend from CLI options...", msg = cliErr.msg
without backend =? initializeFromCircuitDirFiles(config, utils), localErr:
-info "Could not initialize prover backend from circuit dir files...", msg = localErr.msg
+info "Could not initialize prover backend from circuit dir files...",
+msg = localErr.msg
suggestDownloadTool(config)
return failure("CircuitFilesNotFound")
# Unexpected: value of backend does not survive leaving each scope. (definition does though...)
@@ -2,5 +2,4 @@ import ./backends/circomcompat

export circomcompat

-type
-AnyBackend* = CircomCompat
+type AnyBackend* = CircomCompat
@@ -38,8 +38,9 @@ type

NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H]

-func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]):
-NormalizedProofInputs[H] =
+func normalizeInput*[H](
+self: CircomCompat, input: ProofInputs[H]
+): NormalizedProofInputs[H] =
## Parameters in CIRCOM circuits are statically sized and must be properly
## padded before they can be passed onto the circuit. This function takes
## variable length parameters and performs that padding.
@@ -52,10 +53,7 @@ func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]):
for sample in input.samples:
var merklePaths = sample.merklePaths
merklePaths.setLen(self.slotDepth)
-Sample[H](
-cellData: sample.cellData,
-merklePaths: merklePaths
-)
+Sample[H](cellData: sample.cellData, merklePaths: merklePaths)

var normSlotProof = input.slotProof
normSlotProof.setLen(self.datasetDepth)
@@ -68,7 +66,7 @@ func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]):
nCellsPerSlot: input.nCellsPerSlot,
nSlotsPerDataSet: input.nSlotsPerDataSet,
slotProof: normSlotProof,
-samples: normSamples
+samples: normSamples,
)

proc release*(self: CircomCompat) =
@@ -81,32 +79,28 @@ proc release*(self: CircomCompat) =
if not isNil(self.vkp):
self.vkp.unsafeAddr.release_key()

-proc prove[H](
-self: CircomCompat,
-input: NormalizedProofInputs[H]): ?!CircomProof =
-
-doAssert input.samples.len == self.numSamples,
-"Number of samples does not match"
+proc prove[H](self: CircomCompat, input: NormalizedProofInputs[H]): ?!CircomProof =
+doAssert input.samples.len == self.numSamples, "Number of samples does not match"

doAssert input.slotProof.len <= self.datasetDepth,
"Slot proof is too deep - dataset has more slots than what we can handle?"

doAssert input.samples.allIt(
block:
-(it.merklePaths.len <= self.slotDepth + self.blkDepth and
-it.cellData.len == self.cellElms)), "Merkle paths too deep or cells too big for circuit"
+(
+it.merklePaths.len <= self.slotDepth + self.blkDepth and
+it.cellData.len == self.cellElms
+)
+), "Merkle paths too deep or cells too big for circuit"

# TODO: All parameters should match circom's static parametter
-var
-ctx: ptr CircomCompatCtx
+var ctx: ptr CircomCompatCtx

defer:
if ctx != nil:
ctx.addr.release_circom_compat()

-if init_circom_compat(
-self.backendCfg,
-addr ctx) != ERR_OK or ctx == nil:
+if init_circom_compat(self.backendCfg, addr ctx) != ERR_OK or ctx == nil:
raiseAssert("failed to initialize CircomCompat ctx")

var
@@ -114,39 +108,37 @@ proc prove[H](
dataSetRoot = input.datasetRoot.toBytes
slotRoot = input.slotRoot.toBytes

-if ctx.push_input_u256_array(
-"entropy".cstring, entropy[0].addr, entropy.len.uint32) != ERR_OK:
+if ctx.push_input_u256_array("entropy".cstring, entropy[0].addr, entropy.len.uint32) !=
+ERR_OK:
return failure("Failed to push entropy")

if ctx.push_input_u256_array(
-"dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32) != ERR_OK:
+"dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32
+) != ERR_OK:
return failure("Failed to push data set root")

if ctx.push_input_u256_array(
-"slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32) != ERR_OK:
+"slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32
+) != ERR_OK:
return failure("Failed to push data set root")

-if ctx.push_input_u32(
-"nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK:
+if ctx.push_input_u32("nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK:
return failure("Failed to push nCellsPerSlot")

-if ctx.push_input_u32(
-"nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) != ERR_OK:
+if ctx.push_input_u32("nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) !=
+ERR_OK:
return failure("Failed to push nSlotsPerDataSet")

-if ctx.push_input_u32(
-"slotIndex".cstring, input.slotIndex.uint32) != ERR_OK:
+if ctx.push_input_u32("slotIndex".cstring, input.slotIndex.uint32) != ERR_OK:
return failure("Failed to push slotIndex")

-var
-slotProof = input.slotProof.mapIt( it.toBytes ).concat
+var slotProof = input.slotProof.mapIt(it.toBytes).concat

doAssert(slotProof.len == self.datasetDepth)
# arrays are always flattened
if ctx.push_input_u256_array(
-"slotProof".cstring,
-slotProof[0].addr,
-uint (slotProof[0].len * slotProof.len)) != ERR_OK:
+"slotProof".cstring, slotProof[0].addr, uint (slotProof[0].len * slotProof.len)
+) != ERR_OK:
return failure("Failed to push slot proof")

for s in input.samples:
@@ -157,23 +149,19 @@ proc prove[H](
if ctx.push_input_u256_array(
"merklePaths".cstring,
merklePaths[0].addr,
-uint (merklePaths[0].len * merklePaths.len)) != ERR_OK:
+uint (merklePaths[0].len * merklePaths.len),
+) != ERR_OK:
return failure("Failed to push merkle paths")

-if ctx.push_input_u256_array(
-"cellData".cstring,
-data[0].addr,
-data.len.uint) != ERR_OK:
+if ctx.push_input_u256_array("cellData".cstring, data[0].addr, data.len.uint) !=
+ERR_OK:
return failure("Failed to push cell data")

-var
-proofPtr: ptr Proof = nil
+var proofPtr: ptr Proof = nil

let proof =
try:
-if (
-let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr);
-res != ERR_OK) or
+if (let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr); res != ERR_OK) or
proofPtr == nil:
return failure("Failed to prove - err code: " & $res)

@@ -184,16 +172,12 @@ proc prove[H](

success proof

-proc prove*[H](
-self: CircomCompat,
-input: ProofInputs[H]): ?!CircomProof =
+proc prove*[H](self: CircomCompat, input: ProofInputs[H]): ?!CircomProof =

self.prove(self.normalizeInput(input))

proc verify*[H](
-self: CircomCompat,
-proof: CircomProof,
-inputs: ProofInputs[H]): ?!bool =
+self: CircomCompat, proof: CircomProof, inputs: ProofInputs[H]
+): ?!bool =
## Verify a proof using a ctx
##

@@ -221,25 +205,25 @@ proc init*(
datasetDepth = DefaultMaxDatasetDepth,
blkDepth = DefaultBlockDepth,
cellElms = DefaultCellElms,
-numSamples = DefaultSamplesNum): CircomCompat =
+numSamples = DefaultSamplesNum,
+): CircomCompat =
## Create a new ctx
##

var cfg: ptr CircomBn254Cfg
var zkey = if zkeyPath.len > 0: zkeyPath.cstring else: nil

-if init_circom_config(
-r1csPath.cstring,
-wasmPath.cstring,
-zkey, cfg.addr) != ERR_OK or cfg == nil:
-if cfg != nil: cfg.addr.release_cfg()
+if init_circom_config(r1csPath.cstring, wasmPath.cstring, zkey, cfg.addr) != ERR_OK or
+cfg == nil:
+if cfg != nil:
+cfg.addr.release_cfg()
raiseAssert("failed to initialize circom compat config")

-var
-vkpPtr: ptr VerifyingKey = nil
+var vkpPtr: ptr VerifyingKey = nil

if cfg.get_verifying_key(vkpPtr.addr) != ERR_OK or vkpPtr == nil:
-if vkpPtr != nil: vkpPtr.addr.release_key()
+if vkpPtr != nil:
+vkpPtr.addr.release_key()
raiseAssert("Failed to get verifying key")

CircomCompat(
@@ -252,4 +236,5 @@ proc init*(
cellElms: cellElms,
numSamples: numSamples,
backendCfg: cfg,
-vkp : vkpPtr)
+vkp: vkpPtr,
+)
@@ -29,18 +29,12 @@ proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs =
datasetRoot = inputs.datasetRoot.toBytes.toArray32
entropy = inputs.entropy.toBytes.toArray32

-elms = [
-entropy,
-datasetRoot,
-slotIndex
-]
+elms = [entropy, datasetRoot, slotIndex]

let inputsPtr = allocShared0(32 * elms.len)
copyMem(inputsPtr, addr elms[0], elms.len * 32)

-CircomInputs(
-elms: cast[ptr array[32, byte]](inputsPtr),
-len: elms.len.uint)
+CircomInputs(elms: cast[ptr array[32, byte]](inputsPtr), len: elms.len.uint)

proc releaseCircomInputs*(inputs: var CircomInputs) =
if not inputs.elms.isNil:
@@ -48,23 +42,13 @@ proc releaseCircomInputs*(inputs: var CircomInputs) =
inputs.elms = nil

func toG1*(g: CircomG1): G1Point =
-G1Point(
-x: UInt256.fromBytesLE(g.x),
-y: UInt256.fromBytesLE(g.y))
+G1Point(x: UInt256.fromBytesLE(g.x), y: UInt256.fromBytesLE(g.y))

func toG2*(g: CircomG2): G2Point =
G2Point(
-x: Fp2Element(
-real: UInt256.fromBytesLE(g.x[0]),
-imag: UInt256.fromBytesLE(g.x[1])
-),
-y: Fp2Element(
-real: UInt256.fromBytesLE(g.y[0]),
-imag: UInt256.fromBytesLE(g.y[1])
-))
+x: Fp2Element(real: UInt256.fromBytesLE(g.x[0]), imag: UInt256.fromBytesLE(g.x[1])),
+y: Fp2Element(real: UInt256.fromBytesLE(g.y[0]), imag: UInt256.fromBytesLE(g.y[1])),
+)

func toGroth16Proof*(proof: CircomProof): Groth16Proof =
-Groth16Proof(
-a: proof.a.toG1,
-b: proof.b.toG2,
-c: proof.c.toG1)
+Groth16Proof(a: proof.a.toG1, b: proof.b.toG2, c: proof.c.toG1)
@@ -1,12 +1,8 @@
import ./backends

-type
-BackendUtils* = ref object of RootObj
+type BackendUtils* = ref object of RootObj

method initializeCircomBackend*(
-self: BackendUtils,
-r1csFile: string,
-wasmFile: string,
-zKeyFile: string
+self: BackendUtils, r1csFile: string, wasmFile: string, zKeyFile: string
): AnyBackend {.base, gcsafe.} =
CircomCompat.init(r1csFile, wasmFile, zKeyFile)
@@ -47,10 +47,8 @@ type
nSamples: int

proc prove*(
-self: Prover,
-slotIdx: int,
-manifest: Manifest,
-challenge: ProofChallenge): Future[?!(AnyProofInputs, AnyProof)] {.async.} =
+self: Prover, slotIdx: int, manifest: Manifest, challenge: ProofChallenge
+): Future[?!(AnyProofInputs, AnyProof)] {.async.} =
## Prove a statement using backend.
## Returns a future that resolves to a proof.

@@ -81,20 +79,13 @@ proc prove*(
success (proofInput, proof)

proc verify*(
-self: Prover,
-proof: AnyProof,
-inputs: AnyProofInputs): Future[?!bool] {.async.} =
+self: Prover, proof: AnyProof, inputs: AnyProofInputs
+): Future[?!bool] {.async.} =
## Prove a statement using backend.
## Returns a future that resolves to a proof.
self.backend.verify(proof, inputs)

proc new*(
-_: type Prover,
-store: BlockStore,
-backend: AnyBackend,
-nSamples: int): Prover =
-
-Prover(
-store: store,
-backend: backend,
-nSamples: nSamples)
+_: type Prover, store: BlockStore, backend: AnyBackend, nSamples: int
+): Prover =
+Prover(store: store, backend: backend, nSamples: nSamples)
Some files were not shown because too many files have changed in this diff.