refactor!: remove unused modules

Removed modules and related code:
- sales (including reservations, slot queue, marketplace abstractions, state machines, etc.)
- purchasing
- erasure coding
- contract interactions
- prover
- slot builder
- block exchange payments (see the constructor sketch below)
- sales/purchasing endpoints from the REST API
- persistence command and all related config params from the CLI configuration
- CI workflows (devnet, dist tests, cirdl build, start eth node, contracts version reporting)
- unused modules from tests
- marketplace integration tests and the helpers that start provider/validator/hardhat nodes
- unused manifest properties
- integration tests that relied on the above
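
For illustration only (a minimal sketch, not part of the diff itself): with block exchange payments removed, the block exchange engine is wired up without a Nitro wallet, mirroring the codex.nim hunk further down. The surrounding variables are assumed to be constructed exactly as before.

```nim
# Sketch of the simplified wiring (the WalletRef argument is simply gone):
let
  engine = BlockExcEngine.new(
    repoStore, network, blockDiscovery, advertiser, peerStore, pendingBlocks
  )
  store = NetworkStore.new(engine, repoStore)
```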

# Conflicts:
#	.github/workflows/ci-reusable.yml
#	.github/workflows/docker.yml
#	build.nims
#	codex/blockexchange/engine/payments.nim
#	codex/codex.nim
#	codex/conf.nim
#	codex/contracts/Readme.md
#	codex/erasure.nim
#	codex/erasure/backend.nim
#	codex/erasure/backends/leopard.nim
#	codex/erasure/erasure.nim
#	codex/rest/api.nim
#	codex/sales.nim
#	codex/sales/reservations.nim
#	codex/sales/states/filled.nim
#	codex/sales/states/preparing.nim
#	codex/sales/states/provingsimulated.nim
#	codex/slots/builder/builder.nim
#	codex/slots/converters.nim
#	codex/slots/proofs/backends/circomcompat.nim
#	codex/slots/proofs/backends/converters.nim
#	codex/slots/proofs/prover.nim
#	codex/slots/sampler/sampler.nim
#	codex/slots/sampler/utils.nim
#	codex/slots/types.nim
#	tests/integration/5_minutes/testrestapivalidation.nim
#	tests/integration/hardhatprocess.nim
#	tests/integration/multinodes.nim
#	tools/cirdl/cirdl.nim
E M 2025-12-17 15:20:48 +11:00
parent 60861d6af8
commit 7fdb0b5942
202 changed files with 91 additions and 22108 deletions


@ -48,29 +48,7 @@ jobs:
if: matrix.tests == 'unittest' || matrix.tests == 'all'
run: make -j${ncpu} test
- name: Setup Node.js
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
uses: actions/setup-node@v4
with:
node-version: 22
- name: Start Ethereum node with Logos Storage contracts
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
working-directory: vendor/logos-storage-contracts-eth
env:
MSYS2_PATH_TYPE: inherit
run: |
npm ci
npm start &
# Wait for the contracts to be deployed
sleep 5
## Part 2 Tests ##
- name: Contract tests
if: matrix.tests == 'contract' || matrix.tests == 'all'
run: make -j${ncpu} testContracts
## Part 3 Tests ##
- name: Integration tests
if: matrix.tests == 'integration' || matrix.tests == 'all'
env:
@ -85,11 +63,6 @@ jobs:
path: tests/integration/logs/
retention-days: 1
## Part 4 Tools ##
- name: Tools tests
if: matrix.tests == 'tools' || matrix.tests == 'all'
run: make -j${ncpu} testTools
status:
if: always()
needs: [build]


@ -19,26 +19,10 @@ on:
workflow_dispatch:
jobs:
get-contracts-hash:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.get-hash.outputs.hash }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Get submodule short hash
id: get-hash
run: |
hash=$(git rev-parse --short HEAD:vendor/logos-storage-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push:
name: Build and Push
uses: ./.github/workflows/docker-reusable.yml
needs: get-contracts-hash
with:
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}"
secrets: inherit


@ -98,11 +98,6 @@ all: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims
# Build tools/cirdl
cirdl: | deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim toolsCirdl $(NIM_PARAMS) build.nims
# must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
@ -135,11 +130,6 @@ test: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim test $(NIM_PARAMS) build.nims
# Builds and runs the smart contract tests
testContracts: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
# Builds and runs the integration tests
testIntegration: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
@ -150,16 +140,6 @@ testAll: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) build.nims
# Builds and runs Taiko L2 tests
testTaiko: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testTaiko $(NIM_PARAMS) build.nims
# Builds and runs tool tests
testTools: | cirdl
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testTools $(NIM_PARAMS) build.nims
# nim-libbacktrace
LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0
libbacktrace:


@ -3,7 +3,7 @@ mode = ScriptMode.Verbose
import std/os except commandLineParams
### Helper functions
proc buildBinary(srcName: string, outName = os.lastPathPart(srcName), srcDir = "./", params = "", lang = "c") =
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
if not dirExists "build":
mkDir "build"
@ -18,9 +18,10 @@ proc buildBinary(srcName: string, outName = os.lastPathPart(srcName), srcDir =
let
# Place build output in 'build' folder, even if name includes a longer path.
outName = os.lastPathPart(name)
cmd =
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
srcName & ".nim"
name & ".nim"
exec(cmd)
@ -36,65 +37,50 @@ proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic")
)
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"--nimMainPrefix:libcodex -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"--nimMainPrefix:libcodex -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " &
"-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " &
params & " " & srcDir & name & ".nim"
proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, outName, srcDir, params
exec "build/" & outName
proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, srcDir, params
exec "build/" & name
task storage, "build logos storage binary":
task codex, "build codex binary":
buildBinary "codex",
outname = "storage",
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
task toolsCirdl, "build tools/cirdl binary":
buildBinary "tools/cirdl/cirdl"
task testStorage, "Build & run Logos Storage tests":
test "testCodex", outName = "testStorage", params = "-d:storage_enable_proof_failures=true"
task testContracts, "Build & run Logos Storage Contract tests":
test "testContracts"
test "testCodex", outName = "testStorage"
task testIntegration, "Run integration tests":
buildBinary "codex",
outName = "storage",
params =
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:storage_enable_proof_failures=true"
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
test "testIntegration"
# use params to enable logging from the integration test executable
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
# "-d:chronicles_enabled_topics:integration:TRACE"
task build, "build Logos Storage binary":
storageTask()
task build, "build codex binary":
codexTask()
task test, "Run tests":
testStorageTask()
task testTools, "Run Tools tests":
toolsCirdlTask()
test "testTools"
testCodexTask()
task testAll, "Run all tests (except for Taiko L2 tests)":
testStorageTask()
testContractsTask()
testIntegrationTask()
testToolsTask()
task testTaiko, "Run Taiko L2 tests":
storageTask()
test "testTaiko"
import strutils
import os
@ -126,7 +112,7 @@ task coverage, "generates code coverage report":
test "coverage",
srcDir = "tests/",
params =
" --nimcache:nimcache/coverage -d:release -d:storage_enable_proof_failures=true"
" --nimcache:nimcache/coverage -d:release"
exec("rm nimcache/coverage/*.c")
rmDir("coverage")
mkDir("coverage")
@ -147,22 +133,22 @@ task showCoverage, "open coverage html":
if findExe("open") != "":
exec("open coverage/report/index.html")
task libstorageDynamic, "Generate bindings":
task libcodexDynamic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
let name = "libcodex"
buildLibrary name, "library/", params, "dynamic"
task libstorageStatic, "Generate bindings":
task libcodexStatic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
let name = "libcodex"
buildLibrary name, "library/", params, "static"


@ -71,9 +71,6 @@ when isMainModule:
# permissions are insecure.
quit QuitFailure
if config.prover() and not (checkAndCreateDataDir((config.circuitDir).string)):
quit QuitFailure
trace "Data dir initialized", dir = $config.dataDir
if not (checkAndCreateDataDir((config.dataDir / "repo"))):


@ -1,6 +1,5 @@
import ./engine/discovery
import ./engine/advertiser
import ./engine/engine
import ./engine/payments
export discovery, advertiser, engine, payments
export discovery, advertiser, engine


@ -37,12 +37,11 @@ import ../protobuf/presence
import ../network
import ../peers
import ./payments
import ./discovery
import ./advertiser
import ./pendingblocks
export peers, pendingblocks, payments, discovery
export peers, pendingblocks, discovery
logScope:
topics = "codex blockexcengine"
@ -113,16 +112,10 @@ type
maxBlocksPerMessage: int
# Maximum number of blocks we can squeeze in a single message
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
wallet*: WalletRef # Nitro wallet for micropayments
pricing*: ?Pricing # Optional bandwidth pricing
discovery*: DiscoveryEngine
advertiser*: Advertiser
lastDiscRequest: Moment # time of last discovery request
Pricing* = object
address*: EthAddress
price*: UInt256
# attach task scheduler to engine
proc scheduleTask(self: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, raises: [].} =
if self.taskQueue.pushOrUpdateNoWait(task).isOk():
@ -644,17 +637,6 @@ proc resolveBlocks*(
)
)
proc payForBlocks(
self: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: [CancelledError]).} =
let
sendPayment = self.network.request.sendPayment
price = peer.price(blocksDelivery.mapIt(it.address))
if payment =? self.wallet.pay(peer, price):
trace "Sending payment for blocks", price, len = blocksDelivery.len
await sendPayment(peer.id, payment)
proc validateBlockDelivery(self: BlockExcEngine, bd: BlockDelivery): ?!void =
if bd.address notin self.pendingBlocks:
return failure("Received block is not currently a pending block")
@ -749,11 +731,6 @@ proc blocksDeliveryHandler*(
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
if peerCtx != nil:
if err =? catch(await self.payForBlocks(peerCtx, blocksDelivery)).errorOption:
warn "Error paying for blocks", err = err.msg
return
if err =? catch(await self.resolveBlocks(validatedBlocksDelivery)).errorOption:
warn "Error resolving blocks", err = err.msg
return
@ -790,7 +767,6 @@ proc wantListHandler*(
except CatchableError as exc:
# TODO: should not be necessary once we have proper exception tracking on the BlockStore interface
false
price = @(self.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if e.cancel:
# This is sort of expected if we sent the block to the peer, as we have removed
@ -806,7 +782,7 @@ proc wantListHandler*(
trace "We HAVE the block", address = e.address
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.Have, price: price
address: e.address, `type`: BlockPresenceType.Have
)
)
else:
@ -814,7 +790,7 @@ proc wantListHandler*(
if e.sendDontHave:
presence.add(
BlockPresence(
address: e.address, `type`: BlockPresenceType.DontHave, price: price
address: e.address, `type`: BlockPresenceType.DontHave
)
)
@ -856,30 +832,6 @@ proc wantListHandler*(
except CancelledError as exc: #TODO: replace with CancelledError
warn "Error processing want list", error = exc.msg
proc accountHandler*(
self: BlockExcEngine, peer: PeerId, account: Account
) {.async: (raises: []).} =
let context = self.peers.get(peer)
if context.isNil:
return
context.account = account.some
proc paymentHandler*(
self: BlockExcEngine, peer: PeerId, payment: SignedState
) {.async: (raises: []).} =
trace "Handling payments", peer
without context =? self.peers.get(peer).option and account =? context.account:
trace "No context or account for peer", peer
return
if channel =? context.paymentChannel:
let sender = account.address
discard self.wallet.acceptPayment(channel, Asset, sender, payment)
else:
context.paymentChannel = self.wallet.acceptChannel(payment).option
proc peerAddedHandler*(
self: BlockExcEngine, peer: PeerId
) {.async: (raises: [CancelledError]).} =
@ -896,10 +848,6 @@ proc peerAddedHandler*(
trace "Added peer", peers = self.peers.len
await self.refreshBlockKnowledge(peerCtx)
if address =? self.pricing .? address:
trace "Sending account to peer", peer
await self.network.request.sendAccount(peer, Account(address: address))
proc localLookup(
self: BlockExcEngine, address: BlockAddress
): Future[?!BlockDelivery] {.async: (raises: [CancelledError]).} =
@ -1023,7 +971,6 @@ proc selectRandom*(
proc new*(
T: type BlockExcEngine,
localStore: BlockStore,
wallet: WalletRef,
network: BlockExcNetwork,
discovery: DiscoveryEngine,
advertiser: Advertiser,
@ -1041,7 +988,6 @@ proc new*(
peers: peerStore,
pendingBlocks: pendingBlocks,
network: network,
wallet: wallet,
concurrentTasks: concurrentTasks,
trackedFutures: TrackedFutures(),
maxBlocksPerMessage: maxBlocksPerMessage,
@ -1066,16 +1012,6 @@ proc new*(
): Future[void] {.async: (raises: []).} =
self.blocksDeliveryHandler(peer, blocksDelivery)
proc accountHandler(
peer: PeerId, account: Account
): Future[void] {.async: (raises: []).} =
self.accountHandler(peer, account)
proc paymentHandler(
peer: PeerId, payment: SignedState
): Future[void] {.async: (raises: []).} =
self.paymentHandler(peer, payment)
proc peerAddedHandler(
peer: PeerId
): Future[void] {.async: (raises: [CancelledError]).} =
@ -1090,8 +1026,6 @@ proc new*(
onWantList: blockWantListHandler,
onBlocksDelivery: blocksDeliveryHandler,
onPresence: blockPresenceHandler,
onAccount: accountHandler,
onPayment: paymentHandler,
onPeerJoined: peerAddedHandler,
onPeerDeparted: peerDepartedHandler,
)


@ -1,46 +0,0 @@
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/math
import pkg/nitro
import pkg/questionable/results
import ../peers
export nitro
export results
const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals
func openLedgerChannel*(
wallet: WalletRef, hub: EthAddress, asset: EthAddress
): ?!ChannelId =
wallet.openLedgerChannel(hub, ChainId, asset, AmountPerChannel)
func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
if channel =? peer.paymentChannel:
success channel
elif account =? peer.account:
let channel = ?wallet.openLedgerChannel(account.address, Asset)
peer.paymentChannel = channel.some
success channel
else:
failure "no account set for peer"
func pay*(wallet: WalletRef, peer: BlockExcPeerCtx, amount: UInt256): ?!SignedState =
if account =? peer.account:
let asset = Asset
let receiver = account.address
let channel = ?wallet.getOrOpenChannel(peer)
wallet.pay(channel, asset, receiver, amount)
else:
failure "no account set for peer"


@ -20,12 +20,11 @@ import pkg/questionable/results
import ../../blocktype as bt
import ../../logutils
import ../protobuf/blockexc as pb
import ../protobuf/payments
import ../../utils/trackedfutures
import ./networkpeer
export networkpeer, payments
export networkpeer
logScope:
topics = "codex blockexcnetwork"
@ -40,16 +39,12 @@ type
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).}
PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).}
PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
BlockExcHandlers* = object
onWantList*: WantListHandler
onBlocksDelivery*: BlocksDeliveryHandler
onPresence*: BlockPresenceHandler
onAccount*: AccountHandler
onPayment*: PaymentHandler
onPeerJoined*: PeerEventHandler
onPeerDeparted*: PeerEventHandler
onPeerDropped*: PeerEventHandler
@ -72,18 +67,12 @@ type
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
async: (raises: [CancelledError])
.}
AccountSender* =
proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
PaymentSender* =
proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}
BlockExcRequest* = object
sendWantList*: WantListSender
sendWantCancellations*: WantCancellationSender
sendBlocksDelivery*: BlocksDeliverySender
sendPresence*: PresenceSender
sendAccount*: AccountSender
sendPayment*: PaymentSender
BlockExcNetwork* = ref object of LPProtocol
peers*: Table[PeerId, NetworkPeer]
@ -207,40 +196,6 @@ proc sendBlockPresence*(
b.send(id, Message(blockPresences: @presence))
proc handleAccount(
network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async: (raises: []).} =
## Handle account info
##
if not network.handlers.onAccount.isNil:
await network.handlers.onAccount(peer.id, account)
proc sendAccount*(
b: BlockExcNetwork, id: PeerId, account: Account
) {.async: (raw: true, raises: [CancelledError]).} =
## Send account info to remote
##
b.send(id, Message(account: AccountMessage.init(account)))
proc sendPayment*(
b: BlockExcNetwork, id: PeerId, payment: SignedState
) {.async: (raw: true, raises: [CancelledError]).} =
## Send payment to remote
##
b.send(id, Message(payment: StateChannelUpdate.init(payment)))
proc handlePayment(
network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async: (raises: []).} =
## Handle payment
##
if not network.handlers.onPayment.isNil:
await network.handlers.onPayment(peer.id, payment)
proc rpcHandler(
self: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: []).} =
@ -255,12 +210,6 @@ proc rpcHandler(
if msg.blockPresences.len > 0:
self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
if account =? Account.init(msg.account):
self.trackedFutures.track(self.handleAccount(peer, account))
if payment =? SignedState.init(msg.payment):
self.trackedFutures.track(self.handlePayment(peer, payment))
proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
## Creates or retrieves a BlockExcNetwork Peer
##
@ -413,23 +362,11 @@ proc new*(
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlockPresence(id, presence)
proc sendAccount(
id: PeerId, account: Account
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendAccount(id, account)
proc sendPayment(
id: PeerId, payment: SignedState
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendPayment(id, payment)
self.request = BlockExcRequest(
sendWantList: sendWantList,
sendWantCancellations: sendWantCancellations,
sendBlocksDelivery: sendBlocksDelivery,
sendPresence: sendPresence,
sendAccount: sendAccount,
sendPayment: sendPayment,
)
self.init()


@ -13,18 +13,14 @@ import std/sets
import pkg/libp2p
import pkg/chronos
import pkg/nitro
import pkg/questionable
import ../protobuf/blockexc
import ../protobuf/payments
import ../protobuf/presence
import ../../blocktype
import ../../logutils
export payments, nitro
const
MinRefreshInterval = 1.seconds
MaxRefreshBackoff = 36 # 36 seconds
@ -32,14 +28,12 @@ const
type BlockExcPeerCtx* = ref object of RootObj
id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
blocks*: Table[BlockAddress, Presence] # remote peer have list
wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
exchanged*: int # times peer has exchanged with us
refreshInProgress*: bool # indicates if a refresh is in progress
lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
refreshBackoff*: int = 1 # backoff factor for refresh requests
account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id
blocksSent*: HashSet[BlockAddress] # blocks sent to peer
blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
lastExchange*: Moment # last time peer has sent us a block
@ -105,14 +99,6 @@ func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) =
self.cleanPresence(@[address])
func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
var price = 0.u256
for a in addresses:
self.blocks.withValue(a, precense):
price += precense[].price
price
proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Adds a block the set of blocks that have been requested to this peer
## (its request schedule).


@ -17,7 +17,6 @@ import ../../blocktype
export Message, protobufEncode, protobufDecode
export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate
proc hash*(e: WantListEntry): Hash =
hash(e.address)


@ -51,10 +51,6 @@ type
BlockPresence* = object
address*: BlockAddress
`type`*: BlockPresenceType
price*: seq[byte] # Amount of assets to pay for the block (UInt256)
AccountMessage* = object
address*: seq[byte] # Ethereum address to which payments should be made
StateChannelUpdate* = object
update*: seq[byte] # Signed Nitro state, serialized as JSON
@ -64,8 +60,6 @@ type
payload*: seq[BlockDelivery]
blockPresences*: seq[BlockPresence]
pendingBytes*: uint
account*: AccountMessage
payment*: StateChannelUpdate
#
# Encoding Message into seq[byte] in Protobuf format
@ -115,19 +109,6 @@ proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) =
var ipb = initProtoBuffer()
ipb.write(1, value.address)
ipb.write(2, value.`type`.uint)
ipb.write(3, value.price)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: AccountMessage) =
var ipb = initProtoBuffer()
ipb.write(1, value.address)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
var ipb = initProtoBuffer()
ipb.write(1, value.update)
ipb.finish()
pb.write(field, ipb)
@ -135,12 +116,10 @@ proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer()
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v)
ipb.write(3, v) # is this meant to be 2?
for v in value.blockPresences:
ipb.write(4, v)
ipb.write(5, value.pendingBytes)
ipb.write(6, value.account)
ipb.write(7, value.payment)
ipb.finish()
ipb.buffer
@ -240,19 +219,6 @@ proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence]
value.address = ?BlockAddress.decode(ipb)
if ?pb.getField(2, field):
value.`type` = BlockPresenceType(field)
discard ?pb.getField(3, value.price)
ok(value)
proc decode*(_: type AccountMessage, pb: ProtoBuffer): ProtoResult[AccountMessage] =
var value = AccountMessage()
discard ?pb.getField(1, value.address)
ok(value)
proc decode*(
_: type StateChannelUpdate, pb: ProtoBuffer
): ProtoResult[StateChannelUpdate] =
var value = StateChannelUpdate()
discard ?pb.getField(1, value.update)
ok(value)
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
@ -263,15 +229,11 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
sublist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist):
if ?pb.getRepeatedField(3, sublist): # meant to be 2?
for item in sublist:
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
if ?pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
discard ?pb.getField(5, value.pendingBytes)
if ?pb.getField(6, ipb):
value.account = ?AccountMessage.decode(ipb)
if ?pb.getField(7, ipb):
value.payment = ?StateChannelUpdate.decode(ipb)
ok(value)


@ -38,21 +38,10 @@ message Message {
message BlockPresence {
bytes cid = 1;
BlockPresenceType type = 2;
bytes price = 3; // Amount of assets to pay for the block (UInt256)
}
message AccountMessage {
bytes address = 1; // Ethereum address to which payments should be made
}
message StateChannelUpdate {
bytes update = 1; // Signed Nitro state, serialized as JSON
}
Wantlist wantlist = 1;
repeated Block payload = 3;
repeated Block payload = 3; // what happened to 2?
repeated BlockPresence blockPresences = 4;
int32 pendingBytes = 5;
AccountMessage account = 6;
StateChannelUpdate payment = 7;
}


@ -1,38 +0,0 @@
{.push raises: [].}
import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import ./blockexc
export AccountMessage
export StateChannelUpdate
export stint
export nitro
type Account* = object
address*: EthAddress
func init*(_: type AccountMessage, account: Account): AccountMessage =
AccountMessage(address: @(account.address.toArray))
func parse(_: type EthAddress, bytes: seq[byte]): ?EthAddress =
var address: array[20, byte]
if bytes.len != address.len:
return EthAddress.none
for i in 0 ..< address.len:
address[i] = bytes[i]
EthAddress(address).some
func init*(_: type Account, message: AccountMessage): ?Account =
without address =? EthAddress.parse(message.address):
return none Account
some Account(address: address)
func init*(_: type StateChannelUpdate, state: SignedState): StateChannelUpdate =
StateChannelUpdate(update: state.toJson.toBytes)
proc init*(_: type SignedState, update: StateChannelUpdate): ?SignedState =
SignedState.fromJson(string.fromBytes(update.update))


@ -17,7 +17,6 @@ type
Presence* = object
address*: BlockAddress
have*: bool
price*: UInt256
func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
if bytes.len > 32:
@ -25,18 +24,14 @@ func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
UInt256.fromBytesBE(bytes).some
func init*(_: type Presence, message: PresenceMessage): ?Presence =
without price =? UInt256.parse(message.price):
return none Presence
some Presence(
address: message.address,
have: message.`type` == BlockPresenceType.Have,
price: price,
)
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
PresenceMessage(
address: presence.address,
`type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave,
price: @(presence.price.toBytesBE),
)


@ -20,10 +20,8 @@ import pkg/presto
import pkg/libp2p
import pkg/confutils
import pkg/confutils/defs
import pkg/nitro
import pkg/stew/io2
import pkg/datastore
import pkg/ethers except Rng
import pkg/stew/io2
import ./node
@ -31,15 +29,10 @@ import ./conf
import ./rng as random
import ./rest/api
import ./stores
import ./slots
import ./blockexchange
import ./utils/fileutils
import ./erasure
import ./discovery
import ./contracts
import ./systemclock
import ./contracts/clock
import ./contracts/deployment
import ./utils/addrutils
import ./namespaces
import ./codextypes
@ -60,7 +53,6 @@ type
isStarted: bool
CodexPrivateKey* = libp2p.PrivateKey # alias
EthWallet = ethers.Wallet
func config*(self: CodexServer): CodexConf =
return self.config
@ -71,103 +63,6 @@ func node*(self: CodexServer): CodexNodeRef =
func repoStore*(self: CodexServer): RepoStore =
return self.repoStore
proc waitForSync(provider: Provider): Future[void] {.async.} =
var sleepTime = 1
trace "Checking sync state of Ethereum provider..."
while await provider.isSyncing:
notice "Waiting for Ethereum provider to sync..."
await sleepAsync(sleepTime.seconds)
if sleepTime < 10:
inc sleepTime
trace "Ethereum provider is synced."
proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
## bootstrap interactions and return contracts
## using clients, hosts, validators pairings
##
let
config = s.config
repo = s.repoStore
if config.persistence:
if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
error "Persistence enabled, but no Ethereum account was set"
quit QuitFailure
let provider = JsonRpcProvider.new(
config.ethProvider, maxPriorityFeePerGas = config.maxPriorityFeePerGas.u256
)
await waitForSync(provider)
var signer: Signer
if account =? config.ethAccount:
signer = provider.getSigner(account)
elif keyFile =? config.ethPrivateKey:
without isSecure =? checkSecureFile(keyFile):
error "Could not check file permissions: does Ethereum private key file exist?"
quit QuitFailure
if not isSecure:
error "Ethereum private key file does not have safe file permissions"
quit QuitFailure
without key =? keyFile.readAllChars():
error "Unable to read Ethereum private key file"
quit QuitFailure
without wallet =? EthWallet.new(key.strip(), provider):
error "Invalid Ethereum private key in file"
quit QuitFailure
signer = wallet
let deploy = Deployment.new(provider, config.marketplaceAddress)
without marketplaceAddress =? await deploy.address(Marketplace):
error "No Marketplace address was specified or there is no known address for the current network"
quit QuitFailure
let marketplace = Marketplace.new(marketplaceAddress, signer)
let market = OnChainMarket.new(
marketplace, config.rewardRecipient, config.marketplaceRequestCacheSize
)
let clock = OnChainClock.new(provider)
var client: ?ClientInteractions
var host: ?HostInteractions
var validator: ?ValidatorInteractions
if config.validator or config.persistence:
s.codexNode.clock = clock
else:
s.codexNode.clock = SystemClock()
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
# and hence the proof failure will always be 0.
when storage_enable_proof_failures:
let proofFailures = config.simulateProofFailures
if proofFailures > 0:
warn "Enabling proof failure simulation!"
else:
let proofFailures = 0
if config.simulateProofFailures > 0:
warn "Proof failure simulation is not enabled for this build! Configuration ignored"
if error =? (await market.loadConfig()).errorOption:
fatal "Cannot load market configuration", error = error.msg
quit QuitFailure
let purchasing = Purchasing.new(market, clock)
let sales = Sales.new(market, clock, repo, proofFailures)
client = some ClientInteractions.new(clock, purchasing)
host = some HostInteractions.new(clock, sales)
if config.validator:
without validationConfig =?
ValidationConfig.init(
config.validatorMaxSlots, config.validatorGroups, config.validatorGroupIndex
), err:
error "Invalid validation parameters", err = err.msg
quit QuitFailure
let validation = Validation.new(clock, market, validationConfig)
validator = some ValidatorInteractions.new(clock, validation)
s.codexNode.contracts = (client, host, validator)
proc start*(s: CodexServer) {.async.} =
if s.isStarted:
warn "Storage server already started, skipping"
@ -187,7 +82,6 @@ proc start*(s: CodexServer) {.async.} =
s.codexNode.discovery.updateAnnounceRecord(announceAddrs)
s.codexNode.discovery.updateDhtRecord(discoveryAddrs)
await s.bootstrapInteractions()
await s.codexNode.start()
if s.restServer != nil:
@ -297,7 +191,6 @@ proc new*(
store = discoveryStore,
)
wallet = WalletRef.new(EthPrivateKey.random())
network = BlockExcNetwork.new(switch)
repoData =
@ -342,23 +235,15 @@ proc new*(
blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
engine = BlockExcEngine.new(
repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks
repoStore, network, blockDiscovery, advertiser, peerStore, pendingBlocks
)
store = NetworkStore.new(engine, repoStore)
prover =
if config.prover:
let backend =
config.initializeBackend().expect("Unable to create prover backend.")
some Prover.new(store, backend, config.numProofSamples)
else:
none Prover
codexNode = CodexNodeRef.new(
switch = switch,
networkStore = store,
engine = engine,
discovery = discovery,
prover = prover,
taskPool = taskpool,
)


@ -31,7 +31,6 @@ import pkg/metrics
import pkg/metrics/chronos_httpserver
import pkg/stew/byteutils
import pkg/libp2p
import pkg/ethers
import pkg/questionable
import pkg/questionable/results
import pkg/stew/base64
@ -45,16 +44,13 @@ import ./utils
import ./nat
import ./utils/natutils
from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
from ./validationconfig import MaxSlots, ValidationGroups
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries
export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots
export
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas, DefaultBlockRetries
DefaultBlockRetries
type ThreadCount* = distinct Natural
@ -73,7 +69,6 @@ proc defaultDataDir*(): string =
const
storage_enable_api_debug_peers* {.booldefine.} = false
storage_enable_proof_failures* {.booldefine.} = false
storage_enable_log_counter* {.booldefine.} = false
DefaultThreadCount* = ThreadCount(0)
@ -83,10 +78,6 @@ type
noCmd
persistence
PersistenceCmd* {.pure.} = enum
noCmd
prover
LogKind* {.pure.} = enum
Auto = "auto"
Colors = "colors"
@ -286,204 +277,12 @@ type
desc: "Logs to file", defaultValue: string.none, name: "log-file", hidden
.}: Option[string]
case cmd* {.defaultValue: noCmd, command.}: StartUpCmd
of persistence:
ethProvider* {.
desc: "The URL of the JSON-RPC API of the Ethereum node",
defaultValue: "ws://localhost:8545",
name: "eth-provider"
.}: string
ethAccount* {.
desc: "The Ethereum account that is used for storage contracts",
defaultValue: EthAddress.none,
defaultValueDesc: "",
name: "eth-account"
.}: Option[EthAddress]
ethPrivateKey* {.
desc: "File containing Ethereum private key for storage contracts",
defaultValue: string.none,
defaultValueDesc: "",
name: "eth-private-key"
.}: Option[string]
marketplaceAddress* {.
desc: "Address of deployed Marketplace contract",
defaultValue: EthAddress.none,
defaultValueDesc: "",
name: "marketplace-address"
.}: Option[EthAddress]
# TODO: should go behind a feature flag
simulateProofFailures* {.
desc: "Simulates proof failures once every N proofs. 0 = disabled.",
defaultValue: 0,
name: "simulate-proof-failures",
hidden
.}: int
validator* {.
desc: "Enables validator, requires an Ethereum node",
defaultValue: false,
name: "validator"
.}: bool
validatorMaxSlots* {.
desc: "Maximum number of slots that the validator monitors",
longDesc:
"If set to 0, the validator will not limit " &
"the maximum number of slots it monitors",
defaultValue: 1000,
name: "validator-max-slots"
.}: MaxSlots
validatorGroups* {.
desc: "Slot validation groups",
longDesc:
"A number indicating total number of groups into " &
"which the whole slot id space will be divided. " &
"The value must be in the range [2, 65535]. " &
"If not provided, the validator will observe " &
"the whole slot id space and the value of " &
"the --validator-group-index parameter will be ignored. " &
"Powers of twos are advised for even distribution",
defaultValue: ValidationGroups.none,
name: "validator-groups"
.}: Option[ValidationGroups]
validatorGroupIndex* {.
desc: "Slot validation group index",
longDesc:
"The value provided must be in the range " &
"[0, validatorGroups). Ignored when --validator-groups " &
"is not provided. Only slot ids satisfying condition " &
"[(slotId mod validationGroups) == groupIndex] will be " &
"observed by the validator",
defaultValue: 0,
name: "validator-group-index"
.}: uint16
rewardRecipient* {.
desc: "Address to send payouts to (eg rewards and refunds)",
name: "reward-recipient"
.}: Option[EthAddress]
marketplaceRequestCacheSize* {.
desc:
"Maximum number of StorageRequests kept in memory." &
"Reduces fetching of StorageRequest data from the contract.",
defaultValue: DefaultRequestCacheSize,
defaultValueDesc: $DefaultRequestCacheSize,
name: "request-cache-size",
hidden
.}: uint16
maxPriorityFeePerGas* {.
desc:
"Sets the default maximum priority fee per gas for Ethereum EIP-1559 transactions, in wei, when not provided by the network.",
defaultValue: DefaultMaxPriorityFeePerGas,
defaultValueDesc: $DefaultMaxPriorityFeePerGas,
name: "max-priority-fee-per-gas",
hidden
.}: uint64
case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
of PersistenceCmd.prover:
circuitDir* {.
desc: "Directory where Storage will store proof circuit data",
defaultValue: defaultDataDir() / "circuits",
defaultValueDesc: "data/circuits",
abbr: "cd",
name: "circuit-dir"
.}: OutDir
circomR1cs* {.
desc: "The r1cs file for the storage circuit",
defaultValue: defaultDataDir() / "circuits" / "proof_main.r1cs",
defaultValueDesc: "data/circuits/proof_main.r1cs",
name: "circom-r1cs"
.}: InputFile
circomWasm* {.
desc: "The wasm file for the storage circuit",
defaultValue: defaultDataDir() / "circuits" / "proof_main.wasm",
defaultValueDesc: "data/circuits/proof_main.wasm",
name: "circom-wasm"
.}: InputFile
circomZkey* {.
desc: "The zkey file for the storage circuit",
defaultValue: defaultDataDir() / "circuits" / "proof_main.zkey",
defaultValueDesc: "data/circuits/proof_main.zkey",
name: "circom-zkey"
.}: InputFile
# TODO: should probably be hidden and behind a feature flag
circomNoZkey* {.
desc: "Ignore the zkey file - use only for testing!",
defaultValue: false,
name: "circom-no-zkey"
.}: bool
numProofSamples* {.
desc: "Number of samples to prove",
defaultValue: DefaultSamplesNum,
defaultValueDesc: $DefaultSamplesNum,
name: "proof-samples"
.}: int
maxSlotDepth* {.
desc: "The maximum depth of the slot tree",
defaultValue: DefaultMaxSlotDepth,
defaultValueDesc: $DefaultMaxSlotDepth,
name: "max-slot-depth"
.}: int
maxDatasetDepth* {.
desc: "The maximum depth of the dataset tree",
defaultValue: DefaultMaxDatasetDepth,
defaultValueDesc: $DefaultMaxDatasetDepth,
name: "max-dataset-depth"
.}: int
maxBlockDepth* {.
desc: "The maximum depth of the network block merkle tree",
defaultValue: DefaultBlockDepth,
defaultValueDesc: $DefaultBlockDepth,
name: "max-block-depth"
.}: int
maxCellElms* {.
desc: "The maximum number of elements in a cell",
defaultValue: DefaultCellElms,
defaultValueDesc: $DefaultCellElms,
name: "max-cell-elements"
.}: int
of PersistenceCmd.noCmd:
discard
of StartUpCmd.noCmd:
discard # end of persistence
EthAddress* = ethers.Address
logutils.formatIt(LogFormat.textLines, EthAddress):
it.short0xHexLog
logutils.formatIt(LogFormat.json, EthAddress):
%it
func defaultAddress*(conf: CodexConf): IpAddress =
result = static parseIpAddress("127.0.0.1")
func defaultNatConfig*(): NatConfig =
result = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
func persistence*(self: CodexConf): bool =
self.cmd == StartUpCmd.persistence
func prover*(self: CodexConf): bool =
self.persistence and self.persistenceCmd == PersistenceCmd.prover
proc getCodexVersion(): string =
let tag = strip(staticExec("git describe --tags --abbrev=0"))
if tag.isEmptyOrWhitespace:
@ -495,23 +294,16 @@ proc getCodexRevision(): string =
var res = strip(staticExec("git rev-parse --short HEAD"))
return res
proc getCodexContractsRevision(): string =
let res =
strip(staticExec("git rev-parse --short HEAD:vendor/logos-storage-contracts-eth"))
return res
proc getNimBanner(): string =
staticExec("nim --version | grep Version")
const
codexVersion* = getCodexVersion()
codexRevision* = getCodexRevision()
codexContractsRevision* = getCodexContractsRevision()
nimBanner* = getNimBanner()
codexFullVersion* =
"Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision &
"\p" & "Storage contracts revision: " & codexContractsRevision & "\p" & nimBanner
"Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision & "\p"
proc parseCmdArg*(
T: typedesc[MultiAddress], input: string
@ -593,9 +385,6 @@ proc parseCmdArg*(T: type NatConfig, p: string): T =
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
return @[]
proc parseCmdArg*(T: type EthAddress, address: string): T =
EthAddress.init($address).get()
func parse*(T: type NBytes, p: string): Result[NBytes, string] =
var num = 0'i64
let count = parseSize(p, num, alwaysBin = true)
@ -618,11 +407,6 @@ proc parseCmdArg*(T: type Duration, val: string): T =
quit QuitFailure
dur
proc readValue*(
r: var TomlReader, val: var EthAddress
) {.raises: [SerializationError, IOError].} =
val = EthAddress.init(r.readValue(string)).get()
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
without uri =? r.readValue(string).catch, err:
error "invalid SignedPeerRecord configuration value", error = err.msg
@ -687,9 +471,6 @@ proc readValue*(
raise newException(SerializationError, err.msg)
# no idea why confutils needs this:
proc completeCmdArg*(T: type EthAddress, val: string): seq[string] =
discard
proc completeCmdArg*(T: type NBytes, val: string): seq[string] =
discard


@ -1,11 +0,0 @@
import contracts/requests
import contracts/marketplace
import contracts/market
import contracts/interactions
import contracts/provider
export requests
export marketplace
export market
export interactions
export provider


@ -1,148 +0,0 @@
Logos Storage Contracts in Nim
=======================
Nim API for the [Logos Storage smart contracts][1].
Usage
-----
For a global overview of the steps involved in starting and fulfilling a
storage contract, see [Logos Storage Contracts][1].
Smart contract
--------------
Connecting to the smart contract on an Ethereum node:
```nim
import codex/contracts
import ethers
let address = # fill in address where the contract was deployed
let provider = JsonRpcProvider.new("ws://localhost:8545")
let marketplace = Marketplace.new(address, provider)
```
Setup client and host so that they can sign transactions; here we use the first
two accounts on the Ethereum node:
```nim
let accounts = await provider.listAccounts()
let client = provider.getSigner(accounts[0])
let host = provider.getSigner(accounts[1])
```
Storage requests
----------------
Creating a request for storage:
```nim
let request : StorageRequest = (
client: # address of the client requesting storage
duration: # duration of the contract in seconds
size: # size in bytes
contentHash: # SHA256 hash of the content that's going to be stored
proofProbability: # require a storage proof roughly once every N periods
maxPrice: # maximum price the client is willing to pay
expiry: # expiration time of the request (in unix time)
nonce: # random nonce to differentiate between similar requests
)
```
When a client wants to submit this request to the network, it needs to pay the
maximum price to the smart contract in advance. The difference between the
maximum price and the offered price will be reimbursed later.
Once the payment has been prepared, the client can submit the request to the
network:
```nim
await storage
.connect(client)
.requestStorage(request)
```
Storage offers
--------------
Creating a storage offer:
```nim
let offer: StorageOffer = (
host: # address of the host that is offering storage
requestId: request.id,
price: # offered price (in number of tokens)
expiry: # expiration time of the offer (in unix time)
)
```
Hosts submits an offer:
```nim
await storage
.connect(host)
.offerStorage(offer)
```
Client selects an offer:
```nim
await storage
.connect(client)
.selectOffer(offer.id)
```
Starting and finishing a storage contract
-----------------------------------------
The host whose offer got selected can start the storage contract once it
received the data that needs to be stored:
```nim
await storage
.connect(host)
.startContract(offer.id)
```
Once the storage contract is finished, the host can release payment:
```nim
await storage
.connect(host)
.finishContract(id)
```
Storage proofs
--------------
Time is divided into periods, and each period a storage proof may be required
from the host. The odds of requiring a storage proof are negotiated through the
storage request. For more details about the timing of storage proofs, please
refer to the [design document][2].
At the start of each period of time, the host can check whether a storage proof
is required:
```nim
let isProofRequired = await storage.isProofRequired(offer.id)
```
If a proof is required, the host can submit it before the end of the period:
```nim
await storage
.connect(host)
.submitProof(id, proof)
```
If a proof is not submitted, then a validator can mark a proof as missing:
```nim
await storage
.connect(validator)
.markProofAsMissing(id, period)
```
[1]: https://github.com/logos-storage/logos-storage-contracts-eth/
[2]: https://github.com/logos-storage/logos-storage-research/blob/master/design/storage-proof-timing.md


@ -1,82 +0,0 @@
{.push raises: [].}
import std/times
import pkg/ethers
import pkg/questionable
import pkg/chronos
import pkg/stint
import ../clock
import ../conf
import ../utils/trackedfutures
export clock
logScope:
topics = "contracts clock"
type OnChainClock* = ref object of Clock
provider: Provider
subscription: Subscription
offset: times.Duration
blockNumber: UInt256
started: bool
newBlock: AsyncEvent
trackedFutures: TrackedFutures
proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
OnChainClock(
provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
)
proc update(clock: OnChainClock, blck: Block) =
if number =? blck.number and number > clock.blockNumber:
let blockTime = initTime(blck.timestamp.truncate(int64), 0)
let computerTime = getTime()
clock.offset = blockTime - computerTime
clock.blockNumber = number
trace "updated clock",
blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
clock.newBlock.fire()
proc update(clock: OnChainClock) {.async: (raises: []).} =
try:
if latest =? (await clock.provider.getBlock(BlockTag.latest)):
clock.update(latest)
except CatchableError as error:
debug "error updating clock: ", error = error.msg
method start*(clock: OnChainClock) {.async.} =
if clock.started:
return
proc onBlock(blckResult: ?!Block) =
if eventError =? blckResult.errorOption:
error "There was an error in block subscription", msg = eventError.msg
return
# ignore block parameter; hardhat may call this with pending blocks
clock.trackedFutures.track(clock.update())
await clock.update()
clock.subscription = await clock.provider.subscribe(onBlock)
clock.started = true
method stop*(clock: OnChainClock) {.async.} =
if not clock.started:
return
await clock.subscription.unsubscribe()
await clock.trackedFutures.cancelTracked()
clock.started = false
method now*(clock: OnChainClock): SecondsSince1970 =
doAssert clock.started, "clock should be started before calling now()"
return toUnix(getTime() + clock.offset)
method waitUntil*(
clock: OnChainClock, time: SecondsSince1970
) {.async: (raises: [CancelledError]).} =
while (let difference = time - clock.now(); difference > 0):
clock.newBlock.clear()
discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))


@ -1,104 +0,0 @@
import pkg/contractabi
import pkg/ethers/contracts/fields
import pkg/questionable/results
export contractabi
const DefaultRequestCacheSize* = 128.uint16
const DefaultMaxPriorityFeePerGas* = 1_000_000_000.uint64
type
MarketplaceConfig* = object
collateral*: CollateralConfig
proofs*: ProofConfig
reservations*: SlotReservationsConfig
requestDurationLimit*: uint64
CollateralConfig* = object
repairRewardPercentage*: uint8
# percentage of remaining collateral slot has after it has been freed
maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
slashPercentage*: uint8 # percentage of the collateral that is slashed
validatorRewardPercentage*: uint8
# percentage of the slashed amount going to the validators
ProofConfig* = object
period*: uint64 # proofs requirements are calculated per period (in seconds)
timeout*: uint64 # mark proofs as missing before the timeout (in seconds)
downtime*: uint8 # ignore this much recent blocks for proof requirements
downtimeProduct*: uint8
zkeyHash*: string # hash of the zkey file which is linked to the verifier
# Ensures the pointer does not remain in downtime for many consecutive
# periods. For each period increase, move the pointer `pointerProduct`
# blocks. Should be a prime number to ensure there are no cycles.
SlotReservationsConfig* = object
maxReservations*: uint8
func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
ProofConfig(
period: tupl[0],
timeout: tupl[1],
downtime: tupl[2],
downtimeProduct: tupl[3],
zkeyHash: tupl[4],
)
func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig =
SlotReservationsConfig(maxReservations: tupl[0])
func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
CollateralConfig(
repairRewardPercentage: tupl[0],
maxNumberOfSlashes: tupl[1],
slashPercentage: tupl[2],
validatorRewardPercentage: tupl[3],
)
func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
MarketplaceConfig(
collateral: tupl[0],
proofs: tupl[1],
reservations: tupl[2],
requestDurationLimit: tupl[3],
)
func solidityType*(_: type SlotReservationsConfig): string =
solidityType(SlotReservationsConfig.fieldTypes)
func solidityType*(_: type ProofConfig): string =
solidityType(ProofConfig.fieldTypes)
func solidityType*(_: type CollateralConfig): string =
solidityType(CollateralConfig.fieldTypes)
func solidityType*(_: type MarketplaceConfig): string =
solidityType(MarketplaceConfig.fieldTypes)
func encode*(encoder: var AbiEncoder, slot: SlotReservationsConfig) =
encoder.write(slot.fieldValues)
func encode*(encoder: var AbiEncoder, slot: ProofConfig) =
encoder.write(slot.fieldValues)
func encode*(encoder: var AbiEncoder, slot: CollateralConfig) =
encoder.write(slot.fieldValues)
func encode*(encoder: var AbiEncoder, slot: MarketplaceConfig) =
encoder.write(slot.fieldValues)
func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T =
let tupl = ?decoder.read(ProofConfig.fieldTypes)
success ProofConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type SlotReservationsConfig): ?!T =
let tupl = ?decoder.read(SlotReservationsConfig.fieldTypes)
success SlotReservationsConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T =
let tupl = ?decoder.read(CollateralConfig.fieldTypes)
success CollateralConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type MarketplaceConfig): ?!T =
let tupl = ?decoder.read(MarketplaceConfig.fieldTypes)
success MarketplaceConfig.fromTuple(tupl)


@ -1,51 +0,0 @@
import std/os
import std/tables
import pkg/ethers
import pkg/questionable
import ../conf
import ../logutils
import ./marketplace
type Deployment* = ref object
provider: Provider
marketplaceAddressOverride: ?Address
const knownAddresses = {
# Hardhat localhost network
"31337":
{"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44")}.toTable,
# Taiko Alpha-3 Testnet
"167005":
{"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
# Codex Testnet - Jun 19 2025 13:11:56 PM (+00:00 UTC)
"789987":
{"Marketplace": Address.init("0x5378a4EA5dA2a548ce22630A3AE74b052000C62D")}.toTable,
# Linea (Status)
"1660990954":
{"Marketplace": Address.init("0x34F606C65869277f236ce07aBe9af0B8c88F486B")}.toTable,
}.toTable
proc getKnownAddress(T: type, chainId: UInt256): ?Address =
let id = chainId.toString(10)
notice "Looking for well-known contract address with ChainID ", chainId = id
if not (id in knownAddresses):
return none Address
return knownAddresses[id].getOrDefault($T, Address.none)
proc new*(
_: type Deployment,
provider: Provider,
marketplaceAddressOverride: ?Address = none Address,
): Deployment =
Deployment(provider: provider, marketplaceAddressOverride: marketplaceAddressOverride)
proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
when contract is Marketplace:
if address =? deployment.marketplaceAddressOverride:
return some address
let chainId = await deployment.provider.getChainId()
return contract.getKnownAddress(chainId)


@ -1,9 +0,0 @@
import ./interactions/interactions
import ./interactions/hostinteractions
import ./interactions/clientinteractions
import ./interactions/validatorinteractions
export interactions
export hostinteractions
export clientinteractions
export validatorinteractions


@ -1,26 +0,0 @@
import pkg/ethers
import ../../purchasing
import ../../logutils
import ../market
import ../clock
import ./interactions
export purchasing
export logutils
type ClientInteractions* = ref object of ContractInteractions
purchasing*: Purchasing
proc new*(
_: type ClientInteractions, clock: OnChainClock, purchasing: Purchasing
): ClientInteractions =
ClientInteractions(clock: clock, purchasing: purchasing)
proc start*(self: ClientInteractions) {.async.} =
await procCall ContractInteractions(self).start()
await self.purchasing.start()
proc stop*(self: ClientInteractions) {.async.} =
await self.purchasing.stop()
await procCall ContractInteractions(self).stop()


@ -1,24 +0,0 @@
import pkg/chronos
import ../../logutils
import ../../sales
import ./interactions
export sales
export logutils
type HostInteractions* = ref object of ContractInteractions
sales*: Sales
proc new*(_: type HostInteractions, clock: Clock, sales: Sales): HostInteractions =
## Create a new HostInteractions instance
##
HostInteractions(clock: clock, sales: sales)
method start*(self: HostInteractions) {.async.} =
await procCall ContractInteractions(self).start()
await self.sales.start()
method stop*(self: HostInteractions) {.async.} =
await self.sales.stop()
await procCall ContractInteractions(self).start()


@ -1,15 +0,0 @@
import pkg/ethers
import ../clock
import ../marketplace
import ../market
export clock
type ContractInteractions* = ref object of RootObj
clock*: Clock
method start*(self: ContractInteractions) {.async, base.} =
discard
method stop*(self: ContractInteractions) {.async, base.} =
discard


@ -1,20 +0,0 @@
import ./interactions
import ../../validation
export validation
type ValidatorInteractions* = ref object of ContractInteractions
validation: Validation
proc new*(
_: type ValidatorInteractions, clock: OnChainClock, validation: Validation
): ValidatorInteractions =
ValidatorInteractions(clock: clock, validation: validation)
proc start*(self: ValidatorInteractions) {.async.} =
await procCall ContractInteractions(self).start()
await self.validation.start()
proc stop*(self: ValidatorInteractions) {.async.} =
await self.validation.stop()
await procCall ContractInteractions(self).stop()


@ -1,680 +0,0 @@
import std/strformat
import std/strutils
import pkg/ethers
import pkg/questionable
import pkg/lrucache
import ../utils/exceptions
import ../logutils
import ../market
import ./marketplace
import ./proofs
import ./provider
export market
logScope:
topics = "marketplace onchain market"
type
OnChainMarket* = ref object of Market
contract: Marketplace
signer: Signer
rewardRecipient: ?Address
configuration: ?MarketplaceConfig
requestCache: LruCache[string, StorageRequest]
allowanceLock: AsyncLock
MarketSubscription = market.Subscription
EventSubscription = ethers.Subscription
OnChainMarketSubscription = ref object of MarketSubscription
eventSubscription: EventSubscription
func new*(
_: type OnChainMarket,
contract: Marketplace,
rewardRecipient = Address.none,
requestCacheSize: uint16 = DefaultRequestCacheSize,
): OnChainMarket =
without signer =? contract.signer:
raiseAssert("Marketplace contract should have a signer")
var requestCache = newLruCache[string, StorageRequest](int(requestCacheSize))
OnChainMarket(
contract: contract,
signer: signer,
rewardRecipient: rewardRecipient,
requestCache: requestCache,
)
proc raiseMarketError(message: string) {.raises: [MarketError].} =
raise newException(MarketError, message)
func prefixWith(suffix, prefix: string, separator = ": "): string =
if prefix.len > 0:
return &"{prefix}{separator}{suffix}"
else:
return suffix
template convertEthersError(msg: string = "", body) =
try:
body
except EthersError as error:
raiseMarketError(error.msgDetail.prefixWith(msg))
proc config(
market: OnChainMarket
): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
without resolvedConfig =? market.configuration:
if err =? (await market.loadConfig()).errorOption:
raiseMarketError(err.msg)
without config =? market.configuration:
raiseMarketError("Failed to access to config from the Marketplace contract")
return config
return resolvedConfig
template withAllowanceLock*(market: OnChainMarket, body: untyped) =
if market.allowanceLock.isNil:
market.allowanceLock = newAsyncLock()
await market.allowanceLock.acquire()
try:
body
finally:
try:
market.allowanceLock.release()
except AsyncLockError as error:
raise newException(Defect, error.msg, error)
proc approveFunds(
market: OnChainMarket, amount: UInt256
) {.async: (raises: [CancelledError, MarketError]).} =
debug "Approving tokens", amount
convertEthersError("Failed to approve funds"):
let tokenAddress = await market.contract.token()
let token = Erc20Token.new(tokenAddress, market.signer)
let owner = await market.signer.getAddress()
let spender = market.contract.address
market.withAllowanceLock:
let allowance = await token.allowance(owner, spender)
discard await token.approve(spender, allowance + amount).confirm(1)
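# A minimal sketch of the allowance accumulation above, using hypothetical
# values and assuming stint's `u256` is in scope as elsewhere in this module:
# approving `allowance + amount` keeps an earlier, still-pending approval
# covered instead of overwriting it.
let
  currentAllowance = 50.u256 # tokens already approved but not yet spent
  neededAmount = 30.u256 # tokens needed for the new transaction
assert currentAllowance + neededAmount == 80.u256 # both transfers stay covered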
method loadConfig*(
market: OnChainMarket
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
without config =? market.configuration:
let fetchedConfig = await market.contract.configuration()
market.configuration = some fetchedConfig
return success()
except EthersError as err:
return failure newException(
MarketError,
"Failed to fetch the config from the Marketplace contract: " & err.msg,
)
method getZkeyHash*(
market: OnChainMarket
): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
let config = await market.config()
return some config.proofs.zkeyHash
method getSigner*(
market: OnChainMarket
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get signer address"):
return await market.signer.getAddress()
method periodicity*(
market: OnChainMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
let period = config.proofs.period
return Periodicity(seconds: period)
method proofTimeout*(
market: OnChainMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.timeout
method repairRewardPercentage*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.collateral.repairRewardPercentage
method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.requestDurationLimit
method proofDowntime*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.downtime
method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
convertEthersError("Failed to get slot pointer"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getPointer(slotId, overrides)
method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
convertEthersError("Failed to get my requests"):
return await market.contract.myRequests
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
convertEthersError("Failed to get my slots"):
let slots = await market.contract.mySlots()
debug "Fetched my slots", numSlots = len(slots)
return slots
method requestStorage(
market: OnChainMarket, request: StorageRequest
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to request storage"):
debug "Requesting storage"
await market.approveFunds(request.totalPrice())
discard await market.contract.requestStorage(request).confirm(1)
method getRequest*(
market: OnChainMarket, id: RequestId
): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
try:
let key = $id
if key in market.requestCache:
return some market.requestCache[key]
let request = await market.contract.getRequest(id)
market.requestCache[key] = request
return some request
except Marketplace_UnknownRequest, KeyError:
warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
return none StorageRequest
except EthersError as e:
error "Cannot retrieve the request", error = e.msg
return none StorageRequest
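# A minimal sketch of the request-cache pattern used by `getRequest` above,
# assuming only the pkg/lrucache calls already used in this module
# (`newLruCache`, `in`, `[]`, `[]=`); the string keys and values are hypothetical.
var sketchCache = newLruCache[string, string](2) # capacity of two entries
sketchCache["a"] = "first"
sketchCache["b"] = "second"
if "a" in sketchCache:
  echo sketchCache["a"] # cache hit: no second lookup needed
sketchCache["c"] = "third" # beyond capacity, the least recently used entry is evicted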
method requestState*(
market: OnChainMarket, requestId: RequestId
): Future[?RequestState] {.async.} =
convertEthersError("Failed to get request state"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return some await market.contract.requestState(requestId, overrides)
except Marketplace_UnknownRequest:
return none RequestState
method slotState*(
market: OnChainMarket, slotId: SlotId
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.slotState(slotId, overrides)
method getRequestEnd*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError("Failed to get request end"):
return await market.contract.requestEnd(id)
method requestExpiresAt*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError("Failed to get request expiry"):
return await market.contract.requestExpiry(id)
method getHost(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get slot's host"):
let slotId = slotId(requestId, slotIndex)
let address = await market.contract.getHost(slotId)
if address != Address.default:
return some address
else:
return none Address
method currentCollateral*(
market: OnChainMarket, slotId: SlotId
): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
convertEthersError("Failed to get slot's current collateral"):
return await market.contract.currentCollateral(slotId)
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
convertEthersError("Failed to get active slot"):
try:
return some await market.contract.getActiveSlot(slotId)
except Marketplace_SlotIsFree:
return none Slot
method fillSlot(
market: OnChainMarket,
requestId: RequestId,
slotIndex: uint64,
proof: Groth16Proof,
collateral: UInt256,
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fill slot"):
logScope:
requestId
slotIndex
try:
await market.approveFunds(collateral)
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the last one to fill a slot in this request
trace "estimating gas for fillSlot"
let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
let gasLimit = (gas * 110) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling fillSlot on contract", estimatedGas = gas, gasLimit = gasLimit
discard await market.contract
.fillSlot(requestId, slotIndex, proof, overrides)
.confirm(1)
trace "fillSlot transaction completed"
except Marketplace_SlotNotFree as parent:
raise newException(
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
parent,
)
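# The gas-limit headroom used in this module is plain integer arithmetic on the
# estimate: +10% here in `fillSlot`, +25% in `reserveSlot`, +50% in
# `markProofAsMissing` and +200% in `freeSlot`. A worked sketch with a
# hypothetical estimate:
let estimatedGas = 120_000 # hypothetical gas estimate
assert (estimatedGas * 110) div 100 == 132_000 # fillSlot: +10%
assert (estimatedGas * 125) div 100 == 150_000 # reserveSlot: +25%
assert (estimatedGas * 150) div 100 == 180_000 # markProofAsMissing: +50%
assert estimatedGas * 3 == 360_000 # freeSlot: +200%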
method freeSlot*(
market: OnChainMarket, slotId: SlotId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to free slot"):
try:
var freeSlot: Future[Confirmable]
if rewardRecipient =? market.rewardRecipient:
# If --reward-recipient specified, use it as the reward recipient, and use
# the SP's address as the collateral recipient
let collateralRecipient = await market.getSigner()
# Add 200% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(
slotId, rewardRecipient, collateralRecipient
)
let gasLimit = gas * 3
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
freeSlot = market.contract.freeSlot(
slotId,
rewardRecipient, # --reward-recipient
collateralRecipient, # SP's address
overrides,
)
else:
# Otherwise, use the SP's address as both the reward and collateral
# recipient (the contract will use msg.sender for both)
# Add 200% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(slotId)
let gasLimit = gas * 3
let overrides = TransactionOverrides(gasLimit: some (gasLimit))
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
freeSlot = market.contract.freeSlot(slotId, overrides)
discard await freeSlot.confirm(1)
except Marketplace_SlotIsFree as parent:
raise newException(
SlotStateMismatchError, "Failed to free slot, slot is already free", parent
)
method withdrawFunds(
market: OnChainMarket, requestId: RequestId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to withdraw funds"):
discard await market.contract.withdrawFunds(requestId).confirm(1)
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError("Failed to get proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.isProofRequired(id, overrides)
except Marketplace_SlotIsFree:
return false
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError("Failed to get future proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.willProofBeRequired(id, overrides)
except Marketplace_SlotIsFree:
return false
method getChallenge*(
market: OnChainMarket, id: SlotId
): Future[ProofChallenge] {.async.} =
convertEthersError("Failed to get proof challenge"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getChallenge(id, overrides)
method submitProof*(
market: OnChainMarket, id: SlotId, proof: Groth16Proof
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to submit proof"):
try:
discard await market.contract.submitProof(id, proof).confirm(1)
except Proofs_InvalidProof as parent:
raise newException(
ProofInvalidError, "Failed to submit proof because the proof is invalid", parent
)
method markProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to mark proof as missing"):
# Add 50% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
let gasLimit = (gas * 150) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling markProofAsMissing on contract",
estimatedGas = gas, gasLimit = gasLimit
discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
method canMarkProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
): Future[bool] {.async: (raises: [CancelledError]).} =
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
discard await market.contract.canMarkProofAsMissing(id, period, overrides)
return true
except EthersError as e:
trace "Proof cannot be marked as missing", msg = e.msg
return false
method reserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to reserve slot"):
try:
# Add 25% to gas estimate to deal with different evm code flow when we
# happen to be the last one that is allowed to reserve the slot
let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
let gasLimit = (gas * 125) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling reserveSlot on contract", estimatedGas = gas, gasLimit = gasLimit
discard
await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
except SlotReservations_ReservationNotAllowed:
raise newException(
SlotReservationNotAllowedError,
"Failed to reserve slot because reservation is not allowed",
)
method canReserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async.} =
convertEthersError("Unable to determine if slot can be reserved"):
return await market.contract.canReserveSlot(requestId, slotIndex)
method subscribeRequests*(
market: OnChainMarket, callback: OnRequest
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in Request subscription", msg = eventErr.msg
return
callback(event.requestId, event.ask, event.expiry)
convertEthersError("Failed to subscribe to StorageRequested events"):
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(
market: OnChainMarket, callback: OnSlotFilled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFilled subscription", msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError("Failed to subscribe to SlotFilled events"):
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(
market: OnChainMarket,
requestId: RequestId,
slotIndex: uint64,
callback: OnSlotFilled,
): Future[MarketSubscription] {.async.} =
proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: uint64) =
if eventRequestId == requestId and eventSlotIndex == slotIndex:
callback(requestId, slotIndex)
convertEthersError("Failed to subscribe to SlotFilled events"):
return await market.subscribeSlotFilled(onSlotFilled)
method subscribeSlotFreed*(
market: OnChainMarket, callback: OnSlotFreed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFreed subscription", msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError("Failed to subscribe to SlotFreed events"):
let subscription = await market.contract.subscribe(SlotFreed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotReservationsFull*(
market: OnChainMarket, callback: OnSlotReservationsFull
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotReservationsFull subscription",
msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError("Failed to subscribe to SlotReservationsFull events"):
let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(
market: OnChainMarket, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(
market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
if event.requestId == requestId:
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(
market: OnChainMarket, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
if event.requestId == requestId:
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(
market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
if event.requestId == requestId:
callback(event.requestId)
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeProofSubmission*(
market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
return
callback(event.id)
convertEthersError("Failed to subscribe to ProofSubmitted events"):
let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
await subscription.eventSubscription.unsubscribe()
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} =
convertEthersError("Failed to get past SlotFilled events from block"):
return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)
method queryPastSlotFilledEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} =
convertEthersError("Failed to get past SlotFilled events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastSlotFilledEvents(fromBlock)
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} =
convertEthersError("Failed to get past SlotFilled events from time"):
let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
method queryPastStorageRequestedEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} =
convertEthersError("Failed to get past StorageRequested events from block"):
return
await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)
method queryPastStorageRequestedEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} =
convertEthersError("Failed to get past StorageRequested events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastStorageRequestedEvents(fromBlock)
method slotCollateral*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
let slotid = slotId(requestId, slotIndex)
try:
let slotState = await market.slotState(slotid)
without request =? await market.getRequest(requestId):
return failure newException(
MarketError, "Failure calculating the slotCollateral, cannot get the request"
)
return market.slotCollateral(request.ask.collateralPerSlot, slotState)
except MarketError as error:
error "Error when trying to calculate the slotCollateral", error = error.msg
return failure error
method slotCollateral*(
market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.raises: [].} =
if slotState == SlotState.Repair:
without repairRewardPercentage =?
market.configuration .? collateral .? repairRewardPercentage:
return failure newException(
MarketError,
"Failure calculating the slotCollateral, cannot get the reward percentage",
)
return success (
collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(
100.u256
)
)
return success(collateralPerSlot)
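# A worked sketch of the repair discount above, with hypothetical numbers and
# assuming stint's `u256`/`div` as used in this module: a slot in the Repair
# state requires less collateral from the next host, because the repair reward
# share has already been paid out.
let
  sketchCollateral = 1000.u256 # hypothetical collateral per slot
  sketchRepairPct = 10'u8 # hypothetical repairRewardPercentage
let discounted =
  sketchCollateral - (sketchCollateral * sketchRepairPct.u256).div(100.u256)
assert discounted == 900.u256 # 10% of the collateral went to the repair reward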


@@ -1,198 +0,0 @@
import pkg/ethers
import pkg/ethers/erc20
import pkg/json_rpc/rpcclient
import pkg/stint
import pkg/chronos
import ../clock
import ./requests
import ./proofs
import ./config
export stint
export ethers except `%`, `%*`, toJson
export erc20 except `%`, `%*`, toJson
export config
export requests
type
Marketplace* = ref object of Contract
Marketplace_RepairRewardPercentageTooHigh* = object of SolidityError
Marketplace_SlashPercentageTooHigh* = object of SolidityError
Marketplace_MaximumSlashingTooHigh* = object of SolidityError
Marketplace_InvalidExpiry* = object of SolidityError
Marketplace_InvalidMaxSlotLoss* = object of SolidityError
Marketplace_InsufficientSlots* = object of SolidityError
Marketplace_InvalidClientAddress* = object of SolidityError
Marketplace_RequestAlreadyExists* = object of SolidityError
Marketplace_InvalidSlot* = object of SolidityError
Marketplace_SlotNotFree* = object of SolidityError
Marketplace_InvalidSlotHost* = object of SolidityError
Marketplace_AlreadyPaid* = object of SolidityError
Marketplace_TransferFailed* = object of SolidityError
Marketplace_UnknownRequest* = object of SolidityError
Marketplace_InvalidState* = object of SolidityError
Marketplace_StartNotBeforeExpiry* = object of SolidityError
Marketplace_SlotNotAcceptingProofs* = object of SolidityError
Marketplace_SlotIsFree* = object of SolidityError
Marketplace_ReservationRequired* = object of SolidityError
Marketplace_NothingToWithdraw* = object of SolidityError
Marketplace_InsufficientDuration* = object of SolidityError
Marketplace_InsufficientProofProbability* = object of SolidityError
Marketplace_InsufficientCollateral* = object of SolidityError
Marketplace_InsufficientReward* = object of SolidityError
Marketplace_InvalidCid* = object of SolidityError
Marketplace_DurationExceedsLimit* = object of SolidityError
Proofs_InsufficientBlockHeight* = object of SolidityError
Proofs_InvalidProof* = object of SolidityError
Proofs_ProofAlreadySubmitted* = object of SolidityError
Proofs_PeriodNotEnded* = object of SolidityError
Proofs_ValidationTimedOut* = object of SolidityError
Proofs_ProofNotMissing* = object of SolidityError
Proofs_ProofNotRequired* = object of SolidityError
Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
Periods_InvalidSecondsPerPeriod* = object of SolidityError
SlotReservations_ReservationNotAllowed* = object of SolidityError
proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.}
proc currentCollateral*(
marketplace: Marketplace, id: SlotId
): UInt256 {.contract, view.}
proc requestStorage*(
marketplace: Marketplace, request: StorageRequest
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration,
Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral,
Marketplace_InsufficientReward, Marketplace_InvalidCid,
]
.}
proc fillSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64, proof: Groth16Proof
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlot, Marketplace_ReservationRequired, Marketplace_SlotNotFree,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest,
]
.}
proc withdrawFunds*(
marketplace: Marketplace, requestId: RequestId
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_InvalidState,
Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
]
.}
proc withdrawFunds*(
marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_InvalidState,
Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
]
.}
proc freeSlot*(
marketplace: Marketplace, id: SlotId
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
]
.}
proc freeSlot*(
marketplace: Marketplace,
id: SlotId,
rewardRecipient: Address,
collateralRecipient: Address,
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
]
.}
proc getRequest*(
marketplace: Marketplace, id: RequestId
): StorageRequest {.contract, view, errors: [Marketplace_UnknownRequest].}
proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
proc getActiveSlot*(
marketplace: Marketplace, id: SlotId
): Slot {.contract, view, errors: [Marketplace_SlotIsFree].}
proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
proc requestState*(
marketplace: Marketplace, requestId: RequestId
): RequestState {.contract, view, errors: [Marketplace_UnknownRequest].}
proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
proc requestEnd*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}
proc requestExpiry*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}
proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc getChallenge*(
marketplace: Marketplace, id: SlotId
): array[32, byte] {.contract, view.}
proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}
proc submitProof*(
marketplace: Marketplace, id: SlotId, proof: Groth16Proof
): Confirmable {.
contract,
errors:
[Proofs_ProofAlreadySubmitted, Proofs_InvalidProof, Marketplace_UnknownRequest]
.}
proc markProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
Marketplace_SlotNotAcceptingProofs, Marketplace_StartNotBeforeExpiry,
Proofs_PeriodNotEnded, Proofs_ValidationTimedOut, Proofs_ProofNotMissing,
Proofs_ProofNotRequired, Proofs_ProofAlreadyMarkedMissing,
]
.}
proc canMarkProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
Marketplace_SlotNotAcceptingProofs, Proofs_PeriodNotEnded,
Proofs_ValidationTimedOut, Proofs_ProofNotMissing, Proofs_ProofNotRequired,
Proofs_ProofAlreadyMarkedMissing,
]
.}
proc reserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): Confirmable {.contract.}
proc canReserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): bool {.contract, view.}


@@ -1,46 +0,0 @@
import pkg/stint
import pkg/contractabi
import pkg/ethers/contracts/fields
type
Groth16Proof* = object
a*: G1Point
b*: G2Point
c*: G1Point
G1Point* = object
x*: UInt256
y*: UInt256
# A field element F_{p^2} encoded as `real + i * imag`
Fp2Element* = object
real*: UInt256
imag*: UInt256
G2Point* = object
x*: Fp2Element
y*: Fp2Element
func solidityType*(_: type G1Point): string =
solidityType(G1Point.fieldTypes)
func solidityType*(_: type Fp2Element): string =
solidityType(Fp2Element.fieldTypes)
func solidityType*(_: type G2Point): string =
solidityType(G2Point.fieldTypes)
func solidityType*(_: type Groth16Proof): string =
solidityType(Groth16Proof.fieldTypes)
func encode*(encoder: var AbiEncoder, point: G1Point) =
encoder.write(point.fieldValues)
func encode*(encoder: var AbiEncoder, element: Fp2Element) =
encoder.write(element.fieldValues)
func encode*(encoder: var AbiEncoder, point: G2Point) =
encoder.write(point.fieldValues)
func encode*(encoder: var AbiEncoder, proof: Groth16Proof) =
encoder.write(proof.fieldValues)


@@ -1,123 +0,0 @@
import pkg/ethers/provider
import pkg/chronos
import pkg/questionable
import ../logutils
from ../clock import SecondsSince1970
logScope:
topics = "marketplace onchain provider"
proc raiseProviderError(message: string) {.raises: [ProviderError].} =
raise newException(ProviderError, message)
proc blockNumberAndTimestamp*(
provider: Provider, blockTag: BlockTag
): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError, CancelledError]).} =
without latestBlock =? await provider.getBlock(blockTag):
raiseProviderError("Could not get latest block")
without latestBlockNumber =? latestBlock.number:
raiseProviderError("Could not get latest block number")
return (latestBlockNumber, latestBlock.timestamp)
proc binarySearchFindClosestBlock(
provider: Provider, epochTime: int, low: UInt256, high: UInt256
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low))
let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high))
if abs(lowTimestamp.truncate(int) - epochTime) <
abs(highTimestamp.truncate(int) - epochTime):
return low
else:
return high
proc binarySearchBlockNumberForEpoch(
provider: Provider,
epochTime: UInt256,
latestBlockNumber: UInt256,
earliestBlockNumber: UInt256,
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
var low = earliestBlockNumber
var high = latestBlockNumber
while low <= high:
if low == 0 and high == 0:
return low
let mid = (low + high) div 2
let (midBlockNumber, midBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.init(mid))
if midBlockTimestamp < epochTime:
low = mid + 1
elif midBlockTimestamp > epochTime:
high = mid - 1
else:
return midBlockNumber
# NOTE: because of how the binary search is implemented, when it finishes
# low is always greater than high - this is why we pass high where
# intuitively we would pass low:
await provider.binarySearchFindClosestBlock(
epochTime.truncate(int), low = high, high = low
)
proc blockNumberForEpoch*(
provider: Provider, epochTime: SecondsSince1970
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
let epochTimeUInt256 = epochTime.u256
let (latestBlockNumber, latestBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.latest)
let (earliestBlockNumber, earliestBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.earliest)
# Initially we used the average block time to predict
# the number of blocks we need to look back in order to find
# the block number corresponding to the given epoch time.
# This estimation can be highly inaccurate if block time
# was changing in the past or is fluctuating, and therefore
# we used that information initially only to find out
# if the available history is long enough to perform an effective search.
# It turns out we do not have to do that. There is an easier way.
#
# First we check if the given epoch time equals the timestamp of either
# the earliest or the latest block. If it does, we just return the
# block number of that block.
#
# Otherwise, if the earliest available block is not the genesis block,
# we should check the timestamp of that earliest block and if it is greater
# than the epoch time, we should issue a warning and return
# that earliest block number.
# In all other cases, thus when the earliest block is not the genesis
# block but its timestamp is not greater than the requested epoch time, or
# if the earliest available block is the genesis block,
# (which means we have the whole history available), we should proceed with
# the binary search.
#
# An additional benefit of this method is that we do not have to rely
# on the average block time, which makes the whole thing not only
# more reliable, but also easier to test.
# Are we lucky today?
if earliestBlockTimestamp == epochTimeUInt256:
return earliestBlockNumber
if latestBlockTimestamp == epochTimeUInt256:
return latestBlockNumber
if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
let availableHistoryInDays =
(latestBlockTimestamp - earliestBlockTimestamp) div 1.days.secs.u256
warn "Short block history detected.",
earliestBlockTimestamp = earliestBlockTimestamp, days = availableHistoryInDays
return earliestBlockNumber
return await provider.binarySearchBlockNumberForEpoch(
epochTimeUInt256, latestBlockNumber, earliestBlockNumber
)
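# A self-contained sketch of the search above over a hypothetical in-memory
# chain (the seq of timestamps stands in for the provider calls): binary-search
# the block numbers and, when there is no exact timestamp match, return
# whichever neighbouring block is closer to the requested time.
let sketchTimestamps = @[0, 10, 25, 40, 70, 100] # sketchTimestamps[i] = timestamp of block i

proc sketchClosestBlock(epochTime: int): int =
  # assumes epochTime lies between the earliest and latest timestamps,
  # which blockNumberForEpoch above checks before searching
  var low = 0
  var high = sketchTimestamps.high
  while low <= high:
    let mid = (low + high) div 2
    if sketchTimestamps[mid] < epochTime:
      low = mid + 1
    elif sketchTimestamps[mid] > epochTime:
      high = mid - 1
    else:
      return mid
  # as in the proc above, low ends up greater than high here
  if abs(sketchTimestamps[high] - epochTime) < abs(sketchTimestamps[low] - epochTime):
    return high
  return low

assert sketchClosestBlock(40) == 3 # exact timestamp match
assert sketchClosestBlock(60) == 4 # block 4 (timestamp 70) is closest to 60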
proc pastBlockTag*(
provider: Provider, blocksAgo: int
): Future[BlockTag] {.async: (raises: [ProviderError, CancelledError]).} =
let head = await provider.getBlockNumber()
return BlockTag.init(head - blocksAgo.abs.u256)


@@ -1,206 +0,0 @@
import std/hashes
import std/sequtils
import std/typetraits
import pkg/contractabi
import pkg/nimcrypto/keccak
import pkg/ethers/contracts/fields
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/libp2p/[cid, multicodec]
import ../logutils
import ../utils/json
from ../errors import mapFailure
export contractabi
type
StorageRequest* = object
client* {.serialize.}: Address
ask* {.serialize.}: StorageAsk
content* {.serialize.}: StorageContent
expiry* {.serialize.}: uint64
nonce*: Nonce
StorageAsk* = object
proofProbability* {.serialize.}: UInt256
pricePerBytePerSecond* {.serialize.}: UInt256
collateralPerByte* {.serialize.}: UInt256
slots* {.serialize.}: uint64
slotSize* {.serialize.}: uint64
duration* {.serialize.}: uint64
maxSlotLoss* {.serialize.}: uint64
StorageContent* = object
cid* {.serialize.}: Cid
merkleRoot*: array[32, byte]
Slot* = object
request* {.serialize.}: StorageRequest
slotIndex* {.serialize.}: uint64
SlotId* = distinct array[32, byte]
RequestId* = distinct array[32, byte]
Nonce* = distinct array[32, byte]
RequestState* {.pure.} = enum
New
Started
Cancelled
Finished
Failed
SlotState* {.pure.} = enum
Free
Filled
Finished
Failed
Paid
Cancelled
Repair
proc `==`*(x, y: Nonce): bool {.borrow.}
proc `==`*(x, y: RequestId): bool {.borrow.}
proc `==`*(x, y: SlotId): bool {.borrow.}
proc hash*(x: SlotId): Hash {.borrow.}
proc hash*(x: Nonce): Hash {.borrow.}
proc hash*(x: Address): Hash {.borrow.}
func toArray*(id: RequestId | SlotId | Nonce): array[32, byte] =
array[32, byte](id)
proc `$`*(id: RequestId | SlotId | Nonce): string =
id.toArray.toHex
proc fromHex*(T: type RequestId, hex: string): T =
T array[32, byte].fromHex(hex)
proc fromHex*(T: type SlotId, hex: string): T =
T array[32, byte].fromHex(hex)
proc fromHex*(T: type Nonce, hex: string): T =
T array[32, byte].fromHex(hex)
proc fromHex*[T: distinct](_: type T, hex: string): T =
type baseType = T.distinctBase
T baseType.fromHex(hex)
proc toHex*[T: distinct](id: T): string =
type baseType = T.distinctBase
baseType(id).toHex
logutils.formatIt(LogFormat.textLines, Nonce):
it.short0xHexLog
logutils.formatIt(LogFormat.textLines, RequestId):
it.short0xHexLog
logutils.formatIt(LogFormat.textLines, SlotId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, Nonce):
it.to0xHexLog
logutils.formatIt(LogFormat.json, RequestId):
it.to0xHexLog
logutils.formatIt(LogFormat.json, SlotId):
it.to0xHexLog
func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
StorageRequest(
client: tupl[0], ask: tupl[1], content: tupl[2], expiry: tupl[3], nonce: tupl[4]
)
func fromTuple(_: type Slot, tupl: tuple): Slot =
Slot(request: tupl[0], slotIndex: tupl[1])
func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
StorageAsk(
proofProbability: tupl[0],
pricePerBytePerSecond: tupl[1],
collateralPerByte: tupl[2],
slots: tupl[3],
slotSize: tupl[4],
duration: tupl[5],
maxSlotLoss: tupl[6],
)
func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
StorageContent(cid: tupl[0], merkleRoot: tupl[1])
func solidityType*(_: type Cid): string =
solidityType(seq[byte])
func solidityType*(_: type StorageContent): string =
solidityType(StorageContent.fieldTypes)
func solidityType*(_: type StorageAsk): string =
solidityType(StorageAsk.fieldTypes)
func solidityType*(_: type StorageRequest): string =
solidityType(StorageRequest.fieldTypes)
# Note: it seems to be ok to ignore the vbuffer offset for now
func encode*(encoder: var AbiEncoder, cid: Cid) =
encoder.write(cid.data.buffer)
func encode*(encoder: var AbiEncoder, content: StorageContent) =
encoder.write(content.fieldValues)
func encode*(encoder: var AbiEncoder, ask: StorageAsk) =
encoder.write(ask.fieldValues)
func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) =
encoder.write(id.toArray)
func encode*(encoder: var AbiEncoder, request: StorageRequest) =
encoder.write(request.fieldValues)
func encode*(encoder: var AbiEncoder, slot: Slot) =
encoder.write(slot.fieldValues)
func decode*(decoder: var AbiDecoder, T: type Cid): ?!T =
let data = ?decoder.read(seq[byte])
Cid.init(data).mapFailure
func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T =
let tupl = ?decoder.read(StorageContent.fieldTypes)
success StorageContent.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type StorageAsk): ?!T =
let tupl = ?decoder.read(StorageAsk.fieldTypes)
success StorageAsk.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type StorageRequest): ?!T =
let tupl = ?decoder.read(StorageRequest.fieldTypes)
success StorageRequest.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type Slot): ?!T =
let tupl = ?decoder.read(Slot.fieldTypes)
success Slot.fromTuple(tupl)
func id*(request: StorageRequest): RequestId =
let encoding = AbiEncoder.encode((request,))
RequestId(keccak256.digest(encoding).data)
func slotId*(requestId: RequestId, slotIndex: uint64): SlotId =
let encoding = AbiEncoder.encode((requestId, slotIndex))
SlotId(keccak256.digest(encoding).data)
func slotId*(request: StorageRequest, slotIndex: uint64): SlotId =
slotId(request.id, slotIndex)
func id*(slot: Slot): SlotId =
slotId(slot.request, slot.slotIndex)
func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 =
ask.pricePerBytePerSecond * ask.slotSize.u256
func pricePerSlot*(ask: StorageAsk): UInt256 =
ask.duration.u256 * ask.pricePerSlotPerSecond
func totalPrice*(ask: StorageAsk): UInt256 =
ask.slots.u256 * ask.pricePerSlot
func totalPrice*(request: StorageRequest): UInt256 =
request.ask.totalPrice
func collateralPerSlot*(ask: StorageAsk): UInt256 =
ask.collateralPerByte * ask.slotSize.u256
func size*(ask: StorageAsk): uint64 =
ask.slots * ask.slotSize
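# A worked sketch of the ask-level helpers above, with small hypothetical
# numbers and assuming the StorageAsk type and funcs defined in this module:
let sketchAsk = StorageAsk(
  pricePerBytePerSecond: 2.u256,
  collateralPerByte: 3.u256,
  slots: 4,
  slotSize: 100,
  duration: 60,
)
assert sketchAsk.pricePerSlotPerSecond == 200.u256 # 2 * 100
assert sketchAsk.pricePerSlot == 12_000.u256 # 60 * 200
assert sketchAsk.totalPrice == 48_000.u256 # 4 * 12_000
assert sketchAsk.collateralPerSlot == 300.u256 # 3 * 100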


@@ -1,25 +0,0 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import ./erasure/erasure
import ./erasure/backends/leopard
export erasure
func leoEncoderProvider*(
size, buffers, parity: int
): EncoderBackend {.raises: [Defect].} =
## create new Leo Encoder
LeoEncoderBackend.new(size, buffers, parity)
func leoDecoderProvider*(
size, buffers, parity: int
): DecoderBackend {.raises: [Defect].} =
## create new Leo Decoder
LeoDecoderBackend.new(size, buffers, parity)


@@ -1,44 +0,0 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [], gcsafe.}
import ../stores
type
ErasureBackend* = ref object of RootObj
blockSize*: int # block size in bytes
buffers*: int # number of original pieces
parity*: int # number of redundancy pieces
EncoderBackend* = ref object of ErasureBackend
DecoderBackend* = ref object of ErasureBackend
method release*(self: ErasureBackend) {.base, gcsafe.} =
## release the backend
##
raiseAssert("not implemented!")
method encode*(
self: EncoderBackend,
buffers, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen: int,
): Result[void, cstring] {.base, gcsafe.} =
## encode buffers using a backend
##
raiseAssert("not implemented!")
method decode*(
self: DecoderBackend,
buffers, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] {.base, gcsafe.} =
## decode buffers using a backend
##
raiseAssert("not implemented!")


@@ -1,79 +0,0 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/options
import pkg/leopard
import pkg/results
import ../backend
type
LeoEncoderBackend* = ref object of EncoderBackend
encoder*: Option[LeoEncoder]
LeoDecoderBackend* = ref object of DecoderBackend
decoder*: Option[LeoDecoder]
method encode*(
self: LeoEncoderBackend,
data, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen: int,
): Result[void, cstring] =
## Encode data using Leopard backend
if parityLen == 0:
return ok()
var encoder =
if self.encoder.isNone:
self.encoder = (?LeoEncoder.init(self.blockSize, self.buffers, self.parity)).some
self.encoder.get()
else:
self.encoder.get()
encoder.encode(data, parity, dataLen, parityLen)
method decode*(
self: LeoDecoderBackend,
data, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] =
## Decode data using the given Leopard backend
var decoder =
if self.decoder.isNone:
self.decoder = (?LeoDecoder.init(self.blockSize, self.buffers, self.parity)).some
self.decoder.get()
else:
self.decoder.get()
decoder.decode(data, parity, recovered, dataLen, parityLen, recoveredLen)
method release*(self: LeoEncoderBackend) =
if self.encoder.isSome:
self.encoder.get().free()
method release*(self: LeoDecoderBackend) =
if self.decoder.isSome:
self.decoder.get().free()
proc new*(
T: type LeoEncoderBackend, blockSize, buffers, parity: int
): LeoEncoderBackend =
## Create an instance of a Leopard encoder backend
##
LeoEncoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)
proc new*(
T: type LeoDecoderBackend, blockSize, buffers, parity: int
): LeoDecoderBackend =
## Create an instance of a Leopard decoder backend
##
LeoDecoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)


@@ -1,728 +0,0 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [], gcsafe.}
import std/[sugar, atomics, sequtils]
import pkg/chronos
import pkg/chronos/threadsync
import pkg/chronicles
import pkg/libp2p/[multicodec, cid, multihash]
import pkg/libp2p/protobuf/minprotobuf
import pkg/taskpools
import ../logutils
import ../manifest
import ../merkletree
import ../stores
import ../clock
import ../blocktype as bt
import ../utils
import ../utils/asynciter
import ../indexingstrategy
import ../errors
import ../utils/arrayutils
import pkg/stew/byteutils
import ./backend
export backend
logScope:
topics = "codex erasure"
type
## Encode a manifest into one that is erasure protected.
##
## The new manifest has K `blocks` that are encoded into
## additional M `parity` blocks. The resulting dataset
## is padded with empty blocks if it doesn't have a square
## shape.
##
## NOTE: The padding blocks could be excluded
## from transmission, but they aren't for now.
##
## The resulting dataset is logically divided into rows
## where a row is made up of B blocks. There are then
## K + M = N rows in total, each of length B blocks. All rows
## are assumed to contain the same number (B) of blocks.
##
## The encoding is systematic and the rows can be
## read sequentially by any node without decoding.
##
## Decoding is possible with any K rows or partial K
## columns (with up to M blocks missing per column),
## or any combination thereof.
##
EncoderProvider* =
proc(size, blocks, parity: int): EncoderBackend {.raises: [Defect], noSideEffect.}
DecoderProvider* =
proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.}
Erasure* = ref object
taskPool: Taskpool
encoderProvider*: EncoderProvider
decoderProvider*: DecoderProvider
store*: BlockStore
EncodingParams = object
ecK: Natural
ecM: Natural
rounded: Natural
steps: Natural
blocksCount: Natural
strategy: StrategyType
ErasureError* = object of CodexError
InsufficientBlocksError* = object of ErasureError
# Minimum size, in bytes, that the dataset must have had
# for the encoding request to have succeeded with the parameters
# provided.
minSize*: NBytes
EncodeTask = object
success: Atomic[bool]
erasure: ptr Erasure
blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
blockSize, blocksLen, parityLen: int
signal: ThreadSignalPtr
DecodeTask = object
success: Atomic[bool]
erasure: ptr Erasure
blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
recovered: ptr UncheckedArray[ptr UncheckedArray[byte]]
blockSize, blocksLen: int
parityLen, recoveredLen: int
signal: ThreadSignalPtr
func indexToPos(steps, idx, step: int): int {.inline.} =
## Convert an index to a position in the encoded
## dataset
## `idx` - the index to convert
## `step` - the current step
## returns the position in the encoded dataset
##
(idx - step) div steps
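# A worked sketch with hypothetical values: with steps = 3, encoding round
# `step` owns dataset indices step, step + 3, step + 6, ..., and indexToPos
# maps each of them back to its position within that round.
assert indexToPos(steps = 3, idx = 1, step = 1) == 0
assert indexToPos(steps = 3, idx = 4, step = 1) == 1
assert indexToPos(steps = 3, idx = 7, step = 1) == 2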
proc getPendingBlocks(
self: Erasure, manifest: Manifest, indices: seq[int]
): AsyncIter[(?!bt.Block, int)] =
## Get pending blocks iterator
##
var pendingBlocks: seq[Future[(?!bt.Block, int)]] = @[]
proc attachIndex(
fut: Future[?!bt.Block], i: int
): Future[(?!bt.Block, int)] {.async.} =
## avoids closure capture issues
return (await fut, i)
for blockIndex in indices:
# request blocks from the store
let fut = self.store.getBlock(BlockAddress.init(manifest.treeCid, blockIndex))
pendingBlocks.add(attachIndex(fut, blockIndex))
proc isFinished(): bool =
pendingBlocks.len == 0
proc genNext(): Future[(?!bt.Block, int)] {.async.} =
let completedFut = await one(pendingBlocks)
if (let i = pendingBlocks.find(completedFut); i >= 0):
pendingBlocks.del(i)
return await completedFut
else:
let (_, index) = await completedFut
raise newException(
CatchableError,
"Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " &
$index,
)
AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)
proc prepareEncodingData(
self: Erasure,
manifest: Manifest,
params: EncodingParams,
step: Natural,
data: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte],
): Future[?!Natural] {.async.} =
## Prepare data for encoding
##
let
strategy = params.strategy.init(
firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
)
indices = toSeq(strategy.getIndices(step))
pendingBlocksIter =
self.getPendingBlocks(manifest, indices.filterIt(it < manifest.blocksCount))
var resolved = 0
for fut in pendingBlocksIter:
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
warn "Failed retrieving a block", treeCid = manifest.treeCid, idx, msg = err.msg
return failure(err)
let pos = indexToPos(params.steps, idx, step)
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
cids[idx] = blk.cid
resolved.inc()
for idx in indices.filterIt(it >= manifest.blocksCount):
let pos = indexToPos(params.steps, idx, step)
trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock)
without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec),
err:
return failure(err)
cids[idx] = emptyBlockCid
success(resolved.Natural)
proc prepareDecodingData(
self: Erasure,
encoded: Manifest,
step: Natural,
data: ref seq[seq[byte]],
parityData: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte],
): Future[?!(Natural, Natural)] {.async.} =
## Prepare data for decoding
## `encoded` - the encoded manifest
## `step` - the current step
## `data` - the data to be prepared
## `parityData` - the parityData to be prepared
## `cids` - cids of prepared data
## `emptyBlock` - the empty block to be used for padding
##
let
strategy = encoded.protectedStrategy.init(
firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
)
indices = toSeq(strategy.getIndices(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indices)
var
dataPieces = 0
parityPieces = 0
resolved = 0
for fut in pendingBlocksIter:
# Continue to receive blocks until we have just enough for decoding
# or no more blocks can arrive
if resolved >= encoded.ecK:
break
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
trace "Failed retrieving a block", idx, treeCid = encoded.treeCid, msg = err.msg
continue
let pos = indexToPos(encoded.steps, idx, step)
logScope:
cid = blk.cid
idx = idx
pos = pos
step = step
empty = blk.isEmpty
cids[idx] = blk.cid
if idx >= encoded.rounded:
trace "Retrieved parity block"
shallowCopy(
parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data
)
parityPieces.inc
else:
trace "Retrieved data block"
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
dataPieces.inc
resolved.inc
return success (dataPieces.Natural, parityPieces.Natural)
proc init*(
_: type EncodingParams,
manifest: Manifest,
ecK: Natural,
ecM: Natural,
strategy: StrategyType,
): ?!EncodingParams =
if ecK > manifest.blocksCount:
let exc = (ref InsufficientBlocksError)(
msg:
"Unable to encode manifest, not enough blocks, ecK = " & $ecK &
", blocksCount = " & $manifest.blocksCount,
minSize: ecK.NBytes * manifest.blockSize,
)
return failure(exc)
let
rounded = roundUp(manifest.blocksCount, ecK)
steps = divUp(rounded, ecK)
blocksCount = rounded + (steps * ecM)
success EncodingParams(
ecK: ecK,
ecM: ecM,
rounded: rounded,
steps: steps,
blocksCount: blocksCount,
strategy: strategy,
)
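# A worked sketch of the parameters above, with hypothetical numbers and local
# stand-ins for the roundUp/divUp helpers imported from ../utils:
func sketchDivUp(a, b: int): int =
  (a + b - 1) div b

func sketchRoundUp(a, b: int): int =
  sketchDivUp(a, b) * b

let
  sketchBlocks = 10 # original manifest size, in blocks
  sketchEcK = 4
  sketchEcM = 2
let
  sketchRounded = sketchRoundUp(sketchBlocks, sketchEcK) # 12: padded to a multiple of ecK
  sketchSteps = sketchDivUp(sketchRounded, sketchEcK) # 3 encoding rounds
  sketchTotal = sketchRounded + sketchSteps * sketchEcM # 18 blocks in the protected dataset
assert sketchRounded == 12 and sketchSteps == 3 and sketchTotal == 18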
proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
# Task suitable for running in taskpools - look, no GC!
let encoder =
task[].erasure.encoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
encoder.release()
discard task[].signal.fireSync()
if (
let res =
encoder.encode(task[].blocks, task[].parity, task[].blocksLen, task[].parityLen)
res.isErr
):
warn "Error from leopard encoder backend!", error = $res.error
task[].success.store(false)
else:
task[].success.store(true)
proc asyncEncode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks: ref seq[seq[byte]],
parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")
defer:
threadPtr.close().expect("closing once works")
var data = makeUncheckedArray(blocks)
defer:
dealloc(data)
## Create an encode task with the block data
var task = EncodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
blocks: data,
parity: parity,
signal: threadPtr,
)
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardEncodeTask(self.taskPool, addr task)
let threadFut = threadPtr.wait()
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)
if not task.success.load():
return failure("Leopard encoding task failed")
success()
proc encodeData(
self: Erasure, manifest: Manifest, params: EncodingParams
): Future[?!Manifest] {.async.} =
## Encode blocks pointed to by the protected manifest
##
## `manifest` - the manifest to encode
##
logScope:
steps = params.steps
rounded_blocks = params.rounded
blocks_count = params.blocksCount
ecK = params.ecK
ecM = params.ecM
var
cids = seq[Cid].new()
emptyBlock = newSeq[byte](manifest.blockSize.int)
cids[].setLen(params.blocksCount)
try:
for step in 0 ..< params.steps:
# TODO: Don't allocate a new seq every time, allocate once and zero out
var
data = seq[seq[byte]].new() # number of blocks to encode
parity = createDoubleArray(params.ecM, manifest.blockSize.int)
defer:
freeDoubleArray(parity, params.ecM)
data[].setLen(params.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
without resolved =?
(await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)),
err:
trace "Unable to prepare data", error = err.msg
return failure(err)
trace "Erasure coding data", data = data[].len
try:
if err =? (
await self.asyncEncode(
manifest.blockSize.int, params.ecK, params.ecM, data, parity
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
var idx = params.rounded + step
for j in 0 ..< params.ecM:
var innerPtr: ptr UncheckedArray[byte] = parity[][j]
without blk =? bt.Block.new(innerPtr.toOpenArray(0, manifest.blockSize.int - 1)),
error:
trace "Unable to create parity block", err = error.msg
return failure(error)
trace "Adding parity block", cid = blk.cid, idx
cids[idx] = blk.cid
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
idx.inc(params.steps)
without tree =? CodexTree.init(cids[]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if err =? (await self.store.putAllProofs(tree)).errorOption:
return failure(err)
let encodedManifest = Manifest.new(
manifest = manifest,
treeCid = treeCid,
datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
ecK = params.ecK,
ecM = params.ecM,
strategy = params.strategy,
)
trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
success encodedManifest
except CancelledError as exc:
trace "Erasure coding encoding cancelled"
raise exc # cancellation needs to be propagated
except CatchableError as exc:
trace "Erasure coding encoding error", exc = exc.msg
return failure(exc)
proc encode*(
self: Erasure,
manifest: Manifest,
blocks: Natural,
parity: Natural,
strategy = SteppedStrategy,
): Future[?!Manifest] {.async.} =
## Encode a manifest into one that is erasure protected.
##
## `manifest` - the original manifest to be encoded
## `blocks` - the number of blocks to be encoded - K
## `parity` - the number of parity blocks to generate - M
##
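## An illustrative call (names as in this module): `await erasure.encode(manifest, 4, 2)`
## turns every group of 4 data blocks into 4 + 2 blocks, so any 2 blocks of a
## group can be lost and later recovered.
##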
without params =? EncodingParams.init(manifest, blocks.int, parity.int, strategy), err:
return failure(err)
without encodedManifest =? await self.encodeData(manifest, params), err:
return failure(err)
return success encodedManifest
proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
# Task suitable for running in taskpools - look, no GC!
let decoder =
task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
decoder.release()
discard task[].signal.fireSync()
if (
let res = decoder.decode(
task[].blocks,
task[].parity,
task[].recovered,
task[].blocksLen,
task[].parityLen,
task[].recoveredLen,
)
res.isErr
):
warn "Error from leopard decoder backend!", error = $res.error
task[].success.store(false)
else:
task[].success.store(true)
proc asyncDecode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks, parity: ref seq[seq[byte]],
recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")
defer:
threadPtr.close().expect("closing once works")
var
blockData = makeUncheckedArray(blocks)
parityData = makeUncheckedArray(parity)
defer:
dealloc(blockData)
dealloc(parityData)
## Create a decode task with block data
var task = DecodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
recoveredLen: blocksLen,
blocks: blockData,
parity: parityData,
recovered: recovered,
signal: threadPtr,
)
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardDecodeTask(self.taskPool, addr task)
let threadFut = threadPtr.wait()
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)
if not task.success.load():
return failure("Leopard decoding task failed")
success()
proc decodeInternal(
self: Erasure, encoded: Manifest
): Future[?!(ref seq[Cid], seq[Natural])] {.async.} =
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.blocksCount
var
cids = seq[Cid].new()
recoveredIndices = newSeq[Natural]()
decoder = self.decoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
emptyBlock = newSeq[byte](encoded.blockSize.int)
cids[].setLen(encoded.blocksCount)
try:
for step in 0 ..< encoded.steps:
# TODO: this is a tight blocking loop, so we sleep here to allow
# other events to be processed; this should be addressed
# by threading
await sleepAsync(10.millis)
var
data = seq[seq[byte]].new()
parityData = seq[seq[byte]].new()
recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)
defer:
freeDoubleArray(recovered, encoded.ecK)
data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M
without (dataPieces, _) =? (
await self.prepareDecodingData(
encoded, step, data, parityData, cids, emptyBlock
)
), err:
trace "Unable to prepare data", error = err.msg
return failure(err)
if dataPieces >= encoded.ecK:
trace "Retrieved all the required data blocks"
continue
trace "Erasure decoding data"
try:
if err =? (
await self.asyncDecode(
encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
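# Data blocks are interleaved: block i of EC group `step` lives at index
# i * steps + step; only blocks that were not retrieved are reconstructed and
# written back to the store.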
for i in 0 ..< encoded.ecK:
let idx = i * encoded.steps + step
if data[i].len <= 0 and not cids[idx].isEmpty:
var innerPtr: ptr UncheckedArray[byte] = recovered[][i]
without blk =? bt.Block.new(
innerPtr.toOpenArray(0, encoded.blockSize.int - 1)
), error:
trace "Unable to create block!", exc = error.msg
return failure(error)
trace "Recovered block", cid = blk.cid, index = i
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
self.store.completeBlock(BlockAddress.init(encoded.treeCid, idx), blk)
cids[idx] = blk.cid
recoveredIndices.add(idx)
except CancelledError as exc:
trace "Erasure coding decoding cancelled"
raise exc # cancellation needs to be propagated
except CatchableError as exc:
trace "Erasure coding decoding error", exc = exc.msg
return failure(exc)
finally:
decoder.release()
return (cids, recoveredIndices).success
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into its original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
without (cids, recoveredIndices) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure(
"Original tree root differs from the tree root computed out of recovered data"
)
let idxIter =
Iter[Natural].new(recoveredIndices).filter((i: Natural) => i < tree.leavesCount)
if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
return failure(err)
let decoded = Manifest.new(encoded)
return decoded.success
proc repair*(self: Erasure, encoded: Manifest): Future[?!void] {.async.} =
## Repair a protected manifest by reconstructing the full dataset
##
## `encoded` - the encoded (protected) manifest to
## be repaired
##
without (cids, _) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure(
"Original tree root differs from the tree root computed out of recovered data"
)
if err =? (await self.store.putAllProofs(tree)).errorOption:
return failure(err)
without repaired =? (
await self.encode(
Manifest.new(encoded), encoded.ecK, encoded.ecM, encoded.protectedStrategy
)
), err:
return failure(err)
if repaired.treeCid != encoded.treeCid:
return failure(
"Original tree root differs from the repaired tree root encoded out of recovered data"
)
return success()
proc start*(self: Erasure) {.async.} =
return
proc stop*(self: Erasure) {.async.} =
return
proc new*(
T: type Erasure,
store: BlockStore,
encoderProvider: EncoderProvider,
decoderProvider: DecoderProvider,
taskPool: Taskpool,
): Erasure =
## Create a new Erasure instance for encoding and decoding manifests
##
Erasure(
store: store,
encoderProvider: encoderProvider,
decoderProvider: decoderProvider,
taskPool: taskPool,
)

View File

@ -32,25 +32,12 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
## multicodec container (Dag-pb) for now
##
?manifest.verify()
var pbNode = initProtoBuffer()
# NOTE: The `Data` field in the `dag-pb`
# contains the following protobuf `Message`
#
# ```protobuf
# Message VerificationInfo {
# bytes verifyRoot = 1; # Decimal encoded field-element
# repeated bytes slotRoots = 2; # Decimal encoded field-elements
# }
# Message ErasureInfo {
# optional uint32 ecK = 1; # number of encoded blocks
# optional uint32 ecM = 2; # number of parity blocks
# optional bytes originalTreeCid = 3; # cid of the original dataset
# optional uint32 originalDatasetSize = 4; # size of the original dataset
# optional VerificationInfo verification = 5; # verification information
# }
#
# Message Header {
# optional bytes treeCid = 1; # cid (root) of the tree
# optional uint32 blockSize = 2; # size of a single block
@ -58,9 +45,8 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
# optional codec: MultiCodec = 4; # Dataset codec
# optional hcodec: MultiCodec = 5 # Multihash codec
# optional version: CidVersion = 6; # Cid version
# optional ErasureInfo erasure = 7; # erasure coding info
# optional filename: ?string = 8; # original filename
# optional mimetype: ?string = 9; # original mimetype
# optional filename: ?string = 7; # original filename
# optional mimetype: ?string = 8; # original mimetype
# }
# ```
#
@ -73,31 +59,11 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
header.write(5, manifest.hcodec.uint32)
header.write(6, manifest.version.uint32)
if manifest.protected:
var erasureInfo = initProtoBuffer()
erasureInfo.write(1, manifest.ecK.uint32)
erasureInfo.write(2, manifest.ecM.uint32)
erasureInfo.write(3, manifest.originalTreeCid.data.buffer)
erasureInfo.write(4, manifest.originalDatasetSize.uint64)
erasureInfo.write(5, manifest.protectedStrategy.uint32)
if manifest.verifiable:
var verificationInfo = initProtoBuffer()
verificationInfo.write(1, manifest.verifyRoot.data.buffer)
for slotRoot in manifest.slotRoots:
verificationInfo.write(2, slotRoot.data.buffer)
verificationInfo.write(3, manifest.cellSize.uint32)
verificationInfo.write(4, manifest.verifiableStrategy.uint32)
erasureInfo.write(6, verificationInfo)
erasureInfo.finish()
header.write(7, erasureInfo)
if manifest.filename.isSome:
header.write(8, manifest.filename.get())
header.write(7, manifest.filename.get())
if manifest.mimetype.isSome:
header.write(9, manifest.mimetype.get())
header.write(8, manifest.mimetype.get())
pbNode.write(1, header) # set the treeCid as the data field
pbNode.finish()
@ -111,22 +77,12 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
var
pbNode = initProtoBuffer(data)
pbHeader: ProtoBuffer
pbErasureInfo: ProtoBuffer
pbVerificationInfo: ProtoBuffer
treeCidBuf: seq[byte]
originalTreeCid: seq[byte]
datasetSize: uint64
codec: uint32
hcodec: uint32
version: uint32
blockSize: uint32
originalDatasetSize: uint64
ecK, ecM: uint32
protectedStrategy: uint32
verifyRoot: seq[byte]
slotRoots: seq[seq[byte]]
cellSize: uint32
verifiableStrategy: uint32
filename: string
mimetype: string
@ -153,98 +109,27 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
if pbHeader.getField(6, version).isErr:
return failure("Unable to decode `version` from manifest!")
if pbHeader.getField(7, pbErasureInfo).isErr:
return failure("Unable to decode `erasureInfo` from manifest!")
if pbHeader.getField(8, filename).isErr:
if pbHeader.getField(7, filename).isErr:
return failure("Unable to decode `filename` from manifest!")
if pbHeader.getField(9, mimetype).isErr:
if pbHeader.getField(8, mimetype).isErr:
return failure("Unable to decode `mimetype` from manifest!")
let protected = pbErasureInfo.buffer.len > 0
var verifiable = false
if protected:
if pbErasureInfo.getField(1, ecK).isErr:
return failure("Unable to decode `K` from manifest!")
if pbErasureInfo.getField(2, ecM).isErr:
return failure("Unable to decode `M` from manifest!")
if pbErasureInfo.getField(3, originalTreeCid).isErr:
return failure("Unable to decode `originalTreeCid` from manifest!")
if pbErasureInfo.getField(4, originalDatasetSize).isErr:
return failure("Unable to decode `originalDatasetSize` from manifest!")
if pbErasureInfo.getField(5, protectedStrategy).isErr:
return failure("Unable to decode `protectedStrategy` from manifest!")
if pbErasureInfo.getField(6, pbVerificationInfo).isErr:
return failure("Unable to decode `verificationInfo` from manifest!")
verifiable = pbVerificationInfo.buffer.len > 0
if verifiable:
if pbVerificationInfo.getField(1, verifyRoot).isErr:
return failure("Unable to decode `verifyRoot` from manifest!")
if pbVerificationInfo.getRequiredRepeatedField(2, slotRoots).isErr:
return failure("Unable to decode `slotRoots` from manifest!")
if pbVerificationInfo.getField(3, cellSize).isErr:
return failure("Unable to decode `cellSize` from manifest!")
if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
return failure("Unable to decode `verifiableStrategy` from manifest!")
let treeCid = ?Cid.init(treeCidBuf).mapFailure
var filenameOption = if filename.len == 0: string.none else: filename.some
var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some
let self =
if protected:
Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec,
ecK = ecK.int,
ecM = ecM.int,
originalTreeCid = ?Cid.init(originalTreeCid).mapFailure,
originalDatasetSize = originalDatasetSize.NBytes,
strategy = StrategyType(protectedStrategy),
filename = filenameOption,
mimetype = mimetypeOption,
)
else:
Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec,
filename = filenameOption,
mimetype = mimetypeOption,
)
?self.verify()
if verifiable:
let
verifyRootCid = ?Cid.init(verifyRoot).mapFailure
slotRootCids = slotRoots.mapIt(?Cid.init(it).mapFailure)
return Manifest.new(
manifest = self,
verifyRoot = verifyRootCid,
slotRoots = slotRootCids,
cellSize = cellSize.NBytes,
strategy = StrategyType(verifiableStrategy),
)
let self = Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec,
filename = filenameOption,
mimetype = mimetypeOption,
)
self.success

View File

@ -35,24 +35,6 @@ type Manifest* = ref object of RootObj
version: CidVersion # Cid version
filename {.serialize.}: ?string # The filename of the content uploaded (optional)
mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
case protected {.serialize.}: bool # Protected datasets have erasure coded info
of true:
ecK: int # Number of blocks to encode
ecM: int # Number of resulting parity blocks
originalTreeCid: Cid # The original root of the dataset being erasure coded
originalDatasetSize: NBytes
protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
case verifiable {.serialize.}: bool
# Verifiable datasets can be used to generate storage proofs
of true:
verifyRoot: Cid # Root of the top level merkle tree built from slot roots
slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
cellSize: NBytes # Size of each slot cell
verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots
else:
discard
else:
discard
############################################################
# Accessors
@ -73,54 +55,12 @@ func hcodec*(self: Manifest): MultiCodec =
func codec*(self: Manifest): MultiCodec =
self.codec
func protected*(self: Manifest): bool =
self.protected
func ecK*(self: Manifest): int =
self.ecK
func ecM*(self: Manifest): int =
self.ecM
func originalTreeCid*(self: Manifest): Cid =
self.originalTreeCid
func originalBlocksCount*(self: Manifest): int =
divUp(self.originalDatasetSize.int, self.blockSize.int)
func originalDatasetSize*(self: Manifest): NBytes =
self.originalDatasetSize
func treeCid*(self: Manifest): Cid =
self.treeCid
func blocksCount*(self: Manifest): int =
divUp(self.datasetSize.int, self.blockSize.int)
func verifiable*(self: Manifest): bool =
bool (self.protected and self.verifiable)
func verifyRoot*(self: Manifest): Cid =
self.verifyRoot
func slotRoots*(self: Manifest): seq[Cid] =
self.slotRoots
func numSlots*(self: Manifest): int =
self.ecK + self.ecM
func cellSize*(self: Manifest): NBytes =
self.cellSize
func protectedStrategy*(self: Manifest): StrategyType =
self.protectedStrategy
func verifiableStrategy*(self: Manifest): StrategyType =
self.verifiableStrategy
func numSlotBlocks*(self: Manifest): int =
divUp(self.blocksCount, self.numSlots)
func filename*(self: Manifest): ?string =
self.filename
@ -141,51 +81,17 @@ func isManifest*(mc: MultiCodec): ?!bool =
# Various sizes and verification
############################################################
func rounded*(self: Manifest): int =
## Number of data blocks in *protected* manifest including padding at the end
roundUp(self.originalBlocksCount, self.ecK)
func steps*(self: Manifest): int =
## Number of EC groups in *protected* manifest
divUp(self.rounded, self.ecK)
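# Worked example (illustrative): with originalBlocksCount = 10 and ecK = 4,
# rounded = 12 and steps = 3; with ecM = 2 the protected dataset then holds
# steps * (ecK + ecM) = 18 blocks (see `verify` below).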
func verify*(self: Manifest): ?!void =
## Check manifest correctness
##
if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
return
failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
return success()
func `==`*(a, b: Manifest): bool =
(a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
(a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
(a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and
(a.mimetype == b.mimetype) and (
if a.protected:
(a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and
(a.originalDatasetSize == b.originalDatasetSize) and
(a.protectedStrategy == b.protectedStrategy) and (a.verifiable == b.verifiable) and
(
if a.verifiable:
(a.verifyRoot == b.verifyRoot) and (a.slotRoots == b.slotRoots) and
(a.cellSize == b.cellSize) and (
a.verifiableStrategy == b.verifiableStrategy
)
else:
true
)
else:
true
)
(a.codec == b.codec) and (a.filename == b.filename) and
(a.mimetype == b.mimetype)
func `$`*(self: Manifest): string =
result =
"treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " &
$self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec &
", codec: " & $self.codec & ", protected: " & $self.protected
", codec: " & $self.codec
if self.filename.isSome:
result &= ", filename: " & $self.filename
@ -193,20 +99,6 @@ func `$`*(self: Manifest): string =
if self.mimetype.isSome:
result &= ", mimetype: " & $self.mimetype
result &= (
if self.protected:
", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " &
$self.originalTreeCid & ", originalDatasetSize: " & $self.originalDatasetSize &
", verifiable: " & $self.verifiable & (
if self.verifiable:
", verifyRoot: " & $self.verifyRoot & ", slotRoots: " & $self.slotRoots
else:
""
)
else:
""
)
return result
############################################################
@ -221,7 +113,6 @@ func new*(
version: CidVersion = CIDv1,
hcodec = Sha256HashCodec,
codec = BlockCodec,
protected = false,
filename: ?string = string.none,
mimetype: ?string = string.none,
): Manifest =
@ -232,132 +123,10 @@ func new*(
version: version,
codec: codec,
hcodec: hcodec,
protected: protected,
filename: filename,
mimetype: mimetype,
)
func new*(
T: type Manifest,
manifest: Manifest,
treeCid: Cid,
datasetSize: NBytes,
ecK, ecM: int,
strategy = SteppedStrategy,
): Manifest =
## Create an erasure protected dataset from an
## unprotected one
##
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: true,
ecK: ecK,
ecM: ecM,
originalTreeCid: manifest.treeCid,
originalDatasetSize: manifest.datasetSize,
protectedStrategy: strategy,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(T: type Manifest, manifest: Manifest): Manifest =
## Create an unprotected dataset from an
## erasure protected one
##
Manifest(
treeCid: manifest.originalTreeCid,
datasetSize: manifest.originalDatasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: false,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(
T: type Manifest,
treeCid: Cid,
datasetSize: NBytes,
blockSize: NBytes,
version: CidVersion,
hcodec: MultiCodec,
codec: MultiCodec,
ecK: int,
ecM: int,
originalTreeCid: Cid,
originalDatasetSize: NBytes,
strategy = SteppedStrategy,
filename: ?string = string.none,
mimetype: ?string = string.none,
): Manifest =
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
blockSize: blockSize,
version: version,
hcodec: hcodec,
codec: codec,
protected: true,
ecK: ecK,
ecM: ecM,
originalTreeCid: originalTreeCid,
originalDatasetSize: originalDatasetSize,
protectedStrategy: strategy,
filename: filename,
mimetype: mimetype,
)
func new*(
T: type Manifest,
manifest: Manifest,
verifyRoot: Cid,
slotRoots: openArray[Cid],
cellSize = DefaultCellSize,
strategy = LinearStrategy,
): ?!Manifest =
## Create a verifiable dataset from a
## protected one
##
if not manifest.protected:
return failure newException(
CodexError, "Can create verifiable manifest only from protected manifest."
)
if slotRoots.len != manifest.numSlots:
return failure newException(CodexError, "Wrong number of slot roots.")
success Manifest(
treeCid: manifest.treeCid,
datasetSize: manifest.datasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: true,
ecK: manifest.ecK,
ecM: manifest.ecM,
originalTreeCid: manifest.originalTreeCid,
originalDatasetSize: manifest.originalDatasetSize,
protectedStrategy: manifest.protectedStrategy,
verifiable: true,
verifyRoot: verifyRoot,
slotRoots: @slotRoots,
cellSize: cellSize,
verifiableStrategy: strategy,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =
## Create a manifest instance from given data
##

View File

@ -1,313 +0,0 @@
import pkg/chronos
import pkg/questionable
import pkg/ethers/erc20
import ./contracts/requests
import ./contracts/proofs
import ./clock
import ./errors
import ./periods
export chronos
export questionable
export requests
export proofs
export SecondsSince1970
export periods
type
Market* = ref object of RootObj
MarketError* = object of CodexError
SlotStateMismatchError* = object of MarketError
SlotReservationNotAllowedError* = object of MarketError
ProofInvalidError* = object of MarketError
Subscription* = ref object of RootObj
OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotReservationsFull* =
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].}
ProofChallenge* = array[32, byte]
# Marketplace events -- located here due to the Market abstraction
MarketplaceEvent* = Event
StorageRequested* = object of MarketplaceEvent
requestId*: RequestId
ask*: StorageAsk
expiry*: uint64
SlotFilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
SlotFreed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
SlotReservationsFull* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
RequestFulfilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
RequestCancelled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
RequestFailed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
ProofSubmitted* = object of MarketplaceEvent
id*: SlotId
method loadConfig*(
market: Market
): Future[?!void] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method getZkeyHash*(
market: Market
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getSigner*(
market: Market
): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method periodicity*(
market: Market
): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method proofTimeout*(
market: Market
): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method repairRewardPercentage*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
raiseAssert("not implemented")
method proofDowntime*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
raiseAssert("not implemented")
proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
let downtime = await market.proofDowntime
let pntr = await market.getPointer(slotId)
return pntr < downtime
method requestStorage*(
market: Market, request: StorageRequest
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
raiseAssert("not implemented")
method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
raiseAssert("not implemented")
method getRequest*(
market: Market, id: RequestId
): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method requestState*(
market: Market, requestId: RequestId
): Future[?RequestState] {.base, async.} =
raiseAssert("not implemented")
method slotState*(
market: Market, slotId: SlotId
): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getRequestEnd*(
market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")
method requestExpiresAt*(
market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")
method getHost*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method currentCollateral*(
market: Market, slotId: SlotId
): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} =
raiseAssert("not implemented")
method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
raiseAssert("not implemented")
method fillSlot*(
market: Market,
requestId: RequestId,
slotIndex: uint64,
proof: Groth16Proof,
collateral: UInt256,
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method freeSlot*(
market: Market, slotId: SlotId
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method withdrawFunds*(
market: Market, requestId: RequestId
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method subscribeRequests*(
market: Market, callback: OnRequest
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method isProofRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method willProofBeRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method getChallenge*(
market: Market, id: SlotId
): Future[ProofChallenge] {.base, async.} =
raiseAssert("not implemented")
method submitProof*(
market: Market, id: SlotId, proof: Groth16Proof
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method markProofAsMissing*(
market: Market, id: SlotId, period: Period
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method canMarkProofAsMissing*(
market: Market, id: SlotId, period: Period
): Future[bool] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method reserveSlot*(
market: Market, requestId: RequestId, slotIndex: uint64
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method canReserveSlot*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method subscribeFulfillment*(
market: Market, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeFulfillment*(
market: Market, requestId: RequestId, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFilled*(
market: Market, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFilled*(
market: Market, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFreed*(
market: Market, callback: OnSlotFreed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotReservationsFull*(
market: Market, callback: OnSlotReservationsFull
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestCancelled*(
market: Market, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestCancelled*(
market: Market, requestId: RequestId, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestFailed*(
market: Market, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestFailed*(
market: Market, requestId: RequestId, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeProofSubmission*(
market: Market, callback: OnProofSubmitted
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method unsubscribe*(subscription: Subscription) {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, blocksAgo: int
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequestedEvents*(
market: Market, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequestedEvents*(
market: Market, blocksAgo: int
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.base, gcsafe, raises: [].} =
raiseAssert("not implemented")
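# A minimal sketch (hypothetical names, not part of this module) of how a
# concrete market implements this abstraction by overriding the base methods:
#
#   type MockMarket = ref object of Market
#     signer: Address
#
#   method getSigner*(
#       market: MockMarket
#   ): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
#     return market.signer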

View File

@ -19,7 +19,6 @@ import pkg/taskpools
import pkg/questionable
import pkg/questionable/results
import pkg/chronos
import pkg/poseidon2
import pkg/libp2p/[switch, multicodec, multihash]
import pkg/libp2p/stream/bufferstream
@ -29,7 +28,6 @@ import pkg/libp2p/routing_record
import pkg/libp2p/signed_envelope
import ./chunker
import ./slots
import ./clock
import ./blocktype as bt
import ./manifest
@ -37,9 +35,7 @@ import ./merkletree
import ./stores
import ./blockexchange
import ./streams
import ./erasure
import ./discovery
import ./contracts
import ./indexingstrategy
import ./utils
import ./errors
@ -58,23 +54,13 @@ const
BatchRefillThreshold = 0.75 # Refill when 75% of window completes
type
Contracts* =
tuple[
client: ?ClientInteractions,
host: ?HostInteractions,
validator: ?ValidatorInteractions,
]
CodexNode* = object
switch: Switch
networkId: PeerId
networkStore: NetworkStore
engine: BlockExcEngine
prover: ?Prover
discovery: Discovery
contracts*: Contracts
clock*: Clock
storage*: Contracts
taskpool: Taskpool
trackedFutures: TrackedFutures
@ -319,20 +305,6 @@ proc streamEntireDataset(
var jobs: seq[Future[void]]
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
if manifest.protected:
# Retrieve, decode and save to the local store all EC groups
proc erasureJob(): Future[void] {.async: (raises: []).} =
try:
# Spawn an erasure decoding job
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
without _ =? (await erasure.decode(manifest)), error:
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
except CatchableError as exc:
trace "Error erasure decoding manifest", manifestCid, exc = exc.msg
jobs.add(erasureJob())
jobs.add(self.fetchDatasetAsync(manifest, fetchLocal = false))
@ -530,295 +502,11 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =
onManifest(cid, manifest)
proc setupRequest(
self: CodexNodeRef,
cid: Cid,
duration: uint64,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
pricePerBytePerSecond: UInt256,
collateralPerByte: UInt256,
expiry: uint64,
): Future[?!StorageRequest] {.async.} =
## Set up slots for a given dataset
##
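## The flow: fetch the dataset manifest, erasure code it with K = nodes - tolerance
## data blocks and M = tolerance parity blocks per group, build and store a
## verifiable (Poseidon2) manifest, and derive the StorageRequest from it.
##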
let
ecK = nodes - tolerance
ecM = tolerance
logScope:
cid = cid
duration = duration
nodes = nodes
tolerance = tolerance
pricePerBytePerSecond = pricePerBytePerSecond
proofProbability = proofProbability
collateralPerByte = collateralPerByte
expiry = expiry
ecK = ecK
ecM = ecM
trace "Setting up slots"
without manifest =? await self.fetchManifest(cid), error:
trace "Unable to fetch manifest for cid"
return failure error
# Erasure code the dataset according to provided parameters
let erasure = Erasure.new(
self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
trace "Unable to erasure code dataset"
return failure(error)
without builder =? Poseidon2Builder.new(self.networkStore.localStore, encoded), err:
trace "Unable to create slot builder"
return failure(err)
without verifiable =? (await builder.buildManifest()), err:
trace "Unable to build verifiable manifest"
return failure(err)
without manifestBlk =? await self.storeManifest(verifiable), err:
trace "Unable to store verifiable manifest"
return failure(err)
let
verifyRoot =
if builder.verifyRoot.isNone:
return failure("No slots root")
else:
builder.verifyRoot.get.toBytes
request = StorageRequest(
ask: StorageAsk(
slots: verifiable.numSlots.uint64,
slotSize: builder.slotBytes.uint64,
duration: duration,
proofProbability: proofProbability,
pricePerBytePerSecond: pricePerBytePerSecond,
collateralPerByte: collateralPerByte,
maxSlotLoss: tolerance,
),
content: StorageContent(cid: manifestBlk.cid, merkleRoot: verifyRoot),
expiry: expiry,
)
trace "Request created", request = $request
success request
proc requestStorage*(
self: CodexNodeRef,
cid: Cid,
duration: uint64,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
pricePerBytePerSecond: UInt256,
collateralPerByte: UInt256,
expiry: uint64,
): Future[?!PurchaseId] {.async.} =
## Initiate a request-for-storage sequence; this might
## be a multistep procedure.
##
logScope:
cid = cid
duration = duration
nodes = nodes
tolerance = tolerance
pricePerBytePerSecond = pricePerBytePerSecond
proofProbability = proofProbability
collateralPerByte = collateralPerByte
expiry = expiry
now = self.clock.now
trace "Received a request for storage!"
without contracts =? self.contracts.client:
trace "Purchasing not available"
return failure "Purchasing not available"
without request =? (
await self.setupRequest(
cid, duration, proofProbability, nodes, tolerance, pricePerBytePerSecond,
collateralPerByte, expiry,
)
), err:
trace "Unable to setup request"
return failure err
let purchase = await contracts.purchasing.purchase(request)
success purchase.id
proc onStore(
self: CodexNodeRef,
request: StorageRequest,
expiry: SecondsSince1970,
slotIdx: uint64,
blocksCb: BlocksCb,
isRepairing: bool = false,
): Future[?!void] {.async: (raises: [CancelledError]).} =
## store data in local storage
##
let cid = request.content.cid
logScope:
cid = $cid
slotIdx = slotIdx
trace "Received a request to store a slot"
without manifest =? (await self.fetchManifest(cid)), err:
trace "Unable to fetch manifest for cid", cid, err = err.msg
return failure(err)
without builder =?
Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy), err:
trace "Unable to create slots builder", err = err.msg
return failure(err)
if slotIdx > manifest.slotRoots.high.uint64:
trace "Slot index not in manifest", slotIdx
return failure(newException(CodexError, "Slot index not in manifest"))
proc updateExpiry(
blocks: seq[bt.Block]
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "Updating expiry for blocks", blocks = blocks.len
let ensureExpiryFutures =
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
let res = await allFinishedFailed[?!void](ensureExpiryFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
trace "Unable to process blocks", err = err.msg
return failure(err)
return success()
if slotIdx > int.high.uint64:
error "Cannot cast slot index to int", slotIndex = slotIdx
return
if isRepairing:
trace "start repairing slot", slotIdx
try:
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
if err =? (await erasure.repair(manifest)).errorOption:
error "Unable to erasure decode repairing manifest",
cid = manifest.treeCid, exc = err.msg
return failure(err)
except CatchableError as exc:
error "Error erasure decoding repairing manifest",
cid = manifest.treeCid, exc = exc.msg
return failure(exc.msg)
else:
without indexer =?
manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)
without blksIter =? indexer.getIndices(slotIdx.int).catch, err:
trace "Unable to get indices from strategy", err = err.msg
return failure(err)
if err =? (
await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
).errorOption:
trace "Unable to fetch blocks", err = err.msg
return failure(err)
without slotRoot =? (await builder.buildSlot(slotIdx.int)), err:
trace "Unable to build slot", err = err.msg
return failure(err)
if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]:
trace "Slot root mismatch",
manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
return failure(newException(CodexError, "Slot root mismatch"))
trace "Slot successfully retrieved and reconstructed"
return success()
proc onProve(
self: CodexNodeRef, slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async: (raises: [CancelledError]).} =
## Generates a proof for a given slot and challenge
##
let
cidStr = $slot.request.content.cid
slotIdx = slot.slotIndex
logScope:
cid = cidStr
slot = slotIdx
challenge = challenge
trace "Received proof challenge"
if prover =? self.prover:
trace "Prover enabled"
without cid =? Cid.init(cidStr).mapFailure, err:
error "Unable to parse Cid", cid, err = err.msg
return failure(err)
without manifest =? await self.fetchManifest(cid), err:
error "Unable to fetch manifest for cid", err = err.msg
return failure(err)
when defined(verify_circuit):
without (inputs, proof) =? await prover.prove(slotIdx.int, manifest, challenge),
err:
error "Unable to generate proof", err = err.msg
return failure(err)
without checked =? await prover.verify(proof, inputs), err:
error "Unable to verify proof", err = err.msg
return failure(err)
if not checked:
error "Proof verification failed"
return failure("Proof verification failed")
trace "Proof verified successfully"
else:
without (_, proof) =? await prover.prove(slotIdx.int, manifest, challenge), err:
error "Unable to generate proof", err = err.msg
return failure(err)
let groth16Proof = proof.toGroth16Proof()
trace "Proof generated successfully", groth16Proof
success groth16Proof
else:
warn "Prover not enabled"
failure "Prover not enabled"
proc onExpiryUpdate(
self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raises: [CancelledError]).} =
return await self.updateExpiry(rootCid, expiry)
proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) =
# TODO: remove data from local storage
discard
proc start*(self: CodexNodeRef) {.async.} =
if not self.engine.isNil:
await self.engine.start()
@ -829,57 +517,6 @@ proc start*(self: CodexNodeRef) {.async.} =
if not self.clock.isNil:
await self.clock.start()
if hostContracts =? self.contracts.host:
hostContracts.sales.onStore = proc(
request: StorageRequest,
expiry: SecondsSince1970,
slot: uint64,
onBatch: BatchProc,
isRepairing: bool = false,
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onStore(request, expiry, slot, onBatch, isRepairing)
hostContracts.sales.onExpiryUpdate = proc(
rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onExpiryUpdate(rootCid, expiry)
hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) =
# TODO: remove data from local storage
self.onClear(request, slotIndex)
hostContracts.sales.onProve = proc(
slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async: (raw: true, raises: [CancelledError]).} =
# TODO: generate proof
self.onProve(slot, challenge)
try:
await hostContracts.start()
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start host contract interactions", error = error.msg
self.contracts.host = HostInteractions.none
if clientContracts =? self.contracts.client:
try:
await clientContracts.start()
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start client contract interactions: ", error = error.msg
self.contracts.client = ClientInteractions.none
if validatorContracts =? self.contracts.validator:
try:
await validatorContracts.start()
except CancelledError as error:
raise error
except CatchableError as error:
error "Unable to start validator contract interactions: ", error = error.msg
self.contracts.validator = ValidatorInteractions.none
self.networkId = self.switch.peerInfo.peerId
notice "Started Storage node", id = self.networkId, addrs = self.switch.peerInfo.addrs
@ -894,15 +531,6 @@ proc stop*(self: CodexNodeRef) {.async.} =
if not self.discovery.isNil:
await self.discovery.stop()
if clientContracts =? self.contracts.client:
await clientContracts.stop()
if hostContracts =? self.contracts.host:
await hostContracts.stop()
if validatorContracts =? self.contracts.validator:
await validatorContracts.stop()
if not self.clock.isNil:
await self.clock.stop()
@ -917,8 +545,6 @@ proc new*(
engine: BlockExcEngine,
discovery: Discovery,
taskpool: Taskpool,
prover = Prover.none,
contracts = Contracts.default,
): CodexNodeRef =
## Create new instance of a Codex self, call `start` to run it
##
@ -927,10 +553,8 @@ proc new*(
switch: switch,
networkStore: networkStore,
engine: engine,
prover: prover,
discovery: discovery,
taskPool: taskpool,
contracts: contracts,
trackedFutures: TrackedFutures(),
)

View File

@ -1,17 +0,0 @@
import pkg/stint
type
Periodicity* = object
seconds*: uint64
Period* = uint64
Timestamp* = uint64
func periodOf*(periodicity: Periodicity, timestamp: Timestamp): Period =
timestamp div periodicity.seconds
func periodStart*(periodicity: Periodicity, period: Period): Timestamp =
period * periodicity.seconds
func periodEnd*(periodicity: Periodicity, period: Period): Timestamp =
periodicity.periodStart(period + 1)
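# Worked example (illustrative): with `seconds = 10`, timestamp 25 falls in
# period 2, which starts at 20 and ends at 30.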

View File

@ -1,74 +0,0 @@
import std/tables
import pkg/stint
import pkg/chronos
import pkg/questionable
import pkg/nimcrypto
import ./market
import ./clock
import ./purchasing/purchase
export questionable
export chronos
export market
export purchase
type
Purchasing* = ref object
market*: Market
clock: Clock
purchases: Table[PurchaseId, Purchase]
proofProbability*: UInt256
PurchaseTimeout* = Timeout
const DefaultProofProbability = 100.u256
proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
Purchasing(market: market, clock: clock, proofProbability: DefaultProofProbability)
proc load*(purchasing: Purchasing) {.async.} =
let market = purchasing.market
let requestIds = await market.myRequests()
for requestId in requestIds:
let purchase = Purchase.new(requestId, purchasing.market, purchasing.clock)
purchase.load()
purchasing.purchases[purchase.id] = purchase
proc start*(purchasing: Purchasing) {.async.} =
await purchasing.load()
proc stop*(purchasing: Purchasing) {.async.} =
discard
proc populate*(
purchasing: Purchasing, request: StorageRequest
): Future[StorageRequest] {.async.} =
result = request
if result.ask.proofProbability == 0.u256:
result.ask.proofProbability = purchasing.proofProbability
if result.nonce == Nonce.default:
var id = result.nonce.toArray
doAssert randomBytes(id) == 32
result.nonce = Nonce(id)
result.client = await purchasing.market.getSigner()
proc purchase*(
purchasing: Purchasing, request: StorageRequest
): Future[Purchase] {.async.} =
let request = await purchasing.populate(request)
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
purchase.start()
purchasing.purchases[purchase.id] = purchase
return purchase
func getPurchase*(purchasing: Purchasing, id: PurchaseId): ?Purchase =
if purchasing.purchases.hasKey(id):
some purchasing.purchases[id]
else:
none Purchase
func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] =
var pIds: seq[PurchaseId] = @[]
for key in purchasing.purchases.keys:
pIds.add(key)
return pIds

View File

@ -1,74 +0,0 @@
import ./statemachine
import ./states/pending
import ./states/unknown
import ./purchaseid
# Purchase is implemented as a state machine.
#
# It can either be a new (pending) purchase that still needs to be submitted
# on-chain, or a purchase that was previously submitted on-chain whose
# (unknown) state we're restoring after a node restart.
#
# |
# v
# ------------------------- unknown
# | / /
# v v /
# pending ----> submitted ----> started ---------> finished <----/
# \ \ /
# \ ------------> failed <----/
# \ /
# --> cancelled <-----------------------
export Purchase
export purchaseid
export statemachine
func new*(
_: type Purchase, requestId: RequestId, market: Market, clock: Clock
): Purchase =
## create a new instance of a Purchase
##
var purchase = Purchase.new()
{.cast(noSideEffect).}:
purchase.future = newFuture[void]()
purchase.requestId = requestId
purchase.market = market
purchase.clock = clock
return purchase
func new*(
_: type Purchase, request: StorageRequest, market: Market, clock: Clock
): Purchase =
## Create a new purchase using the given market and clock
let purchase = Purchase.new(request.id, market, clock)
purchase.request = some request
return purchase
proc start*(purchase: Purchase) =
purchase.start(PurchasePending())
proc load*(purchase: Purchase) =
purchase.start(PurchaseUnknown())
proc wait*(purchase: Purchase) {.async.} =
await purchase.future
func id*(purchase: Purchase): PurchaseId =
PurchaseId(purchase.requestId)
func finished*(purchase: Purchase): bool =
purchase.future.finished
func error*(purchase: Purchase): ?(ref CatchableError) =
if purchase.future.failed:
some purchase.future.error
else:
none (ref CatchableError)
func state*(purchase: Purchase): ?string =
proc description(state: State): string =
$state
purchase.query(description)

View File

@ -1,14 +0,0 @@
import std/hashes
import ../logutils
type PurchaseId* = distinct array[32, byte]
logutils.formatIt(LogFormat.textLines, PurchaseId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, PurchaseId):
it.to0xHexLog
proc hash*(x: PurchaseId): Hash {.borrow.}
proc `==`*(x, y: PurchaseId): bool {.borrow.}
proc toHex*(x: PurchaseId): string =
array[32, byte](x).toHex

View File

@ -1,19 +0,0 @@
import ../utils/asyncstatemachine
import ../market
import ../clock
import ../errors
export market
export clock
export asyncstatemachine
type
Purchase* = ref object of Machine
future*: Future[void]
market*: Market
clock*: Clock
requestId*: RequestId
request*: ?StorageRequest
PurchaseState* = ref object of State
PurchaseError* = object of CodexError

View File

@ -1,35 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./error
declareCounter(codex_purchases_cancelled, "codex purchases cancelled")
logScope:
topics = "marketplace purchases cancelled"
type PurchaseCancelled* = ref object of PurchaseState
method `$`*(state: PurchaseCancelled): string =
"cancelled"
method run*(
state: PurchaseCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_cancelled.inc()
let purchase = Purchase(machine)
try:
warn "Request cancelled, withdrawing remaining funds",
requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
let error = newException(Timeout, "Purchase cancelled due to timeout")
purchase.future.fail(error)
except CancelledError as e:
trace "PurchaseCancelled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseCancelled.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

View File

@ -1,26 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../utils/exceptions
import ../../logutils
declareCounter(codex_purchases_error, "codex purchases error")
logScope:
topics = "marketplace purchases errored"
type PurchaseErrored* = ref object of PurchaseState
error*: ref CatchableError
method `$`*(state: PurchaseErrored): string =
"errored"
method run*(
state: PurchaseErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_error.inc()
let purchase = Purchase(machine)
error "Purchasing error",
error = state.error.msgDetail, requestId = purchase.requestId
purchase.future.fail(state.error)

View File

@ -1,30 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../logutils
import ../../utils/exceptions
import ./error
declareCounter(codex_purchases_failed, "codex purchases failed")
type PurchaseFailed* = ref object of PurchaseState
method `$`*(state: PurchaseFailed): string =
"failed"
method run*(
state: PurchaseFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_failed.inc()
let purchase = Purchase(machine)
try:
warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
except CancelledError as e:
trace "PurchaseFailed.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseFailed.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
let error = newException(PurchaseError, "Purchase failed")
return some State(PurchaseErrored(error: error))

View File

@ -1,33 +0,0 @@
import pkg/metrics
import ../statemachine
import ../../utils/exceptions
import ../../logutils
import ./error
declareCounter(codex_purchases_finished, "codex purchases finished")
logScope:
topics = "marketplace purchases finished"
type PurchaseFinished* = ref object of PurchaseState
method `$`*(state: PurchaseFinished): string =
"finished"
method run*(
state: PurchaseFinished, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_finished.inc()
let purchase = Purchase(machine)
try:
info "Purchase finished, withdrawing remaining funds",
requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
purchase.future.complete()
except CancelledError as e:
trace "PurchaseFinished.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseFinished.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

View File

@ -1,28 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./submitted
import ./error
declareCounter(codex_purchases_pending, "codex purchases pending")
type PurchasePending* = ref object of PurchaseState
method `$`*(state: PurchasePending): string =
"pending"
method run*(
state: PurchasePending, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_pending.inc()
let purchase = Purchase(machine)
try:
let request = !purchase.request
await purchase.market.requestStorage(request)
return some State(PurchaseSubmitted())
except CancelledError as e:
trace "PurchasePending.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchasePending.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

View File

@ -1,54 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./finished
import ./failed
import ./error
declareCounter(codex_purchases_started, "codex purchases started")
logScope:
topics = "marketplace purchases started"
type PurchaseStarted* = ref object of PurchaseState
method `$`*(state: PurchaseStarted): string =
"started"
method run*(
state: PurchaseStarted, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_started.inc()
let purchase = Purchase(machine)
let clock = purchase.clock
let market = purchase.market
info "All required slots filled, purchase started", requestId = purchase.requestId
let failed = newFuture[void]()
proc callback(_: RequestId) =
failed.complete()
var ended: Future[void]
try:
let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)
# Ensure that we're past the request end by waiting an additional second
ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
let fut = await one(ended, failed)
await subscription.unsubscribe()
if fut.id == failed.id:
ended.cancelSoon()
return some State(PurchaseFailed())
else:
failed.cancelSoon()
return some State(PurchaseFinished())
except CancelledError as e:
ended.cancelSoon()
failed.cancelSoon()
trace "PurchaseStarted.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseStarted.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

View File

@ -1,56 +0,0 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./started
import ./cancelled
import ./error
logScope:
topics = "marketplace purchases submitted"
declareCounter(codex_purchases_submitted, "codex purchases submitted")
type PurchaseSubmitted* = ref object of PurchaseState
method `$`*(state: PurchaseSubmitted): string =
"submitted"
method run*(
state: PurchaseSubmitted, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_submitted.inc()
let purchase = Purchase(machine)
let request = !purchase.request
let market = purchase.market
let clock = purchase.clock
info "Request submitted, waiting for slots to be filled",
requestId = purchase.requestId
proc wait() {.async.} =
let done = newAsyncEvent()
proc callback(_: RequestId) =
done.fire()
let subscription = await market.subscribeFulfillment(request.id, callback)
await done.wait()
await subscription.unsubscribe()
proc withTimeout(future: Future[void]) {.async.} =
let expiry = (await market.requestExpiresAt(request.id)) + 1
trace "waiting for request fulfillment or expiry", expiry
await future.withTimeout(clock, expiry)
try:
await wait().withTimeout()
except Timeout:
return some State(PurchaseCancelled())
except CancelledError as e:
trace "PurchaseSubmitted.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseSubmitted.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
return some State(PurchaseStarted())

View File

@ -1,44 +0,0 @@
import pkg/metrics
import ../../utils/exceptions
import ../../logutils
import ../statemachine
import ./submitted
import ./started
import ./cancelled
import ./finished
import ./failed
import ./error
declareCounter(codex_purchases_unknown, "codex purchases unknown")
type PurchaseUnknown* = ref object of PurchaseState
method `$`*(state: PurchaseUnknown): string =
"unknown"
method run*(
state: PurchaseUnknown, machine: Machine
): Future[?State] {.async: (raises: []).} =
try:
codex_purchases_unknown.inc()
let purchase = Purchase(machine)
if (request =? await purchase.market.getRequest(purchase.requestId)) and
(requestState =? await purchase.market.requestState(purchase.requestId)):
purchase.request = some request
case requestState
of RequestState.New:
return some State(PurchaseSubmitted())
of RequestState.Started:
return some State(PurchaseStarted())
of RequestState.Cancelled:
return some State(PurchaseCancelled())
of RequestState.Finished:
return some State(PurchaseFinished())
of RequestState.Failed:
return some State(PurchaseFailed())
except CancelledError as e:
trace "PurchaseUnknown.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseUnknown.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))


@ -30,8 +30,6 @@ import ../logutils
import ../node
import ../blocktype
import ../conf
import ../contracts
import ../erasure/erasure
import ../manifest
import ../streams/asyncstreamwrapper
import ../stores
@ -116,9 +114,7 @@ proc retrieveCid(
# For erasure-coded datasets, we need to return the _original_ length; i.e.,
# the length of the non-erasure-coded dataset, as that's what we will be
# returning to the client.
let contentLength =
if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize
resp.setHeader("Content-Length", $(contentLength.int))
resp.setHeader("Content-Length", $(manifest.datasetSize.int))
await resp.prepare(HttpResponseStreamType.Plain)
@ -388,459 +384,6 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
)
return RestApiResponse.response($json, contentType = "application/json")
proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/storage/v1/sales/slots") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
## Returns active slots for the host
try:
without contracts =? node.contracts.host:
return RestApiResponse.error(
Http503, "Persistence is not enabled", headers = headers
)
let json = %(await contracts.sales.mySlots())
return RestApiResponse.response(
$json, contentType = "application/json", headers = headers
)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/storage/v1/sales/slots/{slotId}") do(
slotId: SlotId
) -> RestApiResponse:
## Returns active slot with id {slotId} for the host. Returns 404 if the
## slot is not active for the host.
var headers = buildCorsHeaders("GET", allowedOrigin)
without contracts =? node.contracts.host:
return
RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
without slotId =? slotId.tryGet.catch, error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
without agent =? await contracts.sales.activeSale(slotId):
return
RestApiResponse.error(Http404, "Provider not filling slot", headers = headers)
let restAgent = RestSalesAgent(
state: agent.state() |? "none",
slotIndex: agent.data.slotIndex,
requestId: agent.data.requestId,
request: agent.data.request,
reservation: agent.data.reservation,
)
return RestApiResponse.response(
restAgent.toJson, contentType = "application/json", headers = headers
)
router.api(MethodGet, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
## Returns storage that is for sale
var headers = buildCorsHeaders("GET", allowedOrigin)
try:
without contracts =? node.contracts.host:
return RestApiResponse.error(
Http503, "Persistence is not enabled", headers = headers
)
without avails =? (await contracts.sales.context.reservations.all(Availability)),
err:
return RestApiResponse.error(Http500, err.msg, headers = headers)
let json = %avails
return RestApiResponse.response(
$json, contentType = "application/json", headers = headers
)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.rawApi(MethodPost, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
## Add available storage to sell.
## Every time Availability's offer finishes, its capacity is
## returned to the availability.
##
## totalSize - size of available storage in bytes
## duration - maximum time the storage should be sold for (in seconds)
## minPricePerBytePerSecond - minimal price per byte paid (in amount of
## tokens) to be matched against the request's pricePerBytePerSecond
## totalCollateral - total collateral (in amount of
## tokens) that can be distributed among matching requests
var headers = buildCorsHeaders("POST", allowedOrigin)
try:
without contracts =? node.contracts.host:
return RestApiResponse.error(
Http503, "Persistence is not enabled", headers = headers
)
let body = await request.getBody()
without restAv =? RestAvailability.fromJson(body), error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
let reservations = contracts.sales.context.reservations
if restAv.totalSize == 0:
return RestApiResponse.error(
Http422, "Total size must be larger then zero", headers = headers
)
if restAv.duration == 0:
return RestApiResponse.error(
Http422, "duration must be larger then zero", headers = headers
)
if restAv.minPricePerBytePerSecond == 0:
return RestApiResponse.error(
Http422,
"minPricePerBytePerSecond must be larger then zero",
headers = headers,
)
if restAv.totalCollateral == 0:
return RestApiResponse.error(
Http422, "totalCollateral must be larger then zero", headers = headers
)
if not reservations.hasAvailable(restAv.totalSize):
return
RestApiResponse.error(Http422, "Not enough storage quota", headers = headers)
without availability =? (
await reservations.createAvailability(
restAv.totalSize,
restAv.duration,
restAv.minPricePerBytePerSecond,
restAv.totalCollateral,
enabled = restAv.enabled |? true,
until = restAv.until |? 0,
)
), error:
if error of CancelledError:
raise error
if error of UntilOutOfBoundsError:
return RestApiResponse.error(Http422, error.msg)
return RestApiResponse.error(Http500, error.msg, headers = headers)
return RestApiResponse.response(
availability.toJson,
Http201,
contentType = "application/json",
headers = headers,
)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
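# Illustrative only (not part of the removed code): a request body matching the
# RestAvailability fields documented above; values are invented and the exact
# JSON encoding of UInt256 fields depended on the serializer in use.
#   {
#     "totalSize": 1073741824,
#     "duration": 604800,
#     "minPricePerBytePerSecond": "1",
#     "totalCollateral": "1000000"
#   }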
router.api(MethodOptions, "/api/storage/v1/sales/availability/{id}") do(
id: AvailabilityId, resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
resp.setCorsHeaders("PATCH", corsOrigin)
resp.status = Http204
await resp.sendBody("")
router.rawApi(MethodPatch, "/api/storage/v1/sales/availability/{id}") do(
id: AvailabilityId
) -> RestApiResponse:
## Updates Availability.
## The new parameters will be only considered for new requests.
## Existing Requests linked to this Availability will continue as is.
##
## totalSize - size of available storage in bytes.
## When decreasing the size, the lower limit is
## the currently utilized size, i.e. `totalSize - freeSize`.
## duration - maximum time the storage should be sold for (in seconds)
## minPricePerBytePerSecond - minimal price per byte paid (in amount of
## tokens) to be matched against the request's pricePerBytePerSecond
## totalCollateral - total collateral (in amount of
## tokens) that can be distributed among matching requests
try:
without contracts =? node.contracts.host:
return RestApiResponse.error(Http503, "Persistence is not enabled")
without id =? id.tryGet.catch, error:
return RestApiResponse.error(Http400, error.msg)
without keyId =? id.key.tryGet.catch, error:
return RestApiResponse.error(Http400, error.msg)
let
body = await request.getBody()
reservations = contracts.sales.context.reservations
type OptRestAvailability = Optionalize(RestAvailability)
without restAv =? OptRestAvailability.fromJson(body), error:
return RestApiResponse.error(Http400, error.msg)
without availability =? (await reservations.get(keyId, Availability)), error:
if error of NotExistsError:
return RestApiResponse.error(Http404, "Availability not found")
return RestApiResponse.error(Http500, error.msg)
if isSome restAv.freeSize:
return RestApiResponse.error(Http422, "Updating freeSize is not allowed")
if size =? restAv.totalSize:
if size == 0:
return RestApiResponse.error(Http422, "Total size must be larger then zero")
# we don't allow lowering the totalSize below the currently utilized size
if size < (availability.totalSize - availability.freeSize):
return RestApiResponse.error(
Http422,
"New totalSize must be larger then current totalSize - freeSize, which is currently: " &
$(availability.totalSize - availability.freeSize),
)
if not reservations.hasAvailable(size):
return RestApiResponse.error(Http422, "Not enough storage quota")
availability.freeSize += size - availability.totalSize
availability.totalSize = size
if duration =? restAv.duration:
availability.duration = duration
if minPricePerBytePerSecond =? restAv.minPricePerBytePerSecond:
availability.minPricePerBytePerSecond = minPricePerBytePerSecond
if totalCollateral =? restAv.totalCollateral:
availability.totalCollateral = totalCollateral
if until =? restAv.until:
availability.until = until
if enabled =? restAv.enabled:
availability.enabled = enabled
if err =? (await reservations.update(availability)).errorOption:
if err of CancelledError:
raise err
if err of UntilOutOfBoundsError:
return RestApiResponse.error(Http422, err.msg)
else:
return RestApiResponse.error(Http500, err.msg)
return RestApiResponse.response(Http204)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500)
router.rawApi(MethodGet, "/api/storage/v1/sales/availability/{id}/reservations") do(
id: AvailabilityId
) -> RestApiResponse:
## Gets Availability's reservations.
var headers = buildCorsHeaders("GET", allowedOrigin)
try:
without contracts =? node.contracts.host:
return RestApiResponse.error(
Http503, "Persistence is not enabled", headers = headers
)
without id =? id.tryGet.catch, error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
without keyId =? id.key.tryGet.catch, error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
let reservations = contracts.sales.context.reservations
let market = contracts.sales.context.market
if error =? (await reservations.get(keyId, Availability)).errorOption:
if error of NotExistsError:
return
RestApiResponse.error(Http404, "Availability not found", headers = headers)
else:
return RestApiResponse.error(Http500, error.msg, headers = headers)
without availabilitysReservations =? (await reservations.all(Reservation, id)),
err:
return RestApiResponse.error(Http500, err.msg, headers = headers)
# TODO: Expand this structure with information about the linked StorageRequest not only RequestID
return RestApiResponse.response(
availabilitysReservations.toJson,
contentType = "application/json",
headers = headers,
)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.rawApi(MethodPost, "/api/storage/v1/storage/request/{cid}") do(
cid: Cid
) -> RestApiResponse:
var headers = buildCorsHeaders("POST", allowedOrigin)
## Create a request for storage
##
## cid - the cid of a previously uploaded dataset
## duration - the duration of the request in seconds
## proofProbability - how often storage proofs are required
## pricePerBytePerSecond - the amount of tokens paid per byte per second to hosts the client is willing to pay
## expiry - threshold in seconds from now after which the request expires if it has not found the requested number of nodes to host the data
## nodes - number of nodes the content should be stored on
## tolerance - allowed number of nodes that can be lost before content is lost
## collateralPerByte - requested collateral per byte from hosts when they fill the slot
try:
without contracts =? node.contracts.client:
return RestApiResponse.error(
Http503, "Persistence is not enabled", headers = headers
)
without cid =? cid.tryGet.catch, error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
let body = await request.getBody()
without params =? StorageRequestParams.fromJson(body), error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
let expiry = params.expiry
if expiry <= 0 or expiry >= params.duration:
return RestApiResponse.error(
Http422,
"Expiry must be greater than zero and less than the request's duration",
headers = headers,
)
if params.proofProbability <= 0:
return RestApiResponse.error(
Http422, "Proof probability must be greater than zero", headers = headers
)
if params.collateralPerByte <= 0:
return RestApiResponse.error(
Http422, "Collateral per byte must be greater than zero", headers = headers
)
if params.pricePerBytePerSecond <= 0:
return RestApiResponse.error(
Http422,
"Price per byte per second must be greater than zero",
headers = headers,
)
let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit
if params.duration > requestDurationLimit:
return RestApiResponse.error(
Http422,
"Duration exceeds limit of " & $requestDurationLimit & " seconds",
headers = headers,
)
let nodes = params.nodes |? 3
let tolerance = params.tolerance |? 1
if tolerance == 0:
return RestApiResponse.error(
Http422, "Tolerance needs to be bigger then zero", headers = headers
)
# prevent underflow
if tolerance > nodes:
return RestApiResponse.error(
Http422,
"Invalid parameters: `tolerance` cannot be greater than `nodes`",
headers = headers,
)
let ecK = nodes - tolerance
let ecM = tolerance # for readability
# ensure the leopard constraints of 1 < K and K ≥ M
if ecK <= 1 or ecK < ecM:
return RestApiResponse.error(
Http422,
"Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`",
headers = headers,
)
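# Worked example (illustrative, not part of the removed code): with the
# defaults nodes = 3 and tolerance = 1, ecK = 2 and ecM = 1, so 1 < 2 and
# 2 ≥ 1 both hold and the request is accepted; nodes = 4 with tolerance = 3
# gives ecK = 1 and is rejected by the check above.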
without purchaseId =?
await node.requestStorage(
cid, params.duration, params.proofProbability, nodes, tolerance,
params.pricePerBytePerSecond, params.collateralPerByte, expiry,
), error:
if error of InsufficientBlocksError:
return RestApiResponse.error(
Http422,
"Dataset too small for erasure parameters, need at least " &
$(ref InsufficientBlocksError)(error).minSize.int & " bytes",
headers = headers,
)
return RestApiResponse.error(Http500, error.msg, headers = headers)
return RestApiResponse.response(purchaseId.toHex)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
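# Illustrative only (not part of the removed code): a request body using the
# StorageRequestParams fields; values are invented and the exact JSON encoding
# of UInt256 fields depended on the serializer in use.
#   {
#     "duration": 604800,
#     "proofProbability": "10",
#     "pricePerBytePerSecond": "1",
#     "collateralPerByte": "1",
#     "expiry": 3600,
#     "nodes": 3,
#     "tolerance": 1
#   }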
router.api(MethodGet, "/api/storage/v1/storage/purchases/{id}") do(
id: PurchaseId
) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
try:
without contracts =? node.contracts.client:
return RestApiResponse.error(
Http503, "Persistence is not enabled", headers = headers
)
without id =? id.tryGet.catch, error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
without purchase =? contracts.purchasing.getPurchase(id):
return RestApiResponse.error(Http404, headers = headers)
let json =
%RestPurchase(
state: purchase.state |? "none",
error: purchase.error .? msg,
request: purchase.request,
requestId: purchase.requestId,
)
return RestApiResponse.response(
$json, contentType = "application/json", headers = headers
)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/storage/v1/storage/purchases") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
try:
without contracts =? node.contracts.client:
return RestApiResponse.error(
Http503, "Persistence is not enabled", headers = headers
)
let purchaseIds = contracts.purchasing.getPurchaseIds()
return RestApiResponse.response(
$ %purchaseIds, contentType = "application/json", headers = headers
)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
@ -949,7 +492,6 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
"storage": {
"version": $codexVersion,
"revision": $codexRevision,
"contracts": $codexContractsRevision,
},
}
@ -1016,8 +558,6 @@ proc initRestApi*(
var router = RestRouter.init(validate, corsAllowedOrigin)
initDataApi(node, repoStore, router)
initSalesApi(node, router)
initPurchasingApi(node, router)
initNodeApi(node, conf, router)
initDebugApi(node, conf, router)


@ -17,8 +17,6 @@ import pkg/stew/byteutils
import pkg/results
import pkg/stint
import ../sales
import ../purchasing
import ../utils/stintutils
proc encodeString*(cid: type Cid): Result[string, cstring] =
@ -82,11 +80,6 @@ proc decodeString*(
except ValueError as e:
err e.msg.cstring
proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](
_: type T, value: string
): Result[T, cstring] =
array[32, byte].decodeString(value).map(id => T(id))
proc decodeString*(t: typedesc[string], value: string): Result[string, cstring] =
ok(value)


@ -3,8 +3,6 @@ import pkg/stew/byteutils
import pkg/libp2p
import pkg/codexdht/discv5/node as dn
import pkg/codexdht/discv5/routing_table as rt
import ../sales
import ../purchasing
import ../utils/json
import ../manifest
import ../units
@ -12,36 +10,6 @@ import ../units
export json
type
StorageRequestParams* = object
duration* {.serialize.}: uint64
proofProbability* {.serialize.}: UInt256
pricePerBytePerSecond* {.serialize.}: UInt256
collateralPerByte* {.serialize.}: UInt256
expiry* {.serialize.}: uint64
nodes* {.serialize.}: ?uint
tolerance* {.serialize.}: ?uint
RestPurchase* = object
requestId* {.serialize.}: RequestId
request* {.serialize.}: ?StorageRequest
state* {.serialize.}: string
error* {.serialize.}: ?string
RestAvailability* = object
totalSize* {.serialize.}: uint64
duration* {.serialize.}: uint64
minPricePerBytePerSecond* {.serialize.}: UInt256
totalCollateral* {.serialize.}: UInt256
freeSize* {.serialize.}: ?uint64
enabled* {.serialize.}: ?bool
until* {.serialize.}: ?SecondsSince1970
RestSalesAgent* = object
state* {.serialize.}: string
requestId* {.serialize.}: RequestId
slotIndex* {.serialize.}: uint64
request* {.serialize.}: ?StorageRequest
reservation* {.serialize.}: ?Reservation
RestContent* = object
cid* {.serialize.}: Cid
@ -106,13 +74,5 @@ proc init*(_: type RestPeerRecord, peerRecord: PeerRecord): RestPeerRecord =
proc init*(_: type RestNodeId, id: NodeId): RestNodeId =
RestNodeId(id: id)
proc `%`*(obj: StorageRequest | Slot): JsonNode =
let jsonObj = newJObject()
for k, v in obj.fieldPairs:
jsonObj[k] = %v
jsonObj["id"] = %(obj.id)
return jsonObj
proc `%`*(obj: RestNodeId): JsonNode =
% $obj.id


@ -1,555 +0,0 @@
import std/sequtils
import std/sugar
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/datastore
import ./market
import ./clock
import ./stores
import ./contracts/requests
import ./contracts/marketplace
import ./logutils
import ./sales/salescontext
import ./sales/salesagent
import ./sales/statemachine
import ./sales/slotqueue
import ./sales/states/preparing
import ./sales/states/unknown
import ./utils/trackedfutures
import ./utils/exceptions
## Sales holds a list of available storage that it may sell.
##
## When storage is requested on the market that matches availability, the Sales
## object will instruct the Logos Storage node to persist the requested data. Once the
## data has been persisted, it uploads a proof of storage to the market in an
## attempt to win a storage contract.
##
## Node Sales Market
## | | |
## | -- add availability --> | |
## | | <-- storage request --- |
## | <----- store data ------ | |
## | -----------------------> | |
## | | |
## | <----- prove data ---- | |
## | -----------------------> | |
## | | ---- storage proof ---> |
export stint
export reservations
export salesagent
export salescontext
logScope:
topics = "sales marketplace"
type Sales* = ref object
context*: SalesContext
agents*: seq[SalesAgent]
running: bool
subscriptions: seq[market.Subscription]
trackedFutures: TrackedFutures
proc `onStore=`*(sales: Sales, onStore: OnStore) =
sales.context.onStore = some onStore
proc `onClear=`*(sales: Sales, onClear: OnClear) =
sales.context.onClear = some onClear
proc `onSale=`*(sales: Sales, callback: OnSale) =
sales.context.onSale = some callback
proc `onProve=`*(sales: Sales, callback: OnProve) =
sales.context.onProve = some callback
proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) =
sales.context.onExpiryUpdate = some callback
proc onStore*(sales: Sales): ?OnStore =
sales.context.onStore
proc onClear*(sales: Sales): ?OnClear =
sales.context.onClear
proc onSale*(sales: Sales): ?OnSale =
sales.context.onSale
proc onProve*(sales: Sales): ?OnProve =
sales.context.onProve
proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate =
sales.context.onExpiryUpdate
proc new*(_: type Sales, market: Market, clock: Clock, repo: RepoStore): Sales =
Sales.new(market, clock, repo, 0)
proc new*(
_: type Sales,
market: Market,
clock: Clock,
repo: RepoStore,
simulateProofFailures: int,
): Sales =
let reservations = Reservations.new(repo)
Sales(
context: SalesContext(
market: market,
clock: clock,
reservations: reservations,
slotQueue: SlotQueue.new(),
simulateProofFailures: simulateProofFailures,
),
trackedFutures: TrackedFutures.new(),
subscriptions: @[],
)
proc remove(sales: Sales, agent: SalesAgent) {.async: (raises: []).} =
await agent.stop()
if sales.running:
sales.agents.keepItIf(it != agent)
proc cleanUp(
sales: Sales, agent: SalesAgent, reprocessSlot: bool, returnedCollateral: ?UInt256
) {.async: (raises: []).} =
let data = agent.data
logScope:
topics = "sales cleanUp"
requestId = data.requestId
slotIndex = data.slotIndex
reservationId = data.reservation .? id |? ReservationId.default
availabilityId = data.reservation .? availabilityId |? AvailabilityId.default
trace "cleaning up sales agent"
# if reservation for the SalesAgent was not created, then it means
# that the cleanUp was called before the sales process really started, so
# there are not really any bytes to be returned
if request =? data.request and reservation =? data.reservation:
if returnErr =? (
await noCancel sales.context.reservations.returnBytesToAvailability(
reservation.availabilityId, reservation.id, request.ask.slotSize
)
).errorOption:
error "failure returning bytes",
error = returnErr.msg, bytes = request.ask.slotSize
# delete reservation and return reservation bytes back to the availability
if reservation =? data.reservation and
deleteErr =? (
await noCancel sales.context.reservations.deleteReservation(
reservation.id, reservation.availabilityId, returnedCollateral
)
).errorOption:
error "failure deleting reservation", error = deleteErr.msg
# Re-add items back into the queue to prevent small availabilities from
# draining the queue. Seen items will be ordered last.
if reprocessSlot and request =? data.request and var item =? agent.data.slotQueueItem:
let queue = sales.context.slotQueue
item.seen = true
trace "pushing ignored item to queue, marked as seen"
if err =? queue.push(item).errorOption:
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
let fut = sales.remove(agent)
sales.trackedFutures.track(fut)
proc filled(sales: Sales, request: StorageRequest, slotIndex: uint64) =
if onSale =? sales.context.onSale:
onSale(request, slotIndex)
proc processSlot(
sales: Sales, item: SlotQueueItem
) {.async: (raises: [CancelledError]).} =
debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex
let agent = newSalesAgent(
sales.context, item.requestId, item.slotIndex, none StorageRequest, some item
)
let completed = newAsyncEvent()
agent.onCleanUp = proc(
reprocessSlot = false, returnedCollateral = UInt256.none
) {.async: (raises: []).} =
trace "slot cleanup"
await sales.cleanUp(agent, reprocessSlot, returnedCollateral)
completed.fire()
agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) =
trace "slot filled"
sales.filled(request, slotIndex)
completed.fire()
agent.start(SalePreparing())
sales.agents.add agent
trace "waiting for slot processing to complete"
await completed.wait()
trace "slot processing completed"
proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.} =
let reservations = sales.context.reservations
without reservs =? await reservations.all(Reservation):
return
let unused = reservs.filter(
r => (
let slotId = slotId(r.requestId, r.slotIndex)
not activeSlots.any(slot => slot.id == slotId)
)
)
if unused.len == 0:
return
info "Found unused reservations for deletion", unused = unused.len
for reservation in unused:
logScope:
reservationId = reservation.id
availabilityId = reservation.availabilityId
if err =? (
await reservations.deleteReservation(reservation.id, reservation.availabilityId)
).errorOption:
error "Failed to delete unused reservation", error = err.msg
else:
trace "Deleted unused reservation"
proc mySlots*(sales: Sales): Future[seq[Slot]] {.async.} =
let market = sales.context.market
let slotIds = await market.mySlots()
var slots: seq[Slot] = @[]
info "Loading active slots", slotsCount = len(slots)
for slotId in slotIds:
if slot =? (await market.getActiveSlot(slotId)):
slots.add slot
return slots
proc activeSale*(sales: Sales, slotId: SlotId): Future[?SalesAgent] {.async.} =
for agent in sales.agents:
if slotId(agent.data.requestId, agent.data.slotIndex) == slotId:
return some agent
return none SalesAgent
proc load*(sales: Sales) {.async.} =
let activeSlots = await sales.mySlots()
await sales.deleteInactiveReservations(activeSlots)
for slot in activeSlots:
let agent =
newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)
agent.onCleanUp = proc(
reprocessSlot = false, returnedCollateral = UInt256.none
) {.async: (raises: []).} =
await sales.cleanUp(agent, reprocessSlot, returnedCollateral)
# There is no need to assign agent.onFilled as slots loaded from `mySlots`
# are inherently already filled and so assigning agent.onFilled would be
# superfluous.
agent.start(SaleUnknown())
sales.agents.add agent
proc OnAvailabilitySaved(
sales: Sales, availability: Availability
) {.async: (raises: []).} =
## When availabilities are modified or added, the queue should be unpaused if
## it was paused and any slots in the queue should have their `seen` flag
## cleared.
let queue = sales.context.slotQueue
queue.clearSeenFlags()
if queue.paused:
trace "unpausing queue after new availability added"
queue.unpause()
proc onStorageRequested(
sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64
) {.raises: [].} =
logScope:
topics = "marketplace sales onStorageRequested"
requestId
slots = ask.slots
expiry
let slotQueue = sales.context.slotQueue
trace "storage requested, adding slots to queue"
let market = sales.context.market
without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free),
err:
error "Request failure, unable to calculate collateral", error = err.msg
return
without items =? SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err:
if err of SlotsOutOfRangeError:
warn "Too many slots, cannot add to queue"
else:
warn "Failed to create slot queue items from request", error = err.msg
return
for item in items:
# continue on failure
if err =? slotQueue.push(item).errorOption:
if err of SlotQueueItemExistsError:
error "Failed to push item to queue becaue it already exists"
elif err of QueueNotRunningError:
warn "Failed to push item to queue becaue queue is not running"
else:
warn "Error adding request to SlotQueue", error = err.msg
proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
logScope:
topics = "marketplace sales onSlotFreed"
requestId
slotIndex
trace "slot freed, adding to queue"
proc addSlotToQueue() {.async: (raises: []).} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
try:
without request =? (await market.getRequest(requestId)), err:
error "unknown request in contract", error = err.msgDetail
return
# Take the repairing state into consideration to calculate the collateral.
# This is particularly needed because it will affect the priority in the queue
# and we want to give the user the ability to tweak the parameters.
# Adding the repairing state directly in the queue priority calculation
# would not allow this flexibility.
without collateral =?
market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
error "Failed to add freed slot to queue: unable to calculate collateral",
error = err.msg
return
if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return
without slotQueueItem =?
SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
err:
warn "Too many slots, cannot add to queue", error = err.msgDetail
return
if err =? queue.push(slotQueueItem).errorOption:
if err of SlotQueueItemExistsError:
error "Failed to push item to queue because it already exists",
error = err.msgDetail
elif err of QueueNotRunningError:
warn "Failed to push item to queue because queue is not running",
error = err.msgDetail
except CancelledError as e:
trace "sales.addSlotToQueue was cancelled"
# We could get rid of this by adding the storage ask in the SlotFreed event,
# so we would not need to call getRequest to get the collateralPerSlot.
let fut = addSlotToQueue()
sales.trackedFutures.track(fut)
proc subscribeRequested(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
proc onStorageRequested(
requestId: RequestId, ask: StorageAsk, expiry: uint64
) {.raises: [].} =
sales.onStorageRequested(requestId, ask, expiry)
try:
let sub = await market.subscribeRequests(onStorageRequested)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to storage request events", msg = e.msg
proc subscribeCancellation(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
proc onCancelled(requestId: RequestId) =
trace "request cancelled (via contract RequestCancelled event), removing all request slots from queue"
queue.delete(requestId)
try:
let sub = await market.subscribeRequestCancelled(onCancelled)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to cancellation events", msg = e.msg
proc subscribeFulfilled*(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
proc onFulfilled(requestId: RequestId) =
trace "request fulfilled, removing all request slots from queue"
queue.delete(requestId)
for agent in sales.agents:
agent.onFulfilled(requestId)
try:
let sub = await market.subscribeFulfillment(onFulfilled)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to storage fulfilled events", msg = e.msg
proc subscribeFailure(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
proc onFailed(requestId: RequestId) =
trace "request failed, removing all request slots from queue"
queue.delete(requestId)
for agent in sales.agents:
agent.onFailed(requestId)
try:
let sub = await market.subscribeRequestFailed(onFailed)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to storage failure events", msg = e.msg
proc subscribeSlotFilled(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
proc onSlotFilled(requestId: RequestId, slotIndex: uint64) =
if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return
trace "slot filled, removing from slot queue", requestId, slotIndex
queue.delete(requestId, slotIndex.uint16)
for agent in sales.agents:
agent.onSlotFilled(requestId, slotIndex)
try:
let sub = await market.subscribeSlotFilled(onSlotFilled)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to slot filled events", msg = e.msg
proc subscribeSlotFreed(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
proc onSlotFreed(requestId: RequestId, slotIndex: uint64) =
sales.onSlotFreed(requestId, slotIndex)
try:
let sub = await market.subscribeSlotFreed(onSlotFreed)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to slot freed events", msg = e.msg
proc subscribeSlotReservationsFull(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
let queue = context.slotQueue
proc onSlotReservationsFull(requestId: RequestId, slotIndex: uint64) =
if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return
trace "reservations for slot full, removing from slot queue", requestId, slotIndex
queue.delete(requestId, slotIndex.uint16)
try:
let sub = await market.subscribeSlotReservationsFull(onSlotReservationsFull)
sales.subscriptions.add(sub)
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to subscribe to slot filled events", msg = e.msg
proc startSlotQueue(sales: Sales) =
let slotQueue = sales.context.slotQueue
let reservations = sales.context.reservations
slotQueue.onProcessSlot = proc(item: SlotQueueItem) {.async: (raises: []).} =
trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
try:
await sales.processSlot(item)
except CancelledError:
discard
slotQueue.start()
proc OnAvailabilitySaved(availability: Availability) {.async: (raises: []).} =
if availability.enabled:
await sales.OnAvailabilitySaved(availability)
reservations.OnAvailabilitySaved = OnAvailabilitySaved
proc subscribe(sales: Sales) {.async.} =
await sales.subscribeRequested()
await sales.subscribeFulfilled()
await sales.subscribeFailure()
await sales.subscribeSlotFilled()
await sales.subscribeSlotFreed()
await sales.subscribeCancellation()
await sales.subscribeSlotReservationsFull()
proc unsubscribe(sales: Sales) {.async.} =
for sub in sales.subscriptions:
try:
await sub.unsubscribe()
except CancelledError as error:
raise error
except CatchableError as e:
error "Unable to unsubscribe from subscription", error = e.msg
proc start*(sales: Sales) {.async.} =
await sales.load()
sales.startSlotQueue()
await sales.subscribe()
sales.running = true
proc stop*(sales: Sales) {.async.} =
trace "stopping sales"
sales.running = false
await sales.context.slotQueue.stop()
await sales.unsubscribe()
await sales.trackedFutures.cancelTracked()
for agent in sales.agents:
await agent.stop()
sales.agents = @[]
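# Illustrative sketch only (not part of the removed module): how a hosting node
# could have constructed and run Sales with the procs defined above, assuming
# existing `market`, `clock` and `repo` instances.
proc runSalesSketch(market: Market, clock: Clock, repo: RepoStore) {.async.} =
  let sales = Sales.new(market, clock, repo) # simulateProofFailures defaults to 0
  await sales.start() # loads active slots and subscribes to market events
  # ... the node now serves slot queue items as matching requests arrive ...
  await sales.stop()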


@ -1,759 +0,0 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
##
## +--------------------------------------+
## | RESERVATION |
## +---------------------------------------------------+ |--------------------------------------|
## | AVAILABILITY | | ReservationId | id | PK |
## |---------------------------------------------------| |--------------------------------------|
## | AvailabilityId | id | PK |<-||-------o<-| AvailabilityId | availabilityId | FK |
## |---------------------------------------------------| |--------------------------------------|
## | UInt256 | totalSize | | | UInt256 | size | |
## |---------------------------------------------------| |--------------------------------------|
## | UInt256 | freeSize | | | UInt256 | slotIndex | |
## |---------------------------------------------------| +--------------------------------------+
## | UInt256 | duration | |
## |---------------------------------------------------|
## | UInt256 | minPricePerBytePerSecond | |
## |---------------------------------------------------|
## | UInt256 | totalCollateral | |
## |---------------------------------------------------|
## | UInt256 | totalRemainingCollateral | |
## +---------------------------------------------------+
{.push raises: [], gcsafe.}
import std/sequtils
import std/sugar
import std/typetraits
import std/sequtils
import std/times
import pkg/chronos
import pkg/datastore
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/stew/byteutils
import ../codextypes
import ../logutils
import ../clock
import ../stores
import ../market
import ../contracts/requests
import ../utils/json
import ../units
export requests
export logutils
from nimcrypto import randomBytes
logScope:
topics = "marketplace sales reservations"
type
AvailabilityId* = distinct array[32, byte]
ReservationId* = distinct array[32, byte]
SomeStorableObject = Availability | Reservation
SomeStorableId = AvailabilityId | ReservationId
Availability* = ref object
id* {.serialize.}: AvailabilityId
totalSize* {.serialize.}: uint64
freeSize* {.serialize.}: uint64
duration* {.serialize.}: uint64
minPricePerBytePerSecond* {.serialize.}: UInt256
totalCollateral {.serialize.}: UInt256
totalRemainingCollateral* {.serialize.}: UInt256
# If set to false, the availability will not accept new slots.
# If enabled, it will not impact any existing slots that are already being hosted.
enabled* {.serialize.}: bool
# Specifies the latest timestamp after which the availability will no longer host any slots.
# If set to 0, there will be no restrictions.
until* {.serialize.}: SecondsSince1970
Reservation* = ref object
id* {.serialize.}: ReservationId
availabilityId* {.serialize.}: AvailabilityId
size* {.serialize.}: uint64
requestId* {.serialize.}: RequestId
slotIndex* {.serialize.}: uint64
validUntil* {.serialize.}: SecondsSince1970
Reservations* = ref object of RootObj
availabilityLock: AsyncLock
# Lock for protecting assertions of availability's sizes when searching for matching availability
repo: RepoStore
OnAvailabilitySaved: ?OnAvailabilitySaved
GetNext* = proc(): Future[?seq[byte]] {.async: (raises: [CancelledError]), closure.}
IterDispose* = proc(): Future[?!void] {.async: (raises: [CancelledError]), closure.}
OnAvailabilitySaved* =
proc(availability: Availability): Future[void] {.async: (raises: []).}
StorableIter* = ref object
finished*: bool
next*: GetNext
dispose*: IterDispose
ReservationsError* = object of CodexError
ReserveFailedError* = object of ReservationsError
ReleaseFailedError* = object of ReservationsError
DeleteFailedError* = object of ReservationsError
GetFailedError* = object of ReservationsError
NotExistsError* = object of ReservationsError
SerializationError* = object of ReservationsError
UpdateFailedError* = object of ReservationsError
BytesOutOfBoundsError* = object of ReservationsError
UntilOutOfBoundsError* = object of ReservationsError
const
SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module
ReservationsKey = (SalesKey / "reservations").tryGet
proc hash*(x: AvailabilityId): Hash {.borrow.}
proc all*(
self: Reservations, T: type SomeStorableObject
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}
proc all*(
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}
template withLock(lock, body) =
try:
await lock.acquire()
body
finally:
if lock.locked:
lock.release()
proc new*(T: type Reservations, repo: RepoStore): Reservations =
T(availabilityLock: newAsyncLock(), repo: repo)
proc init*(
_: type Availability,
totalSize: uint64,
freeSize: uint64,
duration: uint64,
minPricePerBytePerSecond: UInt256,
totalCollateral: UInt256,
enabled: bool,
until: SecondsSince1970,
): Availability =
var id: array[32, byte]
doAssert randomBytes(id) == 32
Availability(
id: AvailabilityId(id),
totalSize: totalSize,
freeSize: freeSize,
duration: duration,
minPricePerBytePerSecond: minPricePerBytePerSecond,
totalCollateral: totalCollateral,
totalRemainingCollateral: totalCollateral,
enabled: enabled,
until: until,
)
func totalCollateral*(self: Availability): UInt256 {.inline.} =
return self.totalCollateral
proc `totalCollateral=`*(self: Availability, value: UInt256) {.inline.} =
self.totalCollateral = value
self.totalRemainingCollateral = value
proc init*(
_: type Reservation,
availabilityId: AvailabilityId,
size: uint64,
requestId: RequestId,
slotIndex: uint64,
validUntil: SecondsSince1970,
): Reservation =
var id: array[32, byte]
doAssert randomBytes(id) == 32
Reservation(
id: ReservationId(id),
availabilityId: availabilityId,
size: size,
requestId: requestId,
slotIndex: slotIndex,
validUntil: validUntil,
)
func toArray(id: SomeStorableId): array[32, byte] =
array[32, byte](id)
proc `==`*(x, y: AvailabilityId): bool {.borrow.}
proc `==`*(x, y: ReservationId): bool {.borrow.}
proc `==`*(x, y: Reservation): bool =
x.id == y.id
proc `==`*(x, y: Availability): bool =
x.id == y.id
proc `$`*(id: SomeStorableId): string =
id.toArray.toHex
proc toErr[E1: ref CatchableError, E2: ReservationsError](
e1: E1, _: type E2, msg: string = e1.msg
): ref E2 =
return newException(E2, msg, e1)
logutils.formatIt(LogFormat.textLines, SomeStorableId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, SomeStorableId):
it.to0xHexLog
proc `OnAvailabilitySaved=`*(
self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved
) =
self.OnAvailabilitySaved = some OnAvailabilitySaved
func key*(id: AvailabilityId): ?!Key =
## sales / reservations / <availabilityId>
(ReservationsKey / $id)
func key*(reservationId: ReservationId, availabilityId: AvailabilityId): ?!Key =
## sales / reservations / <availabilityId> / <reservationId>
(availabilityId.key / $reservationId)
func key*(availability: Availability): ?!Key =
return availability.id.key
func maxCollateralPerByte*(availability: Availability): UInt256 =
# If freeSize happens to be zero, by convention maxCollateralPerByte
# is equal to totalRemainingCollateral.
if availability.freeSize == 0.uint64:
return availability.totalRemainingCollateral
return availability.totalRemainingCollateral div availability.freeSize.stuint(256)
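# Worked example (illustrative): with totalRemainingCollateral = 1000 and
# freeSize = 500 bytes, maxCollateralPerByte is 2; if freeSize is 0, the whole
# remaining collateral (1000) is returned, per the convention above.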
func key*(reservation: Reservation): ?!Key =
return key(reservation.id, reservation.availabilityId)
func available*(self: Reservations): uint =
self.repo.available.uint
func hasAvailable*(self: Reservations, bytes: uint): bool =
self.repo.available(bytes.NBytes)
proc exists*(
self: Reservations, key: Key
): Future[bool] {.async: (raises: [CancelledError]).} =
let exists = await self.repo.metaDs.ds.contains(key)
return exists
iterator items(self: StorableIter): auto =
while not self.finished:
yield self.next()
proc getImpl(
self: Reservations, key: Key
): Future[?!seq[byte]] {.async: (raises: [CancelledError]).} =
if not await self.exists(key):
let err =
newException(NotExistsError, "object with key " & $key & " does not exist")
return failure(err)
without serialized =? await self.repo.metaDs.ds.get(key), error:
return failure(error.toErr(GetFailedError))
return success serialized
proc get*(
self: Reservations, key: Key, T: type SomeStorableObject
): Future[?!T] {.async: (raises: [CancelledError]).} =
without serialized =? await self.getImpl(key), error:
return failure(error)
without obj =? T.fromJson(serialized), error:
return failure(error.toErr(SerializationError))
return success obj
proc updateImpl(
self: Reservations, obj: SomeStorableObject
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "updating " & $(obj.type), id = obj.id
without key =? obj.key, error:
return failure(error)
if err =? (await self.repo.metaDs.ds.put(key, @(obj.toJson.toBytes))).errorOption:
return failure(err.toErr(UpdateFailedError))
return success()
proc updateAvailability(
self: Reservations, obj: Availability
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
availabilityId = obj.id
if obj.until < 0:
let error =
newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
return failure(error)
without key =? obj.key, error:
return failure(error)
without oldAvailability =? await self.get(key, Availability), err:
if err of NotExistsError:
trace "Creating new Availability"
let res = await self.updateImpl(obj)
# inform subscribers that Availability has been added
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
await OnAvailabilitySaved(obj)
return res
else:
return failure(err)
if obj.until > 0:
without allReservations =? await self.all(Reservation, obj.id), error:
error.msg = "Error updating reservation: " & error.msg
return failure(error)
let requestEnds = allReservations.mapIt(it.validUntil)
if requestEnds.len > 0 and requestEnds.max > obj.until:
let error = newException(
UntilOutOfBoundsError,
"Until parameter must be greater or equal to the longest currently hosted slot",
)
return failure(error)
# Sizing of the availability changed, we need to adjust the repo reservation accordingly
if oldAvailability.totalSize != obj.totalSize:
trace "totalSize changed, updating repo reservation"
if oldAvailability.totalSize < obj.totalSize: # storage added
if reserveErr =? (
await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).NBytes)
).errorOption:
return failure(reserveErr.toErr(ReserveFailedError))
elif oldAvailability.totalSize > obj.totalSize: # storage removed
if reserveErr =? (
await self.repo.release((oldAvailability.totalSize - obj.totalSize).NBytes)
).errorOption:
return failure(reserveErr.toErr(ReleaseFailedError))
let res = await self.updateImpl(obj)
if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
oldAvailability.totalRemainingCollateral < obj.totalRemainingCollateral:
# availability updated
# inform subscribers that Availability has been modified (with increased
# size)
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
await OnAvailabilitySaved(obj)
return res
proc update*(
self: Reservations, obj: Reservation
): Future[?!void] {.async: (raises: [CancelledError]).} =
return await self.updateImpl(obj)
proc update*(
self: Reservations, obj: Availability
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
withLock(self.availabilityLock):
return await self.updateAvailability(obj)
except AsyncLockError as e:
error "Lock error when trying to update the availability", err = e.msg
return failure(e)
proc delete(
self: Reservations, key: Key
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "deleting object", key
if not await self.exists(key):
return success()
if err =? (await self.repo.metaDs.ds.delete(key)).errorOption:
return failure(err.toErr(DeleteFailedError))
return success()
proc deleteReservation*(
self: Reservations,
reservationId: ReservationId,
availabilityId: AvailabilityId,
returnedCollateral: ?UInt256 = UInt256.none,
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
reservationId
availabilityId
trace "deleting reservation"
without key =? key(reservationId, availabilityId), error:
return failure(error)
try:
withLock(self.availabilityLock):
without reservation =? (await self.get(key, Reservation)), error:
if error of NotExistsError:
return success()
else:
return failure(error)
without availabilityKey =? availabilityId.key, error:
return failure(error)
without var availability =? await self.get(availabilityKey, Availability), error:
return failure(error)
if reservation.size > 0.uint64:
trace "returning remaining reservation bytes to availability",
size = reservation.size
availability.freeSize += reservation.size
if collateral =? returnedCollateral:
availability.totalRemainingCollateral += collateral
if updateErr =? (await self.updateAvailability(availability)).errorOption:
return failure(updateErr)
if err =? (await self.repo.metaDs.ds.delete(key)).errorOption:
return failure(err.toErr(DeleteFailedError))
return success()
except AsyncLockError as e:
error "Lock error when trying to delete the availability", err = e.msg
return failure(e)
# TODO: add support for deleting availabilities
# To delete, must not have any active sales.
proc createAvailability*(
self: Reservations,
size: uint64,
duration: uint64,
minPricePerBytePerSecond: UInt256,
totalCollateral: UInt256,
enabled: bool,
until: SecondsSince1970,
): Future[?!Availability] {.async: (raises: [CancelledError]).} =
trace "creating availability",
size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
if until < 0:
let error =
newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
return failure(error)
let availability = Availability.init(
size, size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
)
let bytes = availability.freeSize
if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
return failure(reserveErr.toErr(ReserveFailedError))
if updateErr =? (await self.update(availability)).errorOption:
# rollback the reserve
trace "rolling back reserve"
if rollbackErr =? (await self.repo.release(bytes.NBytes)).errorOption:
rollbackErr.parent = updateErr
return failure(rollbackErr)
return failure(updateErr)
return success(availability)
method createReservation*(
self: Reservations,
availabilityId: AvailabilityId,
slotSize: uint64,
requestId: RequestId,
slotIndex: uint64,
collateralPerByte: UInt256,
validUntil: SecondsSince1970,
): Future[?!Reservation] {.async: (raises: [CancelledError]), base.} =
try:
withLock(self.availabilityLock):
without availabilityKey =? availabilityId.key, error:
return failure(error)
without availability =? await self.get(availabilityKey, Availability), error:
return failure(error)
# Check that the found availability has enough free space after the lock has been acquired, to prevent asynchronous Availability modifications
if availability.freeSize < slotSize:
let error = newException(
BytesOutOfBoundsError,
"trying to reserve an amount of bytes that is greater than the free size of the Availability",
)
return failure(error)
trace "Creating reservation",
availabilityId, slotSize, requestId, slotIndex, validUntil = validUntil
let reservation =
Reservation.init(availabilityId, slotSize, requestId, slotIndex, validUntil)
if createResErr =? (await self.update(reservation)).errorOption:
return failure(createResErr)
# reduce availability freeSize by the slot size, which is now accounted for in
# the newly created Reservation
availability.freeSize -= slotSize
# adjust the remaining totalRemainingCollateral
availability.totalRemainingCollateral -= slotSize.u256 * collateralPerByte
# update availability with reduced size
trace "Updating availability with reduced size", freeSize = availability.freeSize
if updateErr =? (await self.updateAvailability(availability)).errorOption:
trace "Updating availability failed, rolling back reservation creation"
without key =? reservation.key, keyError:
keyError.parent = updateErr
return failure(keyError)
# rollback the reservation creation
if rollbackErr =? (await self.delete(key)).errorOption:
rollbackErr.parent = updateErr
return failure(rollbackErr)
return failure(updateErr)
trace "Reservation succesfully created"
return success(reservation)
except AsyncLockError as e:
error "Lock error when trying to delete the availability", err = e.msg
return failure(e)
proc returnBytesToAvailability*(
self: Reservations,
availabilityId: AvailabilityId,
reservationId: ReservationId,
bytes: uint64,
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
reservationId
availabilityId
try:
withLock(self.availabilityLock):
without key =? key(reservationId, availabilityId), error:
return failure(error)
without var reservation =? (await self.get(key, Reservation)), error:
return failure(error)
# We are ignoring bytes that are still present in the Reservation because
# they will be returned to Availability through `deleteReservation`.
let bytesToBeReturned = bytes - reservation.size
if bytesToBeReturned == 0:
trace "No bytes are returned",
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
return success()
trace "Returning bytes",
requestSizeBytes = bytes, returningBytes = bytesToBeReturned
# First let's see if we can re-reserve the bytes; if the Repo's quota
# is depleted then we fail fast, as there is nothing more to be done at the moment.
if reserveErr =? (await self.repo.reserve(bytesToBeReturned.NBytes)).errorOption:
return failure(reserveErr.toErr(ReserveFailedError))
without availabilityKey =? availabilityId.key, error:
return failure(error)
without var availability =? await self.get(availabilityKey, Availability), error:
return failure(error)
availability.freeSize += bytesToBeReturned
# Update availability with returned size
if updateErr =? (await self.updateAvailability(availability)).errorOption:
trace "Rolling back returning bytes"
if rollbackErr =? (await self.repo.release(bytesToBeReturned.NBytes)).errorOption:
rollbackErr.parent = updateErr
return failure(rollbackErr)
return failure(updateErr)
return success()
except AsyncLockError as e:
error "Lock error when returning bytes to the availability", err = e.msg
return failure(e)
proc release*(
self: Reservations,
reservationId: ReservationId,
availabilityId: AvailabilityId,
bytes: uint,
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
topics = "release"
bytes
reservationId
availabilityId
trace "releasing bytes and updating reservation"
without key =? key(reservationId, availabilityId), error:
return failure(error)
without var reservation =? (await self.get(key, Reservation)), error:
return failure(error)
if reservation.size < bytes:
let error = newException(
BytesOutOfBoundsError,
"trying to release an amount of bytes that is greater than the total size of the Reservation",
)
return failure(error)
if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption:
return failure(releaseErr.toErr(ReleaseFailedError))
reservation.size -= bytes
# persist partially used Reservation with updated size
if err =? (await self.update(reservation)).errorOption:
# rollback release if an update error encountered
trace "rolling back release"
if rollbackErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
rollbackErr.parent = err
return failure(rollbackErr)
return failure(err)
return success()
proc storables(
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!StorableIter] {.async: (raises: [CancelledError]).} =
var iter = StorableIter()
let query = Query.init(queryKey)
when T is Availability:
# should indicate key length of 4, but let the .key logic determine it
without defaultKey =? AvailabilityId.default.key, error:
return failure(error)
elif T is Reservation:
# should indicate key length of 5, but let the .key logic determine it
without defaultKey =? key(ReservationId.default, AvailabilityId.default), error:
return failure(error)
else:
raiseAssert "unknown type"
without results =? await self.repo.metaDs.ds.query(query), error:
return failure(error)
# /sales/reservations
proc next(): Future[?seq[byte]] {.async: (raises: [CancelledError]).} =
await idleAsync()
iter.finished = results.finished
if not results.finished and res =? (await results.next()) and res.data.len > 0 and
key =? res.key and key.namespaces.len == defaultKey.namespaces.len:
return some res.data
return none seq[byte]
proc dispose(): Future[?!void] {.async: (raises: [CancelledError]).} =
return await results.dispose()
iter.next = next
iter.dispose = dispose
return success iter
proc allImpl(
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
var ret: seq[T] = @[]
without storables =? (await self.storables(T, queryKey)), error:
return failure(error)
for storable in storables.items:
try:
without bytes =? (await storable):
continue
without obj =? T.fromJson(bytes), error:
error "json deserialization error",
json = string.fromBytes(bytes), error = error.msg
continue
ret.add obj
except CancelledError as err:
raise err
except CatchableError as err:
error "Error when retrieving storable", error = err.msg
continue
return success(ret)
proc all*(
self: Reservations, T: type SomeStorableObject
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
return await self.allImpl(T)
proc all*(
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
without key =? key(availabilityId):
return failure("no key")
return await self.allImpl(T, key)
proc findAvailability*(
self: Reservations,
size, duration: uint64,
pricePerBytePerSecond, collateralPerByte: UInt256,
validUntil: SecondsSince1970,
): Future[?Availability] {.async: (raises: [CancelledError]).} =
without storables =? (await self.storables(Availability)), e:
error "failed to get all storables", error = e.msg
return none Availability
for item in storables.items:
if bytes =? (await item) and availability =? Availability.fromJson(bytes):
if availability.enabled and size <= availability.freeSize and
duration <= availability.duration and
collateralPerByte <= availability.maxCollateralPerByte and
pricePerBytePerSecond >= availability.minPricePerBytePerSecond and
(availability.until == 0 or availability.until >= validUntil):
trace "availability matched",
id = availability.id,
enabled = availability.enabled,
size,
availFreeSize = availability.freeSize,
duration,
availDuration = availability.duration,
pricePerBytePerSecond,
availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
collateralPerByte,
availMaxCollateralPerByte = availability.maxCollateralPerByte,
until = availability.until
# TODO: As soon as we're on ARC-ORC, we can use destructors
# to automatically dispose our iterators when they fall out of scope.
# For now:
if err =? (await storables.dispose()).errorOption:
error "failed to dispose storables iter", error = err.msg
return none Availability
return some availability
trace "availability did not match",
id = availability.id,
enabled = availability.enabled,
size,
availFreeSize = availability.freeSize,
duration,
availDuration = availability.duration,
pricePerBytePerSecond,
availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
collateralPerByte,
availMaxCollateralPerByte = availability.maxCollateralPerByte,
until = availability.until
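For illustration, the matching predicate applied above, reduced to a self-contained sketch with plain unsigned integers in place of the project's Availability and UInt256 types:

# Illustrative sketch only: simplified Availability with plain integer fields.
type Availability = object
  enabled: bool
  freeSize, duration: uint64
  minPricePerBytePerSecond, maxCollateralPerByte: uint64
  until: int64 # 0 means "no expiry restriction"

proc matches(a: Availability, size, duration: uint64,
    pricePerBytePerSecond, collateralPerByte: uint64, validUntil: int64): bool =
  ## Mirrors the conditions above: enabled, enough free space, long enough,
  ## price at or above the minimum, collateral at or below the maximum, and
  ## (when bounded) valid until at least `validUntil`.
  a.enabled and
    size <= a.freeSize and
    duration <= a.duration and
    collateralPerByte <= a.maxCollateralPerByte and
    pricePerBytePerSecond >= a.minPricePerBytePerSecond and
    (a.until == 0 or a.until >= validUntil)

when isMainModule:
  let avail = Availability(enabled: true, freeSize: 1_000_000, duration: 3600,
    minPricePerBytePerSecond: 1, maxCollateralPerByte: 10, until: 0)
  echo avail.matches(500_000, 1800, 2, 5, 1_700_000_000) # true
  echo avail.matches(2_000_000, 1800, 2, 5, 1_700_000_000) # false: too large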

View File

@@ -1,152 +0,0 @@
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import ../contracts/requests
import ../errors
import ../logutils
import ../utils/exceptions
import ./statemachine
import ./salescontext
import ./salesdata
import ./reservations
import ./slotqueue
export reservations
logScope:
topics = "marketplace sales"
type
SalesAgent* = ref object of Machine
context*: SalesContext
data*: SalesData
subscribed: bool
# Slot-level callbacks.
onCleanUp*: OnCleanUp
onFilled*: ?OnFilled
OnCleanUp* = proc(reprocessSlot = false, returnedCollateral = UInt256.none) {.
async: (raises: [])
.}
OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
SalesAgentError = object of CodexError
AllSlotsFilledError* = object of SalesAgentError
func `==`*(a, b: SalesAgent): bool =
a.data.requestId == b.data.requestId and a.data.slotIndex == b.data.slotIndex
proc newSalesAgent*(
context: SalesContext,
requestId: RequestId,
slotIndex: uint64,
request: ?StorageRequest,
slotQueueItem = SlotQueueItem.none,
): SalesAgent =
var agent = SalesAgent.new()
agent.context = context
agent.data = SalesData(
requestId: requestId,
slotIndex: slotIndex,
request: request,
slotQueueItem: slotQueueItem,
)
return agent
proc retrieveRequest*(agent: SalesAgent) {.async.} =
let data = agent.data
let market = agent.context.market
if data.request.isNone:
data.request = await market.getRequest(data.requestId)
proc retrieveRequestState*(agent: SalesAgent): Future[?RequestState] {.async.} =
let data = agent.data
let market = agent.context.market
return await market.requestState(data.requestId)
func state*(agent: SalesAgent): ?string =
proc description(state: State): string =
$state
agent.query(description)
proc subscribeCancellation(agent: SalesAgent) {.async.} =
let data = agent.data
let clock = agent.context.clock
proc onCancelled() {.async: (raises: []).} =
without request =? data.request:
return
try:
let market = agent.context.market
let expiry = await market.requestExpiresAt(data.requestId)
while true:
let deadline = max(clock.now, expiry) + 1
trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline
await clock.waitUntil(deadline)
without state =? await agent.retrieveRequestState():
error "Unknown request", requestId = data.requestId
return
case state
of New:
discard
of RequestState.Cancelled:
agent.schedule(cancelledEvent(request))
break
of RequestState.Started, RequestState.Finished, RequestState.Failed:
break
debug "The request is not yet canceled, even though it should be. Waiting for some more time.",
currentState = state, now = clock.now
except CancelledError:
trace "Waiting for expiry to lapse was cancelled", requestId = data.requestId
except CatchableError as e:
error "Error while waiting for expiry to lapse", error = e.msgDetail
data.cancelled = onCancelled()
method onFulfilled*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, raises: [].} =
let cancelled = agent.data.cancelled
if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished:
cancelled.cancelSoon()
method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, raises: [].} =
without request =? agent.data.request:
return
if agent.data.requestId == requestId:
agent.schedule(failedEvent(request))
method onSlotFilled*(
agent: SalesAgent, requestId: RequestId, slotIndex: uint64
) {.base, gcsafe, raises: [].} =
if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
agent.schedule(slotFilledEvent(requestId, slotIndex))
proc subscribe*(agent: SalesAgent) {.async.} =
if agent.subscribed:
return
await agent.subscribeCancellation()
agent.subscribed = true
proc unsubscribe*(agent: SalesAgent) {.async: (raises: []).} =
if not agent.subscribed:
return
let data = agent.data
if not data.cancelled.isNil and not data.cancelled.finished:
await data.cancelled.cancelAndWait()
data.cancelled = nil
agent.subscribed = false
proc stop*(agent: SalesAgent) {.async: (raises: []).} =
await Machine(agent).stop()
await agent.unsubscribe()

View File

@@ -1,42 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import pkg/libp2p/cid
import ../market
import ../clock
import ./slotqueue
import ./reservations
import ../blocktype as bt
type
SalesContext* = ref object
market*: Market
clock*: Clock
# Sales-level callbacks. Closure will be overwritten each time a slot is
# processed.
onStore*: ?OnStore
onClear*: ?OnClear
onSale*: ?OnSale
onProve*: ?OnProve
onExpiryUpdate*: ?OnExpiryUpdate
reservations*: Reservations
slotQueue*: SlotQueue
simulateProofFailures*: int
BlocksCb* =
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
OnStore* = proc(
request: StorageRequest,
expiry: SecondsSince1970,
slot: uint64,
blocksCb: BlocksCb,
isRepairing: bool,
): Future[?!void] {.async: (raises: [CancelledError]).}
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
async: (raises: [CancelledError])
.}
OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {.
async: (raises: [CancelledError])
.}
OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}

View File

@@ -1,14 +0,0 @@
import pkg/chronos
import ../contracts/requests
import ../market
import ./reservations
import ./slotqueue
type SalesData* = ref object
requestId*: RequestId
ask*: StorageAsk
request*: ?StorageRequest
slotIndex*: uint64
cancelled*: Future[void]
reservation*: ?Reservation
slotQueueItem*: ?SlotQueueItem

View File

@@ -1,408 +0,0 @@
import std/sequtils
import std/tables
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import ../errors
import ../logutils
import ../rng
import ../utils
import ../contracts/requests
import ../utils/asyncheapqueue
import ../utils/trackedfutures
logScope:
topics = "marketplace slotqueue"
type
OnProcessSlot* = proc(item: SlotQueueItem): Future[void] {.async: (raises: []).}
# Non-ref obj copies value when assigned, preventing accidental modification
# of values which could cause an incorrect order (eg
# ``slotQueue[1].collateral = 1`` would cause ``collateral`` to be updated,
# but the heap invariant would no longer be honoured. When non-ref, the
# compiler can ensure that statement will fail).
SlotQueueItem* = object
requestId: RequestId
slotIndex: uint16
slotSize: uint64
duration: uint64
pricePerBytePerSecond: UInt256
collateral: UInt256 # Collateral computed
expiry: ?uint64
seen: bool
# No need to subtract 1 to prevent overflow when adding 1 (to always allow a push),
# because AsyncHeapQueue size is of type `int`, which is larger than `uint16`
SlotQueueSize = range[1'u16 .. uint16.high]
SlotQueue* = ref object
maxWorkers: int
onProcessSlot: ?OnProcessSlot
queue: AsyncHeapQueue[SlotQueueItem]
running: bool
trackedFutures: TrackedFutures
unpaused: AsyncEvent
SlotQueueError = object of CodexError
SlotQueueItemExistsError* = object of SlotQueueError
SlotQueueItemNotExistsError* = object of SlotQueueError
SlotsOutOfRangeError* = object of SlotQueueError
QueueNotRunningError* = object of SlotQueueError
# Number of concurrent workers used for processing SlotQueueItems
const DefaultMaxWorkers = 3
# Cap slot queue size to prevent unbounded growth and make sifting more
# efficient. Max size is not equivalent to the number of slots a host can
# service, which is limited by host availabilities and new requests circulating
# in the network. Additionally, each new request/slot in the network will be
# included in the queue if it is higher priority than any of the existing
# items. Older slots should be unfillable over time as other hosts fill the
# slots.
const DefaultMaxSize = 128'u16
proc profitability(item: SlotQueueItem): UInt256 =
StorageAsk(
duration: item.duration,
pricePerBytePerSecond: item.pricePerBytePerSecond,
slotSize: item.slotSize,
).pricePerSlot
proc `<`*(a, b: SlotQueueItem): bool =
# for A to have a higher priority than B (in a min queue), A must be less than
# B.
var scoreA: uint8 = 0
var scoreB: uint8 = 0
proc addIf(score: var uint8, condition: bool, addition: int) =
if condition:
score += 1'u8 shl addition
scoreA.addIf(a.seen < b.seen, 4)
scoreB.addIf(a.seen > b.seen, 4)
scoreA.addIf(a.profitability > b.profitability, 3)
scoreB.addIf(a.profitability < b.profitability, 3)
scoreA.addIf(a.collateral < b.collateral, 2)
scoreB.addIf(a.collateral > b.collateral, 2)
if expiryA =? a.expiry and expiryB =? b.expiry:
scoreA.addIf(expiryA > expiryB, 1)
scoreB.addIf(expiryA < expiryB, 1)
return scoreA > scoreB
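For illustration, the weighted-bit scoring above in a self-contained sketch (plain int fields instead of UInt256, same bit positions: seen outranks profitability, which outranks collateral, which outranks expiry):

# Illustrative sketch only: a reduced Item with the same ordering criteria.
type Item = object
  seen: bool
  profitability: int # higher is better
  collateral: int # lower is better
  expiry: int # later is better

proc `<`(a, b: Item): bool =
  ## "Smaller" means higher priority in a min-heap.
  var scoreA, scoreB: uint8

  proc addIf(score: var uint8, condition: bool, bit: int) =
    if condition:
      score += 1'u8 shl bit

  scoreA.addIf(a.seen < b.seen, 4) # unseen beats seen
  scoreB.addIf(a.seen > b.seen, 4)
  scoreA.addIf(a.profitability > b.profitability, 3)
  scoreB.addIf(a.profitability < b.profitability, 3)
  scoreA.addIf(a.collateral < b.collateral, 2)
  scoreB.addIf(a.collateral > b.collateral, 2)
  scoreA.addIf(a.expiry > b.expiry, 1)
  scoreB.addIf(a.expiry < b.expiry, 1)
  scoreA > scoreB

when isMainModule:
  let cheap = Item(seen: false, profitability: 5, collateral: 1, expiry: 100)
  let rich = Item(seen: false, profitability: 9, collateral: 9, expiry: 100)
  echo rich < cheap # true: profitability (bit 3) outweighs collateral (bit 2)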
proc `==`*(a, b: SlotQueueItem): bool =
a.requestId == b.requestId and a.slotIndex == b.slotIndex
proc new*(
_: type SlotQueue,
maxWorkers = DefaultMaxWorkers,
maxSize: SlotQueueSize = DefaultMaxSize,
): SlotQueue =
if maxWorkers <= 0:
raise newException(ValueError, "maxWorkers must be positive")
if maxWorkers.uint16 > maxSize:
raise newException(ValueError, "maxWorkers must be less than maxSize")
SlotQueue(
maxWorkers: maxWorkers,
# Add 1 to always allow for an extra item to be pushed onto the queue
# temporarily. After push (and sort), the bottom-most item will be deleted
queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1),
running: false,
trackedFutures: TrackedFutures.new(),
unpaused: newAsyncEvent(),
)
# avoid instantiating `workers` in constructor to avoid side effects in
# `newAsyncQueue` procedure
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
slotIndex: uint16,
ask: StorageAsk,
expiry: ?uint64,
collateral: UInt256,
seen = false,
): SlotQueueItem =
SlotQueueItem(
requestId: requestId,
slotIndex: slotIndex,
slotSize: ask.slotSize,
duration: ask.duration,
pricePerBytePerSecond: ask.pricePerBytePerSecond,
collateral: collateral,
expiry: expiry,
seen: seen,
)
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
slotIndex: uint16,
ask: StorageAsk,
expiry: uint64,
collateral: UInt256,
seen = false,
): SlotQueueItem =
SlotQueueItem.init(requestId, slotIndex, ask, some expiry, collateral, seen)
proc init*(
_: type SlotQueueItem,
request: StorageRequest,
slotIndex: uint16,
collateral: UInt256,
): SlotQueueItem =
SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral)
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
ask: StorageAsk,
expiry: ?uint64,
collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
if not ask.slots.inRange:
raise newException(SlotsOutOfRangeError, "Too many slots")
var i = 0'u16
proc initSlotQueueItem(): SlotQueueItem =
let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral)
inc i
return item
var items = newSeqWith(ask.slots.int, initSlotQueueItem())
Rng.instance.shuffle(items)
return items
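For illustration, the one-item-per-slot construction and shuffle above, sketched with std/sequtils and std/random (the real code uses the project's Rng):

# Illustrative sketch only: number the slots 0..4, then shuffle the order so
# hosts do not all try to fill the same slot first.
import std/[sequtils, random]

when isMainModule:
  var i = 0
  proc nextIndex(): int =
    result = i
    inc i

  var items = newSeqWith(5, nextIndex())
  randomize()
  shuffle(items)
  echo items # e.g. @[3, 0, 4, 1, 2]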
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
ask: StorageAsk,
expiry: uint64,
collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
SlotQueueItem.init(requestId, ask, some expiry, collateral)
proc init*(
_: type SlotQueueItem, request: StorageRequest, collateral: UInt256
): seq[SlotQueueItem] =
return SlotQueueItem.init(request.id, request.ask, uint64.none, collateral)
proc inRange*(val: SomeUnsignedInt): bool =
val.uint16 in SlotQueueSize.low .. SlotQueueSize.high
proc requestId*(self: SlotQueueItem): RequestId =
self.requestId
proc slotIndex*(self: SlotQueueItem): uint16 =
self.slotIndex
proc slotSize*(self: SlotQueueItem): uint64 =
self.slotSize
proc duration*(self: SlotQueueItem): uint64 =
self.duration
proc pricePerBytePerSecond*(self: SlotQueueItem): UInt256 =
self.pricePerBytePerSecond
proc collateral*(self: SlotQueueItem): UInt256 =
self.collateral
proc seen*(self: SlotQueueItem): bool =
self.seen
proc `seen=`*(self: var SlotQueueItem, seen: bool) =
self.seen = seen
proc running*(self: SlotQueue): bool =
self.running
proc len*(self: SlotQueue): int =
self.queue.len
proc size*(self: SlotQueue): int =
self.queue.size - 1
proc paused*(self: SlotQueue): bool =
not self.unpaused.isSet
proc `$`*(self: SlotQueue): string =
$self.queue
proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) =
self.onProcessSlot = some onProcessSlot
proc contains*(self: SlotQueue, item: SlotQueueItem): bool =
self.queue.contains(item)
proc pause*(self: SlotQueue) =
# set unpaused flag to false -- coroutines will block on unpaused.wait()
self.unpaused.clear()
proc unpause*(self: SlotQueue) =
# set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
self.unpaused.fire()
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} =
logScope:
requestId = item.requestId
slotIndex = item.slotIndex
seen = item.seen
trace "pushing item to queue"
if not self.running:
let err = newException(QueueNotRunningError, "queue not running")
return failure(err)
if self.contains(item):
let err = newException(SlotQueueItemExistsError, "item already exists")
return failure(err)
if err =? self.queue.pushNoWait(item).mapFailure.errorOption:
return failure(err)
if self.queue.full():
# delete the last item
self.queue.del(self.queue.size - 1)
doAssert self.queue.len <= self.queue.size - 1
# when slots are pushed to the queue, the queue should be unpaused if it was
# paused
if self.paused and not item.seen:
trace "unpausing queue after new slot pushed"
self.unpause()
return success()
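For illustration, the overflow-trimming push above, sketched with std/heapqueue standing in for the project's AsyncHeapQueue (which is allocated with capacity maxSize + 1 for the same reason):

# Illustrative sketch only: keep at most MaxSize items, trimming after push.
import std/heapqueue

const MaxSize = 4

proc boundedPush[T](heap: var HeapQueue[T], item: T) =
  ## Push unconditionally, then delete the last array slot if over capacity.
  ## That slot is always a leaf of the heap, so the top-priority (smallest)
  ## item is never the one dropped.
  heap.push(item)
  if heap.len > MaxSize:
    heap.del(heap.len - 1)

when isMainModule:
  var q = initHeapQueue[int]()
  for n in [5, 1, 9, 7, 3, 2]:
    q.boundedPush(n)
  echo q.len # 4
  echo q[0] # 1: the smallest item is still at the top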
proc push*(self: SlotQueue, items: seq[SlotQueueItem]): ?!void =
for item in items:
if err =? self.push(item).errorOption:
return failure(err)
return success()
proc findByRequest(self: SlotQueue, requestId: RequestId): seq[SlotQueueItem] =
var items: seq[SlotQueueItem] = @[]
for item in self.queue.items:
if item.requestId == requestId:
items.add item
return items
proc delete*(self: SlotQueue, item: SlotQueueItem) =
logScope:
requestId = item.requestId
slotIndex = item.slotIndex
trace "removing item from queue"
if not self.running:
trace "cannot delete item from queue, queue not running"
return
self.queue.delete(item)
proc delete*(self: SlotQueue, requestId: RequestId, slotIndex: uint16) =
let item = SlotQueueItem(requestId: requestId, slotIndex: slotIndex)
self.delete(item)
proc delete*(self: SlotQueue, requestId: RequestId) =
let items = self.findByRequest(requestId)
for item in items:
self.delete(item)
proc `[]`*(self: SlotQueue, i: Natural): SlotQueueItem =
self.queue[i]
proc clearSeenFlags*(self: SlotQueue) =
# Enumerate all items in the queue, overwriting each item with `seen = false`.
# To avoid issues with new queue items being pushed to the queue while all
# items are being iterated (eg if a new storage request comes in and pushes
# new slots to the queue), this routine must remain synchronous.
if self.queue.empty:
return
for item in self.queue.mitems:
item.seen = false # does not maintain the heap invariant
# force heap reshuffling to maintain the heap invariant
doAssert self.queue.update(self.queue[0]), "slot queue failed to reshuffle"
trace "all 'seen' flags cleared"
proc runWorker(self: SlotQueue) {.async: (raises: []).} =
trace "slot queue worker loop started"
while self.running:
try:
if self.paused:
trace "Queue is paused, waiting for new slots or availabilities to be modified/added"
# block until unpaused is true/fired, ie wait for queue to be unpaused
await self.unpaused.wait()
let item = await self.queue.pop() # if queue empty, wait here for new items
logScope:
reqId = item.requestId
slotIdx = item.slotIndex
seen = item.seen
if not self.running: # may have changed after waiting for pop
trace "not running, exiting"
break
# If, upon processing a slot, the slot item already has a `seen` flag set,
# the queue should be paused.
if item.seen:
trace "processing already seen item, pausing queue",
reqId = item.requestId, slotIdx = item.slotIndex
self.pause()
# put item back in queue so that if other items are pushed while paused,
# it will be sorted accordingly. Otherwise, this item would be processed
# immediately (with priority over other items) once unpaused
trace "readding seen item back into the queue"
discard self.push(item) # on error, drop the item and continue
continue
trace "processing item"
without onProcessSlot =? self.onProcessSlot:
raiseAssert "slot queue onProcessSlot not set"
await onProcessSlot(item)
except CancelledError:
trace "slot queue worker cancelled"
break
except CatchableError as e: # raised from self.queue.pop()
warn "slot queue worker error encountered during processing", error = e.msg
trace "slot queue worker loop stopped"
proc start*(self: SlotQueue) =
if self.running:
return
trace "starting slot queue"
self.running = true
# Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its
# task, a new worker will be pushed to the queue
for i in 0 ..< self.maxWorkers:
let worker = self.runWorker()
self.trackedFutures.track(worker)
proc stop*(self: SlotQueue) {.async.} =
if not self.running:
return
trace "stopping slot queue"
self.running = false
await self.trackedFutures.cancelTracked()

View File

@@ -1,41 +0,0 @@
import pkg/questionable
import ../errors
import ../utils/asyncstatemachine
import ../market
import ../clock
import ../contracts/requests
export market
export clock
export asyncstatemachine
type
SaleState* = ref object of State
SaleError* = object of CodexError
method onCancelled*(
state: SaleState, request: StorageRequest
): ?State {.base, raises: [].} =
discard
method onFailed*(
state: SaleState, request: StorageRequest
): ?State {.base, raises: [].} =
discard
method onSlotFilled*(
state: SaleState, requestId: RequestId, slotIndex: uint64
): ?State {.base, raises: [].} =
discard
proc cancelledEvent*(request: StorageRequest): Event =
return proc(state: State): ?State =
SaleState(state).onCancelled(request)
proc failedEvent*(request: StorageRequest): Event =
return proc(state: State): ?State =
SaleState(state).onFailed(request)
proc slotFilledEvent*(requestId: RequestId, slotIndex: uint64): Event =
return proc(state: State): ?State =
SaleState(state).onSlotFilled(requestId, slotIndex)
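For illustration, the event-as-closure pattern above, sketched with std/options instead of questionable's ?State and a toy state hierarchy:

# Illustrative sketch only: events are closures that dispatch to the current
# state's method and optionally return the next state.
import std/options

type
  State = ref object of RootObj
  Event = proc(state: State): Option[State]

  Idle = ref object of State
  Cancelled = ref object of State

method onCancelled(state: State): Option[State] {.base.} =
  none(State)

method onCancelled(state: Idle): Option[State] =
  some(State(Cancelled()))

proc cancelledEvent(): Event =
  return proc(state: State): Option[State] =
    state.onCancelled()

when isMainModule:
  let event = cancelledEvent()
  echo event(Idle()).isSome # true: Idle transitions to Cancelled
  echo event(Cancelled()).isSome # false: the base method ignores the event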

View File

@@ -1,62 +0,0 @@
import ../../logutils
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errored
logScope:
topics = "marketplace sales cancelled"
type SaleCancelled* = ref object of SaleState
method `$`*(state: SaleCancelled): string =
"SaleCancelled"
proc slotIsFilledByMe(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async: (raises: [CancelledError, MarketError]).} =
let host = await market.getHost(requestId, slotIndex)
let me = await market.getSigner()
return host == me.some
method run*(
state: SaleCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let market = agent.context.market
without request =? data.request:
raiseAssert "no sale request"
try:
var returnedCollateral = UInt256.none
if await slotIsFilledByMe(market, data.requestId, data.slotIndex):
debug "Collecting collateral and partial payout",
requestId = data.requestId, slotIndex = data.slotIndex
let slot = Slot(request: request, slotIndex: data.slotIndex)
let currentCollateral = await market.currentCollateral(slot.id)
try:
await market.freeSlot(slot.id)
except SlotStateMismatchError as e:
warn "Failed to free slot because slot is already free", error = e.msg
returnedCollateral = currentCollateral.some
if onClear =? agent.context.onClear and request =? data.request:
onClear(request, data.slotIndex)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(reprocessSlot = false, returnedCollateral = returnedCollateral)
warn "Sale cancelled due to timeout",
requestId = data.requestId, slotIndex = data.slotIndex
except CancelledError as e:
trace "SaleCancelled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleCancelled.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,96 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import ../../blocktype as bt
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./cancelled
import ./failed
import ./filled
import ./initialproving
import ./errored
type SaleDownloading* = ref object of SaleState
logScope:
topics = "marketplace sales downloading"
method `$`*(state: SaleDownloading): string =
"SaleDownloading"
method onCancelled*(state: SaleDownloading, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SaleDownloading, request: StorageRequest): ?State =
return some State(SaleFailed())
method onSlotFilled*(
state: SaleDownloading, requestId: RequestId, slotIndex: uint64
): ?State =
return some State(SaleFilled())
method run*(
state: SaleDownloading, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
let market = context.market
let reservations = context.reservations
without onStore =? context.onStore:
raiseAssert "onStore callback not set"
without request =? data.request:
raiseAssert "no sale request"
without reservation =? data.reservation:
raiseAssert("no reservation")
logScope:
requestId = request.id
slotIndex = data.slotIndex
reservationId = reservation.id
availabilityId = reservation.availabilityId
proc onBlocks(
blocks: seq[bt.Block]
): Future[?!void] {.async: (raises: [CancelledError]).} =
# release batches of blocks as they are written to disk and
# update availability size
var bytes: uint = 0
for blk in blocks:
if not blk.cid.isEmpty:
bytes += blk.data.len.uint
trace "Releasing batch of bytes written to disk", bytes
return await reservations.release(reservation.id, reservation.availabilityId, bytes)
try:
let requestId = request.id
let slotId = slotId(requestId, data.slotIndex)
let requestState = await market.requestState(requestId)
let isRepairing = (await market.slotState(slotId)) == SlotState.Repair
trace "Retrieving expiry"
var expiry: SecondsSince1970
if state =? requestState and state == RequestState.Started:
expiry = await market.getRequestEnd(requestId)
else:
expiry = await market.requestExpiresAt(requestId)
trace "Starting download"
if err =?
(await onStore(request, expiry, data.slotIndex, onBlocks, isRepairing)).errorOption:
return some State(SaleErrored(error: err, reprocessSlot: false))
trace "Download complete"
return some State(SaleInitialProving())
except CancelledError as e:
trace "SaleDownloading.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleDownloading.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,40 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import ../statemachine
import ../salesagent
import ../../logutils
import ../../utils/exceptions
logScope:
topics = "marketplace sales errored"
type SaleErrored* = ref object of SaleState
error*: ref CatchableError
reprocessSlot*: bool
method `$`*(state: SaleErrored): string =
"SaleErrored"
method run*(
state: SaleErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
error "Sale error",
error = state.error.msgDetail,
requestId = data.requestId,
slotIndex = data.slotIndex
try:
if onClear =? context.onClear and request =? data.request:
onClear(request, data.slotIndex)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(reprocessSlot = state.reprocessSlot)
except CancelledError as e:
trace "SaleErrored.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleErrored.run", error = e.msgDetail

View File

@@ -1,40 +0,0 @@
import ../../logutils
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./errored
logScope:
topics = "marketplace sales failed"
type
SaleFailed* = ref object of SaleState
SaleFailedError* = object of SaleError
method `$`*(state: SaleFailed): string =
"SaleFailed"
method run*(
state: SaleFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market
without request =? data.request:
raiseAssert "no sale request"
try:
let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Removing slot from mySlots",
requestId = data.requestId, slotIndex = data.slotIndex
await market.freeSlot(slot.id)
let error = newException(SaleFailedError, "Sale failed")
return some State(SaleErrored(error: error))
except CancelledError as e:
trace "SaleFailed.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleFailed.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,77 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import ../../conf
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errored
import ./cancelled
import ./failed
import ./proving
when storage_enable_proof_failures:
import ./provingsimulated
logScope:
topics = "marketplace sales filled"
type
SaleFilled* = ref object of SaleState
HostMismatchError* = object of CatchableError
method onCancelled*(state: SaleFilled, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SaleFilled, request: StorageRequest): ?State =
return some State(SaleFailed())
method `$`*(state: SaleFilled): string =
"SaleFilled"
method run*(
state: SaleFilled, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
let market = context.market
try:
let host = await market.getHost(data.requestId, data.slotIndex)
let me = await market.getSigner()
if host == me.some:
info "Slot succesfully filled",
requestId = data.requestId, slotIndex = data.slotIndex
without request =? data.request:
raiseAssert "no sale request"
if onFilled =? agent.onFilled:
onFilled(request, data.slotIndex)
without onExpiryUpdate =? context.onExpiryUpdate:
raiseAssert "onExpiryUpdate callback not set"
let requestEnd = await market.getRequestEnd(data.requestId)
if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption:
return some State(SaleErrored(error: err))
when storage_enable_proof_failures:
if context.simulateProofFailures > 0:
info "Proving with failure rate", rate = context.simulateProofFailures
return some State(
SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures)
)
return some State(SaleProving())
else:
let error = newException(HostMismatchError, "Slot filled by other host")
return some State(SaleErrored(error: error))
except CancelledError as e:
trace "SaleFilled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleFilled.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,63 +0,0 @@
import pkg/stint
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./filled
import ./cancelled
import ./failed
import ./ignored
import ./errored
logScope:
topics = "marketplace sales filling"
type SaleFilling* = ref object of SaleState
proof*: Groth16Proof
method `$`*(state: SaleFilling): string =
"SaleFilling"
method onCancelled*(state: SaleFilling, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SaleFilling, request: StorageRequest): ?State =
return some State(SaleFailed())
method run*(
state: SaleFilling, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market
without (request =? data.request):
raiseAssert "Request not set"
logScope:
requestId = data.requestId
slotIndex = data.slotIndex
try:
without collateral =? await market.slotCollateral(data.requestId, data.slotIndex),
err:
error "Failure attempting to fill slot: unable to calculate collateral",
error = err.msg
return some State(SaleErrored(error: err))
debug "Filling slot"
try:
await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
except SlotStateMismatchError as e:
debug "Slot is already filled, ignoring slot"
return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
except MarketError as e:
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the SaleState
return some State(SaleFilled())
except CancelledError as e:
trace "SaleFilling.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleFilling.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,48 +0,0 @@
import pkg/chronos
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./cancelled
import ./failed
import ./errored
logScope:
topics = "marketplace sales finished"
type SaleFinished* = ref object of SaleState
returnedCollateral*: ?UInt256
method `$`*(state: SaleFinished): string =
"SaleFinished"
method onCancelled*(state: SaleFinished, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SaleFinished, request: StorageRequest): ?State =
return some State(SaleFailed())
method run*(
state: SaleFinished, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
without request =? data.request:
raiseAssert "no sale request"
info "Slot finished and paid out",
requestId = data.requestId, slotIndex = data.slotIndex
try:
if onClear =? agent.context.onClear:
onClear(request, data.slotIndex)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(returnedCollateral = state.returnedCollateral)
except CancelledError as e:
trace "SaleFilled.run onCleanUp was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleFilled.run in onCleanUp callback", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,51 +0,0 @@
import pkg/chronos
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./errored
logScope:
topics = "marketplace sales ignored"
# Ignored slots could mean there was no availability or that the slot could
# not be reserved.
type SaleIgnored* = ref object of SaleState
reprocessSlot*: bool # re-add slot to queue with `seen` flag
returnsCollateral*: bool # returns collateral when a reservation was created
method `$`*(state: SaleIgnored): string =
"SaleIgnored"
method run*(
state: SaleIgnored, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let market = agent.context.market
without request =? data.request:
raiseAssert "no sale request"
var returnedCollateral = UInt256.none
try:
if state.returnsCollateral:
# The returnedCollateral is needed because a reservation could
# be created and the collateral assigned to that reservation.
# The returnedCollateral will be used in the cleanup function
# and be passed to the deleteReservation function.
let slot = Slot(request: request, slotIndex: data.slotIndex)
returnedCollateral = request.ask.collateralPerSlot.some
if onCleanUp =? agent.onCleanUp:
await onCleanUp(
reprocessSlot = state.reprocessSlot, returnedCollateral = returnedCollateral
)
except CancelledError as e:
trace "SaleIgnored.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleIgnored.run in onCleanUp", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,71 +0,0 @@
import pkg/questionable/results
import ../../clock
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./filling
import ./cancelled
import ./errored
import ./failed
logScope:
topics = "marketplace sales initial-proving"
type SaleInitialProving* = ref object of SaleState
method `$`*(state: SaleInitialProving): string =
"SaleInitialProving"
method onCancelled*(state: SaleInitialProving, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SaleInitialProving, request: StorageRequest): ?State =
return some State(SaleFailed())
proc waitUntilNextPeriod(clock: Clock, periodicity: Periodicity) {.async.} =
trace "Waiting until next period"
let period = periodicity.periodOf(clock.now().Timestamp)
let periodEnd = periodicity.periodEnd(period)
await clock.waitUntil((periodEnd + 1).toSecondsSince1970)
proc waitForStableChallenge(market: Market, clock: Clock, slotId: SlotId) {.async.} =
let periodicity = await market.periodicity()
let downtime = await market.proofDowntime()
await clock.waitUntilNextPeriod(periodicity)
while (await market.getPointer(slotId)) > (256 - downtime):
await clock.waitUntilNextPeriod(periodicity)
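For illustration, the loop condition above in plain integers: with pointer values 0..255 and a proof downtime of `downtime` positions, the code waits for a period in which the pointer is outside the final downtime window, so the challenge stays valid for the entire period.

# Illustrative sketch only: mirrors `getPointer(slotId) > 256 - downtime`.
proc inDowntime(pointer, downtime: int): bool =
  pointer > 256 - downtime

when isMainModule:
  echo inDowntime(250, 64) # true: wait for the next period
  echo inDowntime(100, 64) # false: stable, safe to generate the proof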
method run*(
state: SaleInitialProving, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let context = SalesAgent(machine).context
let market = context.market
let clock = context.clock
without request =? data.request:
raiseAssert "no sale request"
without onProve =? context.onProve:
raiseAssert "onProve callback not set"
try:
debug "Waiting for a proof challenge that is valid for the entire period"
let slot = Slot(request: request, slotIndex: data.slotIndex)
await waitForStableChallenge(market, clock, slot.id)
debug "Generating initial proof", requestId = data.requestId
let challenge = await context.market.getChallenge(slot.id)
without proof =? (await onProve(slot, challenge)), err:
error "Failed to generate initial proof", error = err.msg
return some State(SaleErrored(error: err))
debug "Finished proof calculation", requestId = data.requestId
return some State(SaleFilling(proof: proof))
except CancelledError as e:
trace "SaleInitialProving.run onCleanUp was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleInitialProving.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,46 +0,0 @@
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./cancelled
import ./failed
import ./finished
import ./errored
logScope:
topics = "marketplace sales payout"
type SalePayout* = ref object of SaleState
method `$`*(state: SalePayout): string =
"SalePayout"
method onCancelled*(state: SalePayout, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SalePayout, request: StorageRequest): ?State =
return some State(SaleFailed())
method run*(
state: SalePayout, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market
without request =? data.request:
raiseAssert "no sale request"
try:
let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Collecting finished slot's reward",
requestId = data.requestId, slotIndex = data.slotIndex
let currentCollateral = await market.currentCollateral(slot.id)
await market.freeSlot(slot.id)
return some State(SaleFinished(returnedCollateral: some currentCollateral))
except CancelledError as e:
trace "SalePayout.run onCleanUp was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SalePayout.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,110 +0,0 @@
import pkg/questionable
import pkg/questionable/results
import pkg/metrics
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./cancelled
import ./failed
import ./filled
import ./ignored
import ./slotreserving
import ./errored
declareCounter(
codex_reservations_availability_mismatch, "codex reservations availability_mismatch"
)
type SalePreparing* = ref object of SaleState
logScope:
topics = "marketplace sales preparing"
method `$`*(state: SalePreparing): string =
"SalePreparing"
method onCancelled*(state: SalePreparing, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SalePreparing, request: StorageRequest): ?State =
return some State(SaleFailed())
method onSlotFilled*(
state: SalePreparing, requestId: RequestId, slotIndex: uint64
): ?State =
return some State(SaleFilled())
method run*(
state: SalePreparing, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
let market = context.market
let reservations = context.reservations
try:
await agent.retrieveRequest()
await agent.subscribe()
without request =? data.request:
error "request could not be retrieved", id = data.requestId
let error = newException(SaleError, "request could not be retrieved")
return some State(SaleErrored(error: error))
let slotId = slotId(data.requestId, data.slotIndex)
let state = await market.slotState(slotId)
if state != SlotState.Free and state != SlotState.Repair:
return some State(SaleIgnored(reprocessSlot: false))
# TODO: Once implemented, check to ensure the host is allowed to fill the slot,
# due to the [sliding window mechanism](https://github.com/logos-storage/logos-storage-research/blob/master/design/marketplace.md#dispersal)
logScope:
slotIndex = data.slotIndex
slotSize = request.ask.slotSize
duration = request.ask.duration
pricePerBytePerSecond = request.ask.pricePerBytePerSecond
collateralPerByte = request.ask.collateralPerByte
let requestEnd = await market.getRequestEnd(data.requestId)
without availability =?
await reservations.findAvailability(
request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond,
request.ask.collateralPerByte, requestEnd,
):
debug "No availability found for request, ignoring"
return some State(SaleIgnored(reprocessSlot: true))
info "Availability found for request, creating reservation"
without reservation =?
await noCancel reservations.createReservation(
availability.id, request.ask.slotSize, request.id, data.slotIndex,
request.ask.collateralPerByte, requestEnd,
), error:
trace "Creation of reservation failed"
# Race condition:
# reservations.findAvailability (line 64) is no guarantee: we can never know for certain that the reservation can be created until it has actually been created.
# Should createReservation fail because there's no space, we proceed to SaleIgnored.
if error of BytesOutOfBoundsError:
# Let's monitor how often this happens; if it is frequent, we can handle it more intelligently
codex_reservations_availability_mismatch.inc()
return some State(SaleIgnored(reprocessSlot: true))
return some State(SaleErrored(error: error))
trace "Reservation created successfully"
data.reservation = some reservation
return some State(SaleSlotReserving())
except CancelledError as e:
trace "SalePreparing.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SalePreparing.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,166 +0,0 @@
import std/options
import pkg/questionable/results
import ../../clock
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ../salescontext
import ./cancelled
import ./failed
import ./errored
import ./payout
logScope:
topics = "marketplace sales proving"
type
SlotFreedError* = object of CatchableError
SlotNotFilledError* = object of CatchableError
SaleProving* = ref object of SaleState
loop: Future[void]
method prove*(
state: SaleProving,
slot: Slot,
challenge: ProofChallenge,
onProve: OnProve,
market: Market,
currentPeriod: Period,
) {.base, async.} =
try:
without proof =? (await onProve(slot, challenge)), err:
error "Failed to generate proof", error = err.msg
# In this state, there's nothing we can do except try again next time.
return
debug "Submitting proof", currentPeriod = currentPeriod, slotId = slot.id
await market.submitProof(slot.id, proof)
except CancelledError as error:
trace "Submitting proof cancelled"
raise error
except CatchableError as e:
error "Submitting proof failed", msg = e.msgDetail
proc proveLoop(
state: SaleProving,
market: Market,
clock: Clock,
request: StorageRequest,
slotIndex: uint64,
onProve: OnProve,
) {.async.} =
let slot = Slot(request: request, slotIndex: slotIndex)
let slotId = slot.id
logScope:
period = currentPeriod
requestId = request.id
slotIndex
slotId = slot.id
proc getCurrentPeriod(): Future[Period] {.async.} =
let periodicity = await market.periodicity()
return periodicity.periodOf(clock.now().Timestamp)
proc waitUntilPeriod(period: Period) {.async.} =
let periodicity = await market.periodicity()
# Ensure that we're past the period boundary by waiting an additional second
await clock.waitUntil((periodicity.periodStart(period) + 1).toSecondsSince1970)
while true:
let currentPeriod = await getCurrentPeriod()
let slotState = await market.slotState(slot.id)
case slotState
of SlotState.Filled:
debug "Proving for new period", period = currentPeriod
if (await market.isProofRequired(slotId)) or
(await market.willProofBeRequired(slotId)):
let challenge = await market.getChallenge(slotId)
debug "Proof is required", period = currentPeriod, challenge = challenge
await state.prove(slot, challenge, onProve, market, currentPeriod)
of SlotState.Cancelled:
debug "Slot reached cancelled state"
# do nothing, let onCancelled callback take care of it
of SlotState.Repair:
warn "Slot was forcible freed"
let message = "Slot was forcible freed and host was removed from its hosting"
raise newException(SlotFreedError, message)
of SlotState.Failed:
debug "Slot reached failed state"
# do nothing, let onFailed callback take care of it
of SlotState.Finished:
debug "Slot reached finished state", period = currentPeriod
return # exit the loop
else:
let message = "Slot is not in Filled state, but in state: " & $slotState
raise newException(SlotNotFilledError, message)
debug "waiting until next period"
await waitUntilPeriod(currentPeriod + 1)
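For illustration, the period arithmetic the proving loop relies on, with a hypothetical fixed-length Periodicity in place of the project's type:

# Illustrative sketch only: integer period arithmetic.
type Periodicity = object
  length: int64 # seconds per period

proc periodOf(p: Periodicity, timestamp: int64): int64 =
  timestamp div p.length

proc periodStart(p: Periodicity, period: int64): int64 =
  period * p.length

when isMainModule:
  let p = Periodicity(length: 60)
  let now = 12_345'i64
  let current = p.periodOf(now) # 205
  # waitUntilPeriod above waits one second past the next period's start to be
  # sure the boundary has passed.
  echo p.periodStart(current + 1) + 1 # 12361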
method `$`*(state: SaleProving): string =
"SaleProving"
method onCancelled*(state: SaleProving, request: StorageRequest): ?State =
# state.loop cancellation happens automatically when run is cancelled due to
# state change
return some State(SaleCancelled())
method onFailed*(state: SaleProving, request: StorageRequest): ?State =
# state.loop cancellation happens automatically when run is cancelled due to
# state change
return some State(SaleFailed())
method run*(
state: SaleProving, machine: Machine
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let context = SalesAgent(machine).context
without request =? data.request:
raiseAssert "no sale request"
without onProve =? context.onProve:
raiseAssert "onProve callback not set"
without market =? context.market:
raiseAssert("market not set")
without clock =? context.clock:
raiseAssert("clock not set")
try:
debug "Start proving", requestId = data.requestId, slotIndex = data.slotIndex
try:
let loop = state.proveLoop(market, clock, request, data.slotIndex, onProve)
state.loop = loop
await loop
except CancelledError as e:
trace "proving loop cancelled"
discard
except CatchableError as e:
error "Proving failed",
msg = e.msg, typ = $(type e), stack = e.getStackTrace(), error = e.msgDetail
return some State(SaleErrored(error: e))
finally:
# Cleanup of the proving loop
debug "Stopping proving.", requestId = data.requestId, slotIndex = data.slotIndex
if not state.loop.isNil:
if not state.loop.finished:
try:
await state.loop.cancelAndWait()
except CancelledError:
discard
except CatchableError as e:
error "Error during cancellation of proving loop", msg = e.msg
state.loop = nil
return some State(SalePayout())
except CancelledError as e:
trace "SaleProving.run onCleanUp was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleProving.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,57 +0,0 @@
import ../../conf
when storage_enable_proof_failures:
import std/strutils
import pkg/stint
import pkg/ethers
import ../../contracts/marketplace
import ../../contracts/requests
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salescontext
import ./proving
import ./errored
logScope:
topics = "marketplace sales simulated-proving"
type SaleProvingSimulated* = ref object of SaleProving
failEveryNProofs*: int
proofCount: int
proc onSubmitProofError(error: ref CatchableError, period: Period, slotId: SlotId) =
error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail
method prove*(
state: SaleProvingSimulated,
slot: Slot,
challenge: ProofChallenge,
onProve: OnProve,
market: Market,
currentPeriod: Period,
) {.async.} =
try:
trace "Processing proving in simulated mode"
state.proofCount += 1
if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0:
state.proofCount = 0
try:
warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id
await market.submitProof(slot.id, Groth16Proof.default)
except ProofInvalidError as e:
discard # expected
except CancelledError as error:
raise error
except CatchableError as e:
onSubmitProofError(e, currentPeriod, slot.id)
else:
await procCall SaleProving(state).prove(
slot, challenge, onProve, market, currentPeriod
)
except CancelledError as e:
trace "Submitting INVALID proof cancelled", error = e.msgDetail
raise e
except CatchableError as e:
error "Submitting INVALID proof failed", error = e.msgDetail

View File

@ -1,65 +0,0 @@
import pkg/questionable
import pkg/metrics
import ../../logutils
import ../../market
import ../../utils/exceptions
import ../salesagent
import ../statemachine
import ./cancelled
import ./failed
import ./ignored
import ./downloading
import ./errored
type SaleSlotReserving* = ref object of SaleState
logScope:
topics = "marketplace sales reserving"
method `$`*(state: SaleSlotReserving): string =
"SaleSlotReserving"
method onCancelled*(state: SaleSlotReserving, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SaleSlotReserving, request: StorageRequest): ?State =
return some State(SaleFailed())
method run*(
state: SaleSlotReserving, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
let market = context.market
logScope:
requestId = data.requestId
slotIndex = data.slotIndex
try:
let canReserve = await market.canReserveSlot(data.requestId, data.slotIndex)
if canReserve:
try:
trace "Reserving slot"
await market.reserveSlot(data.requestId, data.slotIndex)
except SlotReservationNotAllowedError as e:
debug "Slot cannot be reserved, ignoring", error = e.msg
return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
except MarketError as e:
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the SaleState
trace "Slot successfully reserved"
return some State(SaleDownloading())
else:
# do not re-add this slot to the queue, and return bytes from Reservation to
# the Availability
debug "Slot cannot be reserved, ignoring"
return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
except CancelledError as e:
trace "SaleSlotReserving.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleSlotReserving.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,73 +0,0 @@
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ../salesagent
import ./filled
import ./finished
import ./failed
import ./errored
import ./proving
import ./cancelled
import ./payout
logScope:
topics = "marketplace sales unknown"
type
SaleUnknown* = ref object of SaleState
SaleUnknownError* = object of CatchableError
UnexpectedSlotError* = object of SaleUnknownError
method `$`*(state: SaleUnknown): string =
"SaleUnknown"
method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State =
return some State(SaleCancelled())
method onFailed*(state: SaleUnknown, request: StorageRequest): ?State =
return some State(SaleFailed())
method run*(
state: SaleUnknown, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let market = agent.context.market
try:
await agent.retrieveRequest()
await agent.subscribe()
without request =? data.request:
error "request could not be retrieved", id = data.requestId
let error = newException(SaleError, "request could not be retrieved")
return some State(SaleErrored(error: error))
let slotId = slotId(data.requestId, data.slotIndex)
let slotState = await market.slotState(slotId)
case slotState
of SlotState.Free:
let error =
newException(UnexpectedSlotError, "Slot state on chain should not be 'free'")
return some State(SaleErrored(error: error))
of SlotState.Filled:
return some State(SaleFilled())
of SlotState.Finished:
return some State(SalePayout())
of SlotState.Paid:
return some State(SaleFinished())
of SlotState.Failed:
return some State(SaleFailed())
of SlotState.Cancelled:
return some State(SaleCancelled())
of SlotState.Repair:
let error = newException(
SlotFreedError, "Slot was forcible freed and host was removed from its hosting"
)
return some State(SaleErrored(error: error))
except CancelledError as e:
trace "SaleUnknown.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during SaleUnknown.run", error = e.msgDetail
return some State(SaleErrored(error: e))

View File

@@ -1,6 +0,0 @@
import ./slots/builder
import ./slots/sampler
import ./slots/proofs
import ./slots/types
export builder, sampler, proofs, types

View File

@@ -1,8 +0,0 @@
import ./builder/builder
import ./converters
import ../merkletree
export builder, converters
type Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash]

View File

@@ -1,404 +0,0 @@
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/math
import std/sequtils
import std/sugar
import pkg/libp2p
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/constantine/math/io/io_fields
import ../../logutils
import ../../utils
import ../../stores
import ../../manifest
import ../../merkletree
import ../../utils/asynciter
import ../../indexingstrategy
import ../converters
export converters, asynciter
logScope:
topics = "codex slotsbuilder"
type SlotsBuilder*[T, H] = ref object of RootObj
store: BlockStore
manifest: Manifest # current manifest
strategy: IndexingStrategy # indexing strategy
cellSize: NBytes # cell size
numSlotBlocks: Natural
# number of blocks per slot (should yield a power of two number of cells)
slotRoots: seq[H] # roots of the slots
emptyBlock: seq[byte] # empty block
verifiableTree: ?T # verification tree (dataset tree)
emptyDigestTree: T # empty digest tree for empty blocks
func verifiable*[T, H](self: SlotsBuilder[T, H]): bool {.inline.} =
## Returns true if the slots are verifiable.
##
self.manifest.verifiable
func slotRoots*[T, H](self: SlotsBuilder[T, H]): seq[H] {.inline.} =
## Returns the slot roots.
##
self.slotRoots
func verifyTree*[T, H](self: SlotsBuilder[T, H]): ?T {.inline.} =
## Returns the slots tree (verification tree).
##
self.verifiableTree
func verifyRoot*[T, H](self: SlotsBuilder[T, H]): ?H {.inline.} =
## Returns the slots root (verification root).
##
if tree =? self.verifyTree and root =? tree.root:
return some root
func numSlots*[T, H](self: SlotsBuilder[T, H]): Natural =
## Number of slots.
##
self.manifest.numSlots
func numSlotBlocks*[T, H](self: SlotsBuilder[T, H]): Natural =
## Number of blocks per slot.
##
self.numSlotBlocks
func numBlocks*[T, H](self: SlotsBuilder[T, H]): Natural =
## Number of blocks.
##
self.numSlotBlocks * self.manifest.numSlots
func slotBytes*[T, H](self: SlotsBuilder[T, H]): NBytes =
## Number of bytes per slot.
##
(self.manifest.blockSize.int * self.numSlotBlocks).NBytes
func numBlockCells*[T, H](self: SlotsBuilder[T, H]): Natural =
## Number of cells per block.
##
(self.manifest.blockSize div self.cellSize).Natural
func cellSize*[T, H](self: SlotsBuilder[T, H]): NBytes =
## Cell size.
##
self.cellSize
func numSlotCells*[T, H](self: SlotsBuilder[T, H]): Natural =
## Number of cells per slot.
##
self.numBlockCells * self.numSlotBlocks
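A worked example with illustrative numbers of the cell counts above and the power-of-two padding computed in this file's constructor:

# Illustrative numbers only; the real values come from the manifest.
import std/math

when isMainModule:
  let
    blockSize = 65_536 # bytes per block
    cellSize = 2_048 # bytes per cell
    numSlotBlocks = 10 # blocks per slot
    numBlockCells = blockSize div cellSize # 32 cells per block
    numSlotCells = numSlotBlocks * numBlockCells # 320 cells per slot
    pow2SlotCells = nextPowerOfTwo(numSlotCells) # padded up to 512
    numPadSlotBlocks = pow2SlotCells div numBlockCells - numSlotBlocks # 6
  echo (numBlockCells, numSlotCells, pow2SlotCells, numPadSlotBlocks)
  # -> (32, 320, 512, 6)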
func slotIndicesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] =
## Returns the slot indices.
##
self.strategy.getIndices(slot).catch
func slotIndices*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] =
## Returns the slot indices.
##
if iter =? self.strategy.getIndices(slot).catch:
return toSeq(iter)
func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest =
## Returns the manifest.
##
self.manifest
proc buildBlockTree*[T, H](
self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural
): Future[?!(seq[byte], T)] {.async: (raises: [CancelledError]).} =
## Build the block digest tree and return a tuple with the
## block data and the tree.
##
logScope:
blkIdx = blkIdx
slotPos = slotPos
numSlotBlocks = self.manifest.numSlotBlocks
cellSize = self.cellSize
trace "Building block tree"
if slotPos > (self.manifest.numSlotBlocks - 1):
# pad blocks are 0 byte blocks
trace "Returning empty digest tree for pad block"
return success (self.emptyBlock, self.emptyDigestTree)
without blk =? await self.store.getBlock(self.manifest.treeCid, blkIdx), err:
error "Failed to get block CID for tree at index", err = err.msg
return failure(err)
if blk.isEmpty:
success (self.emptyBlock, self.emptyDigestTree)
else:
without tree =? T.digestTree(blk.data, self.cellSize.int), err:
error "Failed to create digest for block", err = err.msg
return failure(err)
success (blk.data, tree)
proc getCellHashes*[T, H](
self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!seq[H]] {.async: (raises: [CancelledError, IndexingError]).} =
## Collect all the cells from a block and return
## their hashes.
##
let
treeCid = self.manifest.treeCid
blockCount = self.manifest.blocksCount
numberOfSlots = self.manifest.numSlots
logScope:
treeCid = treeCid
origBlockCount = blockCount
numberOfSlots = numberOfSlots
slotIndex = slotIndex
let hashes = collect(newSeq):
for i, blkIdx in self.strategy.getIndices(slotIndex):
logScope:
blkIdx = blkIdx
pos = i
trace "Getting block CID for tree at index"
without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and digest =? tree.root,
err:
error "Failed to get block CID for tree at index", err = err.msg
return failure(err)
trace "Get block digest", digest = digest.toHex
digest
success hashes
proc buildSlotTree*[T, H](
self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!T] {.async: (raises: [CancelledError]).} =
## Build the slot tree from the block digest hashes
## and return the tree.
try:
without cellHashes =? (await self.getCellHashes(slotIndex)), err:
error "Failed to select slot blocks", err = err.msg
return failure(err)
T.init(cellHashes)
except IndexingError as err:
error "Failed to build slot tree", err = err.msg
return failure(err)
proc buildSlot*[T, H](
self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!H] {.async: (raises: [CancelledError]).} =
## Build a slot tree and store the proofs in
## the block store.
##
logScope:
cid = self.manifest.treeCid
slotIndex = slotIndex
trace "Building slot tree"
without tree =? (await self.buildSlotTree(slotIndex)) and
treeCid =? tree.root .? toSlotCid, err:
error "Failed to build slot tree", err = err.msg
return failure(err)
trace "Storing slot tree", treeCid, slotIndex, leaves = tree.leavesCount
for i, leaf in tree.leaves:
without cellCid =? leaf.toCellCid, err:
error "Failed to get CID for slot cell", err = err.msg
return failure(err)
without proof =? tree.getProof(i) and encodableProof =? proof.toEncodableProof, err:
error "Failed to get proof for slot tree", err = err.msg
return failure(err)
if err =?
(await self.store.putCidAndProof(treeCid, i, cellCid, encodableProof)).errorOption:
error "Failed to store slot tree", err = err.msg
return failure(err)
tree.root()
func buildVerifyTree*[T, H](self: SlotsBuilder[T, H], slotRoots: openArray[H]): ?!T =
T.init(@slotRoots)
proc buildSlots*[T, H](
self: SlotsBuilder[T, H]
): Future[?!void] {.async: (raises: [CancelledError]).} =
## Build all slot trees and store them in the block store.
##
logScope:
cid = self.manifest.treeCid
blockCount = self.manifest.blocksCount
trace "Building slots"
if self.slotRoots.len == 0:
self.slotRoots = collect(newSeq):
for i in 0 ..< self.manifest.numSlots:
without slotRoot =? (await self.buildSlot(i)), err:
error "Failed to build slot", err = err.msg, index = i
return failure(err)
slotRoot
without tree =? self.buildVerifyTree(self.slotRoots) and root =? tree.root, err:
error "Failed to build slot roots tree", err = err.msg
return failure(err)
if verifyTree =? self.verifyTree and verifyRoot =? verifyTree.root:
if not bool(verifyRoot == root): # TODO: `!=` doesn't work for SecretBool
return failure "Existing slots root doesn't match reconstructed root."
self.verifiableTree = some tree
success()
proc buildManifest*[T, H](
self: SlotsBuilder[T, H]
): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
if err =? (await self.buildSlots()).errorOption:
error "Failed to build slot roots", err = err.msg
return failure(err)
without rootCids =? self.slotRoots.toSlotCids(), err:
error "Failed to map slot roots to CIDs", err = err.msg
return failure(err)
without rootProvingCidRes =? self.verifyRoot .? toVerifyCid() and
rootProvingCid =? rootProvingCidRes, err:
error "Failed to map slot roots to CIDs", err = err.msg
return failure(err)
Manifest.new(
self.manifest, rootProvingCid, rootCids, self.cellSize, self.strategy.strategyType
)
proc new*[T, H](
_: type SlotsBuilder[T, H],
store: BlockStore,
manifest: Manifest,
strategy = LinearStrategy,
cellSize = DefaultCellSize,
): ?!SlotsBuilder[T, H] =
if not manifest.protected:
trace "Manifest is not protected."
return failure("Manifest is not protected.")
logScope:
blockSize = manifest.blockSize
strategy = strategy
cellSize = cellSize
if (manifest.blocksCount mod manifest.numSlots) != 0:
const msg = "Number of blocks must be divisible by number of slots."
trace msg
return failure(msg)
let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize
if (manifest.blockSize mod cellSize) != 0.NBytes:
const msg = "Block size must be divisible by cell size."
trace msg
return failure(msg)
let
numSlotBlocks = manifest.numSlotBlocks
numBlockCells = (manifest.blockSize div cellSize).int # number of cells per block
numSlotCells = manifest.numSlotBlocks * numBlockCells
# number of uncorrected slot cells
pow2SlotCells = nextPowerOfTwo(numSlotCells) # pow2 cells per slot
numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks
# pad blocks per slot
numSlotBlocksTotal =
# pow2 blocks per slot
if numPadSlotBlocks > 0:
numPadSlotBlocks + numSlotBlocks
else:
numSlotBlocks
numBlocksTotal = numSlotBlocksTotal * manifest.numSlots # total blocks across all slots
emptyBlock = newSeq[byte](manifest.blockSize.int)
emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int)
strategy =
?strategy.init(
0,
manifest.blocksCount - 1,
manifest.numSlots,
manifest.numSlots,
numPadSlotBlocks,
).catch
logScope:
numSlotBlocks = numSlotBlocks
numBlockCells = numBlockCells
numSlotCells = numSlotCells
pow2SlotCells = pow2SlotCells
numPadSlotBlocks = numPadSlotBlocks
numBlocksTotal = numBlocksTotal
numSlotBlocksTotal = numSlotBlocksTotal
strategy = strategy.strategyType
trace "Creating slots builder"
var self = SlotsBuilder[T, H](
store: store,
manifest: manifest,
strategy: strategy,
cellSize: cellSize,
emptyBlock: emptyBlock,
numSlotBlocks: numSlotBlocksTotal,
emptyDigestTree: emptyDigestTree,
)
if manifest.verifiable:
if manifest.slotRoots.len == 0 or manifest.slotRoots.len != manifest.numSlots:
return failure "Manifest is verifiable but slot roots are missing or invalid."
let
slotRoots = manifest.slotRoots.mapIt((?it.fromSlotCid()))
tree = ?self.buildVerifyTree(slotRoots)
expectedRoot = ?manifest.verifyRoot.fromVerifyCid()
verifyRoot = ?tree.root
if verifyRoot != expectedRoot:
return failure "Existing slots root doesn't match reconstructed root."
self.slotRoots = slotRoots
self.verifiableTree = some tree
success self
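# --- Editor's note: illustrative sketch, not part of the original file. ---
# A standalone example of the power-of-two padding arithmetic performed in
# `new` above, assuming a hypothetical manifest with a 64 KiB block size,
# a 2 KiB cell size and 10 original blocks per slot.
import std/math

when isMainModule:
  let
    blockSize = 65536 # bytes per block (assumed)
    cellSize = 2048 # bytes per cell (assumed)
    numSlotBlocks = 10 # original blocks per slot (assumed)
    numBlockCells = blockSize div cellSize # 32 cells per block
    numSlotCells = numSlotBlocks * numBlockCells # 320 cells per slot
    pow2SlotCells = nextPowerOfTwo(numSlotCells) # rounded up to 512
    numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks # 6 pad blocks
  doAssert pow2SlotCells == 512
  doAssert numPadSlotBlocks == 6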


@ -1,82 +0,0 @@
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import pkg/libp2p
import pkg/stew/arrayops
import pkg/questionable
import pkg/questionable/results
import pkg/poseidon2
import pkg/poseidon2/io
import ../codextypes
import ../merkletree
import ../errors
import ../utils/digest
func toCid(hash: Poseidon2Hash, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Cid =
let
mhash = ?MultiHash.init(mcodec, hash.toBytes).mapFailure
treeCid = ?Cid.init(CIDv1, cidCodec, mhash).mapFailure
success treeCid
proc toPoseidon2Hash(
cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec
): ?!Poseidon2Hash =
if cid.cidver != CIDv1:
return failure("Unexpected CID version")
if cid.mcodec != cidCodec:
return failure(
"Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec
)
let
mhash = ?cid.mhash.mapFailure
bytes: array[32, byte] = array[32, byte].initCopyFrom(mhash.digestBytes())
hash = ?Poseidon2Hash.fromBytes(bytes).toFailure
success hash
func toCellCid*(hash: Poseidon2Hash): ?!Cid =
toCid(hash, Pos2Bn128MrklCodec, CodexSlotCellCodec)
func fromCellCid*(cid: Cid): ?!Poseidon2Hash =
toPoseidon2Hash(cid, Pos2Bn128MrklCodec, CodexSlotCellCodec)
func toSlotCid*(hash: Poseidon2Hash): ?!Cid =
toCid(hash, multiCodec("identity"), SlotRootCodec)
func toSlotCids*(slotRoots: openArray[Poseidon2Hash]): ?!seq[Cid] =
success slotRoots.mapIt(?it.toSlotCid)
func fromSlotCid*(cid: Cid): ?!Poseidon2Hash =
toPoseidon2Hash(cid, multiCodec("identity"), SlotRootCodec)
func toVerifyCid*(hash: Poseidon2Hash): ?!Cid =
toCid(hash, multiCodec("identity"), SlotProvingRootCodec)
func fromVerifyCid*(cid: Cid): ?!Poseidon2Hash =
toPoseidon2Hash(cid, multiCodec("identity"), SlotProvingRootCodec)
func toEncodableProof*(proof: Poseidon2Proof): ?!CodexProof =
let encodableProof = CodexProof(
mcodec: multiCodec("identity"),
index: proof.index,
nleaves: proof.nleaves,
path: proof.path.mapIt(@(it.toBytes)),
)
success encodableProof
func toVerifiableProof*(proof: CodexProof): ?!Poseidon2Proof =
let nodes = proof.path.mapIt(?Poseidon2Hash.fromBytes(it.toArray32).toFailure)
Poseidon2Proof.init(index = proof.index, nleaves = proof.nleaves, nodes = nodes)


@ -1,5 +0,0 @@
import ./proofs/backends
import ./proofs/prover
import ./proofs/backendfactory
export circomcompat, prover, backendfactory


@ -1,82 +0,0 @@
import os
import strutils
import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/confutils/defs
import pkg/stew/io2
import pkg/ethers
import ../../conf
import ./backends
import ./backendutils
proc initializeFromConfig(config: CodexConf, utils: BackendUtils): ?!AnyBackend =
if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) or
not endsWith($config.circomR1cs, ".r1cs"):
return failure("Circom R1CS file not accessible")
if not fileAccessible($config.circomWasm, {AccessFlags.Read}) or
not endsWith($config.circomWasm, ".wasm"):
return failure("Circom wasm file not accessible")
if not fileAccessible($config.circomZkey, {AccessFlags.Read}) or
not endsWith($config.circomZkey, ".zkey"):
return failure("Circom zkey file not accessible")
trace "Initialized prover backend from cli config"
success(
utils.initializeCircomBackend(
$config.circomR1cs, $config.circomWasm, $config.circomZkey
)
)
proc r1csFilePath(config: CodexConf): string =
config.circuitDir / "proof_main.r1cs"
proc wasmFilePath(config: CodexConf): string =
config.circuitDir / "proof_main.wasm"
proc zkeyFilePath(config: CodexConf): string =
config.circuitDir / "proof_main.zkey"
proc initializeFromCircuitDirFiles(
config: CodexConf, utils: BackendUtils
): ?!AnyBackend {.gcsafe.} =
if fileExists(config.r1csFilePath) and fileExists(config.wasmFilePath) and
fileExists(config.zkeyFilePath):
trace "Initialized prover backend from local files"
return success(
utils.initializeCircomBackend(
config.r1csFilePath, config.wasmFilePath, config.zkeyFilePath
)
)
failure("Circuit files not found")
proc suggestDownloadTool(config: CodexConf) =
without address =? config.marketplaceAddress:
raise (ref Defect)(
msg: "Proving backend initializing while marketplace address not set."
)
let
tokens = ["cirdl", "\"" & $config.circuitDir & "\"", config.ethProvider, $address]
instructions = "'./" & tokens.join(" ") & "'"
warn "Proving circuit files are not found. Please run the following to download them:",
instructions
proc initializeBackend*(
config: CodexConf, utils: BackendUtils = BackendUtils()
): ?!AnyBackend =
without backend =? initializeFromConfig(config, utils), cliErr:
info "Could not initialize prover backend from CLI options...", msg = cliErr.msg
without backend =? initializeFromCircuitDirFiles(config, utils), localErr:
info "Could not initialize prover backend from circuit dir files...",
msg = localErr.msg
suggestDownloadTool(config)
return failure("CircuitFilesNotFound")
# Unexpected: value of backend does not survive leaving each scope. (definition does though...)
return success(backend)
return success(backend)


@ -1,5 +0,0 @@
import ./backends/circomcompat
export circomcompat
type AnyBackend* = CircomCompat


@ -1,240 +0,0 @@
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/sugar
import pkg/chronos
import pkg/questionable/results
import pkg/circomcompat
import ../../types
import ../../../stores
import ../../../contracts
import ./converters
export circomcompat, converters
type
CircomCompat* = object
slotDepth: int # max depth of the slot tree
datasetDepth: int # max depth of dataset tree
blkDepth: int # depth of the block merkle tree (pow2 for now)
cellElms: int # number of field elements per cell
numSamples: int # number of samples per slot
r1csPath: string # path to the r1cs file
wasmPath: string # path to the wasm file
zkeyPath: string # path to the zkey file
backendCfg: ptr CircomBn254Cfg
vkp*: ptr CircomKey
NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H]
func normalizeInput*[H](
self: CircomCompat, input: ProofInputs[H]
): NormalizedProofInputs[H] =
## Parameters in CIRCOM circuits are statically sized and must be properly
## padded before they can be passed onto the circuit. This function takes
## variable length parameters and performs that padding.
##
## The output from this function can be JSON-serialized and used as direct
## inputs to the CIRCOM circuit for testing and debugging when one wishes
## to bypass the Rust FFI.
let normSamples = collect:
for sample in input.samples:
var merklePaths = sample.merklePaths
merklePaths.setLen(self.slotDepth)
Sample[H](cellData: sample.cellData, merklePaths: merklePaths)
var normSlotProof = input.slotProof
normSlotProof.setLen(self.datasetDepth)
NormalizedProofInputs[H] ProofInputs[H](
entropy: input.entropy,
datasetRoot: input.datasetRoot,
slotIndex: input.slotIndex,
slotRoot: input.slotRoot,
nCellsPerSlot: input.nCellsPerSlot,
nSlotsPerDataSet: input.nSlotsPerDataSet,
slotProof: normSlotProof,
samples: normSamples,
)
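# --- Editor's note: illustrative sketch, not part of the original file. ---
# setLen-based padding as used in `normalizeInput` above: a variable-length
# path is extended to the circuit's static depth with default (zero) values.
# The depth of 5 below is an assumption for illustration only.
when isMainModule:
  var merklePaths = @[10, 20, 30] # stand-in for field elements
  merklePaths.setLen(5) # padded up to the static circuit depth
  doAssert merklePaths == @[10, 20, 30, 0, 0]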
proc release*(self: CircomCompat) =
## Release the ctx
##
if not isNil(self.backendCfg):
self.backendCfg.unsafeAddr.release_cfg()
if not isNil(self.vkp):
self.vkp.unsafeAddr.release_key()
proc prove[H](self: CircomCompat, input: NormalizedProofInputs[H]): ?!CircomProof =
doAssert input.samples.len == self.numSamples, "Number of samples does not match"
doAssert input.slotProof.len <= self.datasetDepth,
"Slot proof is too deep - dataset has more slots than what we can handle?"
doAssert input.samples.allIt(
block:
(
it.merklePaths.len <= self.slotDepth + self.blkDepth and
it.cellData.len == self.cellElms
)
), "Merkle paths too deep or cells too big for circuit"
# TODO: All parameters should match circom's static parameters
var ctx: ptr CircomCompatCtx
defer:
if ctx != nil:
ctx.addr.release_circom_compat()
if init_circom_compat(self.backendCfg, addr ctx) != ERR_OK or ctx == nil:
raiseAssert("failed to initialize CircomCompat ctx")
var
entropy = input.entropy.toBytes
dataSetRoot = input.datasetRoot.toBytes
slotRoot = input.slotRoot.toBytes
if ctx.push_input_u256_array("entropy".cstring, entropy[0].addr, entropy.len.uint32) !=
ERR_OK:
return failure("Failed to push entropy")
if ctx.push_input_u256_array(
"dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32
) != ERR_OK:
return failure("Failed to push data set root")
if ctx.push_input_u256_array(
"slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32
) != ERR_OK:
return failure("Failed to push data set root")
if ctx.push_input_u32("nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK:
return failure("Failed to push nCellsPerSlot")
if ctx.push_input_u32("nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) !=
ERR_OK:
return failure("Failed to push nSlotsPerDataSet")
if ctx.push_input_u32("slotIndex".cstring, input.slotIndex.uint32) != ERR_OK:
return failure("Failed to push slotIndex")
var slotProof = input.slotProof.mapIt(it.toBytes).concat
doAssert(slotProof.len == self.datasetDepth)
# arrays are always flattened
if ctx.push_input_u256_array(
"slotProof".cstring, slotProof[0].addr, uint (slotProof[0].len * slotProof.len)
) != ERR_OK:
return failure("Failed to push slot proof")
for s in input.samples:
var
merklePaths = s.merklePaths.mapIt(it.toBytes)
data = s.cellData.mapIt(@(it.toBytes)).concat
if ctx.push_input_u256_array(
"merklePaths".cstring,
merklePaths[0].addr,
uint (merklePaths[0].len * merklePaths.len),
) != ERR_OK:
return failure("Failed to push merkle paths")
if ctx.push_input_u256_array("cellData".cstring, data[0].addr, data.len.uint) !=
ERR_OK:
return failure("Failed to push cell data")
var proofPtr: ptr Proof = nil
let proof =
try:
if (let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr); res != ERR_OK) or
proofPtr == nil:
return failure("Failed to prove - err code: " & $res)
proofPtr[]
finally:
if proofPtr != nil:
proofPtr.addr.release_proof()
success proof
proc prove*[H](self: CircomCompat, input: ProofInputs[H]): ?!CircomProof =
self.prove(self.normalizeInput(input))
proc verify*[H](
self: CircomCompat, proof: CircomProof, inputs: ProofInputs[H]
): ?!bool =
## Verify a proof using a ctx
##
var
proofPtr = unsafeAddr proof
inputs = inputs.toCircomInputs()
try:
let res = verify_circuit(proofPtr, inputs.addr, self.vkp)
if res == ERR_OK:
success true
elif res == ERR_FAILED_TO_VERIFY_PROOF:
success false
else:
failure("Failed to verify proof - err code: " & $res)
finally:
inputs.releaseCircomInputs()
proc init*(
_: type CircomCompat,
r1csPath: string,
wasmPath: string,
zkeyPath: string = "",
slotDepth = DefaultMaxSlotDepth,
datasetDepth = DefaultMaxDatasetDepth,
blkDepth = DefaultBlockDepth,
cellElms = DefaultCellElms,
numSamples = DefaultSamplesNum,
): CircomCompat =
## Create a new ctx
##
var cfg: ptr CircomBn254Cfg
var zkey = if zkeyPath.len > 0: zkeyPath.cstring else: nil
if init_circom_config(r1csPath.cstring, wasmPath.cstring, zkey, cfg.addr) != ERR_OK or
cfg == nil:
if cfg != nil:
cfg.addr.release_cfg()
raiseAssert("failed to initialize circom compat config")
var vkpPtr: ptr VerifyingKey = nil
if cfg.get_verifying_key(vkpPtr.addr) != ERR_OK or vkpPtr == nil:
if vkpPtr != nil:
vkpPtr.addr.release_key()
raiseAssert("Failed to get verifying key")
CircomCompat(
r1csPath: r1csPath,
wasmPath: wasmPath,
zkeyPath: zkeyPath,
slotDepth: slotDepth,
datasetDepth: datasetDepth,
blkDepth: blkDepth,
cellElms: cellElms,
numSamples: numSamples,
backendCfg: cfg,
vkp: vkpPtr,
)


@ -1,54 +0,0 @@
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import pkg/circomcompat
import ../../../contracts
import ../../types
import ../../../merkletree
type
CircomG1* = G1
CircomG2* = G2
CircomProof* = Proof
CircomKey* = VerifyingKey
CircomInputs* = Inputs
proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs =
var
slotIndex = inputs.slotIndex.toF.toBytes.toArray32
datasetRoot = inputs.datasetRoot.toBytes.toArray32
entropy = inputs.entropy.toBytes.toArray32
elms = [entropy, datasetRoot, slotIndex]
let inputsPtr = allocShared0(32 * elms.len)
copyMem(inputsPtr, addr elms[0], elms.len * 32)
CircomInputs(elms: cast[ptr array[32, byte]](inputsPtr), len: elms.len.uint)
proc releaseCircomInputs*(inputs: var CircomInputs) =
if not inputs.elms.isNil:
deallocShared(inputs.elms)
inputs.elms = nil
func toG1*(g: CircomG1): G1Point =
G1Point(x: UInt256.fromBytesLE(g.x), y: UInt256.fromBytesLE(g.y))
func toG2*(g: CircomG2): G2Point =
G2Point(
x: Fp2Element(real: UInt256.fromBytesLE(g.x[0]), imag: UInt256.fromBytesLE(g.x[1])),
y: Fp2Element(real: UInt256.fromBytesLE(g.y[0]), imag: UInt256.fromBytesLE(g.y[1])),
)
func toGroth16Proof*(proof: CircomProof): Groth16Proof =
Groth16Proof(a: proof.a.toG1, b: proof.b.toG2, c: proof.c.toG1)


@ -1,8 +0,0 @@
import ./backends
type BackendUtils* = ref object of RootObj
method initializeCircomBackend*(
self: BackendUtils, r1csFile: string, wasmFile: string, zKeyFile: string
): AnyBackend {.base, gcsafe.} =
CircomCompat.init(r1csFile, wasmFile, zKeyFile)


@ -1,93 +0,0 @@
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
##
import pkg/chronos
import pkg/chronicles
import pkg/circomcompat
import pkg/poseidon2
import pkg/questionable/results
import pkg/libp2p/cid
import ../../manifest
import ../../merkletree
import ../../stores
import ../../market
import ../../utils/poseidon2digest
import ../../conf
import ../builder
import ../sampler
import ./backends
import ../types
export backends
logScope:
topics = "codex prover"
type
AnyProof* = CircomProof
AnySampler* = Poseidon2Sampler
# add any other generic type here, eg. Poseidon2Sampler | ReinforceConcreteSampler
AnyBuilder* = Poseidon2Builder
# add any other generic type here, eg. Poseidon2Builder | ReinforceConcreteBuilder
AnyProofInputs* = ProofInputs[Poseidon2Hash]
Prover* = ref object of RootObj
backend: AnyBackend
store: BlockStore
nSamples: int
proc prove*(
self: Prover, slotIdx: int, manifest: Manifest, challenge: ProofChallenge
): Future[?!(AnyProofInputs, AnyProof)] {.async: (raises: [CancelledError]).} =
## Prove a statement using backend.
## Returns a future that resolves to a proof.
logScope:
cid = manifest.treeCid
slot = slotIdx
challenge = challenge
trace "Received proof challenge"
without builder =? AnyBuilder.new(self.store, manifest), err:
error "Unable to create slots builder", err = err.msg
return failure(err)
without sampler =? AnySampler.new(slotIdx, self.store, builder), err:
error "Unable to create data sampler", err = err.msg
return failure(err)
without proofInput =? await sampler.getProofInput(challenge, self.nSamples), err:
error "Unable to get proof input for slot", err = err.msg
return failure(err)
# prove slot
without proof =? self.backend.prove(proofInput), err:
error "Unable to prove slot", err = err.msg
return failure(err)
success (proofInput, proof)
proc verify*(
self: Prover, proof: AnyProof, inputs: AnyProofInputs
): Future[?!bool] {.async: (raises: [CancelledError]).} =
## Verify a proof against the given public inputs using the backend.
## Returns a future that resolves to a boolean.
self.backend.verify(proof, inputs)
proc new*(
_: type Prover, store: BlockStore, backend: AnyBackend, nSamples: int
): Prover =
Prover(store: store, backend: backend, nSamples: nSamples)


@ -1,8 +0,0 @@
import ./sampler/sampler
import ./sampler/utils
import ../merkletree
export sampler, utils
type Poseidon2Sampler* = DataSampler[Poseidon2Tree, Poseidon2Hash]


@ -1,138 +0,0 @@
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sugar
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/stew/arrayops
import ../../logutils
import ../../market
import ../../blocktype as bt
import ../../merkletree
import ../../manifest
import ../../stores
import ../converters
import ../builder
import ../types
import ./utils
logScope:
topics = "codex datasampler"
type DataSampler*[T, H] = ref object of RootObj
index: Natural
blockStore: BlockStore
builder: SlotsBuilder[T, H]
func getCell*[T, H](
self: DataSampler[T, H], blkBytes: seq[byte], blkCellIdx: Natural
): seq[H] =
let
cellSize = self.builder.cellSize.uint64
dataStart = cellSize * blkCellIdx.uint64
dataEnd = dataStart + cellSize
doAssert (dataEnd - dataStart) == cellSize, "Invalid cell size"
blkBytes[dataStart ..< dataEnd].elements(H).toSeq()
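# --- Editor's note: illustrative sketch, not part of the original file. ---
# The cell-slicing arithmetic used in `getCell` above, assuming a 2048-byte
# cell size: block-local cell index 3 covers block bytes [6144, 8192).
when isMainModule:
  let
    cellSize = 2048'u64 # assumed cell size
    blkCellIdx = 3'u64 # assumed block-local cell index
    dataStart = cellSize * blkCellIdx
    dataEnd = dataStart + cellSize
  doAssert dataStart == 6144'u64
  doAssert dataEnd == 8192'u64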
proc getSample*[T, H](
self: DataSampler[T, H], cellIdx: int, slotTreeCid: Cid, slotRoot: H
): Future[?!Sample[H]] {.async: (raises: [CancelledError]).} =
let
cellsPerBlock = self.builder.numBlockCells
blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index
blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index
origBlockIdx = self.builder.slotIndices(self.index)[blkSlotIdx]
# convert to original dataset block index
logScope:
cellIdx = cellIdx
blkSlotIdx = blkSlotIdx
blkCellIdx = blkCellIdx
origBlockIdx = origBlockIdx
trace "Retrieving sample from block tree"
let
(_, proof) = (await self.blockStore.getCidAndProof(slotTreeCid, blkSlotIdx.Natural)).valueOr:
return failure("Failed to get slot tree CID and proof")
slotProof = proof.toVerifiableProof().valueOr:
return failure("Failed to get verifiable proof")
(bytes, blkTree) = (await self.builder.buildBlockTree(origBlockIdx, blkSlotIdx)).valueOr:
return failure("Failed to build block tree")
cellData = self.getCell(bytes, blkCellIdx)
cellProof = blkTree.getProof(blkCellIdx).valueOr:
return failure("Failed to get proof from block tree")
success Sample[H](cellData: cellData, merklePaths: (cellProof.path & slotProof.path))
proc getProofInput*[T, H](
self: DataSampler[T, H], entropy: ProofChallenge, nSamples: Natural
): Future[?!ProofInputs[H]] {.async: (raises: [CancelledError]).} =
## Generate proofs as input to the proving circuit.
##
let
entropy = H.fromBytes(array[31, byte].initCopyFrom(entropy[0 .. 30]))
# truncate to 31 bytes, otherwise it _might_ be greater than mod
verifyTree = self.builder.verifyTree.toFailure.valueOr:
return failure("Failed to get verify tree")
slotProof = verifyTree.getProof(self.index).valueOr:
return failure("Failed to get slot proof")
datasetRoot = verifyTree.root().valueOr:
return failure("Failed to get dataset root")
slotTreeCid = self.builder.manifest.slotRoots[self.index]
slotRoot = self.builder.slotRoots[self.index]
cellIdxs = entropy.cellIndices(slotRoot, self.builder.numSlotCells, nSamples)
logScope:
cells = cellIdxs
trace "Collecting input for proof"
let samples = collect(newSeq):
for cellIdx in cellIdxs:
(await self.getSample(cellIdx, slotTreeCid, slotRoot)).valueOr:
return failure("Failed to get sample")
success ProofInputs[H](
entropy: entropy,
datasetRoot: datasetRoot,
slotProof: slotProof.path,
nSlotsPerDataSet: self.builder.numSlots,
nCellsPerSlot: self.builder.numSlotCells,
slotRoot: slotRoot,
slotIndex: self.index,
samples: samples,
)
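# --- Editor's note: illustrative sketch, not part of the original file. ---
# The entropy truncation used in `getProofInput` above keeps only the first
# 31 of the 32 challenge bytes so that the resulting field element stays
# below the BN254 modulus.
import pkg/stew/arrayops

when isMainModule:
  var challenge: array[32, byte]
  challenge[31] = 0xFF'u8 # the byte dropped by the truncation
  let truncated = array[31, byte].initCopyFrom(challenge[0 .. 30])
  doAssert truncated.len == 31
  doAssert truncated[30] == 0'u8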
proc new*[T, H](
_: type DataSampler[T, H],
index: Natural,
blockStore: BlockStore,
builder: SlotsBuilder[T, H],
): ?!DataSampler[T, H] =
if index > builder.slotRoots.high:
error "Slot index is out of range"
return failure("Slot index is out of range")
if not builder.verifiable:
return failure("Cannot instantiate DataSampler for non-verifiable builder")
success DataSampler[T, H](index: index, blockStore: blockStore, builder: builder)


@ -1,75 +0,0 @@
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/bitops
import pkg/questionable/results
import pkg/constantine/math/arithmetic
import ../../merkletree
func extractLowBits*[n: static int](elm: BigInt[n], k: int): uint64 =
doAssert(k > 0 and k <= 64)
var r = 0'u64
for i in 0 ..< k:
let b = bit[n](elm, i)
let y = uint64(b)
if (y != 0):
r = bitor(r, 1'u64 shl i)
r
func extractLowBits(fld: Poseidon2Hash, k: int): uint64 =
let elm: BigInt[254] = fld.toBig()
return extractLowBits(elm, k)
func floorLog2*(x: int): int =
doAssert (x > 0)
var k = -1
var y = x
while (y > 0):
k += 1
y = y shr 1
return k
func ceilingLog2*(x: int): int =
doAssert (x > 0)
return (floorLog2(x - 1) + 1)
func toBlkInSlot*(cell: Natural, numCells: Natural): Natural =
let log2 = ceilingLog2(numCells)
doAssert(1 shl log2 == numCells, "`numCells` is assumed to be a power of two")
return cell div numCells
func toCellInBlk*(cell: Natural, numCells: Natural): Natural =
let log2 = ceilingLog2(numCells)
doAssert(1 shl log2 == numCells, "`numCells` is assumed to be a power of two")
return cell mod numCells
func cellIndex*(
entropy: Poseidon2Hash, slotRoot: Poseidon2Hash, numCells: Natural, counter: Natural
): Natural =
let log2 = ceilingLog2(numCells)
doAssert(1 shl log2 == numCells, "`numCells` is assumed to be a power of two")
let hash = Sponge.digest(@[entropy, slotRoot, counter.toF], rate = 2)
return int(extractLowBits(hash, log2))
func cellIndices*(
entropy: Poseidon2Hash,
slotRoot: Poseidon2Hash,
numCells: Natural,
nSamples: Natural,
): seq[Natural] =
var indices: seq[Natural]
for i in 1 .. nSamples:
indices.add(cellIndex(entropy, slotRoot, numCells, i))
indices
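# --- Editor's note: illustrative sketch, not part of the original file. ---
# Worked example of the index math above, assuming 32 cells per block and
# 512 cells per slot (both powers of two, as the doAsserts require):
# ceilingLog2(512) = 9, so cellIndex keeps the low 9 bits of the sponge hash,
# and cell 100 maps to block 100 div 32 = 3, local cell 100 mod 32 = 4.
when isMainModule:
  let
    cellsPerBlock = 32 # assumed
    cellIdx = 100 # assumed sampled cell index
  doAssert cellIdx div cellsPerBlock == 3 # mirrors toBlkInSlot
  doAssert cellIdx mod cellsPerBlock == 4 # mirrors toCellInBlk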


@ -1,30 +0,0 @@
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
type
Sample*[H] = object
cellData*: seq[H]
merklePaths*: seq[H]
PublicInputs*[H] = object
slotIndex*: int
datasetRoot*: H
entropy*: H
ProofInputs*[H] = object
entropy*: H
datasetRoot*: H
slotIndex*: Natural
slotRoot*: H
nCellsPerSlot*: Natural
nSlotsPerDataSet*: Natural
slotProof*: seq[H]
# inclusion proof that shows that the slot root (leaf) is part of the dataset (root)
samples*: seq[Sample[H]]
# inclusion proofs which show that the selected cells (leafs) are part of the slot (roots)


@ -54,8 +54,7 @@ proc new*(
method `size`*(self: StoreStream): int =
## The size of a StoreStream is the size of the original dataset, without
## padding or parity blocks.
let m = self.manifest
(if m.protected: m.originalDatasetSize else: m.datasetSize).int
self.manifest.datasetSize.int
proc `size=`*(self: StoreStream, size: int) {.error: "Setting the size is forbidden".} =
discard


@ -1,151 +0,0 @@
import std/sets
import std/sequtils
import pkg/chronos
import pkg/questionable/results
import pkg/stew/endians2
import ./validationconfig
import ./market
import ./clock
import ./logutils
export market
export sets
export validationconfig
type Validation* = ref object
slots: HashSet[SlotId]
clock: Clock
market: Market
subscriptions: seq[Subscription]
running: Future[void]
periodicity: Periodicity
proofTimeout: uint64
config: ValidationConfig
logScope:
topics = "codex validator"
proc new*(
_: type Validation, clock: Clock, market: Market, config: ValidationConfig
): Validation =
Validation(clock: clock, market: market, config: config)
proc slots*(validation: Validation): seq[SlotId] =
validation.slots.toSeq
proc getCurrentPeriod(validation: Validation): Period =
return validation.periodicity.periodOf(validation.clock.now().Timestamp)
proc waitUntilNextPeriod(validation: Validation) {.async.} =
let period = validation.getCurrentPeriod()
let periodEnd = validation.periodicity.periodEnd(period)
trace "Waiting until next period", currentPeriod = period
await validation.clock.waitUntil((periodEnd + 1).toSecondsSince1970)
func groupIndexForSlotId*(slotId: SlotId, validationGroups: ValidationGroups): uint16 =
let a = slotId.toArray
let slotIdInt64 = uint64.fromBytesBE(a)
(slotIdInt64 mod uint64(validationGroups)).uint16
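# --- Editor's note: illustrative sketch, not part of the original file. ---
# Group assignment as computed above, on plain numbers: with 100 validation
# groups and a slot id whose big-endian uint64 prefix reads 12345, the slot
# belongs to group 12345 mod 100 = 45.
when isMainModule:
  doAssert 12345'u64 mod 100'u64 == 45'u64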
func maxSlotsConstraintRespected(validation: Validation): bool =
validation.config.maxSlots == 0 or validation.slots.len < validation.config.maxSlots
func shouldValidateSlot(validation: Validation, slotId: SlotId): bool =
without validationGroups =? validation.config.groups:
return true
groupIndexForSlotId(slotId, validationGroups) == validation.config.groupIndex
proc subscribeSlotFilled(validation: Validation) {.async.} =
proc onSlotFilled(requestId: RequestId, slotIndex: uint64) =
if not validation.maxSlotsConstraintRespected:
return
let slotId = slotId(requestId, slotIndex)
if validation.shouldValidateSlot(slotId):
trace "Adding slot", slotId
validation.slots.incl(slotId)
let subscription = await validation.market.subscribeSlotFilled(onSlotFilled)
validation.subscriptions.add(subscription)
proc removeSlotsThatHaveEnded(validation: Validation) {.async.} =
var ended: HashSet[SlotId]
let slots = validation.slots
for slotId in slots:
let state = await validation.market.slotState(slotId)
if state != SlotState.Filled:
trace "Removing slot", slotId, slotState = state
ended.incl(slotId)
validation.slots.excl(ended)
proc markProofAsMissing(
validation: Validation, slotId: SlotId, period: Period
) {.async: (raises: [CancelledError]).} =
logScope:
currentPeriod = validation.getCurrentPeriod()
try:
if await validation.market.canMarkProofAsMissing(slotId, period):
trace "Marking proof as missing", slotId, periodProofMissed = period
await validation.market.markProofAsMissing(slotId, period)
else:
let inDowntime {.used.} = await validation.market.inDowntime(slotId)
trace "Proof not missing", checkedPeriod = period, inDowntime
except CancelledError as e:
raise e
except CatchableError as e:
error "Marking proof as missing failed", msg = e.msg
proc markProofsAsMissing(validation: Validation) {.async: (raises: [CancelledError]).} =
let slots = validation.slots
for slotId in slots:
let previousPeriod = validation.getCurrentPeriod() - 1
await validation.markProofAsMissing(slotId, previousPeriod)
proc run(validation: Validation) {.async: (raises: [CancelledError]).} =
trace "Validation started"
try:
while true:
await validation.waitUntilNextPeriod()
await validation.removeSlotsThatHaveEnded()
await validation.markProofsAsMissing()
except CancelledError:
trace "Validation stopped"
discard # do not propagate as run is asyncSpawned
except CatchableError as e:
error "Validation failed", msg = e.msg
proc findEpoch(validation: Validation, secondsAgo: uint64): SecondsSince1970 =
return validation.clock.now - secondsAgo.int64
proc restoreHistoricalState(validation: Validation) {.async.} =
trace "Restoring historical state..."
let requestDurationLimit = await validation.market.requestDurationLimit
let startTimeEpoch = validation.findEpoch(secondsAgo = requestDurationLimit)
let slotFilledEvents =
await validation.market.queryPastSlotFilledEvents(fromTime = startTimeEpoch)
for event in slotFilledEvents:
if not validation.maxSlotsConstraintRespected:
break
let slotId = slotId(event.requestId, event.slotIndex)
let slotState = await validation.market.slotState(slotId)
if slotState == SlotState.Filled and validation.shouldValidateSlot(slotId):
trace "Adding slot [historical]", slotId
validation.slots.incl(slotId)
trace "Historical state restored", numberOfSlots = validation.slots.len
proc start*(validation: Validation) {.async.} =
trace "Starting validator",
groups = validation.config.groups, groupIndex = validation.config.groupIndex
validation.periodicity = await validation.market.periodicity()
validation.proofTimeout = await validation.market.proofTimeout()
await validation.subscribeSlotFilled()
await validation.restoreHistoricalState()
validation.running = validation.run()
proc stop*(validation: Validation) {.async.} =
if not validation.running.isNil and not validation.running.finished:
await validation.running.cancelAndWait()
while validation.subscriptions.len > 0:
let subscription = validation.subscriptions.pop()
await subscription.unsubscribe()


@ -1,35 +0,0 @@
import std/strformat
import pkg/questionable
import pkg/questionable/results
type
ValidationGroups* = range[2 .. 65535]
MaxSlots* = int
ValidationConfig* = object
maxSlots: MaxSlots
groups: ?ValidationGroups
groupIndex: uint16
func init*(
_: type ValidationConfig,
maxSlots: MaxSlots,
groups: ?ValidationGroups,
groupIndex: uint16 = 0,
): ?!ValidationConfig =
if maxSlots < 0:
return failure "The value of maxSlots must be greater than " &
fmt"or equal to 0! (got: {maxSlots})"
if validationGroups =? groups and groupIndex >= uint16(validationGroups):
return failure "The value of the group index must be less than " &
fmt"validation groups! (got: {groupIndex = }, " & fmt"groups = {validationGroups})"
success ValidationConfig(maxSlots: maxSlots, groups: groups, groupIndex: groupIndex)
func maxSlots*(config: ValidationConfig): MaxSlots =
config.maxSlots
func groups*(config: ValidationConfig): ?ValidationGroups =
config.groups
func groupIndex*(config: ValidationConfig): uint16 =
config.groupIndex


@ -59,7 +59,7 @@ template callEventCallback(ctx: ptr StorageContext, eventName: string, body: unt
## Template used to notify the client of global events
## Example: onConnectionChanged, onProofMissing, etc.
if isNil(ctx[].eventCallback):
error eventName & " - eventCallback is nil"
error eventName&" - eventCallback is nil"
return
foreignThreadGc:


@ -69,9 +69,6 @@ proc readValue*(r: var JsonReader, val: var Duration) =
raise newException(SerializationError, "Cannot parse the duration: " & input)
val = dur
proc readValue*(r: var JsonReader, val: var EthAddress) =
val = EthAddress.init(r.readValue(string)).get()
type NodeLifecycleRequest* = object
operation: NodeLifecycleMsgType
configJson: cstring


@ -32,42 +32,11 @@ components:
description: Content Identifier as specified at https://github.com/multiformats/cid
example: QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
SlotId:
type: string
description: Keccak hash of the abi encoded tuple (RequestId, slot index)
example: 268a781e0db3f7cf36b18e5f4fdb7f586ec9edd08e5500b17c0e518a769f114a
LogLevel:
type: string
description: "One of the log levels: TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL"
example: DEBUG
EthereumAddress:
type: string
description: An Ethereum address
PricePerBytePerSecond:
type: string
description: The amount of tokens per byte per second per slot that the client is willing to pay to hosts
CollateralPerByte:
type: string
description: Number as decimal string that represents how much collateral per byte is asked from hosts that want to fill a slot
Duration:
type: integer
format: int64
description: The duration of the request in seconds
ProofProbability:
type: string
description: How often storage proofs are required as decimal string
Expiry:
type: integer
format: int64
description: A timestamp as seconds since unix epoch at which this request expires if the Request does not find the requested number of nodes to host the data.
SPR:
type: string
description: Signed Peer Record (libp2p)
@ -86,15 +55,6 @@ components:
id:
$ref: "#/components/schemas/PeerId"
Content:
type: object
required:
- cid
description: Parameters specifying the content
properties:
cid:
$ref: "#/components/schemas/Cid"
Node:
type: object
required:
@ -124,9 +84,6 @@ components:
revision:
type: string
example: 0c647d8
contracts:
type: string
example: 0b537c7
PeersTable:
type: object
@ -172,251 +129,6 @@ components:
storage:
$ref: "#/components/schemas/StorageVersion"
SalesAvailability:
type: object
required:
- totalSize
- duration
- minPricePerBytePerSecond
- totalCollateral
properties:
totalSize:
type: integer
format: int64
description: Total size of availability's storage in bytes
duration:
$ref: "#/components/schemas/Duration"
minPricePerBytePerSecond:
type: string
description: Minimum price per byte per second (in amount of tokens, as decimal string) paid for the hosted request's slot for the request's duration
totalCollateral:
type: string
description: Total collateral (in amount of tokens) that can be used for matching requests
enabled:
type: boolean
description: Enable the ability to receive sales on this availability.
default: true
until:
type: integer
description: Specifies the latest timestamp, after which the availability will no longer host any slots. If set to 0, there will be no restrictions.
default: 0
SalesAvailabilityREAD:
required:
- id
- totalRemainingCollateral
- freeSize
allOf:
- $ref: "#/components/schemas/SalesAvailability"
- type: object
properties:
id:
$ref: "#/components/schemas/Id"
readonly: true
freeSize:
type: integer
format: int64
description: Unused size of the availability's storage in bytes
readOnly: true
totalRemainingCollateral:
type: string
description: Total collateral effective (in amount of tokens) that can be used for matching requests
readOnly: true
Slot:
type: object
required:
- id
- request
- slotIndex
properties:
id:
$ref: "#/components/schemas/SlotId"
request:
$ref: "#/components/schemas/StorageRequest"
slotIndex:
type: integer
format: int64
description: Slot Index number
SlotAgent:
type: object
required:
- state
- requestId
- slotIndex
properties:
slotIndex:
type: integer
format: int64
description: Slot Index number
requestId:
$ref: "#/components/schemas/Id"
request:
$ref: "#/components/schemas/StorageRequest"
reservation:
$ref: "#/components/schemas/Reservation"
state:
type: string
description: Description of the slot's state
enum:
- SaleCancelled
- SaleDownloading
- SaleErrored
- SaleFailed
- SaleFilled
- SaleFilling
- SaleFinished
- SaleIgnored
- SaleInitialProving
- SalePayout
- SalePreparing
- SaleProving
- SaleUnknown
Reservation:
type: object
required:
- id
- availabilityId
- size
- requestId
- slotIndex
- validUntil
properties:
id:
$ref: "#/components/schemas/Id"
availabilityId:
$ref: "#/components/schemas/Id"
size:
type: integer
format: int64
description: Size of the slot in bytes
requestId:
$ref: "#/components/schemas/Id"
slotIndex:
type: integer
format: int64
description: Slot Index number
validUntil:
type: integer
description: Timestamp after which the reservation will no longer be valid.
StorageRequestCreation:
type: object
required:
- pricePerBytePerSecond
- duration
- proofProbability
- collateralPerByte
- expiry
properties:
duration:
$ref: "#/components/schemas/Duration"
pricePerBytePerSecond:
$ref: "#/components/schemas/PricePerBytePerSecond"
proofProbability:
$ref: "#/components/schemas/ProofProbability"
nodes:
description: Minimal number of nodes the content should be stored on
type: integer
default: 3
minimum: 3
tolerance:
description: Additional number of nodes on top of the `nodes` property that can be lost before pronouncing the content lost
type: integer
default: 1
minimum: 1
collateralPerByte:
$ref: "#/components/schemas/CollateralPerByte"
expiry:
type: integer
format: int64
description: Number that represents the expiry threshold in seconds from when the Request is submitted. When the threshold is reached and the Request has not found the requested number of nodes to host the data, the Request is voided. The number of seconds cannot be higher than the Request's duration itself.
StorageAsk:
type: object
required:
- slots
- slotSize
- duration
- proofProbability
- pricePerBytePerSecond
- collateralPerByte
- maxSlotLoss
properties:
slots:
description: Number of slots (i.e. hosts) that the Request wants to have the content spread over
type: integer
format: int64
slotSize:
type: integer
format: int64
description: Amount of storage per slot in bytes
duration:
$ref: "#/components/schemas/Duration"
proofProbability:
$ref: "#/components/schemas/ProofProbability"
pricePerBytePerSecond:
$ref: "#/components/schemas/PricePerBytePerSecond"
collateralPerByte:
$ref: "#/components/schemas/CollateralPerByte"
maxSlotLoss:
type: integer
format: int64
description: Max slots that can be lost without the data being considered lost
StorageRequest:
type: object
required:
- id
- client
- ask
- content
- expiry
- nonce
properties:
id:
type: string
description: Request ID
client:
$ref: "#/components/schemas/EthereumAddress"
ask:
$ref: "#/components/schemas/StorageAsk"
content:
$ref: "#/components/schemas/Content"
expiry:
$ref: "#/components/schemas/Expiry"
nonce:
type: string
description: Random data
Purchase:
type: object
required:
- state
- requestId
properties:
state:
type: string
description: Description of the Request's state
enum:
- cancelled
- errored
- failed
- finished
- pending
- started
- submitted
- unknown
error:
type: string
nullable: true
description: If Request failed, then here is presented the error message
request:
$ref: "#/components/schemas/StorageRequest"
requestId:
$ref: "#/components/schemas/Id"
DataList:
type: object
required:
@ -444,7 +156,6 @@ components:
- treeCid
- datasetSize
- blockSize
- protected
properties:
treeCid:
$ref: "#/components/schemas/Cid"
@ -456,9 +167,6 @@ components:
blockSize:
type: integer
description: "Size of blocks"
protected:
type: boolean
description: "Indicates if content is protected by erasure-coding"
filename:
type: string
nullable: true
@ -499,8 +207,6 @@ servers:
- url: "http://localhost:8080/api/storage/v1"
tags:
- name: Marketplace
description: Marketplace information and operations
- name: Data
description: Data operations
- name: Node
@ -771,237 +477,6 @@ paths:
"500":
description: "It's not working as planned"
"/sales/slots":
get:
summary: "Returns active slots"
tags: [Marketplace]
operationId: getActiveSlots
responses:
"200":
description: Retrieved active slots
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/Slot"
"503":
description: Persistence is not enabled
"/sales/slots/{slotId}":
get:
summary: "Returns active slot with id {slotId} for the host"
tags: [Marketplace]
operationId: getActiveSlotById
parameters:
- in: path
name: slotId
required: true
schema:
$ref: "#/components/schemas/Cid"
description: ID of the active slot.
responses:
"200":
description: Retrieved active slot
content:
application/json:
schema:
$ref: "#/components/schemas/SlotAgent"
"400":
description: Invalid or missing SlotId
"404":
description: Host is not in an active sale for the slot
"503":
description: Persistence is not enabled
"/sales/availability":
get:
summary: "Returns storage that is for sale"
tags: [Marketplace]
operationId: getAvailabilities
responses:
"200":
description: Retrieved storage availabilities of the node
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/SalesAvailabilityREAD"
"500":
description: Error getting unused availabilities
"503":
description: Persistence is not enabled
post:
summary: "Offers storage for sale"
operationId: offerStorage
tags: [Marketplace]
requestBody:
content:
application/json:
schema:
$ref: "#/components/schemas/SalesAvailability"
responses:
"201":
description: Created storage availability
content:
application/json:
schema:
$ref: "#/components/schemas/SalesAvailabilityREAD"
"400":
description: Invalid data input
"422":
description: Not enough node's storage quota available or the provided parameters did not pass validation
"500":
description: Error reserving availability
"503":
description: Persistence is not enabled
"/sales/availability/{id}":
patch:
summary: "Updates availability"
description: |
The new parameters will be only considered for new requests.
Existing Requests linked to this Availability will continue as is.
operationId: updateOfferedStorage
tags: [Marketplace]
parameters:
- in: path
name: id
required: true
schema:
type: string
description: ID of Availability
requestBody:
content:
application/json:
schema:
$ref: "#/components/schemas/SalesAvailability"
responses:
"204":
description: Availability successfully updated
"400":
description: Invalid data input
"404":
description: Availability not found
"422":
description: The provided parameters did not pass validation
"500":
description: Error reserving availability
"503":
description: Persistence is not enabled
"/sales/availability/{id}/reservations":
get:
summary: "Get availability's reservations"
description: Returns a list of Reservations for ongoing Storage Requests that the node hosts.
operationId: getReservations
tags: [Marketplace]
parameters:
- in: path
name: id
required: true
schema:
type: string
description: ID of Availability
responses:
"200":
description: Retrieved storage availabilities of the node
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/Reservation"
"400":
description: Invalid Availability ID
"404":
description: Availability not found
"500":
description: Error getting reservations
"503":
description: Persistence is not enabled
"/storage/request/{cid}":
post:
summary: "Creates a new Request for storage"
tags: [Marketplace]
operationId: createStorageRequest
parameters:
- in: path
name: cid
required: true
schema:
$ref: "#/components/schemas/Cid"
description: CID of the uploaded data that should be stored
requestBody:
content:
application/json:
schema:
$ref: "#/components/schemas/StorageRequestCreation"
responses:
"200":
description: Returns the Request ID as decimal string
content:
text/plain:
schema:
type: string
"400":
description: Invalid or missing Request ID
"422":
description: The storage request parameters are not valid
"404":
description: Request ID not found
"503":
description: Persistence is not enabled
"/storage/purchases":
get:
summary: "Returns list of purchase IDs"
tags: [Marketplace]
operationId: getPurchases
responses:
"200":
description: Gets all purchase IDs stored in node
content:
application/json:
schema:
type: array
items:
type: string
"503":
description: Persistence is not enabled
"/storage/purchases/{id}":
get:
summary: "Returns purchase details"
tags: [Marketplace]
operationId: getPurchase
parameters:
- in: path
name: id
required: true
schema:
type: string
description: Hexadecimal ID of a Purchase
responses:
"200":
description: Purchase details
content:
application/json:
schema:
$ref: "#/components/schemas/Purchase"
"400":
description: Invalid or missing Purchase ID
"404":
description: Purchase not found
"503":
description: Persistence is not enabled
"/spr":
get:
summary: "Get Node's SPR"


@ -1,527 +0,0 @@
{
"dataSetRoot": "16074246370508166450132968585287196391860062495017081813239200574579640171677"
, "entropy": "1234567"
, "nCellsPerSlot": 512
, "nSlotsPerDataSet": 11
, "slotIndex": 3
, "slotRoot": "20744935707483803411869804102043283881376973626291244537230284476834672019997"
, "slotProof":
[ "14279309641024220656349577745390262299143357053971618723978902485113885925133"
, "17350220251883387610715080716935498684002984280929482268590417788651882821293"
, "3614172556528990402229172918446087216573760062512459539027101853103043539066"
, "9593656216696187567506330076677122799107266567595923589824071605501987205034"
, "0"
, "0"
, "0"
, "0"
]
, "cellData":
[ [ "211066599696340205996365563960462032209214145564176017965177408819390441927"
, "256399834032317991719099034134771774537377713676282398278615627599320306708"
, "40526956212941839024868120947422067322935297516255336725720469887577875470"
, "369406626072040375689238003388146123438765868500054546379159741776926336393"
, "333671948877941061129138970028865844558745266314244224980179694307884999701"
, "12844191661993811401197260475054004253686019503294245287625435635597431176"
, "103242505924551040184986153577926463300854479121445566984435820844932904541"
, "357267598134410031301285377503939045679462829143562392803919346036584141082"
, "162961530392479745020607594774288130869393650464001426863668385541077786641"
, "426633666684068667053061341108297711407154520752589357873877687002123230254"
, "131217200633679697678620573903653316618851166977399592012996945149000115543"
, "347806146382240882424437318028620747118202052207615339887983883245341422889"
, "373560143578047669373240014805175743607410583882636003337120578008437374619"
, "188643112726610812698950916978832639394489168469865816132268079928321994342"
, "261643693073361806247543578456646407192325702146752104760117340650316255422"
, "260425332276127964154119199351807753107358504026338378706506655351595199132"
, "374895089121103633563000003194929058314955925591115574099805066048344387554"
, "251251687633538360627384151887287228926166832908633974282160065378311171354"
, "72870348025150463132527129203816383011245664502016571773976961034605631401"
, "234969517550818492643515432666311755833657223377594670026839098818269671638"
, "250704662734070531273640113211555977086430125469327371276827055724111014200"
, "85287059658255939741261887818611116570376488685014337052369839580946343903"
, "148959658976765873884541474400081762855732313547557243964921157967555048302"
, "402116967301520959272239788104745348344918207829380669065019055837727389479"
, "440503687192139964066297025050080823601005280790824714962651368433530759519"
, "149064344353438643307231355233484617851197634669308957706338957575177745645"
, "249140840255377018814348914718469942883867200392561109698520706525194687651"
, "108796851176515124780842490199733462942992752881710253277179665118758071359"
, "168245155425564161902686596247453762364112240129335852645432169468767513906"
, "129930759770229612264501691941454447321585806001002088749773920103899362070"
, "26204732465033162738545933008873662562758704699080684615280202127289894343"
, "434343986187775289860542785529657444690073100436887472033336117760907652966"
, "361202432740487795596808128962740911600093857340619816047190021218849540225"
, "100616813001075101816809823591611435583084916492802624686700765550893945525"
, "262383566766515164611427346701355047932794351290691325516723194829671679460"
, "223966317895663049002893008659178463136086169222436544014405848127792334099"
, "416071800998333357259662686053338495720384342746822618737948080251761863079"
, "402582378631671531909245563300554883898015688826468151075059467077182712018"
, "271682867846395286938993638577506552857925968097084028550962231439839229096"
, "447239701195120035004067146414333903010840427278848992921567785105217019890"
, "275718367441702584521943480326858762208121719038684001399322597215141179102"
, "86424132534636958411139699704035287483951667275825975536719519441147485752"
, "149332313586183975744469174256094358432564607635406143904268565140988616920"
, "431284330776421588418608279008210842837281123158951642164956884286883748089"
, "328694410745471749523135644660455669483988686888634622076863114197617693825"
, "112671940998917362968156144648543607958275336559773039070338509488400224090"
, "40612851250697989627190554726382690498263128439797780029697069621854862060"
, "235047914228675997216342104196597257178021277585839376175077878186492271543"
, "169718735151210135199527910197065439221144015957220768545119706561079163228"
, "345850109040121443591415752965486014695272086901102608769402892906795715635"
, "107916794601837835951508003838161872232087225679609623116098837565956752373"
, "415195406060373162425374246348197423165008252112453298469572523506488563795"
, "18574536720926634955170276058049993354780447816096707123565164996905722992"
, "77316964375201096801231570737992491072607736322255206149311341101525354519"
, "198566211140075666401818378905444403588640345933666108724809349396921957675"
, "71926050707400318807625168942501384117254391471312636171205913503928815127"
, "303403754792341398295052586417779761162194818661412867094096550880325459639"
, "444796230931706624375881141460151785952798771079111017071124833045933389733"
, "430832727519144759265424205289522948157007336118070755365887670730658782114"
, "75431213985866235726407973044434444984663930761207296437571668004273515965"
, "9242635103653159191249730220870735855877366952081272723035956668095954838"
, "93770678769846326584848478152412123903909949977598807336203128684179492141"
, "438043261966019084676377174988310087831395864304423411701911688757793135582"
, "175357918416563657734972138036003712114814934655676571874083109097142591069"
, "301619681954194702458985259161884119574424456150215738560639417824784261940"
, "376627771252167062559065889174364784087884871999807562320457079200311413098"
, "77407"
]
, [ "424838222746158988229624788694939151178615656210585621868910231014323837551"
, "113188486246955346418956949679485575685258850346101035778277727456423482970"
, "275449978206132565019222655023969430014622832597654643418394602485645803413"
, "407856757018138010439232009766252068440920591566039673660874626878413077905"
, "433369046493777496016384947949950877133856528218602671493669395706908819748"
, "258364166531180422149545015891786023981872586904946376136311798402581278793"
, "111997719221028147522803956659709775148434180015507797582340706052412284571"
, "370086597976426508280413876667101417393598181708677558733730556109327409076"
, "394139601979449259075910238117153992797849553309541269624848742084205563806"
, "224088276319080487199395482893988152025671468715318212801266537036640477323"
, "412710245119809501914481942088314642684754542082140451180970198371889738885"
, "353872602359946553306242341162348980635834907495492814598834657175405697176"
, "252575199506028475372678621140654219936768774012823764047177692104580452933"
, "259093153824033122452869556249315839899366493071746219770487886456301642099"
, "433829976798312333371154167497560676982294392632725612538640639617101218872"
, "69918581382122563555200898078544150952625715196904114153232367538572342772"
, "337937520623192257595352158476909569245988839238498098464935654555688460123"
, "264739973232292969253318643276671532055689422253184191167449284055983944338"
, "326116252818829775096345069850111970510714050346103409479803743342138260656"
, "236666502377413649728378889488706275212721356921124776708954261777813709815"
, "211625935799984260567718228446525455893664313064841539301444509150157287163"
, "60213206239417039999880027112341976360540689886703427811513517396638607512"
, "68310118105957780876770075529546844404225720757669797609686816545988561625"
, "423863085551351065136684030270731679105571943009795949621903966660399419936"
, "388914614294393005039878123500859325222684672184567792659076815268598434245"
, "449456790291560508709069826219925144971979653209755565240911568965768874382"
, "448810363770763694447869940916735951256986784286793489549428379909616059117"
, "93646909783664049092056237949587618925209622020026157405117796611689551192"
, "352210795298632954574896499649181574584074853828419384742874364724522457331"
, "37455517056393404525863484733101879886413925183061645520768912552476716150"
, "386617357584684336812125385078476270301738184058813703112840991226785114117"
, "309940292044597334261558429176136686101590729982259514472573656131579113438"
, "375815246167575100319857872432879650174355611853064771241069582477717074415"
, "332214507344122806007757734266883566559371568252728459951766124888176633706"
, "148990259460952914990881100852534318351247069504848477833147446514732789712"
, "328669527889838880414072022433859139004058211332184916573516704632073044118"
, "39278026039348543645873027549112998051601664395028652771103624299930924528"
, "147717660530589785119644237145092759103012624422229579698752386490700965238"
, "374018518701362017594752095877197725242352803195413267746619111489936980685"
, "19185486483883210730969367354195688373879769005461272861759636600984416877"
, "61866046506558157021682973167090213780467780519546382332208868591026703563"
, "186854966504766517012887726614015646154225796572138017810371160981778288347"
, "87813550250328892091332566928942844770632705056120813488729800874811845697"
, "207775163424060266085108794048371834145545842567796157378282772999548202308"
, "369987573847786237689249538753881486995686208870889713177975415012214427429"
, "240880979016395044518849230927615466120209140082149273390921042537474853143"
, "174902051454932913375934735427101804474275543418199101786687925733405159872"
, "342217255652950822372803598682842961053537267723988087801275319754065261308"
, "403207518067666945448161377960706451817747922771285796668778802535227939962"
, "407191459999036791052261270163259267557900498930952521056725210031161568230"
, "338216472523551728793268225845396218561132966393725938551091882807069657206"
, "118364222678576550870375112494142500603091119946985793934499723872824782886"
, "269721611028665323587192624288165848310917029418261851436925829954710472436"
, "227424498125745236125352117206136621428869662458452610254773560636280935711"
, "334380807433339401906359555583987757411855090694162252260781648609761248049"
, "42470806516174819075107446234449247453971524726021445768611797530804156161"
, "418994916402918322951830716592888390611524984156817012683478842068581638820"
, "363263142412048420546716019774090729399081311227606555141174736853886128407"
, "192292715923468025058557166341646729623133127372303791236447550026886802680"
, "450253878713722337767292128303865371116770532625906889925779639839924402495"
, "412596147086332805611392200560087191411541689130482740065137300252639590489"
, "264059866105067484811456793906835462997849523506366903974724979936196358724"
, "80922384260325792825608274004101667366364502159441286540209512108302572137"
, "69261112192907071823876894642934902051254647002357333297635511793652550535"
, "342810644068354896385837929029331085645928473943142618800192452300937098227"
, "228361826362950356801549793202622774869100858404479869989505302905936946659"
, "89244"
]
, [ "359926778925154809567585559910064420821311221299937568759183366972901588855"
, "128688825421597555299113005875649976344934035310192572516197551102045945994"
, "225379354482053502673695304662016054746658452775856157938886424152134693969"
, "321872319934407904743844034025550739031781361848065513098922085967524180784"
, "250375637325405951645070615947051799520095842815922754899017741501395744611"
, "97676493052788080672307677749501730337551225267342432472194527468634722352"
, "140101187036396881926000630022834247990512766860086619783437252676747730662"
, "428833039353549335335605804240430918631639449874968791377641834408506136850"
, "418359203734539413977740838354554804415161215624809316660001820037711273005"
, "197411877418795659129213175603102238904459737200167987167255995825203749339"
, "221646252100316559257470803343058462853641953756978011126414924636869625612"
, "106393540293584181037890192557883541231531964825708650997196071036779482686"
, "121473330828208543539643554911190528237124877123871673169194452404939833883"
, "234055622144947293638512253368547046093971383516706577723613696162225606040"
, "68307451767502390304445005915787226559811433450721625085741437522802389574"
, "446891883436763112014492564462451523127134145501201571918449345324780633462"
, "83718652783543018019599555197511164121642363321504039439786358267060414949"
, "90267297500929836073049162292104311427365986272517761342871530827272320168"
, "398425606698859520268856768787424690872952910789754531465894080258173664751"
, "323570139379118444589557840594603212198136718240764273769210492735883659788"
, "318597103584099056378057647488068323974418467250708490151864712850204121402"
, "6299083430773359277240726214182464517380839990956316267425262319606033077"
, "27638206326925436960316131682014727983280820447721477666884742925275976240"
, "434344186848869917381375812528446841024676181532946456237439060027443649574"
, "64735754118644738348599761514174981324344130078598038275246522384474432918"
, "53068717269762105498508401788249005564862415051761175636612434108259085043"
, "35813044996911619267309099508360887777226716179396659295580849861836012116"
, "67751791392924142809580984450371772015056060429352159361446914484238646676"
, "68534949135677447506316576616742207938855454921330757052467057435206318183"
, "98510151949547604999069864337574320742530406479752176012935179772005228326"
, "342190252152505345443004241184891966319091967630257822491352072978326623645"
, "362701658859425316334005554473186516818256386066010799465369887406035738447"
, "266999116654850467726292928465517542818678046748008340458185725047959981772"
, "227089355966197874086821090531951502393729872265201602128464978982907992285"
, "240800343500959216904535208047288234867926058830277460630902462914796702354"
, "447956858573680756485556898469710354624642653441041335815079102198306530583"
, "89422712944117481549242421245588048728782658978853365197341587057196539094"
, "72610343179362050463955730204044877712105879926618304878262944723464870506"
, "8676698500519447254981838968537883138182811064381378248657969913325524054"
, "180453700216061196739413267121764366438386031039311941313645439527087166894"
, "63346784016053253727621352882430122335280702556586808389293772066791583857"
, "400031453850139978805133735852120986024101930860924735862305444817383365395"
, "230104622290558570218036071349472289358926019290368625724986905348610140188"
, "175689489221336091369196327293045073133701056385414159213338224521167050830"
, "73310331103509697419315970265031228794034932318600293733074730812482185479"
, "371383255255842707875498538452907102684511927320158672347778293876877893808"
, "165319345890230193939972313953881372394171342391835626454759321320435952720"
, "184753541001210613115361457830691571384268642766106935428207829332011259768"
, "378810733004878432563271790625801205570962762395854282745166380274493181314"
, "86321674336629444862383262780020871828941143514651008200999725989892879714"
, "332634533993388248915777870988529817692938793120418377552247997050250349749"
, "41742010257820712511267405684781534740157292266212120171929853763272599516"
, "224101330592139734390658213442359402546038743346294438455635537496290117560"
, "204363902046137087420878796391135987770418514008394389852388361468850216359"
, "296526036888463156867153847102999031430641220536577556044554029990778763710"
, "137568796227115931047082828377698464185467276723279763304686078869351280509"
, "147456720891843338735232645318045656082153590170441596326500204393398792771"
, "297291342309626392877004635010131510068476699687818509485687346071074942006"
, "20748013593486831233186810444485136836664689496258849465507672301203832324"
, "335431726883875036252568773858744219410188302724423071283643221351691013313"
, "50487384098835523033417523223562560857744547945136829388944024807752630716"
, "425952679139710019732649156938676226492714486376811103362264146658191708598"
, "439787938069461539508728805881194071103269524405653997968488488049426387373"
, "279863410796988495259178322026394289028023166886112128504434877538089779477"
, "398941099058270093463626870965433502581301413791423667994257456160377865247"
, "5759692644185723491187478313102540786562625675495805072053262277490225012"
, "115176"
]
, [ "199440901482393381753657315848210955092644686957900737971520778206058989647"
, "339215657660349719251187938243949119362753238126598750470832739830379601048"
, "17957417011314891245400567671664723859427624136284133790923936126779445290"
, "294761585889095249928492042608516765584723814657871392207964321318076158536"
, "367304199921887970655898837985855454346911090426896946930048519042744277770"
, "173405546837606747721292792526074597519538685230485266741646923399938591491"
, "13202798104529529703580600642858924379886936325402696094624200032343206719"
, "28211272278315691894282764239983301742024079691520980592618486887749800025"
, "73792448247120972778500624350664849847034095641998271809779791788652649022"
, "386961947078838359430674078072441680475090687247027225632133013772954043342"
, "247859266401821616700765969075270662915024391205665146199401830650793676517"
, "243938047874995926342875119559105623088213951205962677439167259642163766960"
, "14909501249861872673329370269106359532506159818320693170564856401208688898"
, "200331653478898243177761429526240803993101536716611440775588088625522029071"
, "127891684617049394579738365914860024509007913559921966744972525605976847919"
, "202912167983786187592861727924433749852786012202809544200943965898118027816"
, "176370650316309755425558132466370508977563252342126855874617990006444464573"
, "179490319446297562731655155074196383539396893457024237113284509223965454107"
, "118703379899134287650980989454755985628620830085932176414465582081598659194"
, "102025594191113707886629488454876652037341696284939671367279050169222988689"
, "421132375430104331136732058548913808473025321492255503838896123601628815453"
, "328334791815856213206267892694535121226673194052951034497930366807851111845"
, "83012322813281668737061895967682970093636853452224812061110092135287899376"
, "329204708391107220275172926348002826875172581958734129645882445887919889321"
, "410748869385474485539728045765785256294532845070137964737879849265390625591"
, "197274807717335387012872999914232051341799797613667869923402386359379902675"
, "235713095185988155553500217595661312303861624791720350423698435045853678746"
, "150631584359141913552843813384841153102535679219851913018874172414598353488"
, "207783836843813911284913666774420250626019971129676431904589416081127483900"
, "15728034718954665549174230921445077500399069880555428849958014406422697976"
, "69799423545177501667748653663121504155622623013014583690766769624482972893"
, "265665371394145560256710553779588458846778960884758560117805511822299802326"
, "149195925869584039415331014261414953602245337159354350672030141190471260449"
, "162328395279114064180857718453973759378615891406692054752029241989300597156"
, "104643123291849369328362168243087456326274773811561124607060302871149280568"
, "320704123383295724141970902124509237736831907552238934395000039155541269937"
, "77914486216152383968400591704791756847610018757857965758408455442143531631"
, "238365259321088298905088250444146071545398991768186471689605160523129613763"
, "279409375422154593510552116666741774885392805494152031820287626934209978908"
, "195118776021452609708543280509123101181249086555819844047203390788132717252"
, "197977884437087886153482042453896079316138251415359773453987232734849953584"
, "168185043240980087638006965666857387510828226074470344841850764460631595331"
, "231157923359356077977363679818678437836536420121744865399935742538602805912"
, "177903771863742191900138437188329108771172098110036075491750018158192424072"
, "313552174443290416730632310997197097951229162137491595709733420111980331403"
, "273253450712049988786741336540196077743997302924525995219038781650977490211"
, "421908030281055821389377531613150504859996607596444776050212044919345332385"
, "180108184992593746898140529947178182204857361841304042854173884504394805936"
, "37075272799330399065679301151342697855905822543084867570322173216259074746"
, "364885615491975468180698816037289079302391684470748470356247509218051645743"
, "397482868106190800111749908137311511782294652288655656060379563261618687603"
, "192853269627895017416381451198403197013798085262867793715522216458130791820"
, "450480853450142791928572953497280890976328598410525218090104787739409705079"
, "40278654070502961330170439514434669768416784968274956021022493731441898222"
, "251277143131769020481025315216040765839561111684608785317366609258959942695"
, "95094468748825454459610961968601800404132682484160170977941285794444806916"
, "160586633865113902389134480029183924655750088163491531655080014223476604929"
, "211661229493873434581423107377168478957907088187044576032505407590783850232"
, "409651293631434750272174456674594508340952460788494035327119354167465019826"
, "233213211946836553080627522409887799285199986120567245752841080275284294566"
, "143182900674482411759481361336063079267405785923487419697568907351386146653"
, "430050085956999990041799366370428972519385994997821389120583306252090911051"
, "241257468571530133762629460194062384921386438426367660851087853915892684115"
, "106478922860328643074356032194102718325829441005019365153538120054339275205"
, "252933430690486626644000908036895289326526510137514385824014300035301154822"
, "242924628511152862437189415942615812459145003511499320946981326550434266392"
, "107566"
]
, [ "10892488375325920610152294701785926476935321890453222549428070867493882259"
, "230776541958253414701326765204413805639078570664616139597900921490475143840"
, "162235550819840758141721889536295480113278275911087429090017541814695333320"
, "318634611531007856220026646570492630940047240387334221027051366284225674524"
, "347695480420330337439096561080975864031317110001559084872978387417651465445"
, "243301070227446762125488369714708670219289121859111929877695012393609726208"
, "312153141205681954392441579373652470334482191990502803524039048245142338874"
, "243769659456658813016931656268556893289414122189859136776671772112510762644"
, "235510946617019540983239794402008700611014664578713646813426116406215382253"
, "394638234040056271265534896894991100558052611842099314808878257003754175212"
, "112730195097163222179949980634599934392634120069300673310070655800491242211"
, "112545144551723145227061757291353149296490850338535641681237178578430772288"
, "399161925498018051746424503488168548076537557369822821644005390567188305750"
, "291823556779130095044851238536860577785281148245623338113991004798525195947"
, "443006765181360772964201996695219533539991249361936300010158089981376772939"
, "74018417655448362012716269253153545524112649147848318337218250865231619883"
, "361038295627629757439073080656763633087074408866229383288288831546300069767"
, "269655542872834422597617091710145830035767834433114567250727497135451412216"
, "58289072717559976781527037332370898163550414718655391280446986067842922181"
, "365399954331278626941053447122605263207706937018525782908294459265663426953"
, "83576501872896181822963149907518188809522341045703363171261203985876068484"
, "203403783686919490357886887779316222746544665267595235714814853282937072937"
, "226090172488558641139782927103632452136731207206963592401497570070700221117"
, "249813560776802008219945831355163226098682741553305882699128946352901227282"
, "236586835155013316983057471119105606475813711035522306026673814184519826069"
, "420611449257527132395951061920868895981487081726689195426673433565077012458"
, "414979562418422585161189066520079202660418565693776869373399931253524536378"
, "115851377630895049619958829726730418778383692593973233290077769966938018584"
, "248071158447148977966329235335508229928647083339924308951291087789494073866"
, "8254100651607835318906499132096926759832050649688561036854000785129084907"
, "91385483239630999205401307763890943367451672856478206313711901403403429289"
, "369346641925770698935046191632374874762045839747680407985443471202350286304"
, "236809023553698844817613980395588655518336237009075203833474757804664254158"
, "8367847400805682648908349286699722579431227561083498361702537964306290078"
, "599241730770400067632779114435635342549228985534229813617556932580328166"
, "347112528350917448294202348829052279076907614831011498643025223836596915573"
, "384244379244118003891043669466323943736726794634167201471569326059716944701"
, "118013777197672343498581960057939216208494837962825017767101107204031333144"
, "27234916267695376599463409893017377196853108034589808756909998459364893467"
, "443519198016088819704735929590164254445884637317806160485888215392818578737"
, "396780482611567392375183169345153676737175342167284140440545202776279411157"
, "420351155303051883480975795437743307852799862858964108014000673383502660760"
, "17379377743250873773932440622865094720355292220433235366224143179854831702"
, "299671454782683147939928632170233327590769402224392134648893444626929909373"
, "143062753141414050359792615867774312517100868919516205179025540179759009492"
, "79497692490953838158801094558761984613913564034406069659969793097043605498"
, "422748645389700647011491406944374966856916994331478229959954030359911549565"
, "101802829812014644970197499895811874607753186302439171072935333706660468030"
, "376428369998893026519415315112012919906032811618495880392785036762185101192"
, "193969030999254195249242252871597931610859408264053152789041067245597391073"
, "262277607928686742238285487190873200602833495734085188071246746209841324139"
, "154099884960502807271641574310268486840763221700920893692135020347157046386"
, "155875061164585018658671842995328931296342883770473498362059838106632382461"
, "248574435283666782825705601259695525637993294311364397935499480206725256362"
, "171325185063038052248966557755722232979612702743265316145563443527135798688"
, "19982746887818897250405980185937061235439217109294376948752373205830077881"
, "363719103724181291346130833008745948141602173373912337315631878022251200824"
, "174596812883797666354579747966720458118607233660798323559531788300018084931"
, "296611197821867585469311917529661698312828606304722408477045992233526328708"
, "115884038550627840768260751168697204665962542002024023842649383174518895165"
, "265597417366164841889730505737916646261040505851159477903649773521314216810"
, "59890857222664166616144499778264545115438678877460924559608721809777680238"
, "150275344313515259978222149421664752546204516114655782705875535407472455999"
, "119762211657733951640135703777940013374803447228667976561992857374027112851"
, "124750313254270944205036764607428674226393862430770588148329786501973600535"
, "223562415856611692667255745278751292230311455355383541649772348032933666931"
, "70851"
]
]
, "merklePaths":
[ [ "12330511756602656312909435206144412037562550923861053314147193494605624608532"
, "11626412651279744307149389430094868876817959605147995292836995419396445628874"
, "5992799448428980485292103987999399446160713735809250167288027256759377161164"
, "19665782623633007009046423286205585606029554995684266199914536255425604862856"
, "16487082247902739801276638140231448150060608377815926330854457096549924699346"
, "13757776896542890425183206586238760231244468647794123671233758868377423038254"
, "5689382212199289790852870716648320118282913659439556386010574440270030991956"
, "19397819444005071538172083272808000152189643623980212079104170950073560541073"
, "13602141253349313166682170066159161155345481788270338632198566205283140117430"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
]
, [ "20475873274412005481064639175076772466269555202404881992985142339142752174247"
, "16346160910918020455716115524174351294558871582658635761456705803199256575588"
, "2853750013041818757293788273269978233503065226276819540991640203844566736443"
, "9192572535522846104757923561847416578417599996904208474460700268961782558170"
, "11041850361074018956732434352769265303294549935255362322653487210796196161858"
, "20835509643844784930831626622202658364476409300598072395494952478408974334325"
, "15426115581767819720710837762133134950520914636073261355445708100826108573907"
, "7565353224987902191368863653499353764559862741092477570130316358454603122676"
, "2622681935585012630617774892501744551457568716225188460692779556142778732663"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
]
, [ "13099868869639061574800067722436547911616384523753701384396275064543709640456"
, "353757809120595355213201328586632712724405232919181040928026587840976194078"
, "17653300914565730132855106316678548541847283141888106932466281385199556950861"
, "15467462085462582082877261755656498905479817107855355753427299990712166382496"
, "8291733777946446853018893495264026584437749231931118771866890345692346711355"
, "15510790697317206014779022286261864844918915222875014882833700758879700055506"
, "5689382212199289790852870716648320118282913659439556386010574440270030991956"
, "19397819444005071538172083272808000152189643623980212079104170950073560541073"
, "13602141253349313166682170066159161155345481788270338632198566205283140117430"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
]
, [ "123238869181525326412236116167249816873084559218151119452851092131080991962"
, "9610314342084317647296061595824603740114670828357751076517430572434680540425"
, "16802740554584732104294972716558962567961331277692246600846665155168171370476"
, "151083360419914122898584757765086723506432610661508069194962432698872036623"
, "10357032992337239725601662829902169825217513617307319193581711776597892496381"
, "10120699018002766520605012835043517238241846918467244955580419060582311503402"
, "21149604008153751948441881526949680605328007895979738537313721955134548786062"
, "5720106921878932614189421948890362637585879521377362100104826996201092964473"
, "2622681935585012630617774892501744551457568716225188460692779556142778732663"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
]
, [ "19153513112714782931111012244694922243101677748840856395814929006033044311081"
, "21046138187228318287277629107063936540039891592394801899272249280765102572688"
, "18057980437430910028015917806534512217725128031222973066601095455076586015436"
, "5766677914654397407881589917461473873246279171605373166264025525757502238061"
, "12019967669236656188577515900815533059046454955207846938479617973037184411021"
, "14504305765289705714959523666100275156034056689367568164630385862257567596209"
, "7152002871325824138073253423783370852632926621899161541618248808716037342022"
, "9714587356194206699401761190093056901650105401919163689816999407566849779455"
, "13602141253349313166682170066159161155345481788270338632198566205283140117430"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
, "0"
]
]
}
