Merge branch 'master' into feature/ceremony-files

Ben 2024-05-23 10:32:12 +02:00
commit c3fb06b3c9
No known key found for this signature in database
GPG Key ID: 541B9D8C9F1426A1
16 changed files with 518 additions and 112 deletions


@ -96,17 +96,35 @@ The following options are available:
--block-mn Number of blocks to check every maintenance cycle [=1000]. --block-mn Number of blocks to check every maintenance cycle [=1000].
-c, --cache-size The size of the block cache, 0 disables the cache - might help on slow hardrives -c, --cache-size The size of the block cache, 0 disables the cache - might help on slow hardrives
[=0]. [=0].
--persistence Enables persistence mechanism, requires an Ethereum node [=false].
Available sub-commands:
codex persistence [OPTIONS]... command
The following options are available:
--eth-provider The URL of the JSON-RPC API of the Ethereum node [=ws://localhost:8545]. --eth-provider The URL of the JSON-RPC API of the Ethereum node [=ws://localhost:8545].
--eth-account The Ethereum account that is used for storage contracts [=EthAddress.none]. --eth-account The Ethereum account that is used for storage contracts.
--eth-private-key File containing Ethereum private key for storage contracts [=string.none]. --eth-private-key File containing Ethereum private key for storage contracts.
--marketplace-address Address of deployed Marketplace contract [=EthAddress.none]. --marketplace-address Address of deployed Marketplace contract.
--validator Enables validator, requires an Ethereum node [=false]. --validator Enables validator, requires an Ethereum node [=false].
--validator-max-slots Maximum number of slots that the validator monitors [=1000]. --validator-max-slots Maximum number of slots that the validator monitors [=1000].
Available sub-commands: Available sub-commands:
codex initNode codex persistence prover [OPTIONS]...
The following options are available:
--circom-r1cs The r1cs file for the storage circuit.
--circom-wasm The wasm file for the storage circuit.
--circom-zkey The zkey file for the storage circuit.
--circom-no-zkey Ignore the zkey file - use only for testing! [=false].
--proof-samples Number of samples to prove [=5].
--max-slot-depth The maximum depth of the slot tree [=32].
--max-dataset-depth The maximum depth of the dataset tree [=8].
--max-block-depth The maximum depth of the network block merkle tree [=5].
--max-cell-elements The maximum number of elements in a cell [=67].
``` ```
#### Logging #### Logging
@ -118,9 +136,11 @@ Using the `log-level` parameter, you can set the top-level log level like `--log
you can set log levels for specific topics like `--log-level="info; trace: marketplace,node; error: blockexchange"`, you can set log levels for specific topics like `--log-level="info; trace: marketplace,node; error: blockexchange"`,
which sets the top-level log level to `info` and then for topics `marketplace` and `node` sets the level to `trace` and so on. which sets the top-level log level to `info` and then for topics `marketplace` and `node` sets the level to `trace` and so on.
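For instance (an illustrative invocation only; the binary path and data directory are placeholders), combining a top-level level with per-topic overrides looks like:
```bash
# Top-level level `info`, `trace` for marketplace and node, `error` for blockexchange.
build/codex --data-dir=./codex-data --log-level="info; trace: marketplace,node; error: blockexchange"
```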
### Example: running two Codex clients ### Guides
To get acquainted with Codex, consider running the manual two-client test described [HERE](docs/TWOCLIENTTEST.md). To get acquainted with Codex, consider:
* running the simple [Codex Two-Client Test](docs/TwoClientTest.md) for a start, and;
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](docs/Marketplace.md) using a local blockchain as well.
## API ## API


@ -154,9 +154,7 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
let let
haves = b.peers.peersHave(cid) haves = b.peers.peersHave(cid)
trace "Current number of peers for block", cid, peers = haves.len
if haves.len < b.minPeersPerBlock: if haves.len < b.minPeersPerBlock:
trace "Discovering block", cid
try: try:
let let
request = b.discovery request = b.discovery
@ -168,7 +166,6 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
let let
peers = await request peers = await request
trace "Discovered peers for block", peers = peers.len, cid
let let
dialed = await allFinished( dialed = await allFinished(
peers.mapIt( b.network.dialPeer(it.data) )) peers.mapIt( b.network.dialPeer(it.data) ))
@ -189,10 +186,9 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
for cid in cids: for cid in cids:
if cid notin b.discoveryQueue: if cid notin b.discoveryQueue:
try: try:
trace "Queueing find block", cid, queue = b.discoveryQueue.len
b.discoveryQueue.putNoWait(cid) b.discoveryQueue.putNoWait(cid)
except CatchableError as exc: except CatchableError as exc:
trace "Exception queueing discovery request", exc = exc.msg warn "Exception queueing discovery request", exc = exc.msg
proc queueProvideBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} = proc queueProvideBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
for cid in cids: for cid in cids:


@ -125,7 +125,7 @@ proc stop*(b: BlockExcEngine) {.async.} =
proc sendWantHave( proc sendWantHave(
b: BlockExcEngine, b: BlockExcEngine,
address: BlockAddress, address: BlockAddress, # pluralize this entire call chain, please
excluded: seq[BlockExcPeerCtx], excluded: seq[BlockExcPeerCtx],
peers: seq[BlockExcPeerCtx]): Future[void] {.async.} = peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
trace "Sending wantHave request to peers", address trace "Sending wantHave request to peers", address
@ -139,7 +139,7 @@ proc sendWantHave(
proc sendWantBlock( proc sendWantBlock(
b: BlockExcEngine, b: BlockExcEngine,
address: BlockAddress, address: BlockAddress, # pluralize this entire call chain, please
blockPeer: BlockExcPeerCtx): Future[void] {.async.} = blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
trace "Sending wantBlock request to", peer = blockPeer.id, address trace "Sending wantBlock request to", peer = blockPeer.id, address
await b.network.request.sendWantList( await b.network.request.sendWantList(
@ -154,13 +154,11 @@ proc monitorBlockHandle(
peerId: PeerId) {.async.} = peerId: PeerId) {.async.} =
try: try:
trace "Monitoring block handle", address, peerId
discard await handle discard await handle
trace "Block handle success", address, peerId
except CancelledError as exc: except CancelledError as exc:
trace "Block handle cancelled", address, peerId trace "Block handle cancelled", address, peerId
except CatchableError as exc: except CatchableError as exc:
trace "Error block handle, disconnecting peer", address, exc = exc.msg, peerId warn "Error block handle, disconnecting peer", address, exc = exc.msg, peerId
# TODO: really, this is just a quick and dirty way of # TODO: really, this is just a quick and dirty way of
# preventing hitting the same "bad" peer every time, however, # preventing hitting the same "bad" peer every time, however,
@ -217,7 +215,6 @@ proc blockPresenceHandler*(
b: BlockExcEngine, b: BlockExcEngine,
peer: PeerId, peer: PeerId,
blocks: seq[BlockPresence]) {.async.} = blocks: seq[BlockPresence]) {.async.} =
trace "Received presence update for peer", peer, blocks = blocks.len
let let
peerCtx = b.peers.get(peer) peerCtx = b.peers.get(peer)
wantList = toSeq(b.pendingBlocks.wantList) wantList = toSeq(b.pendingBlocks.wantList)
@ -227,12 +224,6 @@ proc blockPresenceHandler*(
for blk in blocks: for blk in blocks:
if presence =? Presence.init(blk): if presence =? Presence.init(blk):
logScope:
address = $presence.address
have = presence.have
price = presence.price
trace "Updating presence"
peerCtx.setPresence(presence) peerCtx.setPresence(presence)
let let
@ -323,14 +314,12 @@ proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} =
proc payForBlocks(engine: BlockExcEngine, proc payForBlocks(engine: BlockExcEngine,
peer: BlockExcPeerCtx, peer: BlockExcPeerCtx,
blocksDelivery: seq[BlockDelivery]) {.async.} = blocksDelivery: seq[BlockDelivery]) {.async.} =
trace "Paying for blocks", len = blocksDelivery.len
let let
sendPayment = engine.network.request.sendPayment sendPayment = engine.network.request.sendPayment
price = peer.price(blocksDelivery.mapIt(it.address)) price = peer.price(blocksDelivery.mapIt(it.address))
if payment =? engine.wallet.pay(peer, price): if payment =? engine.wallet.pay(peer, price):
trace "Sending payment for blocks", price trace "Sending payment for blocks", price, len = blocksDelivery.len
await sendPayment(peer.id, payment) await sendPayment(peer.id, payment)
proc validateBlockDelivery( proc validateBlockDelivery(
@ -365,7 +354,7 @@ proc blocksDeliveryHandler*(
b: BlockExcEngine, b: BlockExcEngine,
peer: PeerId, peer: PeerId,
blocksDelivery: seq[BlockDelivery]) {.async.} = blocksDelivery: seq[BlockDelivery]) {.async.} =
trace "Got blocks from peer", peer, len = blocksDelivery.len trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt($it.address)).join(",")
var validatedBlocksDelivery: seq[BlockDelivery] var validatedBlocksDelivery: seq[BlockDelivery]
for bd in blocksDelivery: for bd in blocksDelivery:
@ -411,7 +400,6 @@ proc wantListHandler*(
b: BlockExcEngine, b: BlockExcEngine,
peer: PeerId, peer: PeerId,
wantList: WantList) {.async.} = wantList: WantList) {.async.} =
trace "Got wantList for peer", peer, items = wantList.entries.len
let let
peerCtx = b.peers.get(peer) peerCtx = b.peers.get(peer)
if isNil(peerCtx): if isNil(peerCtx):
@ -430,8 +418,6 @@ proc wantListHandler*(
wantType = $e.wantType wantType = $e.wantType
if idx < 0: # updating entry if idx < 0: # updating entry
trace "Processing new want list entry"
let let
have = await e.address in b.localStore have = await e.address in b.localStore
price = @( price = @(
@ -442,41 +428,35 @@ proc wantListHandler*(
codex_block_exchange_want_have_lists_received.inc() codex_block_exchange_want_have_lists_received.inc()
if not have and e.sendDontHave: if not have and e.sendDontHave:
trace "Adding dont have entry to presence response"
presence.add( presence.add(
BlockPresence( BlockPresence(
address: e.address, address: e.address,
`type`: BlockPresenceType.DontHave, `type`: BlockPresenceType.DontHave,
price: price)) price: price))
elif have and e.wantType == WantType.WantHave: elif have and e.wantType == WantType.WantHave:
trace "Adding have entry to presence response"
presence.add( presence.add(
BlockPresence( BlockPresence(
address: e.address, address: e.address,
`type`: BlockPresenceType.Have, `type`: BlockPresenceType.Have,
price: price)) price: price))
elif e.wantType == WantType.WantBlock: elif e.wantType == WantType.WantBlock:
trace "Added entry to peer's want blocks list"
peerCtx.peerWants.add(e) peerCtx.peerWants.add(e)
codex_block_exchange_want_block_lists_received.inc() codex_block_exchange_want_block_lists_received.inc()
else: else:
# peer doesn't want this block anymore # peer doesn't want this block anymore
if e.cancel: if e.cancel:
trace "Removing entry from peer want list"
peerCtx.peerWants.del(idx) peerCtx.peerWants.del(idx)
else: else:
trace "Updating entry in peer want list"
# peer might want to ask for the same cid with # peer might want to ask for the same cid with
# different want params # different want params
peerCtx.peerWants[idx] = e # update entry peerCtx.peerWants[idx] = e # update entry
if presence.len > 0: if presence.len > 0:
trace "Sending presence to remote", items = presence.len trace "Sending presence to remote", items = presence.mapIt($it).join(",")
await b.network.request.sendPresence(peer, presence) await b.network.request.sendPresence(peer, presence)
trace "Scheduling a task to check want-list", peer
if not b.scheduleTask(peerCtx): if not b.scheduleTask(peerCtx):
trace "Unable to schedule task for peer", peer warn "Unable to schedule task for peer", peer
proc accountHandler*( proc accountHandler*(
engine: BlockExcEngine, engine: BlockExcEngine,
@ -541,8 +521,6 @@ proc dropPeer*(b: BlockExcEngine, peer: PeerId) =
b.peers.remove(peer) b.peers.remove(peer)
proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
trace "Handling task for peer", peer = task.id
# Send to the peer blocks he wants to get, # Send to the peer blocks he wants to get,
# if they present in our local store # if they present in our local store
@ -559,7 +537,6 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
if peerWant.address in addresses: if peerWant.address in addresses:
peerWant.inFlight = inFlight peerWant.inFlight = inFlight
trace "wantsBlocks", peer = task.id, n = wantsBlocks.len
if wantsBlocks.len > 0: if wantsBlocks.len > 0:
# Mark wants as in-flight. # Mark wants as in-flight.
let wantAddresses = wantsBlocks.mapIt(it.address) let wantAddresses = wantsBlocks.mapIt(it.address)
@ -567,7 +544,6 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
wantsBlocks.sort(SortOrder.Descending) wantsBlocks.sort(SortOrder.Descending)
proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} = proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
trace "Handling lookup for entry", address = e.address
if e.address.leaf: if e.address.leaf:
(await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map( (await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(blkAndProof: (Block, CodexProof)) => (blkAndProof: (Block, CodexProof)) =>
@ -591,7 +567,7 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
updateInFlight(failedAddresses, false) updateInFlight(failedAddresses, false)
if blocksDelivery.len > 0: if blocksDelivery.len > 0:
trace "Sending blocks to peer", peer = task.id, blocks = blocksDelivery.len trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt($it.address)).join(",")
await b.network.request.sendBlocksDelivery( await b.network.request.sendBlocksDelivery(
task.id, task.id,
blocksDelivery blocksDelivery
@ -600,7 +576,6 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64) codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
task.peerWants.keepItIf(it.address notin successAddresses) task.peerWants.keepItIf(it.address notin successAddresses)
trace "Removed entries from peerWants", peerWants = task.peerWants.len
proc blockexcTaskRunner(b: BlockExcEngine) {.async.} = proc blockexcTaskRunner(b: BlockExcEngine) {.async.} =
## process tasks ## process tasks
@ -611,7 +586,6 @@ proc blockexcTaskRunner(b: BlockExcEngine) {.async.} =
let let
peerCtx = await b.taskQueue.pop() peerCtx = await b.taskQueue.pop()
trace "Got new task from queue", peerId = peerCtx.id
await b.taskHandler(peerCtx) await b.taskHandler(peerCtx)
info "Exiting blockexc task runner" info "Exiting blockexc task runner"


@ -58,15 +58,13 @@ proc getWantHandle*(
inFlight: inFlight, inFlight: inFlight,
startTime: getMonoTime().ticks) startTime: getMonoTime().ticks)
trace "Adding pending future for block", address, inFlight = p.blocks[address].inFlight
p.updatePendingBlockGauge() p.updatePendingBlockGauge()
return await p.blocks[address].handle.wait(timeout) return await p.blocks[address].handle.wait(timeout)
except CancelledError as exc: except CancelledError as exc:
trace "Blocks cancelled", exc = exc.msg, address trace "Blocks cancelled", exc = exc.msg, address
raise exc raise exc
except CatchableError as exc: except CatchableError as exc:
trace "Pending WANT failed or expired", exc = exc.msg error "Pending WANT failed or expired", exc = exc.msg
# no need to cancel, it is already cancelled by wait() # no need to cancel, it is already cancelled by wait()
raise exc raise exc
finally: finally:
@ -88,8 +86,6 @@ proc resolve*(
for bd in blocksDelivery: for bd in blocksDelivery:
p.blocks.withValue(bd.address, blockReq): p.blocks.withValue(bd.address, blockReq):
trace "Resolving block", address = bd.address
if not blockReq.handle.finished: if not blockReq.handle.finished:
let let
startTime = blockReq.startTime startTime = blockReq.startTime
@ -99,7 +95,9 @@ proc resolve*(
blockReq.handle.complete(bd.blk) blockReq.handle.complete(bd.blk)
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs) codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
trace "Block retrieval time", retrievalDurationUs, address = bd.address
if retrievalDurationUs > 500000:
warn "High block retrieval time", retrievalDurationUs, address = bd.address
else: else:
trace "Block handle already finished", address = bd.address trace "Block handle already finished", address = bd.address
@ -112,7 +110,6 @@ proc setInFlight*(
p.blocks.withValue(address, pending): p.blocks.withValue(address, pending):
pending[].inFlight = inFlight pending[].inFlight = inFlight
trace "Setting inflight", address, inFlight = pending[].inFlight
proc isInFlight*( proc isInFlight*(
p: PendingBlocksManager, p: PendingBlocksManager,
@ -122,7 +119,6 @@ proc isInFlight*(
p.blocks.withValue(address, pending): p.blocks.withValue(address, pending):
result = pending[].inFlight result = pending[].inFlight
trace "Getting inflight", address, inFlight = result
proc contains*(p: PendingBlocksManager, cid: Cid): bool = proc contains*(p: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in p.blocks BlockAddress.init(cid) in p.blocks


@ -96,7 +96,6 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
b.peers.withValue(id, peer): b.peers.withValue(id, peer):
try: try:
await b.inflightSema.acquire() await b.inflightSema.acquire()
trace "Sending message to peer", peer = id
await peer[].send(msg) await peer[].send(msg)
except CatchableError as err: except CatchableError as err:
error "Error sending message", peer = id, msg = err.msg error "Error sending message", peer = id, msg = err.msg
@ -113,7 +112,6 @@ proc handleWantList(
## ##
if not b.handlers.onWantList.isNil: if not b.handlers.onWantList.isNil:
trace "Handling want list for peer", peer = peer.id, items = list.entries.len
await b.handlers.onWantList(peer.id, list) await b.handlers.onWantList(peer.id, list)
proc sendWantList*( proc sendWantList*(
@ -128,7 +126,6 @@ proc sendWantList*(
## Send a want message to peer ## Send a want message to peer
## ##
trace "Sending want list to peer", peer = id, `type` = $wantType, items = addresses.len
let msg = WantList( let msg = WantList(
entries: addresses.mapIt( entries: addresses.mapIt(
WantListEntry( WantListEntry(
@ -157,7 +154,6 @@ proc handleBlocksDelivery(
## ##
if not b.handlers.onBlocksDelivery.isNil: if not b.handlers.onBlocksDelivery.isNil:
trace "Handling blocks for peer", peer = peer.id, items = blocksDelivery.len
await b.handlers.onBlocksDelivery(peer.id, blocksDelivery) await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)
@ -178,7 +174,6 @@ proc handleBlockPresence(
## ##
if not b.handlers.onPresence.isNil: if not b.handlers.onPresence.isNil:
trace "Handling block presence for peer", peer = peer.id, items = presence.len
await b.handlers.onPresence(peer.id, presence) await b.handlers.onPresence(peer.id, presence)
proc sendBlockPresence*( proc sendBlockPresence*(


@ -45,7 +45,6 @@ proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
let let
data = await conn.readLp(MaxMessageSize.int) data = await conn.readLp(MaxMessageSize.int)
msg = Message.protobufDecode(data).mapFailure().tryGet() msg = Message.protobufDecode(data).mapFailure().tryGet()
trace "Got message for peer", peer = b.id
await b.handler(b, msg) await b.handler(b, msg)
except CatchableError as err: except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg warn "Exception in blockexc read loop", msg = err.msg
@ -64,10 +63,9 @@ proc send*(b: NetworkPeer, msg: Message) {.async.} =
let conn = await b.connect() let conn = await b.connect()
if isNil(conn): if isNil(conn):
trace "Unable to get send connection for peer message not sent", peer = b.id warn "Unable to get send connection for peer message not sent", peer = b.id
return return
trace "Sending message to remote", peer = b.id
await conn.writeLp(protobufEncode(msg)) await conn.writeLp(protobufEncode(msg))
proc broadcast*(b: NetworkPeer, msg: Message) = proc broadcast*(b: NetworkPeer, msg: Message) =
@ -75,7 +73,7 @@ proc broadcast*(b: NetworkPeer, msg: Message) =
try: try:
await b.send(msg) await b.send(msg)
except CatchableError as exc: except CatchableError as exc:
trace "Exception broadcasting message to peer", peer = b.id, exc = exc.msg warn "Exception broadcasting message to peer", peer = b.id, exc = exc.msg
asyncSpawn sendAwaiter() asyncSpawn sendAwaiter()


@ -25,9 +25,6 @@ import ../../logutils
export payments, nitro export payments, nitro
logScope:
topics = "codex peercontext"
type type
BlockExcPeerCtx* = ref object of RootObj BlockExcPeerCtx* = ref object of RootObj
id*: PeerId id*: PeerId
@ -66,5 +63,4 @@ func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
self.blocks.withValue(a, precense): self.blocks.withValue(a, precense):
price += precense[].price price += precense[].price
trace "Blocks price", price
price price


@ -47,15 +47,12 @@ func contains*(self: PeerCtxStore, peerId: PeerId): bool =
peerId in self.peers peerId in self.peers
func add*(self: PeerCtxStore, peer: BlockExcPeerCtx) = func add*(self: PeerCtxStore, peer: BlockExcPeerCtx) =
trace "Adding peer to peer context store", peer = peer.id
self.peers[peer.id] = peer self.peers[peer.id] = peer
func remove*(self: PeerCtxStore, peerId: PeerId) = func remove*(self: PeerCtxStore, peerId: PeerId) =
trace "Removing peer from peer context store", peer = peerId
self.peers.del(peerId) self.peers.del(peerId)
func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx = func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx =
trace "Retrieving peer from peer context store", peer = peerId
self.peers.getOrDefault(peerId, nil) self.peers.getOrDefault(peerId, nil)
func len*(self: PeerCtxStore): int = func len*(self: PeerCtxStore): int =


@ -73,16 +73,14 @@ method find*(
cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} = cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
## Find block providers ## Find block providers
## ##
trace "Finding providers for block", cid
without providers =? without providers =?
(await d.protocol.getProviders(cid.toNodeId())).mapFailure, error: (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
trace "Error finding providers for block", cid, error = error.msg warn "Error finding providers for block", cid, error = error.msg
return providers.filterIt( not (it.data.peerId == d.peerId) ) return providers.filterIt( not (it.data.peerId == d.peerId) )
method provide*(d: Discovery, cid: Cid) {.async, base.} = method provide*(d: Discovery, cid: Cid) {.async, base.} =
## Provide a bock Cid ## Provide a block Cid
## ##
let let
nodes = await d.protocol.addProvider( nodes = await d.protocol.addProvider(


@ -95,15 +95,14 @@ proc retrieveCid(
break break
bytes += buff.len bytes += buff.len
trace "Sending chunk", size = buff.len
await resp.sendChunk(addr buff[0], buff.len) await resp.sendChunk(addr buff[0], buff.len)
await resp.finish() await resp.finish()
codex_api_downloads.inc() codex_api_downloads.inc()
except CatchableError as exc: except CatchableError as exc:
trace "Excepting streaming blocks", exc = exc.msg warn "Excepting streaming blocks", exc = exc.msg
return RestApiResponse.error(Http500) return RestApiResponse.error(Http500)
finally: finally:
trace "Sent bytes", cid = cid, bytes info "Sent bytes", cid = cid, bytes
if not stream.isNil: if not stream.isNil:
await stream.close() await stream.close()


@ -34,17 +34,13 @@ type
localStore*: BlockStore # local block store localStore*: BlockStore # local block store
method getBlock*(self: NetworkStore, address: BlockAddress): Future[?!Block] {.async.} = method getBlock*(self: NetworkStore, address: BlockAddress): Future[?!Block] {.async.} =
trace "Getting block from local store or network", address
without blk =? (await self.localStore.getBlock(address)), err: without blk =? (await self.localStore.getBlock(address)), err:
if not (err of BlockNotFoundError): if not (err of BlockNotFoundError):
trace "Error getting block from local store", address, err = err.msg error "Error getting block from local store", address, err = err.msg
return failure err return failure err
trace "Block not in local store", address, err = err.msg
without newBlock =? (await self.engine.requestBlock(address)), err: without newBlock =? (await self.engine.requestBlock(address)), err:
trace "Unable to get block from exchange engine", address, err = err.msg error "Unable to get block from exchange engine", address, err = err.msg
return failure err return failure err
return success newBlock return success newBlock


@ -139,10 +139,9 @@ method getCidAndProof*(
return failure(err) return failure(err)
without (cid, proof) =? (Cid, CodexProof).decode(value), err: without (cid, proof) =? (Cid, CodexProof).decode(value), err:
trace "Unable to decode cid and proof", err = err.msg error "Unable to decode cid and proof", err = err.msg
return failure(err) return failure(err)
trace "Got cid and proof for block", cid, proof = $proof
return success (cid, proof) return success (cid, proof)
method getCid*( method getCid*(
@ -154,10 +153,11 @@ method getCid*(
without value =? await self.metaDs.get(key), err: without value =? await self.metaDs.get(key), err:
if err of DatastoreKeyNotFound: if err of DatastoreKeyNotFound:
trace "Cid not found", treeCid, index # This failure is expected to happen frequently:
# NetworkStore.getBlock will call RepoStore.getBlock before starting the block exchange engine.
return failure(newException(BlockNotFoundError, err.msg)) return failure(newException(BlockNotFoundError, err.msg))
else: else:
trace "Error getting cid from datastore", err = err.msg, key error "Error getting cid from datastore", err = err.msg, key
return failure(err) return failure(err)
return (Cid, CodexProof).decodeCid(value) return (Cid, CodexProof).decodeCid(value)
@ -170,21 +170,19 @@ method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} =
cid = cid cid = cid
if cid.isEmpty: if cid.isEmpty:
trace "Empty block, ignoring"
return cid.emptyBlock return cid.emptyBlock
without key =? makePrefixKey(self.postFixLen, cid), err: without key =? makePrefixKey(self.postFixLen, cid), err:
trace "Error getting key from provider", err = err.msg error "Error getting key from provider", err = err.msg
return failure(err) return failure(err)
without data =? await self.repoDs.get(key), err: without data =? await self.repoDs.get(key), err:
if not (err of DatastoreKeyNotFound): if not (err of DatastoreKeyNotFound):
trace "Error getting block from datastore", err = err.msg, key error "Error getting block from datastore", err = err.msg, key
return failure(err) return failure(err)
return failure(newException(BlockNotFoundError, err.msg)) return failure(newException(BlockNotFoundError, err.msg))
trace "Got block for cid", cid
return Block.new(cid, data, verify = true) return Block.new(cid, data, verify = true)


@ -82,7 +82,6 @@ method readOnce*(
## Raise exception if we are already at EOF. ## Raise exception if we are already at EOF.
## ##
trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.blocksCount
if self.atEof: if self.atEof:
raise newLPStreamEOFError() raise newLPStreamEOFError()
@ -104,7 +103,7 @@ method readOnce*(
without blk =? await self.store.getBlock(address), error: without blk =? await self.store.getBlock(address), error:
raise newLPStreamReadError(error) raise newLPStreamReadError(error)
trace "Reading bytes from store stream", blockNum, cid = blk.cid, bytes = readBytes, blockOffset trace "Reading bytes from store stream", manifestCid = self.manifest.cid.get(), numBlocks = self.manifest.blocksCount, blockNum, blkCid = blk.cid, bytes = readBytes, blockOffset
# Copy `readBytes` bytes starting at `blockOffset` from the block into the outbuf # Copy `readBytes` bytes starting at `blockOffset` from the block into the outbuf
if blk.isEmpty: if blk.isEmpty:


@ -34,12 +34,6 @@ proc new*(
proc slots*(validation: Validation): seq[SlotId] = proc slots*(validation: Validation): seq[SlotId] =
validation.slots.toSeq validation.slots.toSeq
proc iterateSlots(validation: Validation, action: proc(s: SlotId): Future[void] {.async.}) {.async.} =
# Copy of hashSet, for iteration.
let slots = validation.slots
for slotId in slots:
await action(slotId)
proc getCurrentPeriod(validation: Validation): UInt256 = proc getCurrentPeriod(validation: Validation): UInt256 =
return validation.periodicity.periodOf(validation.clock.now().u256) return validation.periodicity.periodOf(validation.clock.now().u256)
@ -61,12 +55,12 @@ proc subscribeSlotFilled(validation: Validation) {.async.} =
proc removeSlotsThatHaveEnded(validation: Validation) {.async.} = proc removeSlotsThatHaveEnded(validation: Validation) {.async.} =
var ended: HashSet[SlotId] var ended: HashSet[SlotId]
proc onSlot(slotId: SlotId) {.async.} = let slots = validation.slots
for slotId in slots:
let state = await validation.market.slotState(slotId) let state = await validation.market.slotState(slotId)
if state != SlotState.Filled: if state != SlotState.Filled:
trace "Removing slot", slotId trace "Removing slot", slotId
ended.incl(slotId) ended.incl(slotId)
await validation.iterateSlots(onSlot)
validation.slots.excl(ended) validation.slots.excl(ended)
proc markProofAsMissing(validation: Validation, proc markProofAsMissing(validation: Validation,
@ -88,10 +82,10 @@ proc markProofAsMissing(validation: Validation,
error "Marking proof as missing failed", msg = e.msg error "Marking proof as missing failed", msg = e.msg
proc markProofsAsMissing(validation: Validation) {.async.} = proc markProofsAsMissing(validation: Validation) {.async.} =
proc onSlot(slotId: SlotId) {.async.} = let slots = validation.slots
for slotId in slots:
let previousPeriod = validation.getCurrentPeriod() - 1 let previousPeriod = validation.getCurrentPeriod() - 1
await validation.markProofAsMissing(slotId, previousPeriod) await validation.markProofAsMissing(slotId, previousPeriod)
await validation.iterateSlots(onSlot)
proc run(validation: Validation) {.async.} = proc run(validation: Validation) {.async.} =
trace "Validation started" trace "Validation started"

docs/Marketplace.md (new file, 444 lines)

@ -0,0 +1,444 @@
# Running a Local Codex Network with Marketplace Support
This tutorial will teach you how to run a small Codex network with the _storage marketplace_ enabled; i.e., the functionality in Codex which allows participants to offer and buy storage in a market, ensuring that storage providers honor their part of the deal by means of cryptographic proofs.
To complete this tutorial, you will need:
* the [geth](https://github.com/ethereum/go-ethereum) Ethereum client;
* a Codex binary, which [you can compile from source](https://github.com/codex-storage/nim-codex?tab=readme-ov-file#build-and-run).
We will also be using [bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)) syntax throughout. If you use a different shell, you may need to adapt things to your platform.
In this tutorial, you will:
1. [Set Up a Geth PoA network](#1-set-up-a-geth-poa-network);
2. [Set up The Marketplace](#2-set-up-the-marketplace);
3. [Run Codex](#3-run-codex);
4. [Buy and Sell Storage on the Marketplace](#4-buy-and-sell-storage-on-the-marketplace).
We strongly suggest that you create a folder (e.g. `marketplace-tutorial`) and switch into it before beginning.
## 1. Set Up a Geth PoA Network
For this tutorial, we will use a simple [Proof-of-Authority](https://github.com/ethereum/EIPs/issues/225) network with geth. The first step is creating a _signer account_: an account which will be used by geth to sign the blocks in the network. Any block signed by a signer is accepted as valid.
### 1.1. Create a Signer Account
To create a signer account, run:
```bash
geth account new --datadir geth-data
```
The account generator will ask you to input a password, which you can leave blank. It will then print some information, including the account's public address:
```bash
INFO [03-22|12:58:05.637] Maximum peer count ETH=50 total=50
INFO [03-22|12:58:05.638] Smartcard socket not found, disabling err="stat /run/pcscd/pcscd.comm: no such file or directory"
Your new account is locked with a password. Please give a password. Do not forget this password.
Password:
Repeat password:
Your new key was generated
Public address of the key: 0x93976895c4939d99837C8e0E1779787718EF8368
...
```
In this example, the public address of the signer account is `0x93976895c4939d99837C8e0E1779787718EF8368`. Yours will be a different address; save it for later use.
Next, store it in an environment variable, replacing the zero address below with your own signer address:
```sh
export GETH_SIGNER_ADDR="0x0000000000000000000000000000000000000000"
echo ${GETH_SIGNER_ADDR} > geth_signer_address.txt
```
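If you want to double-check the accounts present in the data directory (and that the address you exported matches), geth can list them; this is just an optional sanity check:
```bash
geth account list --datadir geth-data
```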
### 1.2. Configure The Network and Create the Genesis Block
The next step is telling geth what kind of network you want to run. We will be running a [pre-merge](https://ethereum.org/en/roadmap/merge/) network with Proof-of-Authority consensus. To get that working, create a `network.json` file.
If you set the `GETH_SIGNER_ADDR` variable above, you can create the `network.json` file by running:
```sh
echo "{\"config\": { \"chainId\": 12345, \"homesteadBlock\": 0, \"eip150Block\": 0, \"eip155Block\": 0, \"eip158Block\": 0, \"byzantiumBlock\": 0, \"constantinopleBlock\": 0, \"petersburgBlock\": 0, \"istanbulBlock\": 0, \"berlinBlock\": 0, \"londonBlock\": 0, \"arrowGlacierBlock\": 0, \"grayGlacierBlock\": 0, \"clique\": { \"period\": 1, \"epoch\": 30000 } }, \"difficulty\": \"1\", \"gasLimit\": \"8000000\", \"extradata\": \"0x0000000000000000000000000000000000000000000000000000000000000000${GETH_SIGNER_ADDR:2}0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\", \"alloc\": { \"${GETH_SIGNER_ADDR}\": { \"balance\": \"10000000000000000000000\"}}}" > network.json
```
You can also create the file manually, with the following content modified to use your signer address:
```json
{
"config": {
"chainId": 12345,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"arrowGlacierBlock": 0,
"grayGlacierBlock": 0,
"clique": {
"period": 1,
"epoch": 30000
}
},
"difficulty": "1",
"gasLimit": "8000000",
"extradata": "0x000000000000000000000000000000000000000000000000000000000000000093976895c4939d99837C8e0E1779787718EF83680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"alloc": {
"0x93976895c4939d99837C8e0E1779787718EF8368": {
"balance": "10000000000000000000000"
}
}
}
```
Note that the signer account address is embedded in two different places:
* inside of the `"extradata"` string, surrounded by zeroes and stripped of its `0x` prefix;
* as an entry key in the `alloc` section.
Make sure to replace both occurrences with the account address that you wrote down in Step 1.1.
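As a quick sanity check (assuming `GETH_SIGNER_ADDR` is still exported), you can verify that your address appears in both places:
```bash
# Should print two matches: one from "extradata" and one from the "alloc" key (which keeps its 0x prefix).
grep -oi "${GETH_SIGNER_ADDR:2}" network.json
```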
Once `network.json` is created, you can initialize the network with:
```bash
geth init --datadir geth-data network.json
```
### 1.3. Start your PoA Node
We are now ready to start our $1$-node private blockchain. To launch the signer node, open a separate terminal in the same working directory and run:
```bash
geth\
--datadir geth-data\
--networkid 12345\
--unlock ${GETH_SIGNER_ADDR}\
--nat extip:127.0.0.1\
--netrestrict 127.0.0.0/24\
--mine\
--miner.etherbase ${GETH_SIGNER_ADDR}\
--http\
--allow-insecure-unlock
```
Note that, once again, the signer account created in Step 1.1 appears in both `--unlock` and `--miner.etherbase`. Make sure you have `GETH_SIGNER_ADDR` set before running this.
Geth will prompt you for the account's password as it starts up. Once you enter it, the node should finish starting up and begin "mining" blocks.
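To confirm that blocks are indeed being produced, you can query the current block number from the tutorial folder (note the `geth.ipc` path); it should keep increasing:
```bash
geth attach --exec web3.eth.blockNumber geth-data/geth.ipc
```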
## 2. Set Up The Marketplace
You will need to open a new terminal for this section, and geth needs to be running already. Setting up the Codex marketplace entails:
1. Deploying the Codex Marketplace contracts to our private blockchain
2. Setting up the Ethereum accounts we will use to buy and sell storage in the Codex marketplace
3. Provisioning those accounts with the required token balances
### 2.1. Deploy the Codex Marketplace Contracts
To deploy the contracts, start by cloning the Codex contracts repository locally and installing its dependencies:
```bash
git clone https://github.com/codex-storage/codex-contracts-eth
cd codex-contracts-eth
npm install
```
You must now **wait until $256$ blocks are mined in your PoA network**, or the deployment will fail. This should take about $4$ minutes and $30$ seconds. You can check which block height you are currently at by running:
```bash
geth attach --exec web3.eth.blockNumber ../geth-data/geth.ipc
```
Once that gets past $256$, you are ready to go. To deploy the contracts, run:
```bash
export DISTTEST_NETWORK_URL=http://localhost:8545 # bootstrap node
npx hardhat --network codexdisttestnetwork deploy && cd ../
```
If the command completes successfully, you are ready to prepare the accounts.
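Optionally, check that the deployment artifacts were written. The exact file list may vary, but `Marketplace.json` and `TestToken.json`, both used below, should be present:
```bash
ls codex-contracts-eth/deployments/codexdisttestnetwork/
```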
### 2.2. Generate the Required Accounts
We will run $2$ Codex nodes: a **storage provider**, which will sell storage on the network, and a **client**, which will buy and use that storage; we therefore need two valid Ethereum accounts. We could create random accounts using one of the many tools available for that purpose but, since this is a tutorial running on a local private network, we will simply provide you with two pre-made accounts, along with their private keys, which you can copy and paste instead.
First make sure you're back in the `marketplace-tutorial` folder and not the `codex-contracts-eth` subfolder. Then set these variables:
**Storage:**
```sh
export ETH_STORAGE_ADDR=0x45BC5ca0fbdD9F920Edd12B90908448C30F32a37
export ETH_STORAGE_PK=0x06c7ac11d4ee1d0ccb53811b71802fa92d40a5a174afad9f2cb44f93498322c3
echo $ETH_STORAGE_PK > storage.pkey && chmod 0600 storage.pkey
```
**Client:**
```sh
export ETH_CLIENT_ADDR=0x9F0C62Fe60b22301751d6cDe1175526b9280b965
export ETH_CLIENT_PK=0x5538ec03c956cb9d0bee02a25b600b0225f1347da4071d0fd70c521fdc63c2fc
echo $ETH_CLIENT_PK > client.pkey && chmod 0600 client.pkey
```
### 2.3. Provision Accounts with Tokens
We now need to transfer some ETH to each of the accounts, as well as provide them with some Codex tokens for the storage node to use as collateral and for the client node to buy actual storage.
Although the process is not particularly complicated, we suggest you use [the script we prepared](https://github.com/gmega/local-codex-bare/blob/main/scripts/mint-tokens.js) for that. This script, essentially:
1. reads the Marketplace contract address and its ABI from the deployment data;
2. transfers $1$ ETH from the signer account to a target account if the target account has no ETH balance;
3. mints $n$ Codex tokens and adds them to the target account's balance.
To use the script, just download it into a local file named `mint-tokens.js`, for instance using curl:
```bash
# set the contract file location
export CONTRACT_DEPLOY_FULL="codex-contracts-eth/deployments/codexdisttestnetwork"
export GETH_SIGNER_ADDR=$(cat geth_signer_address.txt)
# download script
curl https://raw.githubusercontent.com/gmega/codex-local-bare/main/scripts/mint-tokens.js -o mint-tokens.js
```
```bash
# Installs Web3-js
npm install web3
# Provides tokens to the storage account.
node ./mint-tokens.js $CONTRACT_DEPLOY_FULL/TestToken.json $GETH_SIGNER_ADDR 0x45BC5ca0fbdD9F920Edd12B90908448C30F32a37 10000000000
# Provides tokens to the client account.
node ./mint-tokens.js $CONTRACT_DEPLOY_FULL/TestToken.json $GETH_SIGNER_ADDR 0x9F0C62Fe60b22301751d6cDe1175526b9280b965 10000000000
```
If you get a message like `Usage: mint-tokens.js <token-hardhat-deploy-json> <signer-account> <receiver-account> <token-ammount>`, then you need to ensure you have set the `CONTRACT_DEPLOY_FULL` and `GETH_SIGNER_ADDR` environment variables from the previous step.
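As an optional check, you can confirm that the script transferred $1$ ETH ($10^{18}$ wei) to each account:
```bash
geth attach --exec "eth.getBalance('${ETH_STORAGE_ADDR}')" geth-data/geth.ipc
geth attach --exec "eth.getBalance('${ETH_CLIENT_ADDR}')" geth-data/geth.ipc
```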
## 3. Run Codex
With accounts and geth in place, we can now start the Codex nodes.
### 3.1. Storage Node
The storage node will be the one storing data and submitting the proofs of storage to the chain. To do that, it needs access to:
1. the address of the Marketplace contract that has been deployed to the local geth node in [Step 2.1](#21-deploy-the-codex-marketplace-contracts);
2. the sample ceremony files which are shipped in the Codex contracts repo.
Recall that you cloned the `codex-contracts-eth` repository in Step 2.1; all of the required files are in there.
**Address of the Marketplace Contract.** The contract address can be found inside of the file `codex-contracts-eth/deployments/codexdisttestnetwork/Marketplace.json`:
```bash
grep '"address":' ${CONTRACT_DEPLOY_FULL}/Marketplace.json
```
which should print something like:
```sh
"address": "0x8891732D890f5A7B7181fBc70F7482DE28a7B60f",
```
Then run the following, substituting the correct Marketplace address for the zero address:
```sh
export MARKETPLACE_ADDRESS="0x0000000000000000000000000000000000000000"
echo ${MARKETPLACE_ADDRESS} > marketplace_address.txt
```
**Prover ceremony files.** The ceremony files are under the `codex-contracts-eth/verifier/networks/codexdisttestnetwork` subdirectory. There are three of them: `proof_main.r1cs`, `proof_main.zkey`, and `proof_main.wasm`. We will need all of them to start the Codex storage node.
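You can confirm that all three files are present with:
```bash
ls codex-contracts-eth/verifier/networks/codexdisttestnetwork/
```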
**Starting the storage node.** Let:
* `PROVER_ASSETS` contain the directory where the prover ceremony files are located. **This must be an absolute path**;
* `CODEX_BINARY` contain the location of your Codex binary;
* `MARKETPLACE_ADDRESS` contain the address of the Marketplace contract (obtained above).
Set these paths as environment variables (adjust them if your paths differ from the ones above):
```sh
export CONTRACT_DEPLOY_FULL=$(realpath "codex-contracts-eth/deployments/codexdisttestnetwork")
export PROVER_ASSETS=$(realpath "codex-contracts-eth/verifier/networks/codexdisttestnetwork/")
export CODEX_BINARY=$(realpath "../build/codex")
export MARKETPLACE_ADDRESS=$(cat marketplace_address.txt)
```
To launch the storage node, run:
```bash
${CODEX_BINARY}\
--data-dir=./codex-storage\
--listen-addrs=/ip4/0.0.0.0/tcp/8080\
--api-port=8000\
--disc-port=8090\
persistence\
--eth-provider=http://localhost:8545\
--eth-private-key=./storage.pkey\
--marketplace-address=${MARKETPLACE_ADDRESS}\
--validator\
--validator-max-slots=1000\
prover\
--circom-r1cs=${PROVER_ASSETS}/proof_main.r1cs\
--circom-wasm=${PROVER_ASSETS}/proof_main.wasm\
--circom-zkey=${PROVER_ASSETS}/proof_main.zkey
```
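Once the node is up, its REST API should respond on port $8000$. For example, the debug endpoint (the same one used in the two-client guide) returns general node information:
```bash
curl 'http://localhost:8000/api/codex/v1/debug/info'
```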
**Starting the client node.**
The client node is started similarly except that:
* we need to pass the SPR of the storage node so it can form a network with it;
* since it does not run any proofs, it does not require any ceremony files.
We get the Signed Peer Record (SPR) of the storage node so we can bootstrap the client node with it. To get the SPR, issue the following call:
```bash
curl -H 'Accept: text/plain' 'http://localhost:8000/api/codex/v1/spr'
```
You should get back the SPR, a string starting with `spr:`. Next, set these environment variables:
```bash
# set the SPR for the storage node
export STORAGE_NODE_SPR=$(curl -H 'Accept: text/plain' 'http://localhost:8000/api/codex/v1/spr')
# basic vars
export CONTRACT_DEPLOY_FULL=$(realpath "codex-contracts-eth/deployments/codexdisttestnetwork")
export PROVER_ASSETS=$(realpath "codex-contracts-eth/verifier/networks/codexdisttestnetwork/")
export CODEX_BINARY=$(realpath "../build/codex")
export MARKETPLACE_ADDRESS=$(cat marketplace_address.txt)
```
```bash
${CODEX_BINARY}\
--data-dir=./codex-client\
--listen-addrs=/ip4/0.0.0.0/tcp/8081\
--api-port=8001\
--disc-port=8091\
--bootstrap-node=${STORAGE_NODE_SPR}\
persistence\
--eth-provider=http://localhost:8545\
--eth-private-key=./client.pkey\
--marketplace-address=${MARKETPLACE_ADDRESS}
```
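As with the storage node, you can check that the client node's API is responding, this time on port $8001$:
```bash
curl 'http://localhost:8001/api/codex/v1/debug/info'
```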
## 4. Buy and Sell Storage on the Marketplace
Any storage negotiation has two sides: a buyer and a seller. Before we can actually request storage, therefore, we must first put some of it for sale.
### 4.1. Sell Storage
The following request will cause the storage node to put out $50\text{MB}$ of storage for sale for $1$ hour, at a price of $1$ Codex token per byte per second, while expressing that it's willing to take at most a $1000$ Codex token penalty for not fulfilling its part of the contract.[^1]
```bash
curl 'http://localhost:8000/api/codex/v1/sales/availability' \
--header 'Content-Type: application/json' \
--data '{
"totalSize": "50000000",
"duration": "3600",
"minPrice": "1",
"maxCollateral": "1000"
}'
```
This should return a response containing an id string (e.g. `"id": "0x552ef12a2ee64ca22b237335c7e1df884df36d22bfd6506b356936bc718565d4"`) which identifies this storage offer. To check the current storage offers for this node, you can issue:
```bash
curl 'http://localhost:8000/api/codex/v1/sales/availability'
```
This should print a list of offers, with the one you just created figuring among them.
### 4.2. Buy Storage
Before we can buy storage, we must have some actual data to request storage for. Start by uploading a small file to your client node. On Linux you could, for instance, use `dd` to generate a $100KB$ file:
```bash
dd if=/dev/urandom of=./data.bin bs=100K count=1
```
but any small file will do. Assuming your file is named `data.bin`, you can upload it with:
```bash
curl "http://localhost:8001/api/codex/v1/data" --data-bin @data.bin
```
Once the upload completes, you should see a CID (e.g. `zDvZRwzm2mK7tvDzKScRLapqGdgNTLyyEBvx1TQY37J2CdWdS6Sj`) for the file printed to the terminal. Use that CID in the purchase request:
```bash
export CID=zDvZRwzm2mK7tvDzKScRLapqGdgNTLyyEBvx1TQY37J2CdWdS6Sj
export EXPIRY_TIME=$((1000 + $(date +%s))) # current time + 1000 seconds
# adjust expiry_time as desired, see below
```
```bash
curl "http://localhost:8001/api/codex/v1/storage/request/${CID}" \
--header 'Content-Type: application/json' \
--data "{
\"duration\": \"1200\",
\"reward\": \"1\",
\"proofProbability\": \"3\",
\"expiry\": \"${EXPIRY_TIME}\",
\"nodes\": 1,
\"tolerance\": 0,
\"collateral\": \"1000\"
}"
```
The parameters under `--data` say that:
1. we want to purchase storage for our file for $20$ minutes (`"duration": "1200"`);
2. we are willing to pay up to $1$ token per byte, per second (`"reward": "1"`);
3. our file will be stored by a single node (`"nodes": 1` with `"tolerance": 0`), meaning there is no erasure-coded redundancy in this minimal example, and we cannot tolerate the loss of the node storing our data;
4. we demand `1000` tokens in collateral from the storage provider for the piece it stores. Since there is only $1$ such piece here, `1000` is also the total collateral committed once our request is fulfilled.
Finally, the `expiry` puts a cap on the block time at which our request expires. This has to be at most `current block time + duration`, which means the request can fail if you input the wrong number, and you likely will if you do not know what the current block time is. Fear not, however: you can try an arbitrary number (e.g. `1000`) and look at the failure message:
`Expiry needs to be in future. Now: 1711995463`
to compute a valid one. Just take the number in the error message and add the duration; i.e., `1711995463 + 1200 = 1711996663`, then use the resulting number (`1711996663`) as expiry and things should work. The request should return a purchase ID (e.g. `1d0ec5261e3364f8b9d1cf70324d70af21a9b5dccba380b24eb68b4762249185`), which you can use to track the completion of your request in the marketplace.
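If you would rather compute a valid expiry up front instead of reading it off the error message, one option (a sketch, assuming the geth JavaScript console) is to take the latest block timestamp and add a margin no larger than the duration:
```bash
export BLOCK_TIME=$(geth attach --exec 'eth.getBlock("latest").timestamp' geth-data/geth.ipc)
export EXPIRY_TIME=$((BLOCK_TIME + 1000)) # must remain at most current block time + duration
```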
### 4.3. Track Your Storage Requests
POSTing a storage request will make it available in the storage market, and a storage node will eventually pick it up.
You can poll the status of your request by means of:
```bash
export STORAGE_PURCHASE_ID="1d0ec5261e3364f8b9d1cf70324d70af21a9b5dccba380b24eb68b4762249185"
curl "http://localhost:8001/api/codex/v1/storage/purchases/${STORAGE_PURCHASE_ID}"
```
For instance:
```bash
> curl 'http://localhost:8001/api/codex/v1/storage/purchases/6c698cd0ad71c41982f83097d6fa75beb582924e08a658357a1cd4d7a2a6766d'
```
This returns a result like:
```json
{
"requestId": "0x6c698cd0ad71c41982f83097d6fa75beb582924e08a658357a1cd4d7a2a6766d",
"request": {
"client": "0xed6c3c20358f0217919a30c98d72e29ceffedc33",
"ask": {
"slots": 3,
"slotSize": "262144",
"duration": "1000",
"proofProbability": "3",
"reward": "1",
"collateral": "1",
"maxSlotLoss": 1
},
"content": {
"cid": "zDvZRwzm3nnkekFLCACmWyKdkYixsX3j9gJhkvFtfYA5K9bpXQnC"
},
"expiry": "1711992852",
"nonce": "0x9f5e651ecd3bf73c914f8ed0b1088869c64095c0d7bd50a38fc92ebf66ff5915",
"id": "0x6c698cd0ad71c41982f83097d6fa75beb582924e08a658357a1cd4d7a2a6766d"
},
"state": "submitted",
"error": null
}
```
This shows that the request has been submitted but not yet filled. Your request is successful once `"state"` shows `"started"`. Anything else means the request has not been completely processed yet, and an `"error"` value other than `null` means it failed.
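If you prefer not to re-run the command by hand, a minimal polling sketch does the trick:
```bash
# Poll every 10 seconds until the purchase state reports "started".
while ! curl -s "http://localhost:8001/api/codex/v1/storage/purchases/${STORAGE_PURCHASE_ID}" | grep -q '"started"'; do
  echo "still waiting..."
  sleep 10
done
echo "request started"
```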
[^1]: Codex files get partitioned into pieces called "slots" and distributed to various storage providers. The collateral refers to one such slot, and will be slowly eaten away as the storage provider fails to deliver timely proofs, but the actual logic is [more involved than that](https://github.com/codex-storage/codex-contracts-eth/blob/6c9f797f408608958714024b9055fcc330e3842f/contracts/Marketplace.sol#L209).


@ -96,18 +96,15 @@ This GET request will return the node's debug information. The response will be
### 3. Launch Node #2 ### 3. Launch Node #2
Retreive the SPR by running: We will need the signed peer record (SPR) from the first node that you got in the previous step.
```bash
curl -H "Accept: text/plain" http://127.0.0.1:8080/api/codex/v1/spr
```
Next replace `<SPR HERE>` in the following command with the SPR returned from the previous command. (Note that it should include the `spr:` at the beginning.) Replace `<SPR HERE>` in the following command with the SPR returned from the previous command. (Note that it should include the `spr:` at the beginning.)
Open a new terminal and run: Open a new terminal and run:
- Mac/Linux: `"build/codex" --data-dir="$(pwd)/Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=<SPR HERE>` - Mac/Linux: `"build/codex" --data-dir="$(pwd)/Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=<SPR HERE>`
- Windows: `"build/codex.exe" --data-dir="Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=<SPR HERE>` - Windows: `"build/codex.exe" --data-dir="Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=<SPR HERE>`
Alternatively on Mac, Linux, or MSYS2 you can run it in one command like: Alternatively on Mac, Linux, or MSYS2 and a recent Codex binary you can run it in one command like:
```sh ```sh
"build/codex" --data-dir="$(pwd)/Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=$(curl -H "Accept: text/plain" http://127.0.0.1:8080/api/codex/v1/spr) "build/codex" --data-dir="$(pwd)/Data2" --listen-addrs=/ip4/127.0.0.1/tcp/8071 --api-port=8081 --disc-port=8091 --bootstrap-node=$(curl -H "Accept: text/plain" http://127.0.0.1:8080/api/codex/v1/spr)
@ -121,10 +118,10 @@ We're now also including the `bootstrap-node` argument. This allows us to link t
Normally the two nodes will automatically connect. If they do not automatically connect or you want to manually connect nodes you can use the peerId to connect nodes. Normally the two nodes will automatically connect. If they do not automatically connect or you want to manually connect nodes you can use the peerId to connect nodes.
You can get the first node's peer id by running: You can get the first node's peer id by running the following command and finding the `"peerId"` in the results:
```bash ```bash
curl -X GET -H "Accept: text/plain" http://127.0.0.1:8081/api/codex/v1/peerid curl -X GET -H "Accept: text/plain" http://127.0.0.1:8081/api/codex/v1/debug/info
``` ```
Next replace `<PEER ID HERE>` in the following command with the peerId returned from the previous command: Next replace `<PEER ID HERE>` in the following command with the peerId returned from the previous command:
@ -133,7 +130,7 @@ Next replace `<PEER ID HERE>` in the following command with the peerId returned
curl -X GET http://127.0.0.1:8080/api/codex/v1/connect/<PEER ID HERE>?addrs=/ip4/127.0.0.1/tcp/8071 curl -X GET http://127.0.0.1:8080/api/codex/v1/connect/<PEER ID HERE>?addrs=/ip4/127.0.0.1/tcp/8071
``` ```
Alternatively on Mac, Linux, or MSYS2 you can run it in one command like: Alternatively on Mac, Linux, or MSYS2 and a recent Codex binary you can run it in one command like:
```bash ```bash
curl -X GET http://127.0.0.1:8080/api/codex/v1/connect/$(curl -X GET -H "Accept: text/plain" http://127.0.0.1:8081/api/codex/v1/peerid)\?addrs=/ip4/127.0.0.1/tcp/8071 curl -X GET http://127.0.0.1:8080/api/codex/v1/connect/$(curl -X GET -H "Accept: text/plain" http://127.0.0.1:8081/api/codex/v1/peerid)\?addrs=/ip4/127.0.0.1/tcp/8071
@ -168,3 +165,12 @@ Notice we are connecting to the second node in order to download the file. The C
### 7. Verify The Results ### 7. Verify The Results
If your file is downloaded and identical to the file you uploaded, then this manual test has passed. Rejoice! If on the other hand that didn't happen or you were unable to complete any of these steps, please leave us a message detailing your troubles. If your file is downloaded and identical to the file you uploaded, then this manual test has passed. Rejoice! If on the other hand that didn't happen or you were unable to complete any of these steps, please leave us a message detailing your troubles.
## Notes
When using the Ganache blockchain, there are some deviations from the expected behavior, mainly linked to how blocks are mined, which affects certain functionalities in the Sales module.
Therefore, if you are manually testing processes such as payout collection after a request is finished or proof submissions, you need to mine some blocks manually for it to work correctly. You can do this by using the following curl command:
```bash
$ curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"evm_mine","params":[],"id":67}' 127.0.0.1:8545
```