Blockexchange uses merkle root and index to fetch blocks (#566)

* Blockexchange uses merkle root and index to fetch blocks

* Links the network store getTree to the local store.

* Update codex/stores/repostore.nim

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
Signed-off-by: Tomasz Bekas <tomasz.bekas@gmail.com>

* Rework erasure.nim to include recent cleanup

* Revert accidental changes to lib versions

* Addressing review comments

* Storing proofs instead of trees

* Fix a comment

* Fix broken tests

* Fix for broken testerasure.nim

* Addressing PR comments

---------

Signed-off-by: Tomasz Bekas <tomasz.bekas@gmail.com>
Co-authored-by: benbierens <thatbenbierens@gmail.com>
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
Tomasz Bekas 2023-11-14 13:02:17 +01:00 committed by GitHub
parent 778093d12e
commit 2396c4d76d
51 changed files with 2502 additions and 1416 deletions
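The heart of the change: blocks are no longer requested by their own CID alone but by a BlockAddress, which names either a standalone block or a leaf of a dataset's merkle tree (root CID plus index). A minimal, hypothetical usage sketch against the APIs introduced in the diffs below (assumes an async context, a running BlockExcEngine named `engine`, and a dataset root `treeCid`; these names are not part of this diff):

# address the third block of a dataset by merkle root + leaf index
let address = BlockAddress.init(treeCid, 2)
# the engine resolves the leaf and validates its inclusion proof on delivery
let blk = await engine.requestBlock(address)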

View File

@@ -65,7 +65,7 @@ type
proc discoveryQueueLoop(b: DiscoveryEngine) {.async.} =
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantList):
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
try:
await b.discoveryQueue.put(cid)
except CatchableError as exc:

View File

@@ -11,16 +11,18 @@ import std/sequtils
import std/sets
import std/options
import std/algorithm
import std/sugar
import pkg/chronos
import pkg/chronicles
import pkg/libp2p/[cid, switch]
import pkg/libp2p/[cid, switch, multihash, multicodec]
import pkg/metrics
import pkg/stint
import ../../stores/blockstore
import ../../blocktype as bt
import ../../blocktype
import ../../utils
import ../../merkletree
import ../protobuf/blockexc
import ../protobuf/presence
@@ -77,12 +79,6 @@ type
address*: EthAddress
price*: UInt256
proc contains*(a: AsyncHeapQueue[Entry], b: Cid): bool =
## Convenience method to check for entry presence
##
a.anyIt( it.cid == b )
# attach task scheduler to engine
proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe} =
b.taskQueue.pushOrUpdateNoWait(task).isOk()
@@ -124,22 +120,30 @@ proc stop*(b: BlockExcEngine) {.async.} =
trace "NetworkStore stopped"
proc sendWantHave(b: BlockExcEngine, cid: Cid, selectedPeer: BlockExcPeerCtx, peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
trace "Sending wantHave request to peers", cid
proc sendWantHave(
b: BlockExcEngine,
address: BlockAddress,
selectedPeer: BlockExcPeerCtx,
peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
trace "Sending wantHave request to peers", address
for p in peers:
if p != selectedPeer:
if cid notin p.peerHave:
if address notin p.peerHave:
trace " wantHave > ", peer = p.id
await b.network.request.sendWantList(
p.id,
@[cid],
@[address],
wantType = WantType.WantHave) # we only want to know if the peer has the block
proc sendWantBlock(b: BlockExcEngine, cid: Cid, blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
trace "Sending wantBlock request to", peer = blockPeer.id, cid
proc sendWantBlock(
b: BlockExcEngine,
address: BlockAddress,
blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
trace "Sending wantBlock request to", peer = blockPeer.id, address
await b.network.request.sendWantList(
blockPeer.id,
@[cid],
@[address],
wantType = WantType.WantBlock) # we want this remote to send us a block
proc findCheapestPeerForBlock(b: BlockExcEngine, cheapestPeers: seq[BlockExcPeerCtx]): ?BlockExcPeerCtx =
@@ -152,36 +156,13 @@ proc findCheapestPeerForBlock(b: BlockExcEngine, cheapestPeers: seq[BlockExcPeer
return some(peers[0])
return some(cheapestPeers[0]) # get cheapest
proc requestBlock*(
b: BlockExcEngine,
cid: Cid,
timeout = DefaultBlockTimeout): Future[bt.Block] {.async.} =
trace "Begin block request", cid, peers = b.peers.len
if b.pendingBlocks.isInFlight(cid):
trace "Request handle already pending", cid
return await b.pendingBlocks.getWantHandle(cid, timeout)
let
blk = b.pendingBlocks.getWantHandle(cid, timeout)
trace "Selecting peers who have", cid
var
peers = b.peers.selectCheapest(cid)
without blockPeer =? b.findCheapestPeerForBlock(peers):
trace "No peers to request blocks from. Queue discovery...", cid
b.discovery.queueFindBlocksReq(@[cid])
return await blk
proc blockHandleMonitor() {.async.} =
proc monitorBlockHandle(b: BlockExcEngine, handle: Future[Block], address: BlockAddress, peerId: PeerId) {.async.} =
try:
trace "Monitoring block handle", cid
b.pendingBlocks.setInFlight(cid, true)
discard await blk
trace "Block handle success", cid
trace "Monitoring block handle", address, peerId
discard await handle
trace "Block handle success", address, peerId
except CatchableError as exc:
trace "Error block handle, disconnecting peer", cid, exc = exc.msg
trace "Error block handle, disconnecting peer", address, exc = exc.msg, peerId
# TODO: really, this is just a quick and dirty way of
# preventing hitting the same "bad" peer every time, however,
@@ -191,25 +172,47 @@ proc requestBlock*(
# prioritization
# drop unresponsive peer
await b.network.switch.disconnect(blockPeer.id)
b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
await b.network.switch.disconnect(peerId)
# monitor block handle
asyncSpawn blockHandleMonitor()
proc requestBlock*(
b: BlockExcEngine,
address: BlockAddress,
timeout = DefaultBlockTimeout
): Future[Block] {.async.} =
let blockFuture = b.pendingBlocks.getWantHandle(address, timeout)
await b.sendWantBlock(cid, blockPeer)
if b.pendingBlocks.isInFlight(address):
return await blockFuture
let peers = b.peers.selectCheapest(address)
if peers.len == 0:
b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
let maybePeer =
if peers.len > 0:
peers[hash(address) mod peers.len].some
elif b.peers.len > 0:
toSeq(b.peers)[hash(address) mod b.peers.len].some
else:
BlockExcPeerCtx.none
if peer =? maybePeer:
asyncSpawn b.monitorBlockHandle(blockFuture, address, peer.id)
b.pendingBlocks.setInFlight(address)
await b.sendWantBlock(address, peer)
codex_block_exchange_want_block_lists_sent.inc()
if (peers.len - 1) == 0:
trace "No peers to send want list to", cid
b.discovery.queueFindBlocksReq(@[cid])
return await blk
await b.sendWantHave(cid, blockPeer, toSeq(b.peers))
await b.sendWantHave(address, peer, toSeq(b.peers))
codex_block_exchange_want_have_lists_sent.inc()
return await blk
return await blockFuture
proc requestBlock*(
b: BlockExcEngine,
cid: Cid,
timeout = DefaultBlockTimeout
): Future[Block] =
b.requestBlock(BlockAddress.init(cid))
proc blockPresenceHandler*(
b: BlockExcEngine,
@@ -226,7 +229,7 @@ proc blockPresenceHandler*(
for blk in blocks:
if presence =? Presence.init(blk):
logScope:
cid = presence.cid
address = $presence.address
have = presence.have
price = presence.price
@@ -255,22 +258,22 @@ proc blockPresenceHandler*(
# if none of the connected peers report our wants in their have list,
# fire up discovery
b.discovery.queueFindBlocksReq(
toSeq(b.pendingBlocks.wantList)
toSeq(b.pendingBlocks.wantListCids)
.filter do(cid: Cid) -> bool:
not b.peers.anyIt( cid in it.peerHave ))
not b.peers.anyIt( cid in it.peerHaveCids ))
proc scheduleTasks(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} =
trace "Schedule a task for new blocks", items = blocks.len
proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
trace "Schedule a task for new blocks", items = blocksDelivery.len
let
cids = blocks.mapIt( it.cid )
cids = blocksDelivery.mapIt( it.blk.cid )
# schedule any new peers to provide blocks to
for p in b.peers:
for c in cids: # for each cid
# schedule a peer if it wants at least one cid
# and we have it in our local store
if c in p.peerWants:
if c in p.peerWantsCids:
if await (c in b.localStore):
if b.scheduleTask(p):
trace "Task scheduled for peer", peer = p.id
@@ -279,50 +282,110 @@ proc scheduleTasks(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} =
break # do next peer
proc resolveBlocks*(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} =
trace "Resolving blocks", blocks = blocks.len
proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
trace "Resolving blocks", blocks = blocksDelivery.len
b.pendingBlocks.resolve(blocks)
await b.scheduleTasks(blocks)
b.discovery.queueProvideBlocksReq(blocks.mapIt( it.cid ))
b.pendingBlocks.resolve(blocksDelivery)
await b.scheduleTasks(blocksDelivery)
var cids = initHashSet[Cid]()
for bd in blocksDelivery:
cids.incl(bd.blk.cid)
if bd.address.leaf:
cids.incl(bd.address.treeCid)
b.discovery.queueProvideBlocksReq(cids.toSeq)
proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} =
await b.resolveBlocks(blocks.mapIt(BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid))))
proc payForBlocks(engine: BlockExcEngine,
peer: BlockExcPeerCtx,
blocks: seq[bt.Block]) {.async.} =
trace "Paying for blocks", blocks = blocks.len
blocksDelivery: seq[BlockDelivery]) {.async.} =
trace "Paying for blocks", len = blocksDelivery.len
let
sendPayment = engine.network.request.sendPayment
price = peer.price(blocks.mapIt(it.cid))
price = peer.price(blocksDelivery.mapIt(it.address))
if payment =? engine.wallet.pay(peer, price):
trace "Sending payment for blocks", price
await sendPayment(peer.id, payment)
proc blocksHandler*(
proc validateBlockDelivery(
b: BlockExcEngine,
bd: BlockDelivery
): ?!void =
if bd.address notin b.pendingBlocks:
return failure("Received block is not currently a pending block")
if bd.address.leaf:
without proof =? bd.proof:
return failure("Missing proof")
if proof.index != bd.address.index:
return failure("Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index)
without leaf =? bd.blk.cid.mhash.mapFailure, err:
return failure("Unable to get mhash from cid for block, nested err: " & err.msg)
without treeRoot =? bd.address.treeCid.mhash.mapFailure, err:
return failure("Unable to get mhash from treeCid for block, nested err: " & err.msg)
without verifyOutcome =? proof.verifyLeaf(leaf, treeRoot), err:
return failure("Unable to verify proof for block, nested err: " & err.msg)
if not verifyOutcome:
return failure("Provided inclusion proof is invalid")
else: # not leaf
if bd.address.cid != bd.blk.cid:
return failure("Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid)
return success()
proc blocksDeliveryHandler*(
b: BlockExcEngine,
peer: PeerId,
blocks: seq[bt.Block]) {.async.} =
trace "Got blocks from peer", peer, len = blocks.len
for blk in blocks:
if isErr (await b.localStore.putBlock(blk)):
trace "Unable to store block", cid = blk.cid
blocksDelivery: seq[BlockDelivery]) {.async.} =
trace "Got blocks from peer", peer, len = blocksDelivery.len
await b.resolveBlocks(blocks)
codex_block_exchange_blocks_received.inc(blocks.len.int64)
var validatedBlocksDelivery: seq[BlockDelivery]
for bd in blocksDelivery:
logScope:
peer = peer
address = bd.address
if err =? b.validateBlockDelivery(bd).errorOption:
warn "Block validation failed", msg = err.msg
continue
if err =? (await b.localStore.putBlock(bd.blk)).errorOption:
error "Unable to store block", err = err.msg
continue
if bd.address.leaf:
without proof =? bd.proof:
error "Proof expected for a leaf block delivery"
continue
if err =? (await b.localStore.putBlockCidAndProof(bd.address.treeCid, bd.address.index, bd.blk.cid, proof)).errorOption:
error "Unable to store proof and cid for a block"
continue
validatedBlocksDelivery.add(bd)
await b.resolveBlocks(validatedBlocksDelivery)
codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64)
let
peerCtx = b.peers.get(peer)
if peerCtx != nil:
await b.payForBlocks(peerCtx, blocks)
await b.payForBlocks(peerCtx, blocksDelivery)
## shouldn't we remove them from the want-list instead of this:
peerCtx.cleanPresence(blocks.mapIt( it.cid ))
peerCtx.cleanPresence(blocksDelivery.mapIt( it.address ))
proc wantListHandler*(
b: BlockExcEngine,
peer: PeerId,
wantList: Wantlist) {.async.} =
wantList: WantList) {.async.} =
trace "Got wantList for peer", peer, items = wantList.entries.len
let
peerCtx = b.peers.get(peer)
@@ -338,14 +401,14 @@ proc wantListHandler*(
logScope:
peer = peerCtx.id
cid = e.cid
address = e.address
wantType = $e.wantType
if idx < 0: # updating entry
trace "Processing new want list entry", cid = e.cid
trace "Processing new want list entry"
let
have = await e.cid in b.localStore
have = await e.address in b.localStore
price = @(
b.pricing.get(Pricing(price: 0.u256))
.price.toBytesBE)
@@ -354,21 +417,21 @@ proc wantListHandler*(
codex_block_exchange_want_have_lists_received.inc()
if not have and e.sendDontHave:
trace "Adding dont have entry to presence response", cid = e.cid
trace "Adding dont have entry to presence response"
presence.add(
BlockPresence(
cid: e.cid.data.buffer,
address: e.address,
`type`: BlockPresenceType.DontHave,
price: price))
elif have and e.wantType == WantType.WantHave:
trace "Adding have entry to presence response", cid = e.cid
trace "Adding have entry to presence response"
presence.add(
BlockPresence(
cid: e.cid.data.buffer,
address: e.address,
`type`: BlockPresenceType.Have,
price: price))
elif e.wantType == WantType.WantBlock:
trace "Added entry to peer's want blocks list", cid = e.cid
trace "Added entry to peer's want blocks list"
peerCtx.peerWants.add(e)
codex_block_exchange_want_block_lists_received.inc()
else:
@@ -424,6 +487,8 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
## list exchange
##
trace "Setting up peer", peer
if peer notin b.peers:
trace "Setting up new peer", peer
b.peers.add(BlockExcPeerCtx(
@@ -432,9 +497,11 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
trace "Added peer", peers = b.peers.len
# broadcast our want list, the other peer will do the same
if b.pendingBlocks.len > 0:
if b.pendingBlocks.wantListLen > 0:
trace "Sending our want list to a peer", peer
let cids = toSeq(b.pendingBlocks.wantList)
await b.network.request.sendWantList(
peer, toSeq(b.pendingBlocks.wantList), full = true)
peer, cids, full = true)
if address =? b.pricing.?address:
await b.network.request.sendAccount(peer, Account(address: address))
@@ -468,30 +535,41 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
wantsBlocks.sort(SortOrder.Descending)
proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
trace "Handling lookup for entry", address = e.address
if e.address.leaf:
(await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(blkAndProof: (Block, MerkleProof)) =>
BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some)
)
else:
(await b.localStore.getBlock(e.address)).map(
(blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: MerkleProof.none)
)
let
blockFuts = await allFinished(wantsBlocks.mapIt(
b.localStore.getBlock(it.cid)
))
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
# Extract successfully received blocks
let
blocks = blockFuts
blocksDelivery = blocksDeliveryFut
.filterIt(it.completed and it.read.isOk)
.mapIt(it.read.get)
if blocks.len > 0:
trace "Sending blocks to peer", peer = task.id, blocks = blocks.len
await b.network.request.sendBlocks(
if blocksDelivery.len > 0:
trace "Sending blocks to peer", peer = task.id, blocks = blocksDelivery.len
await b.network.request.sendBlocksDelivery(
task.id,
blocks)
blocksDelivery
)
codex_block_exchange_blocks_sent.inc(blocks.len.int64)
codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64)
trace "About to remove entries from peerWants", blocks = blocks.len, items = task.peerWants.len
trace "About to remove entries from peerWants", blocks = blocksDelivery.len, items = task.peerWants.len
# Remove successfully sent blocks
task.peerWants.keepIf(
proc(e: Entry): bool =
not blocks.anyIt( it.cid == e.cid )
proc(e: WantListEntry): bool =
not blocksDelivery.anyIt( it.address == e.address )
)
trace "Removed entries from peerWants", items = task.peerWants.len
@@ -547,7 +625,7 @@ proc new*(
proc blockWantListHandler(
peer: PeerId,
wantList: Wantlist): Future[void] {.gcsafe.} =
wantList: WantList): Future[void] {.gcsafe.} =
engine.wantListHandler(peer, wantList)
proc blockPresenceHandler(
@@ -555,10 +633,10 @@ proc new*(
presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
engine.blockPresenceHandler(peer, presence)
proc blocksHandler(
proc blocksDeliveryHandler(
peer: PeerId,
blocks: seq[bt.Block]): Future[void] {.gcsafe.} =
engine.blocksHandler(peer, blocks)
blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
engine.blocksDeliveryHandler(peer, blocksDelivery)
proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} =
engine.accountHandler(peer, account)
@@ -568,7 +646,7 @@ proc new*(
network.handlers = BlockExcHandlers(
onWantList: blockWantListHandler,
onBlocks: blocksHandler,
onBlocksDelivery: blocksDeliveryHandler,
onPresence: blockPresenceHandler,
onAccount: accountHandler,
onPayment: paymentHandler)
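For context, a rough sketch of the store round-trip the handlers above rely on, assuming a BlockStore named `store`, a leaf block `blk` at `index` under `treeCid`, and its inclusion proof `proof` (hypothetical values, not part of this diff):

# receiving side: persist the block, then map (treeCid, index) -> (cid, proof)
discard await store.putBlock(blk)
discard await store.putBlockCidAndProof(treeCid, index, blk.cid, proof)
# serving side: a leaf want is answered with both the block and its proof
let (leafBlk, leafProof) = (await store.getBlockAndProof(treeCid, index)).tryGet()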

View File

@@ -18,8 +18,11 @@ import pkg/chronicles
import pkg/chronos
import pkg/libp2p
import pkg/metrics
import pkg/questionable/results
import ../protobuf/blockexc
import ../../blocktype
import ../../merkletree
logScope:
topics = "codex pendingblocks"
@@ -37,14 +40,14 @@ type
startTime*: int64
PendingBlocksManager* = ref object of RootObj
blocks*: Table[Cid, BlockReq] # pending Block requests
blocks*: Table[BlockAddress, BlockReq] # pending Block requests
proc updatePendingBlockGauge(p: PendingBlocksManager) =
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*(
p: PendingBlocksManager,
cid: Cid,
address: BlockAddress,
timeout = DefaultBlockTimeout,
inFlight = false
): Future[Block] {.async.} =
@@ -52,73 +55,106 @@ proc getWantHandle*(
##
try:
if cid notin p.blocks:
p.blocks[cid] = BlockReq(
if address notin p.blocks:
p.blocks[address] = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight,
startTime: getMonoTime().ticks)
trace "Adding pending future for block", cid, inFlight = p.blocks[cid].inFlight
trace "Adding pending future for block", address, inFlight = p.blocks[address].inFlight
p.updatePendingBlockGauge()
return await p.blocks[cid].handle.wait(timeout)
return await p.blocks[address].handle.wait(timeout)
except CancelledError as exc:
trace "Blocks cancelled", exc = exc.msg, cid
trace "Blocks cancelled", exc = exc.msg, address
raise exc
except CatchableError as exc:
trace "Pending WANT failed or expired", exc = exc.msg
# no need to cancel, it is already cancelled by wait()
raise exc
finally:
p.blocks.del(cid)
p.blocks.del(address)
p.updatePendingBlockGauge()
proc resolve*(p: PendingBlocksManager,
blocks: seq[Block]) =
proc getWantHandle*(
p: PendingBlocksManager,
cid: Cid,
timeout = DefaultBlockTimeout,
inFlight = false
): Future[Block] =
p.getWantHandle(BlockAddress.init(cid), timeout, inFlight)
proc resolve*(
p: PendingBlocksManager,
blocksDelivery: seq[BlockDelivery]
) {.gcsafe, raises: [].} =
## Resolve pending blocks
##
for blk in blocks:
# resolve any pending blocks
p.blocks.withValue(blk.cid, pending):
if not pending[].handle.completed:
trace "Resolving block", cid = blk.cid
pending[].handle.complete(blk)
for bd in blocksDelivery:
p.blocks.withValue(bd.address, blockReq):
trace "Resolving block", address = bd.address
if not blockReq.handle.finished:
let
startTime = pending[].startTime
startTime = blockReq.startTime
stopTime = getMonoTime().ticks
retrievalDurationUs = (stopTime - startTime) div 1000
blockReq.handle.complete(bd.blk)
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
trace "Block retrieval time", retrievalDurationUs
trace "Block retrieval time", retrievalDurationUs, address = bd.address
else:
trace "Block handle already finished", address = bd.address
do:
warn "Attempting to resolve block that's not currently a pending block", address = bd.address
proc setInFlight*(p: PendingBlocksManager,
cid: Cid,
address: BlockAddress,
inFlight = true) =
p.blocks.withValue(cid, pending):
p.blocks.withValue(address, pending):
pending[].inFlight = inFlight
trace "Setting inflight", cid, inFlight = pending[].inFlight
trace "Setting inflight", address, inFlight = pending[].inFlight
proc isInFlight*(p: PendingBlocksManager,
cid: Cid
address: BlockAddress,
): bool =
p.blocks.withValue(cid, pending):
p.blocks.withValue(address, pending):
result = pending[].inFlight
trace "Getting inflight", cid, inFlight = result
proc pending*(p: PendingBlocksManager, cid: Cid): bool =
cid in p.blocks
trace "Getting inflight", address, inFlight = result
proc contains*(p: PendingBlocksManager, cid: Cid): bool =
p.pending(cid)
BlockAddress.init(cid) in p.blocks
proc contains*(p: PendingBlocksManager, address: BlockAddress): bool =
address in p.blocks
iterator wantList*(p: PendingBlocksManager): BlockAddress =
for a in p.blocks.keys:
yield a
iterator wantListBlockCids*(p: PendingBlocksManager): Cid =
for a in p.blocks.keys:
if not a.leaf:
yield a.cid
iterator wantListCids*(p: PendingBlocksManager): Cid =
var yieldedCids = initHashSet[Cid]()
for a in p.blocks.keys:
let cid = a.cidOrTreeCid
if cid notin yieldedCids:
yieldedCids.incl(cid)
yield cid
iterator wantList*(p: PendingBlocksManager): Cid =
for k in p.blocks.keys:
yield k
iterator wantHandles*(p: PendingBlocksManager): Future[Block] =
for v in p.blocks.values:
yield v.handle
proc wantListLen*(p: PendingBlocksManager): int =
p.blocks.len
func len*(p: PendingBlocksManager): int =
p.blocks.len
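Pending requests are now keyed by BlockAddress, so two leaves of the same tree are distinct entries, while wantListCids deduplicates them back to a single CID for discovery. A sketch (hypothetical manager `pendingBlocks` and root `treeCid`; toSeq from std/sequtils):

discard pendingBlocks.getWantHandle(BlockAddress.init(treeCid, 0))
discard pendingBlocks.getWantHandle(BlockAddress.init(treeCid, 1))
assert toSeq(pendingBlocks.wantList).len == 2     # two distinct leaf addresses
assert toSeq(pendingBlocks.wantListCids).len == 1 # both collapse to treeCid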

View File

@@ -34,14 +34,14 @@ const
MaxInflight* = 100
type
WantListHandler* = proc(peer: PeerId, wantList: Wantlist): Future[void] {.gcsafe.}
BlocksHandler* = proc(peer: PeerId, blocks: seq[bt.Block]): Future[void] {.gcsafe.}
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
BlockPresenceHandler* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
WantListSender* = proc(
id: PeerId,
cids: seq[Cid],
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
@@ -50,19 +50,19 @@ type
BlockExcHandlers* = object
onWantList*: WantListHandler
onBlocks*: BlocksHandler
onBlocksDelivery*: BlocksDeliveryHandler
onPresence*: BlockPresenceHandler
onAccount*: AccountHandler
onPayment*: PaymentHandler
BlocksSender* = proc(peer: PeerId, presence: seq[bt.Block]): Future[void] {.gcsafe.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
BlockExcRequest* = object
sendWantList*: WantListSender
sendBlocks*: BlocksSender
sendBlocksDelivery*: BlocksDeliverySender
sendPresence*: PresenceSender
sendAccount*: AccountSender
sendPayment*: PaymentSender
@@ -94,7 +94,7 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
proc handleWantList(
b: BlockExcNetwork,
peer: NetworkPeer,
list: Wantlist) {.async.} =
list: WantList) {.async.} =
## Handle incoming want list
##
@@ -102,32 +102,10 @@ proc handleWantList(
trace "Handling want list for peer", peer = peer.id, items = list.entries.len
await b.handlers.onWantList(peer.id, list)
# TODO: make into a template
proc makeWantList*(
cids: seq[Cid],
priority: int = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false
): Wantlist =
## make list of wanted entries
##
Wantlist(
entries: cids.mapIt(
Entry(
`block`: it.data.buffer,
priority: priority.int32,
cancel: cancel,
wantType: wantType,
sendDontHave: sendDontHave) ),
full: full)
proc sendWantList*(
b: BlockExcNetwork,
id: PeerId,
cids: seq[Cid],
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
@@ -137,58 +115,40 @@ proc sendWantList*(
## Send a want message to peer
##
trace "Sending want list to peer", peer = id, `type` = $wantType, items = cids.len
let msg = makeWantList(
cids,
priority,
cancel,
wantType,
full,
sendDontHave)
trace "Sending want list to peer", peer = id, `type` = $wantType, items = addresses.len
let msg = WantList(
entries: addresses.mapIt(
WantListEntry(
address: it,
priority: priority,
cancel: cancel,
wantType: wantType,
sendDontHave: sendDontHave) ),
full: full)
b.send(id, Message(wantlist: msg))
proc handleBlocks(
proc handleBlocksDelivery(
b: BlockExcNetwork,
peer: NetworkPeer,
blocks: seq[pb.Block]
blocksDelivery: seq[BlockDelivery]
) {.async.} =
## Handle incoming blocks
##
if not b.handlers.onBlocks.isNil:
trace "Handling blocks for peer", peer = peer.id, items = blocks.len
if not b.handlers.onBlocksDelivery.isNil:
trace "Handling blocks for peer", peer = peer.id, items = blocksDelivery.len
await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)
var blks: seq[bt.Block]
for blob in blocks:
without cid =? Cid.init(blob.prefix):
trace "Unable to initialize Cid from protobuf message"
without blk =? bt.Block.new(cid, blob.data, verify = true):
trace "Unable to initialize Block from data"
blks.add(blk)
await b.handlers.onBlocks(peer.id, blks)
template makeBlocks*(blocks: seq[bt.Block]): seq[pb.Block] =
var blks: seq[pb.Block]
for blk in blocks:
blks.add(pb.Block(
prefix: blk.cid.data.buffer,
data: blk.data
))
blks
proc sendBlocks*(
proc sendBlocksDelivery*(
b: BlockExcNetwork,
id: PeerId,
blocks: seq[bt.Block]): Future[void] =
blocksDelivery: seq[BlockDelivery]): Future[void] =
## Send blocks to remote
##
b.send(id, pb.Message(payload: makeBlocks(blocks)))
b.send(id, pb.Message(payload: blocksDelivery))
proc handleBlockPresence(
b: BlockExcNetwork,
@@ -260,11 +220,11 @@ proc rpcHandler(
## handle rpc messages
##
try:
if msg.wantlist.entries.len > 0:
asyncSpawn b.handleWantList(peer, msg.wantlist)
if msg.wantList.entries.len > 0:
asyncSpawn b.handleWantList(peer, msg.wantList)
if msg.payload.len > 0:
asyncSpawn b.handleBlocks(peer, msg.payload)
asyncSpawn b.handleBlocksDelivery(peer, msg.payload)
if msg.blockPresences.len > 0:
asyncSpawn b.handleBlockPresence(peer, msg.blockPresences)
@@ -359,7 +319,7 @@ proc new*(
proc sendWantList(
id: PeerId,
cids: seq[Cid],
cids: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
@@ -369,8 +329,8 @@ proc new*(
id, cids, priority, cancel,
wantType, full, sendDontHave)
proc sendBlocks(id: PeerId, blocks: seq[bt.Block]): Future[void] {.gcsafe.} =
self.sendBlocks(id, blocks)
proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
self.sendBlocksDelivery(id, blocksDelivery)
proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
self.sendBlockPresence(id, presence)
@@ -383,7 +343,7 @@ proc new*(
self.request = BlockExcRequest(
sendWantList: sendWantList,
sendBlocks: sendBlocks,
sendBlocksDelivery: sendBlocksDelivery,
sendPresence: sendPresence,
sendAccount: sendAccount,
sendPayment: sendPayment)
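The wire-level senders now take BlockAddress values directly; makeWantList is gone and the WantList message is built inline. A hedged usage sketch (hypothetical `network`, `peerId`, `treeCid` and `cid` values):

await network.request.sendWantList(
  peerId,
  @[BlockAddress.init(treeCid, 0), BlockAddress.init(cid)],
  wantType = WantType.WantBlock) # ask the peer for the blocks themselves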

View File

@@ -9,6 +9,8 @@
import std/sequtils
import std/tables
import std/sugar
import std/sets
import pkg/chronicles
import pkg/libp2p
@@ -20,6 +22,8 @@ import ../protobuf/blockexc
import ../protobuf/payments
import ../protobuf/presence
import ../../blocktype
export payments, nitro
logScope:
@@ -28,33 +32,39 @@ logScope:
type
BlockExcPeerCtx* = ref object of RootObj
id*: PeerId
blocks*: Table[Cid, Presence] # remote peer have list including price
peerWants*: seq[Entry] # remote peers want lists
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
peerWants*: seq[WantListEntry] # remote peers want lists
exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us
account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id
proc peerHave*(self: BlockExcPeerCtx): seq[Cid] =
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
toSeq(self.blocks.keys)
proc contains*(self: BlockExcPeerCtx, cid: Cid): bool =
cid in self.blocks
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
self.blocks[presence.cid] = presence
self.blocks[presence.address] = presence
func cleanPresence*(self: BlockExcPeerCtx, cids: seq[Cid]) =
for cid in cids:
self.blocks.del(cid)
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
for a in addresses:
self.blocks.del(a)
func cleanPresence*(self: BlockExcPeerCtx, cid: Cid) =
self.cleanPresence(@[cid])
func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) =
self.cleanPresence(@[address])
func price*(self: BlockExcPeerCtx, cids: seq[Cid]): UInt256 =
func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
var price = 0.u256
for cid in cids:
self.blocks.withValue(cid, presence):
for a in addresses:
self.blocks.withValue(a, presence):
price += presence[].price
trace "Blocks price", price

View File

@@ -20,6 +20,7 @@ import pkg/chronicles
import pkg/libp2p
import ../protobuf/blockexc
import ../../blocktype
import ./peercontext
export peercontext
@@ -59,24 +60,32 @@ func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx =
func len*(self: PeerCtxStore): int =
self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == address ) )
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == cid ) )
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it.cidOrTreeCid == cid ) )
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it == address ) )
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.cid == cid ) )
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.address.cidOrTreeCid == cid ) )
func selectCheapest*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
var peers = self.peersHave(cid)
func selectCheapest*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
# assume that the price for all leaves in a tree is the same
let rootAddress = BlockAddress(leaf: false, cid: address.cidOrTreeCid)
var peers = self.peersHave(rootAddress)
func cmp(a, b: BlockExcPeerCtx): int =
var
priceA = 0.u256
priceB = 0.u256
a.blocks.withValue(cid, presence):
a.blocks.withValue(rootAddress, presence):
priceA = presence[].price
b.blocks.withValue(cid, presence):
b.blocks.withValue(rootAddress, presence):
priceB = presence[].price
if priceA == priceB:

View File

@@ -10,46 +10,45 @@
import std/hashes
import std/sequtils
import pkg/libp2p
import pkg/stew/endians2
import message
import ../../blocktype
export Message, protobufEncode, protobufDecode
export Wantlist, WantType, Entry
export Block, BlockPresenceType, BlockPresence
export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate
proc hash*(e: Entry): Hash =
hash(e.`block`)
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc cid*(e: Entry): Cid =
## Helper to convert raw bytes to Cid
##
proc hash*(e: WantListEntry): Hash =
hash(e.address)
Cid.init(e.`block`).get()
proc contains*(a: openArray[Entry], b: Cid): bool =
proc contains*(a: openArray[WantListEntry], b: BlockAddress): bool =
## Convenience method to check for peer presence
##
a.filterIt( it.cid == b ).len > 0
a.anyIt(it.address == b)
proc `==`*(a: Entry, cid: Cid): bool =
return a.cid == cid
proc `==`*(a: WantListEntry, b: BlockAddress): bool =
return a.address == b
proc `<`*(a, b: Entry): bool =
proc `<`*(a, b: WantListEntry): bool =
a.priority < b.priority
proc cid*(e: BlockPresence): Cid =
## Helper to convert raw bytes to Cid
##
Cid.init(e.cid).get()
proc `==`*(a: BlockPresence, b: BlockAddress): bool =
return a.address == b
proc `==`*(a: BlockPresence, cid: Cid): bool =
return cid(a) == cid
proc contains*(a: openArray[BlockPresence], b: Cid): bool =
proc contains*(a: openArray[BlockPresence], b: BlockAddress): bool =
## Convenience method to check for peer presence
##
a.filterIt( cid(it) == b ).len > 0
a.anyIt(it.address == b)
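With hash and `==` defined over BlockAddress, want-list entries and presences are matched by address instead of raw byte buffers. A small sketch (hypothetical `treeCid`):

let target = BlockAddress.init(treeCid, 7)
let entry = WantListEntry(address: target)
assert entry == target            # compares entry.address to the address
assert @[entry].contains(target)  # uses the anyIt-based contains above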

View File

@@ -2,11 +2,18 @@
# and Protobuf encoder/decoder for these messages.
#
# Eventually all this code should be auto-generated from message.proto.
import std/sugar
import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/cid
import pkg/questionable
import ../../units
import ../../merkletree
import ../../blocktype
const
MaxBlockSize* = 100.MiBs.uint
MaxMessageSize* = 100.MiBs.uint
@@ -16,27 +23,28 @@ type
WantBlock = 0,
WantHave = 1
Entry* = object
`block`*: seq[byte] # The block cid
WantListEntry* = object
address*: BlockAddress
priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false
Wantlist* = object
entries*: seq[Entry] # A list of wantlist entries
full*: bool # Whether this is the full wantlist. default to false
WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries
full*: bool # Whether this is the full wantList. default to false
Block* = object
prefix*: seq[byte] # CID prefix (cid version, multicodec and multihash prefix (type + length)
data*: seq[byte]
BlockDelivery* = object
blk*: Block
address*: BlockAddress
proof*: ?MerkleProof # Present only if `address.leaf` is true
BlockPresenceType* = enum
Have = 0,
DontHave = 1
BlockPresence* = object
cid*: seq[byte] # The block cid
address*: BlockAddress
`type`*: BlockPresenceType
price*: seq[byte] # Amount of assets to pay for the block (UInt256)
@@ -47,8 +55,8 @@ type
update*: seq[byte] # Signed Nitro state, serialized as JSON
Message* = object
wantlist*: Wantlist
payload*: seq[Block]
wantList*: WantList
payload*: seq[BlockDelivery]
blockPresences*: seq[BlockPresence]
pendingBytes*: uint
account*: AccountMessage
@@ -58,9 +66,20 @@ type
# Encoding Message into seq[byte] in Protobuf format
#
proc write*(pb: var ProtoBuffer, field: int, value: Entry) =
proc write*(pb: var ProtoBuffer, field: int, value: BlockAddress) =
var ipb = initProtoBuffer()
ipb.write(1, value.`block`)
ipb.write(1, value.leaf.uint)
if value.leaf:
ipb.write(2, value.treeCid.data.buffer)
ipb.write(3, value.index.uint64)
else:
ipb.write(4, value.cid.data.buffer)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: WantListEntry) =
var ipb = initProtoBuffer()
ipb.write(1, value.address)
ipb.write(2, value.priority.uint64)
ipb.write(3, value.cancel.uint)
ipb.write(4, value.wantType.uint)
@@ -68,7 +87,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: Entry) =
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: Wantlist) =
proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
var ipb = initProtoBuffer()
for v in value.entries:
ipb.write(1, v)
@@ -76,16 +95,20 @@ proc write*(pb: var ProtoBuffer, field: int, value: Wantlist) =
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: Block) =
proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
var ipb = initProtoBuffer(maxSize = MaxBlockSize)
ipb.write(1, value.prefix)
ipb.write(2, value.data)
ipb.write(1, value.blk.cid.data.buffer)
ipb.write(2, value.blk.data)
ipb.write(3, value.address)
if value.address.leaf:
if proof =? value.proof:
ipb.write(4, proof.encode())
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) =
var ipb = initProtoBuffer()
ipb.write(1, value.cid)
ipb.write(1, value.address)
ipb.write(2, value.`type`.uint)
ipb.write(3, value.price)
ipb.finish()
@@ -105,7 +128,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer(maxSize = MaxMessageSize)
ipb.write(1, value.wantlist)
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v)
for v in value.blockPresences:
@@ -120,12 +143,41 @@ proc protobufEncode*(value: Message): seq[byte] =
#
# Decoding Message from seq[byte] in Protobuf format
#
proc decode*(_: type Entry, pb: ProtoBuffer): ProtoResult[Entry] =
proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] =
var
value = Entry()
value: BlockAddress
leaf: bool
field: uint64
discard ? pb.getField(1, value.`block`)
cidBuf = newSeq[byte]()
if ? pb.getField(1, field):
leaf = bool(field)
if leaf:
var
treeCid: Cid
index: Natural
if ? pb.getField(2, cidBuf):
treeCid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ? pb.getField(3, field):
index = field
value = BlockAddress(leaf: true, treeCid: treeCid, index: index)
else:
var cid: Cid
if ? pb.getField(4, cidBuf):
cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
value = BlockAddress(leaf: false, cid: cid)
ok(value)
proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry] =
var
value = WantListEntry()
field: uint64
ipb: ProtoBuffer
buf = newSeq[byte]()
if ? pb.getField(1, ipb):
value.address = ? BlockAddress.decode(ipb)
if ? pb.getField(2, field):
value.priority = int32(field)
if ? pb.getField(3, field):
@@ -136,30 +188,53 @@ proc decode*(_: type Entry, pb: ProtoBuffer): ProtoResult[Entry] =
value.sendDontHave = bool(field)
ok(value)
proc decode*(_: type Wantlist, pb: ProtoBuffer): ProtoResult[Wantlist] =
proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
var
value = Wantlist()
value = WantList()
field: uint64
sublist: seq[seq[byte]]
if ? pb.getRepeatedField(1, sublist):
for item in sublist:
value.entries.add(? Entry.decode(initProtoBuffer(item)))
value.entries.add(? WantListEntry.decode(initProtoBuffer(item)))
if ? pb.getField(2, field):
value.full = bool(field)
ok(value)
proc decode*(_: type Block, pb: ProtoBuffer): ProtoResult[Block] =
proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery] =
var
value = Block()
discard ? pb.getField(1, value.prefix)
discard ? pb.getField(2, value.data)
value = BlockDelivery()
field: uint64
dataBuf = newSeq[byte]()
cidBuf = newSeq[byte]()
cid: Cid
ipb: ProtoBuffer
if ? pb.getField(1, cidBuf):
cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ? pb.getField(2, dataBuf):
value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
if ? pb.getField(3, ipb):
value.address = ? BlockAddress.decode(ipb)
if value.address.leaf:
var proofBuf = newSeq[byte]()
if ? pb.getField(4, proofBuf):
let proof = ? MerkleProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
value.proof = proof.some
else:
value.proof = MerkleProof.none
else:
value.proof = MerkleProof.none
ok(value)
proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence] =
var
value = BlockPresence()
field: uint64
discard ? pb.getField(1, value.cid)
ipb: ProtoBuffer
if ? pb.getField(1, ipb):
value.address = ? BlockAddress.decode(ipb)
if ? pb.getField(2, field):
value.`type` = BlockPresenceType(field)
discard ? pb.getField(3, value.price)
@@ -184,10 +259,10 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
ipb: ProtoBuffer
sublist: seq[seq[byte]]
if ? pb.getField(1, ipb):
value.wantlist = ? Wantlist.decode(ipb)
value.wantList = ? WantList.decode(ipb)
if ? pb.getRepeatedField(3, sublist):
for item in sublist:
value.payload.add(? Block.decode(initProtoBuffer(item, maxSize = MaxBlockSize)))
value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)))
if ? pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(? BlockPresence.decode(initProtoBuffer(item)))
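A round-trip sketch of the reworked wire format, using the protobufEncode/protobufDecode pair above (hypothetical `treeCid`; tryGet from stew results):

let msg = Message(
  wantList: WantList(
    entries: @[WantListEntry(address: BlockAddress.init(treeCid, 5))],
    full: false))
let decoded = Message.protobufDecode(protobufEncode(msg)).tryGet()
assert decoded.wantList.entries[0].address.index == 5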

View File

@@ -5,6 +5,8 @@ import pkg/questionable/results
import pkg/upraises
import ./blockexc
import ../../blocktype
export questionable
export stint
export BlockPresenceType
@@ -14,7 +16,7 @@ upraises.push: {.upraises: [].}
type
PresenceMessage* = blockexc.BlockPresence
Presence* = object
cid*: Cid
address*: BlockAddress
have*: bool
price*: UInt256
@@ -24,19 +26,18 @@ func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
UInt256.fromBytesBE(bytes).some
func init*(_: type Presence, message: PresenceMessage): ?Presence =
without cid =? Cid.init(message.cid) and
price =? UInt256.parse(message.price):
without price =? UInt256.parse(message.price):
return none Presence
some Presence(
cid: cid,
address: message.address,
have: message.`type` == BlockPresenceType.Have,
price: price
)
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
PresenceMessage(
cid: presence.cid.data.buffer,
address: presence.address,
`type`: if presence.have:
BlockPresenceType.Have
else:

View File

@@ -8,17 +8,19 @@
## those terms.
import std/tables
import std/sugar
export tables
import pkg/upraises
push: {.upraises: [].}
import pkg/libp2p/[cid, multicodec]
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils
import pkg/questionable
import pkg/questionable/results
import pkg/chronicles
import pkg/json_serialization
import ./units
import ./utils
@@ -37,91 +39,50 @@ type
cid*: Cid
data*: seq[byte]
template EmptyCid*: untyped =
var
EmptyCid {.global, threadvar.}:
array[CIDv0..CIDv1, Table[MultiCodec, Cid]]
BlockAddress* = object
case leaf*: bool
of true:
treeCid*: Cid
index*: Natural
else:
cid*: Cid
once:
EmptyCid = [
CIDv0: {
multiCodec("sha2-256"): Cid
.init("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
.get()
}.toTable,
CIDv1: {
multiCodec("sha2-256"): Cid
.init("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")
.get()
}.toTable,
]
EmptyCid
proc `==`*(a, b: BlockAddress): bool =
a.leaf == b.leaf and
(
if a.leaf:
a.treeCid == b.treeCid and a.index == b.index
else:
a.cid == b.cid
)
template EmptyDigests*: untyped =
var
EmptyDigests {.global, threadvar.}:
array[CIDv0..CIDv1, Table[MultiCodec, MultiHash]]
proc `$`*(a: BlockAddress): string =
if a.leaf:
"treeCid: " & $a.treeCid & ", index: " & $a.index
else:
"cid: " & $a.cid
once:
EmptyDigests = [
CIDv0: {
multiCodec("sha2-256"): EmptyCid[CIDv0]
.catch
.get()[multiCodec("sha2-256")]
.catch
.get()
.mhash
.get()
}.toTable,
CIDv1: {
multiCodec("sha2-256"): EmptyCid[CIDv1]
.catch
.get()[multiCodec("sha2-256")]
.catch
.get()
.mhash
.get()
}.toTable,
]
proc writeValue*(
writer: var JsonWriter,
value: Cid
) {.upraises:[IOError].} =
writer.writeValue($value)
EmptyDigests
proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf:
a.treeCid
else:
a.cid
template EmptyBlock*: untyped =
var
EmptyBlock {.global, threadvar.}:
array[CIDv0..CIDv1, Table[MultiCodec, Block]]
proc address*(b: Block): BlockAddress =
BlockAddress(leaf: false, cid: b.cid)
once:
EmptyBlock = [
CIDv0: {
multiCodec("sha2-256"): Block(
cid: EmptyCid[CIDv0][multiCodec("sha2-256")])
}.toTable,
CIDv1: {
multiCodec("sha2-256"): Block(
cid: EmptyCid[CIDv1][multiCodec("sha2-256")])
}.toTable,
]
proc init*(_: type BlockAddress, cid: Cid): BlockAddress =
BlockAddress(leaf: false, cid: cid)
EmptyBlock
proc isEmpty*(cid: Cid): bool =
cid == EmptyCid[cid.cidver]
.catch
.get()[cid.mhash.get().mcodec]
.catch
.get()
proc isEmpty*(blk: Block): bool =
blk.cid.isEmpty
proc emptyBlock*(cid: Cid): Block =
EmptyBlock[cid.cidver]
.catch
.get()[cid.mhash.get().mcodec]
.catch
.get()
proc init*(_: type BlockAddress, treeCid: Cid, index: Natural): BlockAddress =
BlockAddress(leaf: true, treeCid: treeCid, index: index)
proc `$`*(b: Block): string =
result &= "cid: " & $b.cid
@@ -156,15 +117,56 @@ func new*(
## creates a new block for both storage and network IO
##
if verify:
let
mhash = ? cid.mhash.mapFailure
b = ? Block.new(
data = @data,
version = cid.cidver,
codec = cid.mcodec,
mcodec = mhash.mcodec)
computedMhash = ? MultiHash.digest($mhash.mcodec, data).mapFailure
computedCid = ? Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure
if computedCid != cid:
return "Cid doesn't match the data".failure
if verify and cid != b.cid:
return "Cid and content don't match!".failure
return Block(
cid: cid,
data: @data
).success
success b
proc emptyCid*(version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec): ?!Cid =
## Returns cid representing empty content, given cid version, hash codec and data codec
##
const
Sha256 = multiCodec("sha2-256")
Raw = multiCodec("raw")
DagPB = multiCodec("dag-pb")
DagJson = multiCodec("dag-json")
var index {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]
once:
index = {
# source https://ipld.io/specs/codecs/dag-pb/fixtures/cross-codec/#dagpb_empty
(CIDv0, Sha256, DagPB): ? Cid.init("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n").mapFailure,
(CIDv1, Sha256, DagPB): ? Cid.init("zdj7Wkkhxcu2rsiN6GUyHCLsSLL47kdUNfjbFqBUUhMFTZKBi").mapFailure, # base36: bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku
(CIDv1, Sha256, DagJson): ? Cid.init("z4EBG9jGUWMVxX9deANWX7iPyExLswe2akyF7xkNAaYgugvnhmP").mapFailure, # base36: baguqeera6mfu3g6n722vx7dbitpnbiyqnwah4ddy4b5c3rwzxc5pntqcupta
(CIDv1, Sha256, Raw): ? Cid.init("zb2rhmy65F3REf8SZp7De11gxtECBGgUKaLdiDj7MCGCHxbDW").mapFailure,
}.toTable
index[(version, hcodec, dcodec)].catch
proc emptyDigest*(version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec): ?!MultiHash =
emptyCid(version, hcodec, dcodec)
.flatMap((cid: Cid) => cid.mhash.mapFailure)
proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block =
emptyCid(version, hcodec, multiCodec("raw"))
.flatMap((cid: Cid) => Block.new(cid = cid, data = @[]))
proc emptyBlock*(cid: Cid): ?!Block =
cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
emptyBlock(cid.cidver, mhash.mcodec))
proc isEmpty*(cid: Cid): bool =
success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
emptyCid(cid.cidver, mhash.mcodec, cid.mcodec))
proc isEmpty*(blk: Block): bool =
blk.cid.isEmpty
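The thread-global EmptyCid/EmptyDigests/EmptyBlock tables are replaced by fallible lookups keyed on (CID version, hash codec, data codec). A sketch of the new calls (multiCodec and CIDv1 from pkg/libp2p; tryGet from questionable results):

let cid = emptyCid(CIDv1, multiCodec("sha2-256"), multiCodec("raw")).tryGet()
assert cid.isEmpty
let blk = emptyBlock(cid).tryGet()
assert blk.isEmpty and blk.data.len == 0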

View File

@@ -12,15 +12,21 @@ import pkg/upraises
push: {.upraises: [].}
import std/sequtils
import std/options
import std/sugar
import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/libp2p/[multicodec, cid, multibase, multihash]
import pkg/libp2p/protobuf/minprotobuf
import ../manifest
import ../merkletree
import ../stores
import ../blocktype as bt
import ../utils
import ../utils/asynciter
import pkg/stew/byteutils
import ./backend
@@ -64,12 +70,14 @@ type
decoderProvider*: DecoderProvider
store*: BlockStore
GetNext = proc(): Future[?(bt.Block, int)] {.upraises: [], gcsafe, closure.}
PendingBlocksIter* = ref object
finished*: bool
next*: GetNext
EncodingParams = object
ecK: int
ecM: int
rounded: int
steps: int
blocksCount: int
func indexToPos(self: Erasure, encoded: Manifest, idx, step: int): int {.inline.} =
func indexToPos(steps, idx, step: int): int {.inline.} =
## Convert an index to a position in the encoded
## dataset
## `idx` - the index to convert
@@ -77,93 +85,71 @@ func indexToPos(self: Erasure, encoded: Manifest, idx, step: int): int {.inline.
## `pos` - the position in the encoded dataset
##
(idx - step) div encoded.steps
iterator items*(blocks: PendingBlocksIter): Future[?(bt.Block, int)] =
while not blocks.finished:
yield blocks.next()
(idx - step) div steps
proc getPendingBlocks(
self: Erasure,
manifest: Manifest,
start, stop, steps: int): ?!PendingBlocksIter =
indices: seq[int]): AsyncIter[(?!bt.Block, int)] =
## Get pending blocks iterator
##
var
# calculate block indexes to retrieve
blockIdx = toSeq(countup(start, stop, steps))
# request all blocks from the store
pendingBlocks = blockIdx.mapIt(
self.store.getBlock(manifest[it]) # Get the data blocks (first K)
# request blocks from the store
pendingBlocks = indices.map( (i: int) =>
self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
)
indices = pendingBlocks # needed so we can track the block indices
iter = PendingBlocksIter(finished: false)
trace "Requesting blocks", pendingBlocks = pendingBlocks.len
proc next(): Future[?(bt.Block, int)] {.async.} =
if iter.finished:
trace "No more blocks"
return none (bt.Block, int)
proc isFinished(): bool = pendingBlocks.len == 0
if pendingBlocks.len == 0:
iter.finished = true
trace "No more blocks - finished"
return none (bt.Block, int)
proc genNext(): Future[(?!bt.Block, int)] {.async.} =
let completedFut = await one(pendingBlocks)
if (let i = pendingBlocks.find(completedFut); i >= 0):
pendingBlocks.del(i)
return await completedFut
else:
let (_, index) = await completedFut
raise newException(CatchableError, "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index)
let
done = await one(pendingBlocks)
idx = indices.find(done)
logScope:
idx = idx
blockIdx = blockIdx[idx]
manifest = manifest[blockIdx[idx]]
pendingBlocks.del(pendingBlocks.find(done))
without blk =? (await done), error:
trace "Failed retrieving block", err = $error.msg
return none (bt.Block, int)
trace "Retrieved block"
some (blk, blockIdx[idx])
iter.next = next
success iter
Iter.new(genNext, isFinished)
proc prepareEncodingData(
self: Erasure,
encoded: Manifest,
manifest: Manifest,
params: EncodingParams,
step: int,
data: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte]): Future[?!int] {.async.} =
## Prepare data for encoding
##
without pendingBlocksIter =?
self.getPendingBlocks(
encoded,
step,
encoded.rounded - 1, encoded.steps), err:
trace "Unable to get pending blocks", error = err.msg
return failure(err)
let
indices = toSeq(countup(step, params.rounded - 1, params.steps))
pendingBlocksIter = self.getPendingBlocks(manifest, indices.filterIt(it < manifest.blocksCount))
var resolved = 0
for blkFut in pendingBlocksIter:
if (blk, idx) =? (await blkFut):
let
pos = self.indexToPos(encoded, idx, step)
for fut in pendingBlocksIter:
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
continue
if blk.isEmpty:
trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock)
else:
trace "Encoding block", cid = blk.cid, idx
shallowCopy(data[pos], blk.data)
let pos = indexToPos(params.steps, idx, step)
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
cids[idx] = blk.cid
resolved.inc()
success resolved
for idx in indices.filterIt(it >= manifest.blocksCount):
let pos = indexToPos(params.steps, idx, step)
trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock)
without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err:
return failure(err)
cids[idx] = emptyBlockCid
success(resolved)
proc prepareDecodingData(
self: Erasure,
@@ -171,36 +157,38 @@ proc prepareDecodingData(
step: int,
data: ref seq[seq[byte]],
parityData: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte]): Future[?!(int, int)] {.async.} =
## Prepare data for decoding
## `encoded` - the encoded manifest
## `step` - the current step
## `data` - the data to be prepared
## `parityData` - the parityData to be prepared
## `cids` - cids of prepared data
## `emptyBlock` - the empty block to be used for padding
##
without pendingBlocksIter =?
self.getPendingBlocks(
encoded,
step,
encoded.len - 1, encoded.steps), err:
trace "Unable to get pending blocks", error = err.msg
return failure(err)
let
indices = toSeq(countup(step, encoded.blocksCount - 1, encoded.steps))
pendingBlocksIter = self.getPendingBlocks(encoded, indices)
var
dataPieces = 0
parityPieces = 0
resolved = 0
for blkFut in pendingBlocksIter:
for fut in pendingBlocksIter:
# Continue to receive blocks until we have just enough for decoding
# or no more blocks can arrive
if resolved >= encoded.ecK:
break
if (blk, idx) =? (await blkFut):
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
continue
let
pos = self.indexToPos(encoded, idx, step)
pos = indexToPos(encoded.steps, idx, step)
logScope:
cid = blk.cid
@@ -209,6 +197,7 @@ proc prepareDecodingData(
step = step
empty = blk.isEmpty
cids[idx] = blk.cid
if idx >= encoded.rounded:
trace "Retrieved parity block"
shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
@@ -222,78 +211,62 @@ proc prepareDecodingData(
return success (dataPieces, parityPieces)
proc prepareManifest(
self: Erasure,
manifest: Manifest,
blocks: int,
parity: int): ?!Manifest =
proc init(_: type EncodingParams, manifest: Manifest, ecK: int, ecM: int): ?!EncodingParams =
if ecK > manifest.blocksCount:
return failure("Unable to encode manifest, not enough blocks, ecK = " & $ecK & ", blocksCount = " & $manifest.blocksCount)
logScope:
original_cid = manifest.cid.get()
original_len = manifest.len
blocks = blocks
parity = parity
let
rounded = roundUp(manifest.blocksCount, ecK)
steps = divUp(manifest.blocksCount, ecK)
blocksCount = rounded + (steps * ecM)
if blocks > manifest.len:
trace "Unable to encode manifest, not enough blocks", blocks = blocks, len = manifest.len
return failure("Not enough blocks to encode")
trace "Preparing erasure coded manifest", blocks, parity
without var encoded =? Manifest.new(manifest, blocks, parity), error:
trace "Unable to create manifest", msg = error.msg
return error.failure
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.len
trace "Erasure coded manifest prepared"
success encoded
EncodingParams(
ecK: ecK,
ecM: ecM,
rounded: rounded,
steps: steps,
blocksCount: blocksCount
).success
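As an aside, a worked example of these parameters, assuming roundUp/divUp from ../utils as used above, for 10 source blocks with K = 4 and M = 2 (illustrative numbers only):

let
  blocksCount = 10
  (ecK, ecM) = (4, 2)
  rounded = roundUp(blocksCount, ecK)  # 12: padded up to a multiple of K
  steps = divUp(blocksCount, ecK)      # 3: interleaved encoding steps
  total = rounded + steps * ecM        # 18 blocks in the encoded dataset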
proc encodeData(
self: Erasure,
manifest: Manifest): Future[?!void] {.async.} =
manifest: Manifest,
params: EncodingParams
): Future[?!Manifest] {.async.} =
## Encode blocks pointed to by the protected manifest
##
## `manifest` - the manifest to encode
##
var
encoded = manifest
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.len
protected = encoded.protected
ecK = encoded.ecK
ecM = encoded.ecM
if not encoded.protected:
trace "Manifest is not erasure protected"
return failure("Manifest is not erasure protected")
steps = params.steps
rounded_blocks = params.rounded
blocks_count = params.blocksCount
ecK = params.ecK
ecM = params.ecM
var
encoder = self.encoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
emptyBlock = newSeq[byte](encoded.blockSize.int)
cids = seq[Cid].new()
encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM)
emptyBlock = newSeq[byte](manifest.blockSize.int)
cids[].setLen(params.blocksCount)
try:
for step in 0..<encoded.steps:
for step in 0..<params.steps:
# TODO: Don't allocate a new seq every time, allocate once and zero out
var
data = seq[seq[byte]].new() # number of blocks to encode
parityData = newSeqWith[seq[byte]](encoded.ecM, newSeq[byte](encoded.blockSize.int))
parityData = newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int))
data[].setLen(encoded.ecK)
data[].setLen(params.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
without resolved =?
(await self.prepareEncodingData(encoded, step, data, emptyBlock)), err:
(await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err:
trace "Unable to prepare data", error = err.msg
return failure(err)
@@ -303,20 +276,39 @@ proc encodeData(
let res = encoder.encode(data[], parityData);
res.isErr):
trace "Unable to encode manifest!", error = $res.error
return res.mapFailure
return failure($res.error)
var idx = encoded.rounded + step
for j in 0..<encoded.ecM:
var idx = params.rounded + step
for j in 0..<params.ecM:
without blk =? bt.Block.new(parityData[j]), error:
trace "Unable to create parity block", err = error.msg
return failure(error)
trace "Adding parity block", cid = blk.cid, idx
encoded[idx] = blk.cid
cids[idx] = blk.cid
if isErr (await self.store.putBlock(blk)):
trace "Unable to store block!", cid = blk.cid
return failure("Unable to store block!")
idx.inc(encoded.steps)
idx.inc(params.steps)
without tree =? MerkleTree.init(cids[]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if err =? (await self.store.putAllProofs(tree)).errorOption:
return failure(err)
let encodedManifest = Manifest.new(
manifest = manifest,
treeCid = treeCid,
datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
ecK = params.ecK,
ecM = params.ecM
)
return encodedManifest.success
except CancelledError as exc:
trace "Erasure coding encoding cancelled"
raise exc # cancellation needs to be propagated
@ -326,8 +318,6 @@ proc encodeData(
finally:
encoder.release()
return success()
proc encode*(
self: Erasure,
manifest: Manifest,
@ -340,47 +330,37 @@ proc encode*(
## `parity` - the number of parity blocks to generate - M
##
without var encoded =? self.prepareManifest(manifest, blocks, parity), error:
trace "Unable to prepare manifest", error = error.msg
return failure error
without params =? EncodingParams.init(manifest, blocks, parity), err:
return failure(err)
if err =? (await self.encodeData(encoded)).errorOption:
trace "Unable to encode data", error = err.msg
return failure err
without encodedManifest =? await self.encodeData(manifest, params), err:
return failure(err)
return success encoded
return success encodedManifest
proc decode*(
self: Erasure,
encoded: Manifest,
all = true): Future[?!Manifest] {.async.} =
encoded: Manifest
): Future[?!Manifest] {.async.} =
## Decode a protected manifest into its original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
## `all` - if true, all blocks will be recovered,
## including parity
##
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.len
protected = encoded.protected
ecK = encoded.ecK
ecM = encoded.ecM
if not encoded.protected:
trace "Manifest is not erasure protected"
return failure "Manifest is not erasure protected"
new_manifest = encoded.blocksCount
var
cids = seq[Cid].new()
recoveredIndices = newSeq[int]()
decoder = self.decoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
emptyBlock = newSeq[byte](encoded.blockSize.int)
hasParity = false
trace "Decoding erasure coded manifest"
cids[].setLen(encoded.blocksCount)
try:
for step in 0..<encoded.steps:
# TODO: this is a tight blocking loop so we sleep here to allow
@ -390,16 +370,14 @@ proc decode*(
var
data = seq[seq[byte]].new()
# newSeq[seq[byte]](encoded.ecK) # number of blocks to encode
parityData = seq[seq[byte]].new()
recovered = newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int))
resolved = 0
data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M
without (dataPieces, parityPieces) =?
(await self.prepareDecodingData(encoded, step, data, parityData, emptyBlock)), err:
(await self.prepareDecodingData(encoded, step, data, parityData, cids, emptyBlock)), err:
trace "Unable to prepare data", error = err.msg
return failure(err)
@ -415,18 +393,19 @@ proc decode*(
return failure($err.error)
for i in 0..<encoded.ecK:
if data[i].len <= 0 and not encoded.blocks[i].isEmpty:
let idx = i * encoded.steps + step
if data[i].len <= 0 and not cids[idx].isEmpty:
without blk =? bt.Block.new(recovered[i]), error:
trace "Unable to create block!", exc = error.msg
return failure(error)
doAssert blk.cid in encoded.blocks,
"Recovered block not in original manifest"
trace "Recovered block", cid = blk.cid, index = i
if isErr (await self.store.putBlock(blk)):
trace "Unable to store block!", cid = blk.cid
return failure("Unable to store block!")
cids[idx] = blk.cid
recoveredIndices.add(idx)
except CancelledError as exc:
trace "Erasure coding decoding cancelled"
raise exc # cancellation needs to be propagated
@ -436,8 +415,23 @@ proc decode*(
finally:
decoder.release()
without decoded =? Manifest.new(blocks = encoded.blocks[0..<encoded.originalLen]), error:
return error.failure
without tree =? MerkleTree.init(cids[0..<encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure("Original tree root differs from the tree root computed out of recovered data")
let idxIter = Iter
.fromItems(recoveredIndices)
.filter((i: int) => i < tree.leavesCount)
if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
return failure(err)
let decoded = Manifest.new(encoded)
return decoded.success
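Taken together, a minimal round-trip sketch (the `erasure` instance, `manifest` value, and parameter names are assumptions):

```nim
# Protect with K = 3 data and M = 2 parity blocks per group,
# then recover the original manifest from the protected one.
let protected = (await erasure.encode(manifest, blocks = 3, parity = 2)).tryGet()
let recovered = (await erasure.decode(protected)).tryGet()
assert recovered.treeCid == manifest.treeCid
```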


@ -34,54 +34,45 @@ proc encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
? manifest.verify()
var pbNode = initProtoBuffer()
for c in manifest.blocks:
var pbLink = initProtoBuffer()
pbLink.write(1, c.data.buffer) # write Cid links
pbLink.finish()
pbNode.write(2, pbLink)
# NOTE: The `Data` field in the `dag-pb`
# contains the following protobuf `Message`
#
# ```protobuf
# Message ErasureInfo {
# optional uint32 K = 1; # number of encoded blocks
# optional uint32 M = 2; # number of parity blocks
# optional bytes cid = 3; # cid of the original dataset
# optional uint32 original = 4; # number of original blocks
# optional uint32 ecK = 1; # number of encoded blocks
# optional uint32 ecM = 2; # number of parity blocks
# optional bytes originalTreeCid = 3; # cid of the original dataset
# optional uint32 originalDatasetSize = 4; # size of the original dataset
# }
# Message Header {
# optional bytes rootHash = 1; # the root (tree) hash
# optional bytes treeCid = 1; # cid (root) of the tree
# optional uint32 blockSize = 2; # size of a single block
# optional uint32 blocksLen = 3; # total amount of blocks
# optional uint64 datasetSize = 3; # size of the dataset
# optional ErasureInfo erasure = 4; # erasure coding info
# optional uint64 originalBytes = 5;# exact file size
# }
# ```
#
let cid = ? manifest.cid
# var treeRootVBuf = initVBuffer()
var header = initProtoBuffer()
header.write(1, cid.data.buffer)
header.write(1, manifest.treeCid.data.buffer)
header.write(2, manifest.blockSize.uint32)
header.write(3, manifest.len.uint32)
header.write(5, manifest.originalBytes.uint64)
header.write(3, manifest.datasetSize.uint32)
if manifest.protected:
var erasureInfo = initProtoBuffer()
erasureInfo.write(1, manifest.ecK.uint32)
erasureInfo.write(2, manifest.ecM.uint32)
erasureInfo.write(3, manifest.originalCid.data.buffer)
erasureInfo.write(4, manifest.originalLen.uint32)
erasureInfo.write(3, manifest.originalTreeCid.data.buffer)
erasureInfo.write(4, manifest.originalDatasetSize.uint32)
erasureInfo.finish()
header.write(4, erasureInfo)
pbNode.write(1, header) # set the rootHash Cid as the data field
pbNode.write(1, header) # set the treeCid as the data field
pbNode.finish()
return pbNode.buffer.success
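A round-trip sketch of the codec under the layout above (the coder construction is an assumption):

```nim
# Encoding and decoding back should preserve equality,
# per the `==` defined on Manifest.
let bytes = DagPBCoder().encode(manifest).tryGet()
let decoded = DagPBCoder().decode(bytes).tryGet()
assert decoded == manifest
```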
func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
proc decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
## Decode a manifest from a data blob
##
@ -89,86 +80,70 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
pbNode = initProtoBuffer(data)
pbHeader: ProtoBuffer
pbErasureInfo: ProtoBuffer
rootHash: seq[byte]
originalCid: seq[byte]
originalBytes: uint64
treeCidBuf: seq[byte]
originalTreeCid: seq[byte]
datasetSize: uint32
blockSize: uint32
blocksLen: uint32
originalLen: uint32
originalDatasetSize: uint32
ecK, ecM: uint32
blocks: seq[Cid]
# Decode `Header` message
if pbNode.getField(1, pbHeader).isErr:
return failure("Unable to decode `Header` from dag-pb manifest!")
# Decode `Header` contents
if pbHeader.getField(1, rootHash).isErr:
return failure("Unable to decode `rootHash` from manifest!")
if pbHeader.getField(1, treeCidBuf).isErr:
return failure("Unable to decode `treeCid` from manifest!")
if pbHeader.getField(2, blockSize).isErr:
return failure("Unable to decode `blockSize` from manifest!")
if pbHeader.getField(3, blocksLen).isErr:
return failure("Unable to decode `blocksLen` from manifest!")
if pbHeader.getField(5, originalBytes).isErr:
return failure("Unable to decode `originalBytes` from manifest!")
if pbHeader.getField(3, datasetSize).isErr:
return failure("Unable to decode `datasetSize` from manifest!")
if pbHeader.getField(4, pbErasureInfo).isErr:
return failure("Unable to decode `erasureInfo` from manifest!")
if pbErasureInfo.buffer.len > 0:
let protected = pbErasureInfo.buffer.len > 0
if protected:
if pbErasureInfo.getField(1, ecK).isErr:
return failure("Unable to decode `K` from manifest!")
if pbErasureInfo.getField(2, ecM).isErr:
return failure("Unable to decode `M` from manifest!")
if pbErasureInfo.getField(3, originalCid).isErr:
return failure("Unable to decode `originalCid` from manifest!")
if pbErasureInfo.getField(3, originalTreeCid).isErr:
return failure("Unable to decode `originalTreeCid` from manifest!")
if pbErasureInfo.getField(4, originalLen).isErr:
return failure("Unable to decode `originalLen` from manifest!")
if pbErasureInfo.getField(4, originalDatasetSize).isErr:
return failure("Unable to decode `originalDatasetSize` from manifest!")
let rootHashCid = ? Cid.init(rootHash).mapFailure
var linksBuf: seq[seq[byte]]
if pbNode.getRepeatedField(2, linksBuf).isOk:
for pbLinkBuf in linksBuf:
var
blockBuf: seq[byte]
pbLink = initProtoBuffer(pbLinkBuf)
if pbLink.getField(1, blockBuf).isOk:
blocks.add(? Cid.init(blockBuf).mapFailure)
if blocksLen.int != blocks.len:
return failure("Total blocks and length of blocks in header don't match!")
let
self = if pbErasureInfo.buffer.len > 0:
treeCid = ? Cid.init(treeCidBuf).mapFailure
let
self = if protected:
Manifest.new(
rootHash = rootHashCid,
originalBytes = originalBytes.NBytes,
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
blocks = blocks,
version = rootHashCid.cidver,
hcodec = (? rootHashCid.mhash.mapFailure).mcodec,
codec = rootHashCid.mcodec,
version = treeCid.cidver,
hcodec = (? treeCid.mhash.mapFailure).mcodec,
codec = treeCid.mcodec,
ecK = ecK.int,
ecM = ecM.int,
originalCid = ? Cid.init(originalCid).mapFailure,
originalLen = originalLen.int
originalTreeCid = ? Cid.init(originalTreeCid).mapFailure,
originalDatasetSize = originalDatasetSize.NBytes
)
else:
Manifest.new(
rootHash = rootHashCid,
originalBytes = originalBytes.NBytes,
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
blocks = blocks,
version = rootHashCid.cidver,
hcodec = (? rootHashCid.mhash.mapFailure).mcodec,
codec = rootHashCid.mcodec
version = treeCid.cidver,
hcodec = (? treeCid.mhash.mapFailure).mcodec,
codec = treeCid.mcodec
)
? self.verify()


@ -30,10 +30,9 @@ export types
type
Manifest* = ref object of RootObj
rootHash {.serialize.}: ?Cid # Root (tree) hash of the contained data set
originalBytes* {.serialize.}: NBytes # Exact size of the original (uploaded) file
treeCid {.serialize.}: Cid # Root of the merkle tree
datasetSize {.serialize.}: NBytes # Total size of all blocks
blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
blocks: seq[Cid] # Block Cid
version: CidVersion # Cid version
hcodec: MultiCodec # Multihash codec
codec: MultiCodec # Data set codec
@ -41,8 +40,8 @@ type
of true:
ecK: int # Number of blocks to encode
ecM: int # Number of resulting parity blocks
originalCid: Cid # The original Cid of the dataset being erasure coded
originalLen: int # The length of the original manifest
originalTreeCid: Cid # The original root of the dataset being erasure coded
originalDatasetSize: NBytes
else:
discard
@ -53,8 +52,8 @@ type
proc blockSize*(self: Manifest): NBytes =
self.blockSize
proc blocks*(self: Manifest): seq[Cid] =
self.blocks
proc datasetSize*(self: Manifest): NBytes =
self.datasetSize
proc version*(self: Manifest): CidVersion =
self.version
@ -74,33 +73,25 @@ proc ecK*(self: Manifest): int =
proc ecM*(self: Manifest): int =
self.ecM
proc originalCid*(self: Manifest): Cid =
self.originalCid
proc originalTreeCid*(self: Manifest): Cid =
self.originalTreeCid
proc originalLen*(self: Manifest): int =
self.originalLen
proc originalBlocksCount*(self: Manifest): int =
divUp(self.originalDatasetSize.int, self.blockSize.int)
proc originalDatasetSize*(self: Manifest): NBytes =
self.originalDatasetSize
proc treeCid*(self: Manifest): Cid =
self.treeCid
proc blocksCount*(self: Manifest): int =
divUp(self.datasetSize.int, self.blockSize.int)
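For example (assumed sizes), a dataset that does not divide evenly into blocks still rounds up:

```nim
# A 100 KiB dataset in 64 KiB blocks occupies two blocks,
# the second one only partially filled.
assert divUp(102_400, 65_536) == 2
```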
############################################################
# Operations on block list
############################################################
func len*(self: Manifest): int =
self.blocks.len
func `[]`*(self: Manifest, i: Natural): Cid =
self.blocks[i]
func `[]=`*(self: var Manifest, i: Natural, item: Cid) =
self.rootHash = Cid.none
self.blocks[i] = item
func `[]`*(self: Manifest, i: BackwardsIndex): Cid =
self.blocks[self.len - i.int]
func `[]=`*(self: Manifest, i: BackwardsIndex, item: Cid) =
self.rootHash = Cid.none
self.blocks[self.len - i.int] = item
func isManifest*(cid: Cid): ?!bool =
let res = ?cid.contentType().mapFailure(CodexError)
($(res) in ManifestContainers).success
@ -108,25 +99,6 @@ func isManifest*(cid: Cid): ?!bool =
func isManifest*(mc: MultiCodec): ?!bool =
($mc in ManifestContainers).success
proc add*(self: Manifest, cid: Cid) =
assert not self.protected # we expect that protected manifests are created with properly-sized self.blocks
self.rootHash = Cid.none
trace "Adding cid to manifest", cid
self.blocks.add(cid)
self.originalBytes = self.blocks.len.NBytes * self.blockSize
iterator items*(self: Manifest): Cid =
for b in self.blocks:
yield b
iterator pairs*(self: Manifest): tuple[key: int, val: Cid] =
for pair in self.blocks.pairs():
yield pair
func contains*(self: Manifest, cid: Cid): bool =
cid in self.blocks
############################################################
# Various sizes and verification
############################################################
@ -134,79 +106,61 @@ func contains*(self: Manifest, cid: Cid): bool =
func bytes*(self: Manifest, pad = true): NBytes =
## Compute how many bytes corresponding StoreStream(Manifest, pad) will return
if pad or self.protected:
self.len.NBytes * self.blockSize
self.blocksCount.NBytes * self.blockSize
else:
self.originalBytes
self.datasetSize
func rounded*(self: Manifest): int =
## Number of data blocks in *protected* manifest including padding at the end
roundUp(self.originalLen, self.ecK)
roundUp(self.originalBlocksCount, self.ecK)
func steps*(self: Manifest): int =
## Number of EC groups in *protected* manifest
divUp(self.originalLen, self.ecK)
divUp(self.originalBlocksCount, self.ecK)
func verify*(self: Manifest): ?!void =
## Check manifest correctness
##
let originalLen = (if self.protected: self.originalLen else: self.len)
if divUp(self.originalBytes, self.blockSize) != originalLen:
return failure newException(CodexError, "Broken manifest: wrong originalBytes")
if self.protected and (self.len != self.steps * (self.ecK + self.ecM)):
return failure newException(CodexError, "Broken manifest: wrong originalLen")
if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
return success()
proc cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} =
self.treeCid.success
############################################################
# Cid computation
############################################################
template hashBytes(mh: MultiHash): seq[byte] =
## get the hash bytes of a multihash object
##
mh.data.buffer[mh.dpos..(mh.dpos + mh.size - 1)]
proc makeRoot*(self: Manifest): ?!void =
## Create a tree hash root of the contained
## block hashes
##
var
stack: seq[MultiHash]
for cid in self:
stack.add(? cid.mhash.mapFailure)
while stack.len > 1:
let
(b1, b2) = (stack.pop(), stack.pop())
mh = ? MultiHash.digest(
$self.hcodec,
(b1.hashBytes() & b2.hashBytes()))
.mapFailure
stack.add(mh)
if stack.len == 1:
let digest = ? EmptyDigests[self.version][self.hcodec].catch
let cid = ? Cid.init(self.version, self.codec, digest).mapFailure
self.rootHash = cid.some
success()
proc cid*(self: Manifest): ?!Cid =
## Generate a root hash using the treehash algorithm
##
if self.rootHash.isNone:
? self.makeRoot()
(!self.rootHash).success
proc `==`*(a, b: Manifest): bool =
(a.treeCid == b.treeCid) and
(a.datasetSize == b.datasetSize) and
(a.blockSize == b.blockSize) and
(a.version == b.version) and
(a.hcodec == b.hcodec) and
(a.codec == b.codec) and
(a.protected == b.protected) and
(if a.protected:
(a.ecK == b.ecK) and
(a.ecM == b.ecM) and
(a.originalTreeCid == b.originalTreeCid) and
(a.originalDatasetSize == b.originalDatasetSize)
else:
true)
proc `$`*(self: Manifest): string =
"treeCid: " & $self.treeCid &
", datasetSize: " & $self.datasetSize &
", blockSize: " & $self.blockSize &
", version: " & $self.version &
", hcodec: " & $self.hcodec &
", codec: " & $self.codec &
", protected: " & $self.protected &
(if self.protected:
", ecK: " & $self.ecK &
", ecM: " & $self.ecM &
", originalTreeCid: " & $self.originalTreeCid &
", originalDatasetSize: " & $self.originalDatasetSize
else:
"")
############################################################
# Constructors
@ -214,67 +168,61 @@ proc cid*(self: Manifest): ?!Cid =
proc new*(
T: type Manifest,
blocks: openArray[Cid] = [],
protected = false,
version = CIDv1,
treeCid: Cid,
blockSize: NBytes,
datasetSize: NBytes,
version: CidVersion = CIDv1,
hcodec = multiCodec("sha2-256"),
codec = multiCodec("raw"),
blockSize = DefaultBlockSize
): ?!Manifest =
## Create a manifest using an array of `Cid`s
##
if hcodec notin EmptyDigests[version]:
return failure("Unsupported manifest hash codec!")
protected = false,
): Manifest =
T(
blocks: @blocks,
treeCid: treeCid,
blockSize: blockSize,
datasetSize: datasetSize,
version: version,
codec: codec,
hcodec: hcodec,
blockSize: blockSize,
originalBytes: blocks.len.NBytes * blockSize,
protected: protected).success
protected: protected)
proc new*(
T: type Manifest,
manifest: Manifest,
treeCid: Cid,
datasetSize: NBytes,
ecK, ecM: int
): ?!Manifest =
): Manifest =
## Create an erasure protected dataset from an
## un-protected one
## unprotected one
##
var
self = Manifest(
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
originalBytes: manifest.originalBytes,
blockSize: manifest.blockSize,
protected: true,
ecK: ecK, ecM: ecM,
originalCid: ? manifest.cid,
originalLen: manifest.len)
originalTreeCid: manifest.treeCid,
originalDatasetSize: manifest.datasetSize)
let
encodedLen = self.rounded + (self.steps * ecM)
self.blocks = newSeq[Cid](encodedLen)
# copy original manifest blocks
for i in 0..<self.rounded:
if i < manifest.len:
self.blocks[i] = manifest[i]
else:
self.blocks[i] = EmptyCid[manifest.version]
.catch
.get()[manifest.hcodec]
.catch
.get()
? self.verify()
self.success
proc new*(
T: type Manifest,
manifest: Manifest
): Manifest =
## Create an unprotected dataset from an
## erasure protected one
##
Manifest(
treeCid: manifest.originalTreeCid,
datasetSize: manifest.originalDatasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: false)
proc new*(
T: type Manifest,
@ -287,50 +235,27 @@ proc new*(
proc new*(
T: type Manifest,
rootHash: Cid,
originalBytes: NBytes,
treeCid: Cid,
datasetSize: NBytes,
blockSize: NBytes,
blocks: seq[Cid],
version: CidVersion,
hcodec: MultiCodec,
codec: MultiCodec,
ecK: int,
ecM: int,
originalCid: Cid,
originalLen: int
originalTreeCid: Cid,
originalDatasetSize: NBytes
): Manifest =
Manifest(
rootHash: rootHash.some,
originalBytes: originalBytes,
treeCid: treeCid,
datasetSize: datasetSize,
blockSize: blockSize,
blocks: blocks,
version: version,
hcodec: hcodec,
codec: codec,
protected: true,
ecK: ecK,
ecM: ecM,
originalCid: originalCid,
originalLen: originalLen
)
proc new*(
T: type Manifest,
rootHash: Cid,
originalBytes: NBytes,
blockSize: NBytes,
blocks: seq[Cid],
version: CidVersion,
hcodec: MultiCodec,
codec: MultiCodec
): Manifest =
Manifest(
rootHash: rootHash.some,
originalBytes: originalBytes,
blockSize: blockSize,
blocks: blocks,
version: version,
hcodec: hcodec,
codec: codec,
protected: false,
originalTreeCid: originalTreeCid,
originalDatasetSize: originalDatasetSize
)

codex/merkletree.nim (new file)

@ -0,0 +1,4 @@
import ./merkletree/merkletree
import ./merkletree/coders
export merkletree, coders


@ -0,0 +1,75 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import ./merkletree
import ../units
import ../errors
const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint
proc encode*(self: MerkleTree): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
pb.write(1, self.mcodec.uint64)
pb.write(2, self.digestSize.uint64)
pb.write(3, self.leavesCount.uint64)
pb.write(4, self.nodesBuffer)
pb.finish
pb.buffer
proc decode*(_: type MerkleTree, data: seq[byte]): ?!MerkleTree =
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
var mcodecCode: uint64
var digestSize: uint64
var leavesCount: uint64
discard ? pb.getField(1, mcodecCode).mapFailure
discard ? pb.getField(2, digestSize).mapFailure
discard ? pb.getField(3, leavesCount).mapFailure
let mcodec = MultiCodec.codec(cast[int](mcodecCode))
if mcodec == InvalidMultiCodec:
return failure("Invalid MultiCodec code " & $cast[int](mcodec))
var nodesBuffer = newSeq[byte]()
discard ? pb.getField(4, nodesBuffer).mapFailure
let tree = ? MerkleTree.init(mcodec, digestSize, leavesCount, nodesBuffer)
success(tree)
proc encode*(self: MerkleProof): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
pb.write(1, self.mcodec.uint64)
pb.write(2, self.digestSize.uint64)
pb.write(3, self.index.uint64)
pb.write(4, self.nodesBuffer)
pb.finish
pb.buffer
proc decode*(_: type MerkleProof, data: seq[byte]): ?!MerkleProof =
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
var mcodecCode: uint64
var digestSize: uint64
var index: uint64
discard ? pb.getField(1, mcodecCode).mapFailure
discard ? pb.getField(2, digestSize).mapFailure
discard ? pb.getField(3, index).mapFailure
let mcodec = MultiCodec.codec(cast[int](mcodecCode))
if mcodec == InvalidMultiCodec:
return failure("Invalid MultiCodec code " & $cast[int](mcodec))
var nodesBuffer = newSeq[byte]()
discard ? pb.getField(4, nodesBuffer).mapFailure
let proof = ? MerkleProof.init(mcodec, digestSize, index, nodesBuffer)
success(proof)


@ -1,5 +1,5 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
@ -7,41 +7,43 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/math
import std/bitops
import std/sequtils
import std/sugar
import std/algorithm
import pkg/libp2p
import pkg/stew/byteutils
import pkg/chronicles
import pkg/questionable
import pkg/questionable/results
import pkg/nimcrypto/sha2
import pkg/libp2p/[cid, multicodec, multihash, vbuffer]
import pkg/stew/byteutils
import ../errors
logScope:
topics = "codex merkletree"
type
MerkleHash* = MultiHash
MerkleTree* = object
leavesCount: int
nodes: seq[MerkleHash]
mcodec: MultiCodec
digestSize: Natural
leavesCount: Natural
nodesBuffer*: seq[byte]
MerkleProof* = object
index: int
path: seq[MerkleHash]
# Tree constructed from leaves H0..H2 is
# H5=H(H3 & H4)
# / \
# H3=H(H0 & H1) H4=H(H2 & H2)
# / \ /
# H0=H(A) H1=H(B) H2=H(C)
# | | |
# A B C
#
# Memory layout is [H0, H1, H2, H3, H4, H5]
#
# Proofs of inclusion are
# - [H1, H4] for A
# - [H0, H4] for B
# - [H2, H3] for C
mcodec: MultiCodec
digestSize: Natural
index: Natural
nodesBuffer*: seq[byte]
MerkleTreeBuilder* = object
mcodec: MultiCodec
digestSize: Natural
buffer: seq[byte]
###########################################################
# Helper functions
###########################################################
func computeTreeHeight(leavesCount: int): int =
if isPowerOfTwo(leavesCount):
@ -49,141 +51,368 @@ func computeTreeHeight(leavesCount: int): int =
else:
fastLog2(leavesCount) + 2
func getLowHigh(leavesCount, level: int): (int, int) =
var width = leavesCount
var low = 0
for _ in 0..<level:
low += width
width = (width + 1) div 2
(low, low + width - 1)
func getLowHigh(self: MerkleTree, level: int): (int, int) =
getLowHigh(self.leavesCount, level)
func computeTotalSize(leavesCount: int): int =
func computeLevels(leavesCount: int): seq[tuple[offset: int, width: int, index: int]] =
let height = computeTreeHeight(leavesCount)
getLowHigh(leavesCount, height - 1)[1] + 1
var levels = newSeq[tuple[offset: int, width: int, index: int]](height)
proc getWidth(self: MerkleTree, level: int): int =
let (low, high) = self.getLowHigh(level)
high - low + 1
levels[0].offset = 0
levels[0].width = leavesCount
levels[0].index = 0
for i in 1..<height:
levels[i].offset = levels[i - 1].offset + levels[i - 1].width
levels[i].width = (levels[i - 1].width + 1) div 2
levels[i].index = i
levels
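A worked sketch of the levels computed for the three-leaf tree used in the examples below:

```nim
let levels = computeLevels(3)
assert levels.len == 3                               # tree height
assert levels[0] == (offset: 0, width: 3, index: 0)  # leaves H0..H2
assert levels[1] == (offset: 3, width: 2, index: 1)  # H3, H4
assert levels[2] == (offset: 5, width: 1, index: 2)  # root H5
```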
func getChildren(self: MerkleTree, level, i: int): (MerkleHash, MerkleHash) =
let (low, high) = self.getLowHigh(level - 1)
let leftIdx = low + 2 * i
let rightIdx = min(leftIdx + 1, high)
proc digestFn(mcodec: MultiCodec, dst: var openArray[byte], dstPos: int, data: openArray[byte]): ?!void =
var mhash = ? MultiHash.digest($mcodec, data).mapFailure
if (dstPos + mhash.size) > dst.len:
return failure("Not enough space in a destination buffer")
dst[dstPos..<dstPos + mhash.size] = mhash.data.buffer[mhash.dpos..<mhash.dpos + mhash.size]
success()
(self.nodes[leftIdx], self.nodes[rightIdx])
###########################################################
# MerkleTreeBuilder
###########################################################
func getSibling(self: MerkleTree, level, i: int): MerkleHash =
let (low, high) = self.getLowHigh(level)
if i mod 2 == 0:
self.nodes[min(low + i + 1, high)]
else:
self.nodes[low + i - 1]
proc init*(
T: type MerkleTreeBuilder,
mcodec: MultiCodec = multiCodec("sha2-256")
): ?!MerkleTreeBuilder =
let mhash = ? MultiHash.digest($mcodec, "".toBytes).mapFailure
success(MerkleTreeBuilder(mcodec: mcodec, digestSize: mhash.size, buffer: newSeq[byte]()))
proc setNode(self: var MerkleTree, level, i: int, value: MerkleHash): void =
let (low, _) = self.getLowHigh(level)
self.nodes[low + i] = value
proc addDataBlock*(self: var MerkleTreeBuilder, dataBlock: openArray[byte]): ?!void =
## Hashes the data block and adds the result of hashing to a buffer
##
let oldLen = self.buffer.len
self.buffer.setLen(oldLen + self.digestSize)
digestFn(self.mcodec, self.buffer, oldLen, dataBlock)
proc root*(self: MerkleTree): MerkleHash =
self.nodes[^1]
proc addLeaf*(self: var MerkleTreeBuilder, leaf: MultiHash): ?!void =
if leaf.mcodec != self.mcodec or leaf.size != self.digestSize:
return failure("Expected mcodec to be " & $self.mcodec & " and digest size to be " &
$self.digestSize & " but was " & $leaf.mcodec & " and " & $leaf.size)
proc len*(self: MerkleTree): int =
self.nodes.len
let oldLen = self.buffer.len
self.buffer.setLen(oldLen + self.digestSize)
self.buffer[oldLen..<oldLen + self.digestSize] = leaf.data.buffer[leaf.dpos..<leaf.dpos + self.digestSize]
success()
proc leaves*(self: MerkleTree): seq[MerkleHash] =
self.nodes[0..<self.leavesCount]
proc build*(self: MerkleTreeBuilder): ?!MerkleTree =
## Builds a tree from previously added data blocks
##
## Tree built from data blocks A, B and C is
## H5=H(H3 & H4)
## / \
## H3=H(H0 & H1) H4=H(H2 & 0x00)
## / \ /
## H0=H(A) H1=H(B) H2=H(C)
## | | |
## A B C
##
## Memory layout is [H0, H1, H2, H3, H4, H5]
##
let
mcodec = self.mcodec
digestSize = self.digestSize
leavesCount = self.buffer.len div self.digestSize
proc nodes*(self: MerkleTree): seq[MerkleHash] =
self.nodes
if leavesCount == 0:
return failure("At least one data block is required")
proc height*(self: MerkleTree): int =
computeTreeHeight(self.leavesCount)
let levels = computeLevels(leavesCount)
let totalNodes = levels[^1].offset + 1
proc `$`*(self: MerkleTree): string =
result &= "leavesCount: " & $self.leavesCount
result &= "\nnodes: " & $self.nodes
proc getProof*(self: MerkleTree, index: int): ?!MerkleProof =
if index >= self.leavesCount or index < 0:
return failure("Index " & $index & " out of range [0.." & $self.leaves.high & "]" )
var path = newSeq[MerkleHash](self.height - 1)
for level in 0..<path.len:
let i = index div (1 shl level)
path[level] = self.getSibling(level, i)
success(MerkleProof(index: index, path: path))
proc initTreeFromLeaves(leaves: openArray[MerkleHash]): ?!MerkleTree =
without mcodec =? leaves.?[0].?mcodec and
digestSize =? leaves.?[0].?size:
return failure("At least one leaf is required")
if not leaves.allIt(it.mcodec == mcodec):
return failure("All leaves must use the same codec")
let totalSize = computeTotalSize(leaves.len)
var tree = MerkleTree(leavesCount: leaves.len, nodes: newSeq[MerkleHash](totalSize))
var buf = newSeq[byte](digestSize * 2)
proc combine(l, r: MerkleHash): ?!MerkleHash =
copyMem(addr buf[0], unsafeAddr l.data.buffer[0], digestSize)
copyMem(addr buf[digestSize], unsafeAddr r.data.buffer[0], digestSize)
MultiHash.digest($mcodec, buf).mapErr(
c => newException(CatchableError, "Error calculating hash using codec " & $mcodec & ": " & $c)
)
var tree = MerkleTree(mcodec: mcodec, digestSize: digestSize, leavesCount: leavesCount, nodesBuffer: newSeq[byte](totalNodes * digestSize))
# copy leaves
for i in 0..<tree.getWidth(0):
tree.setNode(0, i, leaves[i])
tree.nodesBuffer[0..<leavesCount * digestSize] = self.buffer[0..<leavesCount * digestSize]
# calculate intermediate nodes
for level in 1..<tree.height:
for i in 0..<tree.getWidth(level):
let (left, right) = tree.getChildren(level, i)
var zero = newSeq[byte](digestSize)
var one = newSeq[byte](digestSize)
one[^1] = 0x01
without mhash =? combine(left, right), error:
return failure(error)
tree.setNode(level, i, mhash)
var
concatBuf = newSeq[byte](2 * digestSize)
prevLevel = levels[0]
for level in levels[1..^1]:
for i in 0..<level.width:
let parentIndex = level.offset + i
let leftChildIndex = prevLevel.offset + 2 * i
let rightChildIndex = leftChildIndex + 1
success(tree)
concatBuf[0..<digestSize] = tree.nodesBuffer[leftChildIndex * digestSize..<(leftChildIndex + 1) * digestSize]
func init*(
T: type MerkleTree,
root: MerkleHash,
leavesCount: int
): MerkleTree =
let totalSize = computeTotalSize(leavesCount)
var nodes = newSeq[MerkleHash](totalSize)
nodes[^1] = root
MerkleTree(nodes: nodes, leavesCount: leavesCount)
var dummyValue = if prevLevel.index == 0: zero else: one
if rightChildIndex < prevLevel.offset + prevLevel.width:
concatBuf[digestSize..^1] = tree.nodesBuffer[rightChildIndex * digestSize..<(rightChildIndex + 1) * digestSize]
else:
concatBuf[digestSize..^1] = dummyValue
? digestFn(mcodec, tree.nodesBuffer, parentIndex * digestSize, concatBuf)
prevLevel = level
return success(tree)
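A minimal usage sketch of the builder, assuming three one-byte data blocks (matching the diagram above):

```nim
var builder = MerkleTreeBuilder.init().tryGet()  # defaults to sha2-256
for dataBlock in [@[1'u8], @[2'u8], @[3'u8]]:
  builder.addDataBlock(dataBlock).tryGet()
let tree = builder.build().tryGet()
assert tree.leavesCount == 3 and tree.len == 6   # nodes H0..H5
```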
###########################################################
# MerkleTree
###########################################################
proc nodeBufferToMultiHash(self: (MerkleTree | MerkleProof), index: int): MultiHash =
var buf = newSeq[byte](self.digestSize)
let offset = index * self.digestSize
buf[0..^1] = self.nodesBuffer[offset..<(offset + self.digestSize)]
{.noSideEffect.}:
without mhash =? MultiHash.init($self.mcodec, buf).mapFailure, errx:
error "Error converting bytes to hash", msg = errx.msg
mhash
proc len*(self: (MerkleTree | MerkleProof)): Natural =
self.nodesBuffer.len div self.digestSize
proc nodes*(self: (MerkleTree | MerkleProof)): seq[MultiHash] {.noSideEffect.} =
toSeq(0..<self.len).map(i => self.nodeBufferToMultiHash(i))
proc mcodec*(self: (MerkleTree | MerkleProof)): MultiCodec =
self.mcodec
proc digestSize*(self: (MerkleTree | MerkleProof)): Natural =
self.digestSize
proc root*(self: MerkleTree): MultiHash =
let rootIndex = self.len - 1
self.nodeBufferToMultiHash(rootIndex)
proc rootCid*(self: MerkleTree, version = CIDv1, dataCodec = multiCodec("raw")): ?!Cid =
Cid.init(version, dataCodec, self.root).mapFailure
iterator leaves*(self: MerkleTree): MultiHash =
for i in 0..<self.leavesCount:
yield self.nodeBufferToMultiHash(i)
iterator leavesCids*(self: MerkleTree, version = CIDv1, dataCodec = multiCodec("raw")): ?!Cid =
for leaf in self.leaves:
yield Cid.init(version, dataCodec, leaf).mapFailure
proc leavesCount*(self: MerkleTree): Natural =
self.leavesCount
proc getLeaf*(self: MerkleTree, index: Natural): ?!MultiHash =
if index >= self.leavesCount:
return failure("Index " & $index & " out of range [0.." & $(self.leavesCount - 1) & "]" )
success(self.nodeBufferToMultiHash(index))
proc getLeafCid*(self: MerkleTree, index: Natural, version = CIDv1, dataCodec = multiCodec("raw")): ?!Cid =
let leaf = ? self.getLeaf(index)
Cid.init(version, dataCodec, leaf).mapFailure
proc height*(self: MerkleTree): Natural =
computeTreeHeight(self.leavesCount)
proc getProof*(self: MerkleTree, index: Natural): ?!MerkleProof =
## Extracts proof from a tree for a given index
##
## Given a tree built from data blocks A, B and C
## H5
## / \
## H3 H4
## / \ /
## H0 H1 H2
## | | |
## A B C
##
## Proofs of inclusion (index and path) are
## - 0,[H1, H4] for data block A
## - 1,[H0, H4] for data block B
## - 2,[0x00, H3] for data block C
##
if index >= self.leavesCount:
return failure("Index " & $index & " out of range [0.." & $(self.leavesCount - 1) & "]" )
var zero = newSeq[byte](self.digestSize)
var one = newSeq[byte](self.digestSize)
one[^1] = 0x01
let levels = computeLevels(self.leavesCount)
var proofNodesBuffer = newSeq[byte]((levels.len - 1) * self.digestSize)
for level in levels[0..^2]:
let lr = index shr level.index
let siblingIndex = if lr mod 2 == 0:
level.offset + lr + 1
else:
level.offset + lr - 1
var dummyValue = if level.index == 0: zero else: one
if siblingIndex < level.offset + level.width:
proofNodesBuffer[level.index * self.digestSize..<(level.index + 1) * self.digestSize] =
self.nodesBuffer[siblingIndex * self.digestSize..<(siblingIndex + 1) * self.digestSize]
else:
proofNodesBuffer[level.index * self.digestSize..<(level.index + 1) * self.digestSize] = dummyValue
success(MerkleProof(mcodec: self.mcodec, digestSize: self.digestSize, index: index, nodesBuffer: proofNodesBuffer))
proc `$`*(self: MerkleTree): string {.noSideEffect.} =
"mcodec:" & $self.mcodec &
", digestSize: " & $self.digestSize &
", leavesCount: " & $self.leavesCount &
", nodes: " & $self.nodes
proc `==`*(a, b: MerkleTree): bool =
(a.mcodec == b.mcodec) and
(a.digestSize == b.digestSize) and
(a.leavesCount == b.leavesCount) and
(a.nodesBuffer == b.nodesBuffer)
proc init*(
T: type MerkleTree,
leaves: openArray[MerkleHash]
mcodec: MultiCodec,
digestSize: Natural,
leavesCount: Natural,
nodesBuffer: seq[byte]
): ?!MerkleTree =
initTreeFromLeaves(leaves)
let levels = computeLevels(leavesCount)
let totalNodes = levels[^1].offset + 1
if totalNodes * digestSize == nodesBuffer.len:
success(
MerkleTree(
mcodec: mcodec,
digestSize: digestSize,
leavesCount: leavesCount,
nodesBuffer: nodesBuffer
)
)
else:
failure("Expected nodesBuffer len to be " & $(totalNodes * digestSize) & " but was " & $nodesBuffer.len)
proc index*(self: MerkleProof): int =
proc init*(
T: type MerkleTree,
leaves: openArray[MultiHash]
): ?!MerkleTree =
without leaf =? leaves.?[0]:
return failure("At least one leaf is required")
var builder = ? MerkleTreeBuilder.init(mcodec = leaf.mcodec)
for l in leaves:
let res = builder.addLeaf(l)
if res.isErr:
return failure(res.error)
builder.build()
proc init*(
T: type MerkleTree,
cids: openArray[Cid]
): ?!MerkleTree =
var leaves = newSeq[MultiHash]()
for cid in cids:
let res = cid.mhash.mapFailure
if res.isErr:
return failure(res.error)
else:
leaves.add(res.value)
MerkleTree.init(leaves)
###########################################################
# MerkleProof
###########################################################
proc verifyLeaf*(self: MerkleProof, leaf: MultiHash, treeRoot: MultiHash): ?!bool =
if leaf.mcodec != self.mcodec:
return failure("Leaf mcodec was " & $leaf.mcodec & ", but " & $self.mcodec & " expected")
if treeRoot.mcodec != self.mcodec:
return failure("Tree root mcodec was " & $treeRoot.mcodec & ", but " & $self.mcodec & " expected")
var digestBuf = newSeq[byte](self.digestSize)
digestBuf[0..^1] = leaf.data.buffer[leaf.dpos..<(leaf.dpos + self.digestSize)]
let proofLen = self.nodesBuffer.len div self.digestSize
var concatBuf = newSeq[byte](2 * self.digestSize)
for i in 0..<proofLen:
let offset = i * self.digestSize
let lr = self.index shr i
if lr mod 2 == 0:
concatBuf[0..^1] = digestBuf & self.nodesBuffer[offset..<(offset + self.digestSize)]
else:
concatBuf[0..^1] = self.nodesBuffer[offset..<(offset + self.digestSize)] & digestBuf
? digestFn(self.mcodec, digestBuf, 0, concatBuf)
let computedRoot = ? MultiHash.init(self.mcodec, digestBuf).mapFailure
success(computedRoot == treeRoot)
proc verifyDataBlock*(self: MerkleProof, dataBlock: openArray[byte], treeRoot: MultiHash): ?!bool =
var digestBuf = newSeq[byte](self.digestSize)
? digestFn(self.mcodec, digestBuf, 0, dataBlock)
let leaf = ? MultiHash.init(self.mcodec, digestBuf).mapFailure
self.verifyLeaf(leaf, treeRoot)
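Continuing the builder sketch above, inclusion of the third data block can be proven and checked against the root:

```nim
let proof = tree.getProof(2).tryGet()
assert proof.verifyDataBlock(@[3'u8], tree.root).tryGet()
```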
proc index*(self: MerkleProof): Natural =
self.index
proc path*(self: MerkleProof): seq[MerkleHash] =
self.path
proc `$`*(self: MerkleProof): string =
result &= "index: " & $self.index
result &= "\npath: " & $self.path
"mcodec:" & $self.mcodec &
", digestSize: " & $self.digestSize &
", index: " & $self.index &
", nodes: " & $self.nodes
func `==`*(a, b: MerkleProof): bool =
(a.index == b.index) and (a.path == b.path)
(a.index == b.index) and
(a.mcodec == b.mcodec) and
(a.digestSize == b.digestSize) and
(a.nodesBuffer == b.nodesBuffer)
proc init*(
T: type MerkleProof,
index: int,
path: seq[MerkleHash]
): MerkleProof =
MerkleProof(index: index, path: path)
index: Natural,
nodes: seq[MultiHash]
): ?!MerkleProof =
if nodes.len == 0:
return failure("At least one node is required")
let
mcodec = nodes[0].mcodec
digestSize = nodes[0].size
var nodesBuffer = newSeq[byte](nodes.len * digestSize)
for nodeIndex, node in nodes:
nodesBuffer[nodeIndex * digestSize..<(nodeIndex + 1) * digestSize] = node.data.buffer[node.dpos..<node.dpos + digestSize]
success(MerkleProof(mcodec: mcodec, digestSize: digestSize, index: index, nodesBuffer: nodesBuffer))
func init*(
T: type MerkleProof,
mcodec: MultiCodec,
digestSize: Natural,
index: Natural,
nodesBuffer: seq[byte]
): ?!MerkleProof =
if nodesBuffer.len mod digestSize != 0:
return failure("nodesBuffer len is not a multiple of digestSize")
let treeHeight = (nodesBuffer.len div digestSize) + 1
let maxLeavesCount = 1 shl treeHeight
if index < maxLeavesCount:
return success(
MerkleProof(
mcodec: mcodec,
digestSize: digestSize,
index: index,
nodesBuffer: nodesBuffer
)
)
else:
return failure("index higher than max leaves count")


@ -16,6 +16,8 @@ const
CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace
CodexBlocksTtlNamespace* = # Cid TTL
CodexMetaNamespace & "/ttl"
CodexBlockProofNamespace* = # Cid and Proof
CodexMetaNamespace & "/proof"
CodexDhtNamespace* = "dht" # Dht namespace
CodexDhtProvidersNamespace* = # Dht providers namespace
CodexDhtNamespace & "/providers"


@ -11,13 +11,14 @@ import std/options
import std/tables
import std/sequtils
import std/strformat
import std/sugar
import pkg/questionable
import pkg/questionable/results
import pkg/chronicles
import pkg/chronos
import pkg/libp2p/switch
import pkg/libp2p/[switch, multicodec, multihash]
import pkg/libp2p/stream/bufferstream
# TODO: remove once exported by libp2p
@ -27,6 +28,7 @@ import pkg/libp2p/signed_envelope
import ./chunker
import ./blocktype as bt
import ./manifest
import ./merkletree
import ./stores/blockstore
import ./blockexchange
import ./streams
@ -34,6 +36,7 @@ import ./erasure
import ./discovery
import ./contracts
import ./node/batch
import ./utils
export batch
@ -87,7 +90,7 @@ proc fetchManifest*(
trace "Retrieving manifest for cid", cid
without blk =? await node.blockStore.getBlock(cid), err:
without blk =? await node.blockStore.getBlock(BlockAddress.init(cid)), err:
trace "Error retrieve manifest block", cid, err = err.msg
return failure err
@ -109,17 +112,20 @@ proc fetchBatched*(
## Fetch dataset blocks in batches of `batchSize`
##
let
batches =
(manifest.blocks.len div batchSize) +
(manifest.blocks.len mod batchSize)
let batchCount = divUp(manifest.blocksCount, batchSize)
trace "Fetching blocks in batches of", size = batchSize
for blks in manifest.blocks.distribute(max(1, batches), true):
try:
let
blocks = blks.mapIt(node.blockStore.getBlock( it ))
let iter = Iter.fromSlice(0..<manifest.blocksCount)
.map((i: int) => node.blockStore.getBlock(BlockAddress.init(manifest.treeCid, i)))
for batchNum in 0..<batchCount:
let blocks = collect:
for i in 0..<batchSize:
if not iter.finished:
iter.next()
try:
await allFuturesThrowing(allFinished(blocks))
if not onBatch.isNil:
await onBatch(blocks.mapIt( it.read.get ))
@ -157,7 +163,7 @@ proc retrieve*(
let
stream = BufferStream.new()
without blk =? (await node.blockStore.getBlock(cid)), err:
without blk =? (await node.blockStore.getBlock(BlockAddress.init(cid))), err:
return failure(err)
proc streamOneBlock(): Future[void] {.async.} =
@ -181,11 +187,12 @@ proc store*(
##
trace "Storing data"
without var blockManifest =? Manifest.new(blockSize = blockSize):
return failure("Unable to create Block Set")
let
hcodec = multiCodec("sha2-256")
dataCodec = multiCodec("raw")
chunker = LPStreamChunker.new(stream, chunkSize = blockSize)
# Manifest and chunker should use the same blockSize
let chunker = LPStreamChunker.new(stream, chunkSize = blockSize)
var cids: seq[Cid]
try:
while (
@ -193,10 +200,18 @@ proc store*(
chunk.len > 0):
trace "Got data from stream", len = chunk.len
without blk =? bt.Block.new(chunk):
without mhash =? MultiHash.digest($hcodec, chunk).mapFailure, err:
return failure(err)
without cid =? Cid.init(CIDv1, dataCodec, mhash).mapFailure, err:
return failure(err)
without blk =? bt.Block.new(cid, chunk, verify = false):
return failure("Unable to init block from chunk!")
blockManifest.add(blk.cid)
cids.add(cid)
if err =? (await self.blockStore.putBlock(blk)).errorOption:
trace "Unable to store block", cid = blk.cid, err = err.msg
return failure(&"Unable to store block {blk.cid}")
@ -208,34 +223,51 @@ proc store*(
finally:
await stream.close()
without tree =? MerkleTree.init(cids), err:
return failure(err)
without treeCid =? tree.rootCid(CIDv1, dataCodec), err:
return failure(err)
for index, cid in cids:
without proof =? tree.getProof(index), err:
return failure(err)
if err =? (await self.blockStore.putBlockCidAndProof(treeCid, index, cid, proof)).errorOption:
# TODO add log here
return failure(err)
let manifest = Manifest.new(
treeCid = treeCid,
blockSize = blockSize,
datasetSize = NBytes(chunker.offset),
version = CIDv1,
hcodec = hcodec,
codec = dataCodec
)
# Generate manifest
blockManifest.originalBytes = NBytes(chunker.offset) # store the exact file size
without data =? blockManifest.encode():
without data =? manifest.encode(), err:
return failure(
newException(CodexError, "Could not generate dataset manifest!"))
newException(CodexError, "Error encoding manifest: " & err.msg))
# Store as a dag-pb block
without manifest =? bt.Block.new(data = data, codec = DagPBCodec):
without manifestBlk =? bt.Block.new(data = data, codec = DagPBCodec):
trace "Unable to init block from manifest data!"
return failure("Unable to init block from manifest data!")
if isErr (await self.blockStore.putBlock(manifest)):
trace "Unable to store manifest", cid = manifest.cid
return failure("Unable to store manifest " & $manifest.cid)
if isErr (await self.blockStore.putBlock(manifestBlk)):
trace "Unable to store manifest", cid = manifestBlk.cid
return failure("Unable to store manifest " & $manifestBlk.cid)
without cid =? blockManifest.cid, error:
trace "Unable to generate manifest Cid!", exc = error.msg
return failure(error.msg)
trace "Stored data", manifestCid = manifest.cid,
contentCid = cid,
blocks = blockManifest.len,
size=blockManifest.originalBytes
info "Stored data", manifestCid = manifestBlk.cid,
treeCid = treeCid,
blocks = manifest.blocksCount,
datasetSize = manifest.datasetSize
# Announce manifest
await self.discovery.provide(manifest.cid)
await self.discovery.provide(manifestBlk.cid)
await self.discovery.provide(treeCid)
return manifest.cid.success
return manifestBlk.cid.success
proc iterateManifests*(node: CodexNodeRef, onManifest: OnManifest) {.async.} =
without cids =? await node.blockStore.listBlocks(BlockType.Manifest):
@ -309,7 +341,7 @@ proc requestStorage*(
# because the slotSize is used to determine the amount of bytes to reserve
# in a Reservations
# TODO: slotSize: (encoded.blockSize.int * encoded.steps).u256,
slotSize: (encoded.blockSize.int * encoded.blocks.len).u256,
slotSize: (encoded.blockSize.int * encoded.blocksCount).u256,
duration: duration,
proofProbability: proofProbability,
reward: reward,
@ -319,7 +351,7 @@ proc requestStorage*(
content: StorageContent(
cid: $encodedBlk.cid,
erasure: StorageErasure(
totalChunks: encoded.len.uint64,
totalChunks: encoded.blocksCount.uint64,
),
por: StoragePoR(
u: @[], # TODO: PoR setup


@ -4,5 +4,6 @@ import ./stores/networkstore
import ./stores/repostore
import ./stores/maintenance
import ./stores/keyutils
import ./stores/treehelper
export cachestore, blockstore, networkstore, repostore, maintenance, keyutils
export cachestore, blockstore, networkstore, repostore, maintenance, keyutils, treehelper


@ -18,6 +18,8 @@ import pkg/questionable/results
import ../clock
import ../blocktype
import ../merkletree
import ../utils
export blocktype
@ -27,23 +29,31 @@ type
BlockType* {.pure.} = enum
Manifest, Block, Both
GetNext* = proc(): Future[?Cid] {.upraises: [], gcsafe, closure.}
BlocksIter* = ref object
finished*: bool
next*: GetNext
BlockStore* = ref object of RootObj
iterator items*(self: BlocksIter): Future[?Cid] =
while not self.finished:
yield self.next()
method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base.} =
## Get a block from the blockstore
##
raiseAssert("Not implemented!")
raiseAssert("getBlock by cid not implemented!")
method getBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Block] {.base.} =
## Get a block from the blockstore
##
raiseAssert("getBlock by treecid not implemented!")
method getBlock*(self: BlockStore, address: BlockAddress): Future[?!Block] {.base.} =
## Get a block from the blockstore
##
raiseAssert("getBlock by addr not implemented!")
method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, MerkleProof)] {.base.} =
## Get a block and associated inclusion proof by Cid of a merkle tree and an index of a leaf in a tree
##
raiseAssert("getBlockAndProof not implemented!")
method putBlock*(
self: BlockStore,
@ -53,7 +63,19 @@ method putBlock*(
## Put a block to the blockstore
##
raiseAssert("Not implemented!")
raiseAssert("putBlock not implemented!")
method putBlockCidAndProof*(
self: BlockStore,
treeCid: Cid,
index: Natural,
blockCid: Cid,
proof: MerkleProof
): Future[?!void] {.base.} =
## Store a block cid together with its inclusion proof, keyed by tree cid and leaf index
##
raiseAssert("putBlockCidAndProof not implemented!")
method ensureExpiry*(
self: BlockStore,
@ -70,28 +92,40 @@ method delBlock*(self: BlockStore, cid: Cid): Future[?!void] {.base.} =
## Delete a block from the blockstore
##
raiseAssert("Not implemented!")
raiseAssert("delBlock not implemented!")
method delBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!void] {.base.} =
## Delete a block from the blockstore
##
raiseAssert("delBlock not implemented!")
method hasBlock*(self: BlockStore, cid: Cid): Future[?!bool] {.base.} =
## Check if the block exists in the blockstore
##
raiseAssert("Not implemented!")
raiseAssert("hasBlock not implemented!")
method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {.base.} =
## Check if the block exists in the blockstore
##
raiseAssert("hasBlock not implemented!")
method listBlocks*(
self: BlockStore,
blockType = BlockType.Manifest): Future[?!BlocksIter] {.base.} =
blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] {.base.} =
## Get the list of blocks in the BlockStore. This is an intensive operation
##
raiseAssert("Not implemented!")
raiseAssert("listBlocks not implemented!")
method close*(self: BlockStore): Future[void] {.base.} =
## Close the blockstore, cleaning up resources managed by it.
## For some implementations this may be a no-op
##
raiseAssert("Not implemented!")
raiseAssert("close not implemented!")
proc contains*(self: BlockStore, blk: Cid): Future[bool] {.async.} =
## Check if the block exists in the blockstore.
@ -99,3 +133,9 @@ proc contains*(self: BlockStore, blk: Cid): Future[bool] {.async.} =
##
return (await self.hasBlock(blk)) |? false
proc contains*(self: BlockStore, address: BlockAddress): Future[bool] {.async.} =
return if address.leaf:
(await self.hasBlock(address.treeCid, address.index)) |? false
else:
(await self.hasBlock(address.cid)) |? false
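A usage sketch of the address-based API (`store` and `treeCid` are assumed to exist):

```nim
# Leaf blocks are addressed by (treeCid, index) rather than by block cid.
let address = BlockAddress.init(treeCid, 0)
if await store.contains(address):
  let blk = (await store.getBlock(address)).tryGet()
```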


@ -25,6 +25,8 @@ import ../units
import ../chunker
import ../errors
import ../manifest
import ../merkletree
import ../utils
import ../clock
export blockstore
@ -37,6 +39,7 @@ type
currentSize*: NBytes
size*: NBytes
cache: LruCache[Cid, Block]
cidAndProofCache: LruCache[(Cid, Natural), (Cid, MerkleProof)]
InvalidBlockSize* = object of CodexError
@ -51,10 +54,10 @@ method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} =
if cid.isEmpty:
trace "Empty block, ignoring"
return success cid.emptyBlock
return cid.emptyBlock
if cid notin self.cache:
return failure (ref BlockNotFoundError)(msg: "Block not in cache")
return failure (ref BlockNotFoundError)(msg: "Block not in cache " & $cid)
try:
return success self.cache[cid]
@ -62,6 +65,35 @@ method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} =
trace "Error requesting block from cache", cid, error = exc.msg
return failure exc
proc getCidAndProof(self: CacheStore, treeCid: Cid, index: Natural): ?!(Cid, MerkleProof) =
if cidAndProof =? self.cidAndProofCache.getOption((treeCid, index)):
success(cidAndProof)
else:
failure(newException(BlockNotFoundError, "Block not in cache: " & $BlockAddress.init(treeCid, index)))
method getBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!Block] {.async.} =
without cidAndProof =? self.getCidAndProof(treeCid, index), err:
return failure(err)
await self.getBlock(cidAndProof[0])
method getBlockAndProof*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!(Block, MerkleProof)] {.async.} =
without cidAndProof =? self.getCidAndProof(treeCid, index), err:
return failure(err)
let (cid, proof) = cidAndProof
without blk =? await self.getBlock(cid), err:
return failure(err)
success((blk, proof))
method getBlock*(self: CacheStore, address: BlockAddress): Future[?!Block] =
if address.leaf:
self.getBlock(address.treeCid, address.index)
else:
self.getBlock(address.cid)
method hasBlock*(self: CacheStore, cid: Cid): Future[?!bool] {.async.} =
## Check if the block exists in the blockstore
##
@ -73,6 +105,16 @@ method hasBlock*(self: CacheStore, cid: Cid): Future[?!bool] {.async.} =
return (cid in self.cache).success
method hasBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!bool] {.async.} =
without cidAndProof =? self.getCidAndProof(treeCid, index), err:
if err of BlockNotFoundError:
return success(false)
else:
return failure(err)
await self.hasBlock(cidAndProof[0])
func cids(self: CacheStore): (iterator: Cid {.gcsafe.}) =
return iterator(): Cid =
for cid in self.cache.keys:
@ -81,12 +123,12 @@ func cids(self: CacheStore): (iterator: Cid {.gcsafe.}) =
method listBlocks*(
self: CacheStore,
blockType = BlockType.Manifest
): Future[?!BlocksIter] {.async.} =
): Future[?!AsyncIter[?Cid]] {.async.} =
## Get the list of blocks in the BlockStore. This is an intensive operation
##
var
iter = BlocksIter()
iter = AsyncIter[?Cid]()
let
cids = self.cids()
@ -102,7 +144,7 @@ method listBlocks*(
cid = cids()
if finished(cids):
iter.finished = true
iter.finish
return Cid.none
without isManifest =? cid.isManifest, err:
@ -168,6 +210,16 @@ method putBlock*(
discard self.putBlockSync(blk)
return success()
method putBlockCidAndProof*(
self: CacheStore,
treeCid: Cid,
index: Natural,
blockCid: Cid,
proof: MerkleProof
): Future[?!void] {.async.} =
self.cidAndProofCache[(treeCid, index)] = (blockCid, proof)
success()
method ensureExpiry*(
self: CacheStore,
cid: Cid,
@ -193,6 +245,14 @@ method delBlock*(self: CacheStore, cid: Cid): Future[?!void] {.async.} =
return success()
method delBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!void] {.async.} =
let maybeRemoved = self.cidAndProofCache.del((treeCid, index))
if removed =? maybeRemoved:
return await self.delBlock(removed[0])
return success()
method close*(self: CacheStore): Future[void] {.async.} =
## Close the blockstore, a no-op for this implementation
##
@ -217,8 +277,10 @@ proc new*(
currentSize = 0'nb
size = int(cacheSize div chunkSize)
cache = newLruCache[Cid, Block](size)
cidAndProofCache = newLruCache[(Cid, Natural), (Cid, MerkleProof)](size)
store = CacheStore(
cache: cache,
cidAndProofCache: cidAndProofCache,
currentSize: currentSize,
size: cacheSize)


@ -10,6 +10,7 @@
import pkg/upraises
push: {.upraises: [].}
import std/sugar
import pkg/questionable/results
import pkg/datastore
import pkg/libp2p
@ -23,6 +24,7 @@ const
CodexTotalBlocksKey* = Key.init(CodexBlockTotalNamespace).tryGet
CodexManifestKey* = Key.init(CodexManifestNamespace).tryGet
BlocksTtlKey* = Key.init(CodexBlocksTtlNamespace).tryGet
BlockProofKey* = Key.init(CodexBlockProofNamespace).tryGet
QuotaKey* = Key.init(CodexQuotaNamespace).tryGet
QuotaUsedKey* = (QuotaKey / "used").tryGet
QuotaReservedKey* = (QuotaKey / "reserved").tryGet
@ -42,3 +44,7 @@ proc createBlockExpirationMetadataKey*(cid: Cid): ?!Key =
proc createBlockExpirationMetadataQueryKey*(): ?!Key =
let queryString = ? (BlocksTtlKey / "*")
Key.init(queryString)
proc createBlockCidAndProofMetadataKey*(treeCid: Cid, index: Natural): ?!Key =
(BlockProofKey / $treeCid).flatMap((k: Key) => k / $index)
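For illustration, the resulting key nests the leaf index under the tree cid (the exact prefix comes from `CodexBlockProofNamespace`):

```nim
# Hypothetical: for tree cid `treeCid` and index 4 this yields a key
# shaped like <CodexMetaNamespace>/proof/<treeCid>/4
let key = createBlockCidAndProofMetadataKey(treeCid, 4).tryGet()
```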


@ -17,6 +17,7 @@ import pkg/questionable/results
import ./repostore
import ../utils/timer
import ../utils/asynciter
import ../clock
import ../systemclock


@ -11,45 +11,64 @@ import pkg/upraises
push: {.upraises: [].}
import std/sugar
import pkg/chronicles
import pkg/chronos
import pkg/libp2p
import ../blocktype as bt
import ../blocktype
import ../utils/asyncheapqueue
import ../utils/asynciter
import ../clock
import ./blockstore
import ../blockexchange
import ../merkletree
import ../blocktype
export blockstore, blockexchange, asyncheapqueue
logScope:
topics = "codex networkstore"
const BlockPrefetchAmount = 5
type
NetworkStore* = ref object of BlockStore
engine*: BlockExcEngine # blockexc decision engine
localStore*: BlockStore # local block store
method getBlock*(self: NetworkStore, cid: Cid): Future[?!bt.Block] {.async.} =
trace "Getting block from local store or network", cid
method getBlock*(self: NetworkStore, address: BlockAddress): Future[?!Block] {.async.} =
trace "Getting block from local store or network", address
without blk =? await self.localStore.getBlock(cid), error:
without blk =? await self.localStore.getBlock(address), error:
if not (error of BlockNotFoundError): return failure error
trace "Block not in local store", cid
trace "Block not in local store", address
without newBlock =? (await self.engine.requestBlock(cid)).catch, error:
trace "Unable to get block from exchange engine", cid
without newBlock =? (await self.engine.requestBlock(address)).catch, error:
trace "Unable to get block from exchange engine", address
return failure error
return success newBlock
return success blk
method getBlock*(self: NetworkStore, cid: Cid): Future[?!Block] =
## Get a block from the blockstore
##
self.getBlock(BlockAddress.init(cid))
method getBlock*(self: NetworkStore, treeCid: Cid, index: Natural): Future[?!Block] =
## Get a block from the blockstore
##
self.getBlock(BlockAddress.init(treeCid, index))
method putBlock*(
self: NetworkStore,
blk: bt.Block,
blk: Block,
ttl = Duration.none
): Future[?!void] {.async.} =
## Store block locally and notify the network
@ -64,6 +83,15 @@ method putBlock*(
await self.engine.resolveBlocks(@[blk])
return success()
method putBlockCidAndProof*(
self: NetworkStore,
treeCid: Cid,
index: Natural,
blockCid: Cid,
proof: MerkleProof
): Future[?!void] =
self.localStore.putBlockCidAndProof(treeCid, index, blockCid, proof)
method ensureExpiry*(
self: NetworkStore,
cid: Cid,
@ -82,7 +110,7 @@ method ensureExpiry*(
method listBlocks*(
self: NetworkStore,
blockType = BlockType.Manifest): Future[?!BlocksIter] =
blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] =
self.localStore.listBlocks(blockType)
method delBlock*(self: NetworkStore, cid: Cid): Future[?!void] =

View File

@ -12,8 +12,10 @@ import pkg/upraises
push: {.upraises: [].}
import pkg/chronos
import pkg/chronos/futures
import pkg/chronicles
import pkg/libp2p/cid
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/lrucache
import pkg/metrics
import pkg/questionable
import pkg/questionable/results
@ -25,6 +27,8 @@ import ./keyutils
import ../blocktype
import ../clock
import ../systemclock
import ../merkletree
import ../utils
export blocktype, cid
@ -59,15 +63,6 @@ type
cid*: Cid
expiration*: SecondsSince1970
GetNext = proc(): Future[?BlockExpiration] {.upraises: [], gcsafe, closure.}
BlockExpirationIter* = ref object
finished*: bool
next*: GetNext
iterator items*(q: BlockExpirationIter): Future[?BlockExpiration] =
while not q.finished:
yield q.next()
proc updateMetrics(self: RepoStore) =
codex_repostore_blocks.set(self.totalBlocks.int64)
codex_repostore_bytes_used.set(self.quotaUsedBytes.int64)
@ -82,6 +77,63 @@ func available*(self: RepoStore): uint =
func available*(self: RepoStore, bytes: uint): bool =
return bytes < self.available()
proc encode(cidAndProof: (Cid, MerkleProof)): seq[byte] =
  ## Encodes a tuple of a cid and a merkle proof in the following format:
## | 8-bytes | n-bytes | remaining bytes |
## | n | cid | proof |
##
  ## where n is the size of the cid
##
let
(cid, proof) = cidAndProof
cidBytes = cid.data.buffer
proofBytes = proof.encode
n = cidBytes.len
nBytes = n.uint64.toBytesBE
@nBytes & cidBytes & proofBytes
proc decode(_: type (Cid, MerkleProof), data: seq[byte]): ?!(Cid, MerkleProof) =
  let
    n = uint64.fromBytesBE(data[0..<sizeof(uint64)]).int
    cid = ? Cid.init(data[sizeof(uint64)..<sizeof(uint64) + n]).mapFailure
    proof = ? MerkleProof.decode(data[sizeof(uint64) + n..^1])
  success((cid, proof))
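
Since encode and decode are module-private helpers, the following round-trip is illustrative only (it assumes a blockCid: Cid and a proof: MerkleProof in scope inside this module):

let bytes = (blockCid, proof).encode()
let (cid2, proof2) = (Cid, MerkleProof).decode(bytes).tryGet()
assert cid2 == blockCid and proof2 == proof
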
method putBlockCidAndProof*(
self: RepoStore,
treeCid: Cid,
index: Natural,
blockCid: Cid,
proof: MerkleProof
): Future[?!void] {.async.} =
  ## Store a block cid and its merkle proof, keyed by tree cid and leaf index
  ##
without key =? createBlockCidAndProofMetadataKey(treeCid, index), err:
return failure(err)
let value = (blockCid, proof).encode()
await self.metaDs.put(key, value)
proc getCidAndProof(
self: RepoStore,
treeCid: Cid,
index: Natural
): Future[?!(Cid, MerkleProof)] {.async.} =
without key =? createBlockCidAndProofMetadataKey(treeCid, index), err:
return failure(err)
without value =? await self.metaDs.get(key), err:
if err of DatastoreKeyNotFound:
return failure(newException(BlockNotFoundError, err.msg))
else:
return failure(err)
return (Cid, MerkleProof).decode(value)
method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} =
## Get a block from the blockstore
##
@ -91,7 +143,7 @@ method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} =
if cid.isEmpty:
trace "Empty block, ignoring"
return success cid.emptyBlock
return cid.emptyBlock
without key =? makePrefixKey(self.postFixLen, cid), err:
trace "Error getting key from provider", err = err.msg
@ -105,7 +157,34 @@ method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} =
return failure(newException(BlockNotFoundError, err.msg))
trace "Got block for cid", cid
return Block.new(cid, data)
return Block.new(cid, data, verify = true)
method getBlockAndProof*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!(Block, MerkleProof)] {.async.} =
without cidAndProof =? await self.getCidAndProof(treeCid, index), err:
return failure(err)
let (cid, proof) = cidAndProof
without blk =? await self.getBlock(cid), err:
return failure(err)
success((blk, proof))
method getBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!Block] {.async.} =
without cidAndProof =? await self.getCidAndProof(treeCid, index), err:
return failure(err)
await self.getBlock(cidAndProof[0])
method getBlock*(self: RepoStore, address: BlockAddress): Future[?!Block] =
## Get a block from the blockstore
##
if address.leaf:
self.getBlock(address.treeCid, address.index)
else:
self.getBlock(address.cid)
proc getBlockExpirationEntry(
self: RepoStore,
@ -291,6 +370,23 @@ method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} =
self.updateMetrics()
return success()
method delBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!void] {.async.} =
without key =? createBlockCidAndProofMetadataKey(treeCid, index), err:
return failure(err)
without value =? await self.metaDs.get(key), err:
if err of DatastoreKeyNotFound:
return success()
else:
return failure(err)
without cidAndProof =? (Cid, MerkleProof).decode(value), err:
return failure(err)
  if err =? (await self.delBlock(cidAndProof[0])).errorOption:
    return failure(err)
await self.metaDs.delete(key)
method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} =
## Check if the block exists in the blockstore
##
@ -308,15 +404,25 @@ method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} =
return await self.repoDs.has(key)
method hasBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!bool] {.async.} =
without cidAndProof =? await self.getCidAndProof(treeCid, index), err:
if err of BlockNotFoundError:
return success(false)
else:
return failure(err)
await self.hasBlock(cidAndProof[0])
method listBlocks*(
self: RepoStore,
blockType = BlockType.Manifest): Future[?!BlocksIter] {.async.} =
blockType = BlockType.Manifest
): Future[?!AsyncIter[?Cid]] {.async.} =
## Get the list of blocks in the RepoStore.
## This is an intensive operation
##
var
iter = BlocksIter()
iter = AsyncIter[?Cid]()
let key =
case blockType:
@ -331,13 +437,14 @@ method listBlocks*(
proc next(): Future[?Cid] {.async.} =
await idleAsync()
iter.finished = queryIter.finished
if not queryIter.finished:
if queryIter.finished:
iter.finish
else:
if pair =? (await queryIter.next()) and cid =? pair.key:
doAssert pair.data.len == 0
trace "Retrieved record from repo", cid
return Cid.init(cid.value).option
else:
return Cid.none
iter.next = next
@ -350,7 +457,7 @@ proc createBlockExpirationQuery(maxNumber: int, offset: int): ?!Query =
method getBlockExpirations*(
self: RepoStore,
maxNumber: int,
offset: int): Future[?!BlockExpirationIter] {.async, base.} =
offset: int): Future[?!AsyncIter[?BlockExpiration]] {.async, base.} =
## Get block expirations from the given RepoStore
##
@ -362,7 +469,7 @@ method getBlockExpirations*(
trace "Unable to execute block expirations query"
return failure(err)
var iter = BlockExpirationIter()
var iter = AsyncIter[?BlockExpiration]()
proc next(): Future[?BlockExpiration] {.async.} =
if not queryIter.finished:
@ -377,7 +484,7 @@ method getBlockExpirations*(
).some
else:
discard await queryIter.dispose()
iter.finished = true
iter.finish
return BlockExpiration.none
iter.next = next
@ -519,4 +626,5 @@ func new*(
clock: clock,
postFixLen: postFixLen,
quotaMaxBytes: quotaMaxBytes,
blockTtl: blockTtl)
blockTtl: blockTtl
)

View File

@ -0,0 +1,52 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push: {.upraises: [].}
import std/sugar
import pkg/chronos
import pkg/chronos/futures
import pkg/metrics
import pkg/questionable
import pkg/questionable/results
import ./blockstore
import ../utils/asynciter
import ../merkletree
proc putSomeProofs*(store: BlockStore, tree: MerkleTree, iter: Iter[int]): Future[?!void] {.async.} =
without treeCid =? tree.rootCid, err:
return failure(err)
for i in iter:
if i notin 0..<tree.leavesCount:
return failure("Invalid leaf index " & $i & ", tree with cid " & $treeCid & " has " & $tree.leavesCount & " leaves")
without blkCid =? tree.getLeafCid(i), err:
return failure(err)
without proof =? tree.getProof(i), err:
return failure(err)
let res = await store.putBlockCidAndProof(treeCid, i, blkCid, proof)
if err =? res.errorOption:
return failure(err)
success()
proc putSomeProofs*(store: BlockStore, tree: MerkleTree, iter: Iter[Natural]): Future[?!void] =
store.putSomeProofs(tree, iter.map((i: Natural) => i.ord))
proc putAllProofs*(store: BlockStore, tree: MerkleTree): Future[?!void] =
store.putSomeProofs(tree, Iter.fromSlice(0..<tree.leavesCount))
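
A hedged usage sketch: persist proofs for all leaves of a tree whose blocks are already in the store, or only for a subset (store and tree are hypothetical; imports are indicative, and only the procs above plus Iter.fromRange are assumed):

import pkg/chronos
import pkg/questionable/results
import pkg/codex/stores
import pkg/codex/merkletree
import pkg/codex/utils/asynciter

proc persistProofs(store: BlockStore, tree: MerkleTree) {.async.} =
  # Proofs for every leaf...
  (await store.putAllProofs(tree)).tryGet()
  # ...or, for example, only for the even-indexed leaves.
  (await store.putSomeProofs(tree, Iter.fromRange(0, tree.leavesCount - 1, 2))).tryGet()
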

View File

@ -1,5 +1,5 @@
## Nim-Dagger
## Copyright (c) 2022 Status Research & Development GmbH
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
@ -20,6 +20,7 @@ import pkg/stew/ptrops
import ../stores
import ../manifest
import ../blocktype
import ../utils
import ./seekablestream
@ -81,7 +82,7 @@ method readOnce*(
## Raise exception if we are already at EOF.
##
trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.len
trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.blocksCount
if self.atEof:
raise newLPStreamEOFError()
@ -97,9 +98,10 @@ method readOnce*(
readBytes = min([self.size - self.offset,
nbytes - read,
self.manifest.blockSize.int - blockOffset])
address = BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum)
# Read contents of block `blockNum`
without blk =? await self.store.getBlock(self.manifest[blockNum]), error:
without blk =? await self.store.getBlock(address), error:
raise newLPStreamReadError(error)
trace "Reading bytes from store stream", blockNum, cid = blk.cid, bytes = readBytes, blockOffset

View File

@ -9,13 +9,15 @@
##
import std/parseutils
import std/options
import pkg/chronos
import ./utils/asyncheapqueue
import ./utils/fileutils
import ./utils/asynciter
export asyncheapqueue, fileutils
export asyncheapqueue, fileutils, asynciter
func divUp*[T: SomeInteger](a, b : T): T =
@ -27,6 +29,13 @@ func roundUp*[T](a, b : T): T =
## Round up 'a' to the next value divisible by 'b'
divUp(a,b) * b
proc orElse*[A](a, b: Option[A]): Option[A] =
if (a.isSome()):
a
else:
b
when not declared(parseDuration): # Odd code formatting to minimize diff v. mainLine
const Whitespace = {' ', '\t', '\v', '\r', '\l', '\f'}

codex/utils/asynciter.nim Normal file
View File

@ -0,0 +1,148 @@
import std/sugar
import pkg/questionable
import pkg/chronos
import pkg/upraises
type
Function*[T, U] = proc(fut: T): U {.upraises: [CatchableError], gcsafe, closure.}
IsFinished* = proc(): bool {.upraises: [], gcsafe, closure.}
GenNext*[T] = proc(): T {.upraises: [CatchableError], gcsafe, closure.}
Iter*[T] = ref object
finished: bool
next*: GenNext[T]
AsyncIter*[T] = Iter[Future[T]]
proc finish*[T](self: Iter[T]): void =
self.finished = true
proc finished*[T](self: Iter[T]): bool =
self.finished
iterator items*[T](self: Iter[T]): T =
while not self.finished:
yield self.next()
iterator pairs*[T](self: Iter[T]): tuple[key: int, val: T] {.inline.} =
var i = 0
while not self.finished:
yield (i, self.next())
inc(i)
proc map*[T, U](fut: Future[T], fn: Function[T, U]): Future[U] {.async.} =
let t = await fut
fn(t)
proc new*[T](_: type Iter, genNext: GenNext[T], isFinished: IsFinished, finishOnErr: bool = true): Iter[T] =
var iter = Iter[T]()
proc next(): T {.upraises: [CatchableError].} =
if not iter.finished:
var item: T
try:
item = genNext()
except CatchableError as err:
if finishOnErr or isFinished():
iter.finish
raise err
if isFinished():
iter.finish
return item
else:
raise newException(CatchableError, "Iterator is finished but next item was requested")
if isFinished():
iter.finish
iter.next = next
return iter
proc fromItems*[T](_: type Iter, items: seq[T]): Iter[T] =
  ## Creates a new iterator from a sequence of items
##
Iter.fromSlice(0..<items.len)
.map((i: int) => items[i])
proc fromSlice*[U, V: Ordinal](_: type Iter, slice: HSlice[U, V]): Iter[U] =
  ## Creates a new iterator from a slice
##
Iter.fromRange(slice.a.int, slice.b.int, 1)
proc fromRange*[U, V, S: Ordinal](_: type Iter, a: U, b: V, step: S = 1): Iter[U] =
  ## Creates a new iterator over the range a..b with the given step (default 1)
##
var i = a
proc genNext(): U =
let u = i
inc(i, step)
u
proc isFinished(): bool =
(step > 0 and i > b) or
(step < 0 and i < b)
Iter.new(genNext, isFinished)
proc map*[T, U](iter: Iter[T], fn: Function[T, U]): Iter[U] =
Iter.new(
genNext = () => fn(iter.next()),
isFinished = () => iter.finished
)
proc filter*[T](iter: Iter[T], predicate: Function[T, bool]): Iter[T] =
var nextT: Option[T]
proc tryFetch(): void =
nextT = T.none
while not iter.finished:
let t = iter.next()
if predicate(t):
nextT = some(t)
break
proc genNext(): T =
let t = nextT.unsafeGet
tryFetch()
return t
proc isFinished(): bool =
nextT.isNone
tryFetch()
Iter.new(genNext, isFinished)
proc prefetch*[T](iter: Iter[T], n: Positive): Iter[T] =
var ringBuf = newSeq[T](n)
var iterLen = int.high
var i = 0
proc tryFetch(j: int): void =
if not iter.finished:
let item = iter.next()
ringBuf[j mod n] = item
if iter.finished:
iterLen = min(j + 1, iterLen)
else:
if j == 0:
iterLen = 0
proc genNext(): T =
let item = ringBuf[i mod n]
tryFetch(i + n)
inc i
return item
proc isFinished(): bool =
i >= iterLen
# initialize ringBuf with n prefetched values
for j in 0..<n:
tryFetch(j)
Iter.new(genNext, isFinished)
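
These combinators compose like their std/sequtils counterparts. A small synchronous sketch (illustrative values; assumes this module is importable as codex/utils/asynciter):

import std/sugar
import pkg/codex/utils/asynciter

let squaredEvens = Iter.fromRange(0, 9)
  .filter((i: int) => i mod 2 == 0)  # keeps 0, 2, 4, 6, 8
  .map((i: int) => i * i)            # yields 0, 4, 16, 36, 64
for i, v in squaredEvens:
  echo i, ": ", v
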

View File

@ -12,6 +12,7 @@ import pkg/codex/stores
import pkg/codex/blockexchange
import pkg/codex/chunker
import pkg/codex/manifest
import pkg/codex/merkletree
import pkg/codex/blocktype as bt
import ../../helpers/mockdiscovery
@ -25,6 +26,7 @@ asyncchecksuite "Block Advertising and Discovery":
var
blocks: seq[bt.Block]
manifest: Manifest
tree: MerkleTree
manifestBlock: bt.Block
switch: Switch
peerStore: PeerCtxStore
@ -52,7 +54,7 @@ asyncchecksuite "Block Advertising and Discovery":
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
manifest = Manifest.new( blocks.mapIt( it.cid ) ).tryGet()
(manifest, tree) = makeManifestAndTree(blocks).tryGet()
manifestBlock = bt.Block.new(
manifest.encode().tryGet(), codec = DagPBCodec).tryGet()
@ -151,7 +153,7 @@ asyncchecksuite "Block Advertising and Discovery":
peerId = PeerId.example
haves = collect(initTable()):
for blk in blocks:
{ blk.cid: Presence(cid: blk.cid, price: 0.u256) }
{ blk.address: Presence(address: blk.address, price: 0.u256) }
engine.peers.add(
BlockExcPeerCtx(
@ -164,7 +166,7 @@ asyncchecksuite "Block Advertising and Discovery":
check false
await engine.start() # fire up discovery loop
engine.pendingBlocks.resolve(blocks)
engine.pendingBlocks.resolve(blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
await allFuturesThrowing(
allFinished(pendingBlocks))
@ -240,9 +242,14 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
advertised[cid] = switch[3].peerInfo.signedPeerRecord
await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5])
await blockexc[2].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[4..10])
await blockexc[3].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[10..15])
discard blocks[0..5].mapIt(blockexc[1].engine.pendingBlocks.getWantHandle(it.address))
await blockexc[1].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[0..5].mapIt(BlockDelivery(blk: it, address: it.address)))
discard blocks[4..10].mapIt(blockexc[2].engine.pendingBlocks.getWantHandle(it.address))
await blockexc[2].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[4..10].mapIt(BlockDelivery(blk: it, address: it.address)))
discard blocks[10..15].mapIt(blockexc[3].engine.pendingBlocks.getWantHandle(it.address))
await blockexc[3].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[10..15].mapIt(BlockDelivery(blk: it, address: it.address)))
MockDiscovery(blockexc[0].engine.discovery.discovery)
.findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid):
@ -256,13 +263,13 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
await allFuturesThrowing(
switch.mapIt( it.start() ) &
blockexc.mapIt( it.engine.start() ))
blockexc.mapIt( it.engine.start() )).wait(10.seconds)
await allFutures(futs)
await allFutures(futs).wait(10.seconds)
await allFuturesThrowing(
blockexc.mapIt( it.engine.stop() ) &
switch.mapIt( it.stop() ))
switch.mapIt( it.stop() )).wait(10.seconds)
test "E2E - Should advertise and discover blocks with peers already connected":
# Distribute the blocks amongst 1..3
@ -282,9 +289,14 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
advertised[cid] = switch[3].peerInfo.signedPeerRecord
await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5])
await blockexc[2].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[4..10])
await blockexc[3].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[10..15])
discard blocks[0..5].mapIt(blockexc[1].engine.pendingBlocks.getWantHandle(it.address))
await blockexc[1].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[0..5].mapIt(BlockDelivery(blk: it, address: it.address)))
discard blocks[4..10].mapIt(blockexc[2].engine.pendingBlocks.getWantHandle(it.address))
await blockexc[2].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[4..10].mapIt(BlockDelivery(blk: it, address: it.address)))
discard blocks[10..15].mapIt(blockexc[3].engine.pendingBlocks.getWantHandle(it.address))
await blockexc[3].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[10..15].mapIt(BlockDelivery(blk: it, address: it.address)))
MockDiscovery(blockexc[0].engine.discovery.discovery)
.findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid):
@ -297,10 +309,10 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
await allFuturesThrowing(
switch.mapIt( it.start() ) &
blockexc.mapIt( it.engine.start() ))
blockexc.mapIt( it.engine.start() )).wait(10.seconds)
await allFutures(futs).wait(10.seconds)
await allFuturesThrowing(
blockexc.mapIt( it.engine.stop() ) &
switch.mapIt( it.stop() ))
switch.mapIt( it.stop() )).wait(10.seconds)

View File

@ -57,7 +57,7 @@ asyncchecksuite "Test Discovery Engine":
blockDiscovery.findBlockProvidersHandler =
proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, gcsafe.} =
pendingBlocks.resolve(blocks.filterIt( it.cid == cid))
pendingBlocks.resolve(blocks.filterIt( it.cid == cid).mapIt(BlockDelivery(blk: it, address: it.address)))
await discoveryEngine.start()
await allFuturesThrowing(allFinished(wants)).wait(1.seconds)
@ -154,7 +154,9 @@ asyncchecksuite "Test Discovery Engine":
var
peerCtx = BlockExcPeerCtx(id: PeerId.example)
peerCtx.blocks[cid] = Presence(cid: cid, price: 0.u256)
let address = BlockAddress(leaf: false, cid: cid)
peerCtx.blocks[address] = Presence(address: address, price: 0.u256)
peerStore.add(peerCtx)
want.fire()

View File

@ -120,10 +120,11 @@ asyncchecksuite "NetworkStore engine - 2 nodes":
test "Should send want-have for block":
let blk = bt.Block.new("Block 1".toBytes).tryGet()
let blkFut = nodeCmps1.pendingBlocks.getWantHandle( blk.cid )
(await nodeCmps2.localStore.putBlock(blk)).tryGet()
let entry = Entry(
`block`: blk.cid.data.buffer,
let entry = WantListEntry(
address: blk.address,
priority: 1,
cancel: false,
wantType: WantType.WantBlock,
@ -136,6 +137,7 @@ asyncchecksuite "NetworkStore engine - 2 nodes":
.pushOrUpdateNoWait(peerCtx1).isOk
check eventually (await nodeCmps1.localStore.hasBlock(blk.cid)).tryGet()
check eventually (await blkFut) == blk
test "Should get blocks from remote":
let

View File

@ -14,7 +14,7 @@ import pkg/codex/blockexchange
import pkg/codex/stores
import pkg/codex/chunker
import pkg/codex/discovery
import pkg/codex/blocktype as bt
import pkg/codex/blocktype
import pkg/codex/utils/asyncheapqueue
import ../../helpers
@ -30,7 +30,7 @@ asyncchecksuite "NetworkStore engine basic":
blockDiscovery: Discovery
peerStore: PeerCtxStore
pendingBlocks: PendingBlocksManager
blocks: seq[bt.Block]
blocks: seq[Block]
done: Future[void]
setup:
@ -48,20 +48,20 @@ asyncchecksuite "NetworkStore engine basic":
if chunk.len <= 0:
break
blocks.add(bt.Block.new(chunk).tryGet())
blocks.add(Block.new(chunk).tryGet())
done = newFuture[void]()
test "Should send want list to new peers":
proc sendWantList(
id: PeerId,
cids: seq[Cid],
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false) {.gcsafe, async.} =
check cids.mapIt($it).sorted == blocks.mapIt( $it.cid ).sorted
check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt( $it.cid ).sorted
done.complete()
let
@ -140,7 +140,7 @@ asyncchecksuite "NetworkStore engine handlers":
discovery: DiscoveryEngine
peerCtx: BlockExcPeerCtx
localStore: BlockStore
blocks: seq[bt.Block]
blocks: seq[Block]
setup:
rng = Rng.instance()
@ -151,7 +151,7 @@ asyncchecksuite "NetworkStore engine handlers":
if chunk.len <= 0:
break
blocks.add(bt.Block.new(chunk).tryGet())
blocks.add(Block.new(chunk).tryGet())
seckey = PrivateKey.random(rng[]).tryGet()
peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
@ -193,7 +193,7 @@ asyncchecksuite "NetworkStore engine handlers":
let ctx = await engine.taskQueue.pop()
check ctx.id == peerId
# only `wantBlock` scheduled
check ctx.peerWants.mapIt( it.cid ) == blocks.mapIt( it.cid )
check ctx.peerWants.mapIt( it.address.cidOrTreeCid ) == blocks.mapIt( it.cid )
let done = handler()
await engine.wantListHandler(peerId, wantList)
@ -205,7 +205,7 @@ asyncchecksuite "NetworkStore engine handlers":
wantList = makeWantList(blocks.mapIt( it.cid ))
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
check presence.mapIt( it.cid ) == wantList.entries.mapIt( it.`block` )
check presence.mapIt( it.address ) == wantList.entries.mapIt( it.address )
done.complete()
engine.network = BlockExcNetwork(
@ -227,7 +227,7 @@ asyncchecksuite "NetworkStore engine handlers":
sendDontHave = true)
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
check presence.mapIt( it.cid ) == wantList.entries.mapIt( it.`block` )
check presence.mapIt( it.address ) == wantList.entries.mapIt( it.address )
for p in presence:
check:
p.`type` == BlockPresenceType.DontHave
@ -249,12 +249,8 @@ asyncchecksuite "NetworkStore engine handlers":
sendDontHave = true)
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
let
cid1Buf = blocks[0].cid.data.buffer
cid2Buf = blocks[1].cid.data.buffer
for p in presence:
if p.cid != cid1Buf and p.cid != cid2Buf:
if p.address.cidOrTreeCid != blocks[0].cid and p.address.cidOrTreeCid != blocks[1].cid:
check p.`type` == BlockPresenceType.DontHave
else:
check p.`type` == BlockPresenceType.Have
@ -277,7 +273,9 @@ asyncchecksuite "NetworkStore engine handlers":
engine.pendingBlocks.getWantHandle( it.cid )
)
await engine.blocksHandler(peerId, blocks)
let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
await engine.blocksDeliveryHandler(peerId, blocksDelivery)
let resolved = await allFinished(pending)
check resolved.mapIt( it.read ) == blocks
for b in blocks:
@ -292,7 +290,7 @@ asyncchecksuite "NetworkStore engine handlers":
peerContext.account = account.some
peerContext.blocks = blocks.mapIt(
(it.cid, Presence(cid: it.cid, price: rand(uint16).u256))
(it.address, Presence(address: it.address, price: rand(uint16).u256))
).toTable
engine.network = BlockExcNetwork(
@ -301,7 +299,7 @@ asyncchecksuite "NetworkStore engine handlers":
let
amount =
blocks.mapIt(
peerContext.blocks[it.cid].price
peerContext.blocks[it.address].price
).foldl(a + b)
balances = !payment.state.outcome.balances(Asset)
@ -311,22 +309,24 @@ asyncchecksuite "NetworkStore engine handlers":
done.complete()
))
await engine.blocksHandler(peerId, blocks)
await engine.blocksDeliveryHandler(peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
await done.wait(100.millis)
test "Should handle block presence":
var
handles: Table[Cid, Future[bt.Block]]
handles: Table[Cid, Future[Block]]
proc sendWantList(
id: PeerId,
cids: seq[Cid],
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false) {.gcsafe, async.} =
engine.pendingBlocks.resolve(blocks.filterIt( it.cid in cids ))
engine.pendingBlocks.resolve(blocks
.filterIt( it.address in addresses )
.mapIt(BlockDelivery(blk: it, address: it.address)))
engine.network = BlockExcNetwork(
request: BlockExcRequest(
@ -343,14 +343,14 @@ asyncchecksuite "NetworkStore engine handlers":
blocks.mapIt(
PresenceMessage.init(
Presence(
cid: it.cid,
address: it.address,
have: true,
price: price
))))
for cid in blocks.mapIt(it.cid):
check cid in peerCtx.peerHave
check peerCtx.blocks[cid].price == price
for a in blocks.mapIt(it.address):
check a in peerCtx.peerHave
check peerCtx.blocks[a].price == price
asyncchecksuite "Task Handler":
var
@ -369,7 +369,7 @@ asyncchecksuite "Task Handler":
peersCtx: seq[BlockExcPeerCtx]
peers: seq[PeerId]
blocks: seq[bt.Block]
blocks: seq[Block]
setup:
rng = Rng.instance()
@ -379,7 +379,7 @@ asyncchecksuite "Task Handler":
if chunk.len <= 0:
break
blocks.add(bt.Block.new(chunk).tryGet())
blocks.add(Block.new(chunk).tryGet())
seckey = PrivateKey.random(rng[]).tryGet()
peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
@ -419,22 +419,22 @@ asyncchecksuite "Task Handler":
engine.pricing = Pricing.example.some
test "Should send want-blocks in priority order":
proc sendBlocks(
proc sendBlocksDelivery(
id: PeerId,
blks: seq[bt.Block]) {.gcsafe, async.} =
check blks.len == 2
blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} =
check blocksDelivery.len == 2
check:
blks[1].cid == blocks[0].cid
blks[0].cid == blocks[1].cid
blocksDelivery[1].address == blocks[0].address
blocksDelivery[0].address == blocks[1].address
for blk in blocks:
(await engine.localStore.putBlock(blk)).tryGet()
engine.network.request.sendBlocks = sendBlocks
engine.network.request.sendBlocksDelivery = sendBlocksDelivery
# second block to send by priority
peersCtx[0].peerWants.add(
Entry(
`block`: blocks[0].cid.data.buffer,
WantListEntry(
address: blocks[0].address,
priority: 49,
cancel: false,
wantType: WantType.WantBlock,
@ -443,8 +443,8 @@ asyncchecksuite "Task Handler":
# first block to send by priority
peersCtx[0].peerWants.add(
Entry(
`block`: blocks[1].cid.data.buffer,
WantListEntry(
address: blocks[1].address,
priority: 50,
cancel: false,
wantType: WantType.WantBlock,
@ -455,14 +455,14 @@ asyncchecksuite "Task Handler":
test "Should send presence":
let present = blocks
let missing = @[bt.Block.new("missing".toBytes).tryGet()]
let missing = @[Block.new("missing".toBytes).tryGet()]
let price = (!engine.pricing).price
proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
check presence.mapIt(!Presence.init(it)) == @[
Presence(cid: present[0].cid, have: true, price: price),
Presence(cid: present[1].cid, have: true, price: price),
Presence(cid: missing[0].cid, have: false)
Presence(address: present[0].address, have: true, price: price),
Presence(address: present[1].address, have: true, price: price),
Presence(address: missing[0].address, have: false)
]
for blk in blocks:
@ -471,8 +471,8 @@ asyncchecksuite "Task Handler":
# have block
peersCtx[0].peerWants.add(
Entry(
`block`: present[0].cid.data.buffer,
WantListEntry(
address: present[0].address,
priority: 1,
cancel: false,
wantType: WantType.WantHave,
@ -481,8 +481,8 @@ asyncchecksuite "Task Handler":
# have block
peersCtx[0].peerWants.add(
Entry(
`block`: present[1].cid.data.buffer,
WantListEntry(
address: present[1].address,
priority: 1,
cancel: false,
wantType: WantType.WantHave,
@ -491,8 +491,8 @@ asyncchecksuite "Task Handler":
# don't have block
peersCtx[0].peerWants.add(
Entry(
`block`: missing[0].cid.data.buffer,
WantListEntry(
address: missing[0].address,
priority: 1,
cancel: false,
wantType: WantType.WantHave,

View File

@ -7,13 +7,12 @@ import ../../helpers
checksuite "block presence protobuf messages":
let cid = Cid.example
let price = UInt256.example
let presence = Presence(cid: cid, have: true, price: price)
let message = PresenceMessage.init(presence)
test "encodes CID":
check message.cid == cid.data.buffer
let
cid = Cid.example
address = BlockAddress(leaf: false, cid: cid)
price = UInt256.example
presence = Presence(address: address, have: true, price: price)
message = PresenceMessage.init(presence)
test "encodes have/donthave":
var presence = presence
@ -26,12 +25,7 @@ checksuite "block presence protobuf messages":
check message.price == @(price.toBytesBE)
test "decodes CID":
check Presence.init(message).?cid == cid.some
test "fails to decode when CID is invalid":
var incorrect = message
incorrect.cid.del(0)
check Presence.init(incorrect).isNone
check Presence.init(message).?address == address.some
test "decodes have/donthave":
var message = message

View File

@ -47,13 +47,13 @@ asyncchecksuite "Network - Handlers":
discard await networkPeer.connect()
test "Want List handler":
proc wantListHandler(peer: PeerId, wantList: Wantlist) {.gcsafe, async.} =
proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
for b in blocks:
check b.cid in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.cid)]
check b.address in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.address)]
check entry.wantType == WantType.WantHave
check entry.priority == 1
check entry.cancel == true
@ -74,13 +74,13 @@ asyncchecksuite "Network - Handlers":
await done.wait(500.millis)
test "Blocks Handler":
proc blocksHandler(peer: PeerId, blks: seq[bt.Block]) {.gcsafe, async.} =
check blks == blocks
proc blocksDeliveryHandler(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} =
check blocks == blocksDelivery.mapIt(it.blk)
done.complete()
network.handlers.onBlocks = blocksHandler
network.handlers.onBlocksDelivery = blocksDeliveryHandler
let msg = Message(payload: makeBlocks(blocks))
let msg = Message(payload: blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await done.wait(500.millis)
@ -88,10 +88,10 @@ asyncchecksuite "Network - Handlers":
test "Presence Handler":
proc presenceHandler(
peer: PeerId,
precense: seq[BlockPresence]) {.gcsafe, async.} =
presence: seq[BlockPresence]) {.gcsafe, async.} =
for b in blocks:
check:
b.cid in precense
b.address in presence
done.complete()
@ -100,7 +100,7 @@ asyncchecksuite "Network - Handlers":
let msg = Message(
blockPresences: blocks.mapIt(
BlockPresence(
cid: it.cid.data.buffer,
address: it.address,
type: BlockPresenceType.Have
)))
await buffer.pushData(lenPrefix(protobufEncode(msg)))
@ -177,13 +177,13 @@ asyncchecksuite "Network - Senders":
switch2.stop())
test "Send want list":
proc wantListHandler(peer: PeerId, wantList: Wantlist) {.gcsafe, async.} =
proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
for b in blocks:
check b.cid in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.cid)]
check b.address in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.address)]
check entry.wantType == WantType.WantHave
check entry.priority == 1
check entry.cancel == true
@ -194,21 +194,21 @@ asyncchecksuite "Network - Senders":
network2.handlers.onWantList = wantListHandler
await network1.sendWantList(
switch2.peerInfo.peerId,
blocks.mapIt( it.cid ),
blocks.mapIt( it.address ),
1, true, WantType.WantHave,
true, true)
await done.wait(500.millis)
test "send blocks":
proc blocksHandler(peer: PeerId, blks: seq[bt.Block]) {.gcsafe, async.} =
check blks == blocks
proc blocksDeliveryHandler(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} =
check blocks == blocksDelivery.mapIt(it.blk)
done.complete()
network2.handlers.onBlocks = blocksHandler
await network1.sendBlocks(
network2.handlers.onBlocksDelivery = blocksDeliveryHandler
await network1.sendBlocksDelivery(
switch2.peerInfo.peerId,
blocks)
blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
await done.wait(500.millis)
@ -218,7 +218,7 @@ asyncchecksuite "Network - Senders":
      presence: seq[BlockPresence]) {.gcsafe, async.} =
for b in blocks:
check:
          b.cid in presence
          b.address in presence
done.complete()
@ -228,7 +228,7 @@ asyncchecksuite "Network - Senders":
switch2.peerInfo.peerId,
blocks.mapIt(
BlockPresence(
cid: it.cid.data.buffer,
address: it.address,
type: BlockPresenceType.Have
)))

View File

@ -35,12 +35,12 @@ checksuite "Peer Context Store Peer Selection":
var
store: PeerCtxStore
peerCtxs: seq[BlockExcPeerCtx]
cids: seq[Cid]
addresses: seq[BlockAddress]
setup:
store = PeerCtxStore.new()
cids = collect(newSeq):
for i in 0..<10: Cid.example
addresses = collect(newSeq):
for i in 0..<10: BlockAddress(leaf: false, cid: Cid.example)
peerCtxs = collect(newSeq):
for i in 0..<10: BlockExcPeerCtx.example
@ -50,20 +50,20 @@ checksuite "Peer Context Store Peer Selection":
teardown:
store = nil
cids = @[]
addresses = @[]
peerCtxs = @[]
test "Should select peers that have Cid":
peerCtxs[0].blocks = collect(initTable):
for i, c in cids:
{ c: Presence(cid: c, price: i.u256) }
for i, a in addresses:
{ a: Presence(address: a, price: i.u256) }
peerCtxs[5].blocks = collect(initTable):
for i, c in cids:
{ c: Presence(cid: c, price: i.u256) }
for i, a in addresses:
{ a: Presence(address: a, price: i.u256) }
let
peers = store.peersHave(cids[0])
peers = store.peersHave(addresses[0])
check peers.len == 2
check peerCtxs[0] in peers
@ -71,19 +71,19 @@ checksuite "Peer Context Store Peer Selection":
test "Should select cheapest peers for Cid":
peerCtxs[0].blocks = collect(initTable):
for i, c in cids:
{ c: Presence(cid: c, price: (5 + i).u256) }
for i, a in addresses:
{ a: Presence(address: a, price: (5 + i).u256) }
peerCtxs[5].blocks = collect(initTable):
for i, c in cids:
{ c: Presence(cid: c, price: (2 + i).u256) }
for i, a in addresses:
{ a: Presence(address: a, price: (2 + i).u256) }
peerCtxs[9].blocks = collect(initTable):
for i, c in cids:
{ c: Presence(cid: c, price: i.u256) }
for i, a in addresses:
{ a: Presence(address: a, price: i.u256) }
let
peers = store.selectCheapest(cids[0])
peers = store.selectCheapest(addresses[0])
check peers.len == 3
check peers[0] == peerCtxs[9]
@ -92,9 +92,9 @@ checksuite "Peer Context Store Peer Selection":
test "Should select peers that want Cid":
let
entries = cids.mapIt(
Entry(
`block`: it.data.buffer,
entries = addresses.mapIt(
WantListEntry(
address: it,
priority: 1,
cancel: false,
wantType: WantType.WantBlock,
@ -104,7 +104,7 @@ checksuite "Peer Context Store Peer Selection":
peerCtxs[5].peerWants = entries
let
peers = store.peersWant(cids[4])
peers = store.peersWant(addresses[4])
check peers.len == 2
check peerCtxs[0] in peers

View File

@ -18,7 +18,7 @@ checksuite "Pending Blocks":
discard pendingBlocks.getWantHandle(blk.cid)
check pendingBlocks.pending(blk.cid)
check blk.cid in pendingBlocks
test "Should resolve want handle":
let
@ -27,7 +27,7 @@ checksuite "Pending Blocks":
handle = pendingBlocks.getWantHandle(blk.cid)
check blk.cid in pendingBlocks
pendingBlocks.resolve(@[blk])
pendingBlocks.resolve(@[blk].mapIt(BlockDelivery(blk: it, address: it.address)))
check (await handle) == blk
check blk.cid notin pendingBlocks
@ -64,7 +64,7 @@ checksuite "Pending Blocks":
check:
blks.mapIt( $it.cid ).sorted(cmp[string]) ==
toSeq(pendingBlocks.wantList).mapIt( $it ).sorted(cmp[string])
toSeq(pendingBlocks.wantListBlockCids).mapIt( $it ).sorted(cmp[string])
test "Should get want handles list":
let
@ -74,7 +74,7 @@ checksuite "Pending Blocks":
wantHandles = toSeq(pendingBlocks.wantHandles)
check wantHandles.len == handles.len
pendingBlocks.resolve(blks)
pendingBlocks.resolve(blks.mapIt(BlockDelivery(blk: it, address: it.address)))
check:
(await allFinished(wantHandles)).mapIt( $it.read.cid ).sorted(cmp[string]) ==

View File

@ -53,6 +53,10 @@ proc example*(_: type BlockExcPeerCtx): BlockExcPeerCtx =
proc example*(_: type Cid): Cid =
bt.Block.example.cid
proc example*(_: type MultiHash, mcodec = multiCodec("sha2-256")): MultiHash =
let bytes = newSeqWith(256, rand(uint8))
MultiHash.digest($mcodec, bytes).tryGet()
proc example*(_: type Availability): Availability =
Availability.init(
size = uint16.example.u256,

View File

@ -1,25 +1,30 @@
import std/sequtils
import pkg/chronos
import pkg/libp2p
import pkg/libp2p/varint
import pkg/codex/blocktype as bt
import pkg/codex/blocktype
import pkg/codex/stores
import pkg/codex/manifest
import pkg/codex/merkletree
import pkg/codex/blockexchange
import pkg/codex/rng
import ./helpers/nodeutils
import ./helpers/randomchunker
import ./helpers/mockchunker
import ./helpers/mockdiscovery
import ./helpers/always
import ../checktest
export randomchunker, nodeutils, mockdiscovery, always, checktest, manifest
export randomchunker, nodeutils, mockdiscovery, mockchunker, always, checktest, manifest
export libp2p except setup, eventually
# NOTE: The meaning of equality for blocks
# is changed here, because blocks are now `ref`
# types. This is only in tests!!!
func `==`*(a, b: bt.Block): bool =
func `==`*(a, b: Block): bool =
(a.cid == b.cid) and (a.data == b.data)
proc lenPrefix*(msg: openArray[byte]): seq[byte] =
@ -33,21 +38,85 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] =
return buf
proc makeManifestAndTree*(blocks: seq[Block]): ?!(Manifest, MerkleTree) =
if blocks.len == 0:
return failure("Blocks list was empty")
let
datasetSize = blocks.mapIt(it.data.len).foldl(a + b)
blockSize = blocks.mapIt(it.data.len).foldl(max(a, b))
tree = ? MerkleTree.init(blocks.mapIt(it.cid))
treeCid = ? tree.rootCid
manifest = Manifest.new(
treeCid = treeCid,
blockSize = NBytes(blockSize),
datasetSize = NBytes(datasetSize),
version = CIDv1,
hcodec = tree.mcodec
)
return success((manifest, tree))
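
For instance, a test can replace a hand-built manifest with the following (blocks is a hypothetical seq[Block]):

let (manifest, tree) = makeManifestAndTree(blocks).tryGet()
assert manifest.treeCid == tree.rootCid.tryGet() # holds by construction
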
proc makeWantList*(
cids: seq[Cid],
priority: int = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false
): WantList =
WantList(
entries: cids.mapIt(
WantListEntry(
address: BlockAddress(leaf: false, cid: it),
priority: priority.int32,
cancel: cancel,
wantType: wantType,
sendDontHave: sendDontHave) ),
full: full)
proc storeDataGetManifest*(store: BlockStore, chunker: Chunker): Future[Manifest] {.async.} =
var cids = newSeq[Cid]()
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
let blk = Block.new(chunk).tryGet()
cids.add(blk.cid)
(await store.putBlock(blk)).tryGet()
let
tree = MerkleTree.init(cids).tryGet()
treeCid = tree.rootCid.tryGet()
manifest = Manifest.new(
treeCid = treeCid,
blockSize = NBytes(chunker.chunkSize),
datasetSize = NBytes(chunker.offset),
)
for i in 0..<tree.leavesCount:
let proof = tree.getProof(i).tryGet()
(await store.putBlockCidAndProof(treeCid, i, cids[i], proof)).tryGet()
return manifest
proc corruptBlocks*(
store: BlockStore,
manifest: Manifest,
blks, bytes: int): Future[seq[int]] {.async.} =
var pos: seq[int]
doAssert blks < manifest.len
doAssert blks < manifest.blocksCount
while pos.len < blks:
let i = Rng.instance.rand(manifest.len - 1)
let i = Rng.instance.rand(manifest.blocksCount - 1)
if pos.find(i) >= 0:
continue
pos.add(i)
var
blk = (await store.getBlock(manifest[i])).tryGet()
blk = (await store.getBlock(manifest.treeCid, i)).tryGet()
bytePos: seq[int]
doAssert bytes < blk.data.len

View File

@ -0,0 +1,45 @@
import std/sequtils
import pkg/chronos
import pkg/codex/chunker
import pkg/codex/rng
export chunker
type
MockChunker* = Chunker
proc new*(
T: type MockChunker,
dataset: openArray[byte],
chunkSize: int | NBytes,
pad: bool = false
): MockChunker =
  ## Create a chunker that streams the given in-memory dataset in fixed-size chunks
##
let
chunkSize = chunkSize.NBytes
dataset = @dataset
var consumed = 0
proc reader(data: ChunkBuffer, len: int): Future[int] {.async, gcsafe, raises: [Defect].} =
if consumed >= dataset.len:
return 0
var read = 0
while read < len and
read < chunkSize.int and
(consumed + read) < dataset.len:
data[read] = dataset[consumed + read]
read.inc
consumed += read
return read
Chunker.new(
reader = reader,
pad = pad,
chunkSize = chunkSize)
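
A hedged usage sketch: with pad = false, a 10-byte dataset and a chunk size of 4 should yield chunks of 4, 4 and 2 bytes:

import pkg/chronos
import pkg/stew/byteutils

proc demo() {.async.} =
  let chunker = MockChunker.new(dataset = "0123456789".toBytes, chunkSize = 4)
  while (
    let chunk = await chunker.getBytes();
    chunk.len > 0):
    echo chunk.len # expected: 4, 4, 2

waitFor demo()
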

View File

@ -13,7 +13,8 @@ import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import codex/stores/repostore
import pkg/codex/stores/repostore
import pkg/codex/utils/asynciter
type
MockRepoStore* = ref object of RepoStore
@ -31,15 +32,14 @@ method delBlock*(self: MockRepoStore, cid: Cid): Future[?!void] {.async.} =
dec self.iteratorIndex
return success()
method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): Future[?!BlockExpirationIter] {.async.} =
method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): Future[?!AsyncIter[?BlockExpiration]] {.async.} =
if self.getBlockExpirationsThrows:
raise new CatchableError
self.getBeMaxNumber = maxNumber
self.getBeOffset = offset
var iter = BlockExpirationIter()
iter.finished = false
var iter = AsyncIter[?BlockExpiration]()
self.iteratorIndex = offset
var numberLeft = maxNumber
@ -49,7 +49,7 @@ method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): F
let selectedBlock = self.testBlockExpirations[self.iteratorIndex]
inc self.iteratorIndex
return selectedBlock.some
iter.finished = true
iter.finish
return BlockExpiration.none
iter.next = next

View File

@ -0,0 +1,42 @@
import std/unittest
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/codex/merkletree
import ../helpers
checksuite "merkletree - coders":
const data =
[
"0123456789012345678901234567890123456789".toBytes,
"1234567890123456789012345678901234567890".toBytes,
"2345678901234567890123456789012345678901".toBytes,
"3456789012345678901234567890123456789012".toBytes,
"4567890123456789012345678901234567890123".toBytes,
"5678901234567890123456789012345678901234".toBytes,
"6789012345678901234567890123456789012345".toBytes,
"7890123456789012345678901234567890123456".toBytes,
"8901234567890123456789012345678901234567".toBytes,
"9012345678901234567890123456789012345678".toBytes,
]
test "encoding and decoding a tree yields the same tree":
var builder = MerkleTreeBuilder.init(multiCodec("sha2-256")).tryGet()
    for d in data:
      builder.addDataBlock(d).tryGet()
let tree = builder.build().tryGet()
let encodedBytes = tree.encode()
let decodedTree = MerkleTree.decode(encodedBytes).tryGet()
check:
tree == decodedTree

View File

@ -1,86 +1,165 @@
import std/unittest
import std/bitops
import std/random
import std/sequtils
import pkg/libp2p
import codex/merkletree/merkletree
import ../helpers
import std/tables
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/nimcrypto/sha2
import pkg/codex/merkletree
import ../helpers
checksuite "merkletree":
const data =
[
"0123456789012345678901234567890123456789".toBytes,
"1234567890123456789012345678901234567890".toBytes,
"2345678901234567890123456789012345678901".toBytes,
"3456789012345678901234567890123456789012".toBytes,
"4567890123456789012345678901234567890123".toBytes,
"5678901234567890123456789012345678901234".toBytes,
"6789012345678901234567890123456789012345".toBytes,
"7890123456789012345678901234567890123456".toBytes,
"8901234567890123456789012345678901234567".toBytes,
"9012345678901234567890123456789012345678".toBytes,
]
const sha256 = multiCodec("sha2-256")
const sha512 = multiCodec("sha2-512")
proc randomHash(codec: MultiCodec = sha256): MerkleHash =
var data: array[0..31, byte]
for i in 0..31:
data[i] = rand(uint8)
return MultiHash.digest($codec, data).tryGet()
proc combine(a, b: MerkleHash, codec: MultiCodec = sha256): MerkleHash =
proc combine(a, b: MultiHash, codec: MultiCodec = sha256): MultiHash =
var buf = newSeq[byte](a.size + b.size)
for i in 0..<a.size:
buf[i] = a.data.buffer[i]
for i in 0..<b.size:
buf[i + a.size] = b.data.buffer[i]
copyMem(addr buf[0], unsafeAddr a.data.buffer[a.dpos], a.size)
copyMem(addr buf[a.size], unsafeAddr b.data.buffer[b.dpos], b.size)
return MultiHash.digest($codec, buf).tryGet()
var
leaves: array[0..10, MerkleHash]
var zeroHash: MultiHash
var oneHash: MultiHash
var expectedLeaves: array[data.len, MultiHash]
var builder: MerkleTreeBuilder
setup:
for i in 0..leaves.high:
leaves[i] = randomHash()
for i in 0..<data.len:
expectedLeaves[i] = MultiHash.digest($sha256, data[i]).tryGet()
test "tree with one leaf has expected root":
let tree = MerkleTree.init(leaves[0..0]).tryGet()
builder = MerkleTreeBuilder.init(sha256).tryGet()
var zero: array[32, byte]
var one: array[32, byte]
one[^1] = 0x01
zeroHash = MultiHash.init($sha256, zero).tryGet()
oneHash = MultiHash.init($sha256, one).tryGet()
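    # Padding convention assumed by the expected structures below: an odd
    # node at the leaf level is paired with the all-zero digest (zeroHash),
    # while missing nodes at upper levels are filled in with the 0x..01
    # digest (oneHash).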
test "tree with one leaf has expected structure":
builder.addDataBlock(data[0]).tryGet()
let tree = builder.build().tryGet()
check:
tree.leaves == leaves[0..0]
tree.root == leaves[0]
tree.leaves.toSeq == expectedLeaves[0..0]
tree.root == expectedLeaves[0]
tree.len == 1
test "tree with two leaves has expected root":
let
expectedRoot = combine(leaves[0], leaves[1])
test "tree with two leaves has expected structure":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
let tree = MerkleTree.init(leaves[0..1]).tryGet()
let tree = builder.build().tryGet()
let expectedRoot = combine(expectedLeaves[0], expectedLeaves[1])
check:
tree.leaves == leaves[0..1]
tree.leaves.toSeq == expectedLeaves[0..1]
tree.len == 3
tree.root == expectedRoot
test "tree with three leaves has expected root":
let
expectedRoot = combine(combine(leaves[0], leaves[1]), combine(leaves[2], leaves[2]))
test "tree with three leaves has expected structure":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
let tree = MerkleTree.init(leaves[0..2]).tryGet()
let tree = builder.build().tryGet()
let
expectedRoot = combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], zeroHash)
)
check:
tree.leaves == leaves[0..2]
tree.leaves.toSeq == expectedLeaves[0..2]
tree.len == 6
tree.root == expectedRoot
test "tree with two leaves provides expected proofs":
let tree = MerkleTree.init(leaves[0..1]).tryGet()
test "tree with nine leaves has expected structure":
    for i in 0..<9:
      builder.addDataBlock(data[i]).tryGet()
let tree = builder.build().tryGet()
let
expectedRoot = combine(
combine(
combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], expectedLeaves[3]),
),
combine(
combine(expectedLeaves[4], expectedLeaves[5]),
combine(expectedLeaves[6], expectedLeaves[7])
)
),
combine(
combine(
combine(expectedLeaves[8], zeroHash),
oneHash
),
oneHash
)
)
check:
tree.leaves.toSeq == expectedLeaves[0..8]
tree.len == 20
tree.root == expectedRoot
test "tree with two leaves provides expected and valid proofs":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
let tree = builder.build().tryGet()
let expectedProofs = [
MerkleProof.init(0, @[leaves[1]]),
MerkleProof.init(1, @[leaves[0]]),
MerkleProof.init(0, @[expectedLeaves[1]]).tryGet(),
MerkleProof.init(1, @[expectedLeaves[0]]).tryGet(),
]
check:
tree.getProof(0).tryGet() == expectedProofs[0]
tree.getProof(1).tryGet() == expectedProofs[1]
check:
tree.getProof(0).tryGet().verifyDataBlock(data[0], tree.root).tryGet()
tree.getProof(1).tryGet().verifyDataBlock(data[1], tree.root).tryGet()
test "tree with three leaves provides expected proofs":
let tree = MerkleTree.init(leaves[0..2]).tryGet()
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
let tree = builder.build().tryGet()
let expectedProofs = [
MerkleProof.init(0, @[leaves[1], combine(leaves[2], leaves[2])]),
MerkleProof.init(1, @[leaves[0], combine(leaves[2], leaves[2])]),
MerkleProof.init(2, @[leaves[2], combine(leaves[0], leaves[1])]),
MerkleProof.init(0, @[expectedLeaves[1], combine(expectedLeaves[2], zeroHash)]).tryGet(),
MerkleProof.init(1, @[expectedLeaves[0], combine(expectedLeaves[2], zeroHash)]).tryGet(),
MerkleProof.init(2, @[zeroHash, combine(expectedLeaves[0], expectedLeaves[1])]).tryGet(),
]
check:
@ -88,21 +167,73 @@ checksuite "merkletree":
tree.getProof(1).tryGet() == expectedProofs[1]
tree.getProof(2).tryGet() == expectedProofs[2]
check:
tree.getProof(0).tryGet().verifyDataBlock(data[0], tree.root).tryGet()
tree.getProof(1).tryGet().verifyDataBlock(data[1], tree.root).tryGet()
tree.getProof(2).tryGet().verifyDataBlock(data[2], tree.root).tryGet()
test "tree with nine leaves provides expected proofs":
    for i in 0..<9:
      builder.addDataBlock(data[i]).tryGet()
let tree = builder.build().tryGet()
let expectedProofs = {
4:
MerkleProof.init(4, @[
expectedLeaves[5],
combine(expectedLeaves[6], expectedLeaves[7]),
combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], expectedLeaves[3]),
),
combine(
combine(
combine(expectedLeaves[8], zeroHash),
oneHash
),
oneHash
)
]).tryGet(),
8:
MerkleProof.init(8, @[
zeroHash,
oneHash,
oneHash,
combine(
combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], expectedLeaves[3]),
),
combine(
combine(expectedLeaves[4], expectedLeaves[5]),
combine(expectedLeaves[6], expectedLeaves[7])
)
)
]).tryGet(),
}.newTable
check:
tree.getProof(4).tryGet() == expectedProofs[4]
tree.getProof(8).tryGet() == expectedProofs[8]
check:
tree.getProof(4).tryGet().verifyDataBlock(data[4], tree.root).tryGet()
tree.getProof(8).tryGet().verifyDataBlock(data[8], tree.root).tryGet()
test "getProof fails for index out of bounds":
let tree = MerkleTree.init(leaves[0..3]).tryGet()
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
let tree = builder.build().tryGet()
check:
isErr(tree.getProof(-1))
isErr(tree.getProof(4))
test "can create MerkleTree directly from root hash":
let tree = MerkleTree.init(leaves[0], 1)
check:
tree.root == leaves[0]
test "cannot create MerkleTree from leaves with different codec":
let res = MerkleTree.init(@[randomHash(sha256), randomHash(sha512)])
check:
isErr(res)

View File

@ -11,6 +11,8 @@ import pkg/questionable/results
import pkg/codex/stores/cachestore
import pkg/codex/chunker
import pkg/codex/manifest
import pkg/codex/merkletree
import pkg/codex/utils
import ../helpers
@ -27,6 +29,8 @@ proc commonBlockStoreTests*(name: string,
asyncchecksuite name & " Store Common":
var
newBlock, newBlock1, newBlock2, newBlock3: Block
manifest: Manifest
tree: MerkleTree
store: BlockStore
setup:
@ -35,6 +39,8 @@ proc commonBlockStoreTests*(name: string,
newBlock2 = Block.new("2".repeat(100).toBytes()).tryGet()
newBlock3 = Block.new("3".repeat(100).toBytes()).tryGet()
(manifest, tree) = makeManifestAndTree(@[newBlock, newBlock1, newBlock2, newBlock3]).tryGet()
if not isNil(before):
await before()
@ -104,10 +110,10 @@ proc commonBlockStoreTests*(name: string,
test "listBlocks Manifest":
let
blocks = @[newBlock1, newBlock2, newBlock3]
manifest = Manifest.new(blocks = blocks.mapIt( it.cid )).tryGet()
manifestBlock = Block.new(manifest.encode().tryGet(), codec = DagPBCodec).tryGet()
treeBlock = Block.new(tree.encode()).tryGet()
putHandles = await allFinished(
(manifestBlock & blocks).mapIt( store.putBlock( it ) ))
(@[treeBlock, manifestBlock] & blocks).mapIt( store.putBlock( it ) ))
for handle in putHandles:
check not handle.failed
@ -128,10 +134,10 @@ proc commonBlockStoreTests*(name: string,
test "listBlocks Both":
let
blocks = @[newBlock1, newBlock2, newBlock3]
manifest = Manifest.new(blocks = blocks.mapIt( it.cid )).tryGet()
manifestBlock = Block.new(manifest.encode().tryGet(), codec = DagPBCodec).tryGet()
treeBlock = Block.new(tree.encode()).tryGet()
putHandles = await allFinished(
(manifestBlock & blocks).mapIt( store.putBlock( it ) ))
(@[treeBlock, manifestBlock] & blocks).mapIt( store.putBlock( it ) ))
for handle in putHandles:
check not handle.failed
@ -146,4 +152,4 @@ proc commonBlockStoreTests*(name: string,
check (await store.hasBlock(cid)).tryGet()
count.inc
check count == 4
check count == 5

View File

@ -70,4 +70,4 @@ checksuite "Cache Store":
commonBlockStoreTests(
"Cache", proc: BlockStore =
BlockStore(CacheStore.new(cacheSize = 500, chunkSize = 1)))
BlockStore(CacheStore.new(cacheSize = 1000, chunkSize = 1)))

View File

@ -16,6 +16,7 @@ import pkg/codex/chunker
import pkg/codex/stores
import pkg/codex/blocktype as bt
import pkg/codex/clock
import pkg/codex/utils/asynciter
import ../helpers
import ../helpers/mockclock
@ -72,7 +73,7 @@ asyncchecksuite "RepoStore":
mockClock = MockClock.new()
mockClock.set(now)
repo = RepoStore.new(repoDs, metaDs, mockClock, quotaMaxBytes = 200)
repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = 200)
teardown:
(await repoDs.close()).tryGet
@ -326,7 +327,7 @@ asyncchecksuite "RepoStore":
response.len == 0
test "Should retrieve block expiration information":
proc unpack(beIter: Future[?!BlockExpirationIter]): Future[seq[BlockExpiration]] {.async.} =
proc unpack(beIter: Future[?!AsyncIter[?BlockExpiration]]): Future[seq[BlockExpiration]] {.async.} =
var expirations = newSeq[BlockExpiration](0)
without iter =? (await beIter), err:
return expirations
@ -366,22 +367,22 @@ asyncchecksuite "RepoStore":
assertExpiration(blockExpirations2[0], blk3)
test "should put empty blocks":
let blk = Cid.example.emptyBlock
let blk = Cid.example.emptyBlock.tryGet()
check (await repo.putBlock(blk)).isOk
test "should get empty blocks":
let blk = Cid.example.emptyBlock
let blk = Cid.example.emptyBlock.tryGet()
let got = await repo.getBlock(blk.cid)
check got.isOk
check got.get.cid == blk.cid
test "should delete empty blocks":
let blk = Cid.example.emptyBlock
let blk = Cid.example.emptyBlock.tryGet()
check (await repo.delBlock(blk.cid)).isOk
test "should have empty block":
let blk = Cid.example.emptyBlock
let blk = Cid.example.emptyBlock.tryGet()
let has = await repo.hasBlock(blk.cid)
check has.isOk
@ -393,7 +394,7 @@ commonBlockStoreTests(
RepoStore.new(
SQLiteDatastore.new(Memory).tryGet(),
SQLiteDatastore.new(Memory).tryGet(),
MockClock.new())))
clock = MockClock.new())))
const
path = currentSourcePath().parentDir / "test"
@ -413,6 +414,6 @@ commonBlockStoreTests(
RepoStore.new(
FSDatastore.new(path, depth).tryGet(),
SQLiteDatastore.new(Memory).tryGet(),
MockClock.new())),
clock = MockClock.new())),
before = before,
after = after)

View File

@ -22,25 +22,16 @@ asyncchecksuite "Erasure encode/decode":
var manifest: Manifest
var store: BlockStore
var erasure: Erasure
var repoDs: Datastore
var metaDs: SQLiteDatastore
setup:
rng = Rng.instance()
chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize)
manifest = !Manifest.new(blockSize = BlockSize)
let
repoDs = SQLiteDatastore.new(Memory).tryGet()
metaDs = SQLiteDatastore.new(Memory).tryGet()
rng = Rng.instance()
chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize)
store = RepoStore.new(repoDs, metaDs)
erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
let blk = bt.Block.new(chunk).tryGet()
manifest.add(blk.cid)
(await store.putBlock(blk)).tryGet()
manifest = await storeDataGetManifest(store, chunker)
proc encode(buffers, parity: int): Future[Manifest] {.async.} =
let
@ -50,8 +41,8 @@ asyncchecksuite "Erasure encode/decode":
parity)).tryGet()
check:
encoded.len mod (buffers + parity) == 0
encoded.rounded == (manifest.len + (buffers - (manifest.len mod buffers)))
encoded.blocksCount mod (buffers + parity) == 0
encoded.rounded == (manifest.blocksCount + (buffers - (manifest.blocksCount mod buffers)))
encoded.steps == encoded.rounded div buffers
return encoded
@ -64,24 +55,25 @@ asyncchecksuite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
var
column = rng.rand((encoded.len - 1) div encoded.steps) # random column
dropped: seq[Cid]
column = rng.rand((encoded.blocksCount - 1) div encoded.steps) # random column
dropped: seq[int]
for _ in 0..<encoded.ecM:
dropped.add(encoded[column])
(await store.delBlock(encoded[column])).tryGet()
dropped.add(column)
(await store.delBlock(encoded.treeCid, column)).tryGet()
(await store.delBlock(manifest.treeCid, column)).tryGet()
column.inc(encoded.steps - 1)
var
decoded = (await erasure.decode(encoded)).tryGet()
check:
decoded.cid.tryGet() == manifest.cid.tryGet()
decoded.cid.tryGet() == encoded.originalCid
decoded.len == encoded.originalLen
decoded.treeCid == manifest.treeCid
decoded.treeCid == encoded.originalTreeCid
decoded.blocksCount == encoded.originalBlocksCount
for d in dropped:
let present = await store.hasBlock(d)
let present = await store.hasBlock(manifest.treeCid, d)
check present.tryGet()
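This is the PR's core change as it surfaces in the stores API: blocks are no longer addressed by a bare `Cid` but by merkle root plus leaf index. A sketch of the overloads these tests now exercise, with signatures inferred purely from the call sites above:

let
  blk     = (await store.getBlock(manifest.treeCid, 0)).tryGet()   # fetch leaf 0
  present = (await store.hasBlock(manifest.treeCid, 0)).tryGet()
(await store.delBlock(manifest.treeCid, 0)).tryGet()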
test "Should not tolerate losing more than M data blocks in a single random column":
@@ -92,12 +84,13 @@ asyncchecksuite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
var
column = rng.rand((encoded.len - 1) div encoded.steps) # random column
dropped: seq[Cid]
column = rng.rand((encoded.blocksCount - 1) div encoded.steps) # random column
dropped: seq[int]
for _ in 0..<encoded.ecM + 1:
dropped.add(encoded[column])
(await store.delBlock(encoded[column])).tryGet()
dropped.add(column)
(await store.delBlock(encoded.treeCid, column)).tryGet()
(await store.delBlock(manifest.treeCid, column)).tryGet()
column.inc(encoded.steps)
var
@@ -107,7 +100,7 @@ asyncchecksuite "Erasure encode/decode":
decoded = (await erasure.decode(encoded)).tryGet()
for d in dropped:
let present = await store.hasBlock(d)
let present = await store.hasBlock(manifest.treeCid, d)
check not present.tryGet()
test "Should tolerate losing M data blocks in M random columns":
@@ -123,19 +116,21 @@ asyncchecksuite "Erasure encode/decode":
while offset < encoded.steps - 1:
let
blockIdx = toSeq(countup(offset, encoded.len - 1, encoded.steps))
blockIdx = toSeq(countup(offset, encoded.blocksCount - 1, encoded.steps))
for _ in 0..<encoded.ecM:
blocks.add(rng.sample(blockIdx, blocks))
offset.inc
for idx in blocks:
(await store.delBlock(encoded[idx])).tryGet()
(await store.delBlock(encoded.treeCid, idx)).tryGet()
(await store.delBlock(manifest.treeCid, idx)).tryGet()
discard
discard (await erasure.decode(encoded)).tryGet()
for d in manifest:
let present = await store.hasBlock(d)
for d in 0..<manifest.blocksCount:
let present = await store.hasBlock(manifest.treeCid, d)
check present.tryGet()
test "Should not tolerate losing more than M data blocks in M random columns":
@@ -151,20 +146,23 @@ asyncchecksuite "Erasure encode/decode":
while offset < encoded.steps - 1:
let
blockIdx = toSeq(countup(offset, encoded.len - 1, encoded.steps))
blockIdx = toSeq(countup(offset, encoded.blocksCount - 1, encoded.steps))
for _ in 0..<encoded.ecM + 1: # NOTE: the +1
var idx: int
while true:
idx = rng.sample(blockIdx, blocks)
if not encoded[idx].isEmpty:
let blk = (await store.getBlock(encoded.treeCid, idx)).tryGet()
if not blk.isEmpty:
break
blocks.add(idx)
offset.inc
for idx in blocks:
(await store.delBlock(encoded[idx])).tryGet()
(await store.delBlock(encoded.treeCid, idx)).tryGet()
(await store.delBlock(manifest.treeCid, idx)).tryGet()
discard
var
decoded: Manifest
@@ -179,13 +177,14 @@ asyncchecksuite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
for b in encoded.blocks[0..<encoded.steps * encoded.ecM]:
(await store.delBlock(b)).tryGet()
for b in 0..<encoded.steps * encoded.ecM:
(await store.delBlock(encoded.treeCid, b)).tryGet()
(await store.delBlock(manifest.treeCid, b)).tryGet()
discard (await erasure.decode(encoded)).tryGet()
for d in manifest:
let present = await store.hasBlock(d)
for d in 0..<manifest.blocksCount:
let present = await store.hasBlock(manifest.treeCid, d)
check present.tryGet()
test "Should tolerate losing M (a.k.a row) contiguous parity blocks":
@@ -195,13 +194,14 @@ asyncchecksuite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
for b in encoded.blocks[^(encoded.steps * encoded.ecM)..^1]:
(await store.delBlock(b)).tryGet()
for b in (encoded.blocksCount - encoded.steps * encoded.ecM)..<encoded.blocksCount:
(await store.delBlock(encoded.treeCid, b)).tryGet()
(await store.delBlock(manifest.treeCid, b)).tryGet()
discard (await erasure.decode(encoded)).tryGet()
for d in manifest:
let present = await store.hasBlock(d)
for d in 0..<manifest.blocksCount:
let present = await store.hasBlock(manifest.treeCid, d)
check present.tryGet()
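Carrying the illustrative figures from the earlier worked example (blocksCount = 42, steps = 6, ecM = 2): the contiguous data-block test above drops indices 0..11, the first steps * ecM blocks, while this parity test drops indices 30..41, exactly the steps * ecM parity blocks appended after the 30 padded data blocks.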
test "handles edge case of 0 parity blocks":

View File

@@ -10,88 +10,20 @@ import pkg/codex/blocktype as bt
import pkg/codex/manifest
import ./helpers
import ./examples
checksuite "Manifest":
test "Should produce valid tree hash checksum":
var manifest = Manifest.new(
blocks = @[
Block.new("Block 1".toBytes).tryGet().cid,
Block.new("Block 2".toBytes).tryGet().cid,
Block.new("Block 3".toBytes).tryGet().cid,
Block.new("Block 4".toBytes).tryGet().cid,
Block.new("Block 5".toBytes).tryGet().cid,
Block.new("Block 6".toBytes).tryGet().cid,
Block.new("Block 7".toBytes).tryGet().cid,
]).tryGet()
let
encoded = @[byte 18, 32, 227, 176, 196, 66, 152,
252, 28, 20, 154, 251, 244, 200, 153,
111, 185, 36, 39, 174, 65, 228, 100,
155, 147, 76, 164, 149, 153, 27, 120,
82, 184, 85]
var mh: MultiHash
check MultiHash.decode(encoded, mh).tryGet() > 0
let encodedCid = Cid.init(manifest.version, manifest.codec, mh).tryGet()
check:
encodedCid == manifest.cid.tryGet()
test "Should encode/decode to/from manifest":
let
blocks = (0..<1000).mapIt(
Block.new(("Block " & $it).toBytes).tryGet().cid
)
var
manifest = Manifest.new(blocks).tryGet()
manifest = Manifest.new(
treeCid = Cid.example,
blockSize = 1.MiBs,
datasetSize = 100.MiBs)
let
e = manifest.encode().tryGet()
decoded = Manifest.decode(e).tryGet()
check:
decoded.blocks == blocks
decoded.protected == false
decoded == manifest
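Manifests no longer embed a per-block Cid list; they carry a tree root plus sizing information, so round-tripping reduces to comparing those fields. A minimal sketch mirroring the rewritten test (field names are taken from the diff, nothing else is assumed):

let
  manifest = Manifest.new(
    treeCid = Cid.example,
    blockSize = 1.MiBs,
    datasetSize = 100.MiBs)
  bytes   = manifest.encode().tryGet()
  decoded = Manifest.decode(bytes).tryGet()
doAssert decoded.treeCid == manifest.treeCid
doAssert decoded == manifest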
test "Should produce a protected manifest":
let
blocks = (0..<333).mapIt(
Block.new(("Block " & $it).toBytes).tryGet().cid
)
manifest = Manifest.new(blocks).tryGet()
var
protected = Manifest.new(manifest, 2, 2).tryGet()
check:
protected.originalCid == manifest.cid.tryGet()
protected.blocks[0..<333] == manifest.blocks
protected.protected == true
protected.originalLen == manifest.len
# fill up with empty Cid's
for i in protected.rounded..<protected.len:
protected[i] = EmptyCid[manifest.version]
.catch
.get()[manifest.hcodec]
.catch
.get()
var
encoded = protected.encode().tryGet()
decoded = Manifest.decode(encoded).tryGet()
check:
decoded.protected == true
decoded.originalLen == manifest.len
decoded.ecK == protected.ecK
decoded.ecM == protected.ecM
decoded.originalCid == protected.originalCid
decoded.originalCid == manifest.cid.tryGet()
decoded.blocks == protected.blocks
decoded.blocks[0..<333] == manifest.blocks

View File

@@ -1,3 +1,4 @@
import ./merkletree/testmerkletree
import ./merkletree/testcoders
{.warning[UnusedImport]: off.}

View File

@@ -41,18 +41,7 @@ asyncchecksuite "Test Node":
proc fetch(T: type Manifest, chunker: Chunker): Future[Manifest] {.async.} =
# Collect blocks from Chunker into Manifest
var
manifest = Manifest.new().tryGet()
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
let blk = bt.Block.new(chunk).tryGet()
(await localStore.putBlock(blk)).tryGet()
manifest.add(blk.cid)
return manifest
await storeDataGetManifest(localStore, chunker)
proc retrieve(cid: Cid): Future[seq[byte]] {.async.} =
# Retrieve an entire file contents by file Cid
@@ -113,8 +102,7 @@ asyncchecksuite "Test Node":
fetched = (await node.fetchManifest(manifestBlock.cid)).tryGet()
check:
fetched.cid == manifest.cid
fetched.blocks == manifest.blocks
fetched == manifest
test "Block Batching":
let
@@ -159,7 +147,7 @@ asyncchecksuite "Test Node":
let data = await retrieve(manifestCid)
check:
data.len == localManifest.originalBytes.int
data.len == localManifest.datasetSize.int
data.len == original.len
sha256.digest(data) == sha256.digest(original)

View File

@@ -23,32 +23,26 @@ asyncchecksuite "StoreStream":
return true
let
data = [
[byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[byte 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[byte 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
[byte 30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
[byte 40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
[byte 50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
[byte 60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
[byte 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
[byte 80, 81, 82, 83, 84, 85, 86, 87, 88, 89],
[byte 90, 91, 92, 93, 94, 95, 96, 97, 98, 99],
]
data = [byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99]
chunkSize = 10
teardown:
await stream.close()
setup:
store = CacheStore.new()
manifest = Manifest.new(blockSize = 10'nb).tryGet()
manifest = await storeDataGetManifest(store, MockChunker.new(dataset = data, chunkSize = chunkSize))
stream = StoreStream.new(store, manifest)
for d in data:
let blk = bt.Block.new(d).tryGet()
manifest.add(blk.cid)
(await store.putBlock(blk)).tryGet()
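The ten hand-written 10-byte arrays collapse into one flat 100-byte dataset served through a `MockChunker` (a test helper; its constructor shape is taken from the call above). A self-contained illustration of the slicing that replaces the nested layout:

var chunks: seq[seq[byte]]
var i = 0
while i < data.len:
  chunks.add data[i ..< min(i + chunkSize, data.len)]   # ten 10-byte slices
  i += chunkSize
doAssert chunks.len == 10 and chunks[3][0] == 30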
test "Read all blocks < blockSize":
var
buf = newSeq[byte](8)