Blockexchange uses merkle root and index to fetch blocks

This commit is contained in:
Tomasz Bekas 2023-10-12 15:56:10 +02:00
parent 5f4218ba1c
commit 8c1d97d75c
No known key found for this signature in database
GPG Key ID: 4854E04C98824959
57 changed files with 2706 additions and 1525 deletions

View File

@ -65,7 +65,7 @@ type
proc discoveryQueueLoop(b: DiscoveryEngine) {.async.} =
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantList):
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
try:
await b.discoveryQueue.put(cid)
except CatchableError as exc:

View File

@ -11,16 +11,18 @@ import std/sequtils
import std/sets
import std/options
import std/algorithm
import std/sugar
import pkg/chronos
import pkg/chronicles
import pkg/libp2p/[cid, switch]
import pkg/libp2p/[cid, switch, multihash, multicodec]
import pkg/metrics
import pkg/stint
import ../../stores/blockstore
import ../../blocktype as bt
import ../../blocktype
import ../../utils
import ../../merkletree
import ../protobuf/blockexc
import ../protobuf/presence
@ -77,12 +79,6 @@ type
address*: EthAddress
price*: UInt256
proc contains*(a: AsyncHeapQueue[Entry], b: Cid): bool =
## Convenience method to check for entry presence
##
a.anyIt( it.cid == b )
# attach task scheduler to engine
proc scheduleTask(b: BlockExcEngine, task: BlockExcPeerCtx): bool {.gcsafe} =
b.taskQueue.pushOrUpdateNoWait(task).isOk()
@ -124,22 +120,27 @@ proc stop*(b: BlockExcEngine) {.async.} =
trace "NetworkStore stopped"
proc sendWantHave(b: BlockExcEngine, cid: Cid, selectedPeer: BlockExcPeerCtx, peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
trace "Sending wantHave request to peers", cid
proc sendWantHave(b: BlockExcEngine, address: BlockAddress, selectedPeer: BlockExcPeerCtx, peers: seq[BlockExcPeerCtx]): Future[void] {.async.} =
trace "Sending wantHave request to peers", address
for p in peers:
if p != selectedPeer:
if cid notin p.peerHave:
if address notin p.peerHave:
trace " wantHave > ", peer = p.id
await b.network.request.sendWantList(
p.id,
@[cid],
@[address],
wantType = WantType.WantHave) # we only want to know if the peer has the block
proc sendWantBlock(b: BlockExcEngine, cid: Cid, blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
trace "Sending wantBlock request to", peer = blockPeer.id, cid
proc sendWantBlock(b: BlockExcEngine, address: BlockAddress, blockPeer: BlockExcPeerCtx): Future[void] {.async.} =
let cid = if address.leaf:
address.treeCid
else:
address.cid
trace "Sending wantBlock request to", peer = blockPeer.id, address
await b.network.request.sendWantList(
blockPeer.id,
@[cid],
@[address],
wantType = WantType.WantBlock) # we want this remote to send us a block
proc findCheapestPeerForBlock(b: BlockExcEngine, cheapestPeers: seq[BlockExcPeerCtx]): ?BlockExcPeerCtx =
@ -152,10 +153,29 @@ proc findCheapestPeerForBlock(b: BlockExcEngine, cheapestPeers: seq[BlockExcPeer
return some(peers[0])
return some(cheapestPeers[0]) # get cheapest
proc monitorBlockHandle(b: BlockExcEngine, handle: Future[Block], address: BlockAddress, peerId: PeerId) {.async.} =
  ## Await a block-request future and, if it fails, queue rediscovery
  ## for the block and disconnect the peer it was requested from.
  try:
    trace "Monitoring block handle", address, peerId
    discard await handle
    trace "Block handle success", address, peerId
  except CatchableError as exc:
    trace "Error block handle, disconnecting peer", address, exc = exc.msg, peerId

    # TODO: really, this is just a quick and dirty way of
    # preventing hitting the same "bad" peer every time, however,
    # we might as well discover this on our next iteration, so
    # it doesn't mean that we're never talking to this peer again.

    # TODO: we need a lot more work around peer selection and
    # prioritization

    # drop unresponsive peer
    b.discovery.queueFindBlocksReq(@[address.cidOrTreeCid])
    await b.network.switch.disconnect(peerId)
proc requestBlock*(
b: BlockExcEngine,
cid: Cid,
timeout = DefaultBlockTimeout): Future[bt.Block] {.async.} =
timeout = DefaultBlockTimeout): Future[Block] {.async.} =
trace "Begin block request", cid, peers = b.peers.len
if b.pendingBlocks.isInFlight(cid):
@ -164,39 +184,20 @@ proc requestBlock*(
let
blk = b.pendingBlocks.getWantHandle(cid, timeout)
address = BlockAddress(leaf: false, cid: cid)
trace "Selecting peers who have", cid
trace "Selecting peers who have", address
var
peers = b.peers.selectCheapest(cid)
peers = b.peers.selectCheapest(address)
without blockPeer =? b.findCheapestPeerForBlock(peers):
trace "No peers to request blocks from. Queue discovery...", cid
b.discovery.queueFindBlocksReq(@[cid])
return await blk
proc blockHandleMonitor() {.async.} =
try:
trace "Monitoring block handle", cid
b.pendingBlocks.setInFlight(cid, true)
discard await blk
trace "Block handle success", cid
except CatchableError as exc:
trace "Error block handle, disconnecting peer", cid, exc = exc.msg
# TODO: really, this is just a quick and dirty way of
# preventing hitting the same "bad" peer every time, however,
# we might as well discover this on our next iteration, so
# it doesn't mean that we're never talking to this peer again.
# TODO: we need a lot more work around peer selection and
# prioritization
# drop unresponsive peer
await b.network.switch.disconnect(blockPeer.id)
# monitor block handle
asyncSpawn blockHandleMonitor()
await b.sendWantBlock(cid, blockPeer)
asyncSpawn b.monitorBlockHandle(blk, address, blockPeer.id)
b.pendingBlocks.setInFlight(cid, true)
await b.sendWantBlock(address, blockPeer)
codexBlockExchangeWantBlockListsSent.inc()
@ -205,12 +206,94 @@ proc requestBlock*(
b.discovery.queueFindBlocksReq(@[cid])
return await blk
await b.sendWantHave(cid, blockPeer, toSeq(b.peers))
await b.sendWantHave(address, blockPeer, toSeq(b.peers))
codexBlockExchangeWantHaveListsSent.inc()
return await blk
proc requestBlock(
  b: BlockExcEngine,
  treeReq: TreeReq,
  index: Natural,
  timeout = DefaultBlockTimeout
): Future[Block] {.async.} =
  ## Request a single merkle-tree leaf block by `index`.
  ## If the leaf cid is already known, serve it from the local store
  ## (falling back to a plain cid request on a store miss). Otherwise
  ## send wantBlock to one peer — picked round-robin by `index` — and
  ## wantHave to the rest, then await the pending handle.
  let address = BlockAddress(leaf: true, treeCid: treeReq.treeCid, index: index)

  let handleOrCid = treeReq.getWantHandleOrCid(index, timeout)
  if handleOrCid.resolved:
    without blk =? await b.localStore.getBlock(handleOrCid.cid), err:
      # resolved but missing locally — fall back to a cid-based request
      return await b.requestBlock(handleOrCid.cid, timeout)
    return blk

  let blockFuture = handleOrCid.handle

  if treeReq.isInFlight(index):
    # someone is already fetching this leaf — just wait for it
    return await blockFuture

  let peers = b.peers.selectCheapest(address)
  if peers.len == 0:
    b.discovery.queueFindBlocksReq(@[treeReq.treeCid])

  # prefer a peer that advertised the address; otherwise any connected
  # peer; `index mod len` spreads leaf requests across peers
  let maybePeer =
    if peers.len > 0:
      peers[index mod peers.len].some
    elif b.peers.len > 0:
      toSeq(b.peers)[index mod b.peers.len].some
    else:
      BlockExcPeerCtx.none

  if peer =? maybePeer:
    asyncSpawn b.monitorBlockHandle(blockFuture, address, peer.id)
    treeReq.trySetInFlight(index)
    await b.sendWantBlock(address, peer)
    codexBlockExchangeWantBlockListsSent.inc()
    await b.sendWantHave(address, peer, toSeq(b.peers))
    codexBlockExchangeWantHaveListsSent.inc()

  return await blockFuture
proc requestBlock*(
  b: BlockExcEngine,
  treeCid: Cid,
  index: Natural,
  merkleRoot: MultiHash,
  timeout = DefaultBlockTimeout
): Future[Block] =
  ## Request the block at `index` of the merkle tree identified by
  ## `treeCid`/`merkleRoot`. Registers (or reuses) a pending tree
  ## request; raises if an existing request has a conflicting root.
  without treeReq =? b.pendingBlocks.getOrPutTreeReq(treeCid, Natural.none, merkleRoot), err:
    raise err

  return b.requestBlock(treeReq, index, timeout)
proc requestBlocks*(
  b: BlockExcEngine,
  treeCid: Cid,
  leavesCount: Natural,
  merkleRoot: MultiHash,
  timeout = DefaultBlockTimeout
): ?!AsyncIter[Block] =
  ## Return an async iterator over all `leavesCount` leaf blocks of the
  ## tree `treeCid`; each call to `next()` lazily requests one leaf.
  ## Fails if an existing pending tree request conflicts with the
  ## supplied root or leaf count.
  without treeReq =? b.pendingBlocks.getOrPutTreeReq(treeCid, leavesCount.some, merkleRoot), err:
    return failure(err)

  var
    iter = AsyncIter[Block]()
    index = 0

  proc next(): Future[Block] =
    if index < leavesCount:
      let fut = b.requestBlock(treeReq, index, timeout)
      inc index
      if index >= leavesCount:
        # last leaf handed out — mark the iterator exhausted
        iter.finished = true
      return fut
    else:
      # iterating past the end yields an already-failed future
      let fut = newFuture[Block]("engine.requestBlocks")
      fut.fail(newException(CodexError, "No more elements for tree with cid " & $treeCid))
      return fut

  iter.next = next
  return success(iter)
proc blockPresenceHandler*(
b: BlockExcEngine,
peer: PeerId,
@ -226,7 +309,7 @@ proc blockPresenceHandler*(
for blk in blocks:
if presence =? Presence.init(blk):
logScope:
cid = presence.cid
address = $presence.address
have = presence.have
price = presence.price
@ -255,22 +338,22 @@ proc blockPresenceHandler*(
# if none of the connected peers report our wants in their have list,
# fire up discovery
b.discovery.queueFindBlocksReq(
toSeq(b.pendingBlocks.wantList)
toSeq(b.pendingBlocks.wantListCids)
.filter do(cid: Cid) -> bool:
not b.peers.anyIt( cid in it.peerHave ))
not b.peers.anyIt( cid in it.peerHaveCids ))
proc scheduleTasks(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} =
trace "Schedule a task for new blocks", items = blocks.len
proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
trace "Schedule a task for new blocks", items = blocksDelivery.len
let
cids = blocks.mapIt( it.cid )
cids = blocksDelivery.mapIt( it.blk.cid )
# schedule any new peers to provide blocks to
for p in b.peers:
for c in cids: # for each cid
# schedule a peer if it wants at least one cid
# and we have it in our local store
if c in p.peerWants:
if c in p.peerWantsCids:
if await (c in b.localStore):
if b.scheduleTask(p):
trace "Task scheduled for peer", peer = p.id
@ -279,50 +362,54 @@ proc scheduleTasks(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} =
break # do next peer
proc resolveBlocks*(b: BlockExcEngine, blocks: seq[bt.Block]) {.async.} =
trace "Resolving blocks", blocks = blocks.len
proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} =
trace "Resolving blocks", blocks = blocksDelivery.len
b.pendingBlocks.resolve(blocks)
await b.scheduleTasks(blocks)
b.discovery.queueProvideBlocksReq(blocks.mapIt( it.cid ))
b.pendingBlocks.resolve(blocksDelivery)
await b.scheduleTasks(blocksDelivery)
b.discovery.queueProvideBlocksReq(blocksDelivery.mapIt( it.blk.cid ))
proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} =
await b.resolveBlocks(blocks.mapIt(BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid))))
proc payForBlocks(engine: BlockExcEngine,
peer: BlockExcPeerCtx,
blocks: seq[bt.Block]) {.async.} =
trace "Paying for blocks", blocks = blocks.len
blocksDelivery: seq[BlockDelivery]) {.async.} =
trace "Paying for blocks", len = blocksDelivery.len
let
sendPayment = engine.network.request.sendPayment
price = peer.price(blocks.mapIt(it.cid))
price = peer.price(blocksDelivery.mapIt(it.address))
if payment =? engine.wallet.pay(peer, price):
trace "Sending payment for blocks", price
await sendPayment(peer.id, payment)
proc blocksHandler*(
proc blocksDeliveryHandler*(
b: BlockExcEngine,
peer: PeerId,
blocks: seq[bt.Block]) {.async.} =
trace "Got blocks from peer", peer, len = blocks.len
for blk in blocks:
if isErr (await b.localStore.putBlock(blk)):
trace "Unable to store block", cid = blk.cid
blocksDelivery: seq[BlockDelivery]) {.async.} =
trace "Got blocks from peer", peer, len = blocksDelivery.len
await b.resolveBlocks(blocks)
codexBlockExchangeBlocksReceived.inc(blocks.len.int64)
for bd in blocksDelivery:
if isErr (await b.localStore.putBlock(bd.blk)):
trace "Unable to store block", cid = bd.blk.cid
await b.resolveBlocks(blocksDelivery)
codexBlockExchangeBlocksReceived.inc(blocksDelivery.len.int64)
let
peerCtx = b.peers.get(peer)
if peerCtx != nil:
await b.payForBlocks(peerCtx, blocks)
await b.payForBlocks(peerCtx, blocksDelivery)
## shouldn't we remove them from the want-list instead of this:
peerCtx.cleanPresence(blocks.mapIt( it.cid ))
peerCtx.cleanPresence(blocksDelivery.mapIt( it.address ))
proc wantListHandler*(
b: BlockExcEngine,
peer: PeerId,
wantList: Wantlist) {.async.} =
wantList: WantList) {.async.} =
trace "Got wantList for peer", peer, items = wantList.entries.len
let
peerCtx = b.peers.get(peer)
@ -338,14 +425,14 @@ proc wantListHandler*(
logScope:
peer = peerCtx.id
cid = e.cid
# cid = e.cid
wantType = $e.wantType
if idx < 0: # updating entry
trace "Processing new want list entry", cid = e.cid
trace "Processing new want list entry", address = e.address
let
have = await e.cid in b.localStore
have = await e.address in b.localStore
price = @(
b.pricing.get(Pricing(price: 0.u256))
.price.toBytesBE)
@ -354,21 +441,21 @@ proc wantListHandler*(
codexBlockExchangeWantHaveListsReceived.inc()
if not have and e.sendDontHave:
trace "Adding dont have entry to presence response", cid = e.cid
trace "Adding dont have entry to presence response", address = e.address
presence.add(
BlockPresence(
cid: e.cid.data.buffer,
address: e.address,
`type`: BlockPresenceType.DontHave,
price: price))
elif have and e.wantType == WantType.WantHave:
trace "Adding have entry to presence response", cid = e.cid
trace "Adding have entry to presence response", address = e.address
presence.add(
BlockPresence(
cid: e.cid.data.buffer,
address: e.address,
`type`: BlockPresenceType.Have,
price: price))
elif e.wantType == WantType.WantBlock:
trace "Added entry to peer's want blocks list", cid = e.cid
trace "Added entry to peer's want blocks list", address = e.address
peerCtx.peerWants.add(e)
codexBlockExchangeWantBlockListsReceived.inc()
else:
@ -419,11 +506,25 @@ proc paymentHandler*(
else:
context.paymentChannel = engine.wallet.acceptChannel(payment).option
proc onTreeHandler(b: BlockExcEngine, tree: MerkleTree): Future[?!void] {.async.} =
  ## Encode a reconstructed merkle tree as a block and persist it in
  ## the local store; fails if encoding or storing fails.
  trace "Handling tree"

  without treeBlk =? Block.new(tree.encode()), err:
    return failure(err)

  if err =? (await b.localStore.putBlock(treeBlk)).errorOption:
    return failure("Unable to store merkle tree block " & $treeBlk.cid & ", nested err: " & err.msg)

  return success()
proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
## Perform initial setup, such as want
## list exchange
##
trace "Setting up peer", peer
if peer notin b.peers:
trace "Setting up new peer", peer
b.peers.add(BlockExcPeerCtx(
@ -432,9 +533,11 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} =
trace "Added peer", peers = b.peers.len
# broadcast our want list, the other peer will do the same
if b.pendingBlocks.len > 0:
if b.pendingBlocks.wantListLen > 0:
trace "Sending our want list to a peer", peer
let cids = toSeq(b.pendingBlocks.wantList)
await b.network.request.sendWantList(
peer, toSeq(b.pendingBlocks.wantList), full = true)
peer, cids, full = true)
if address =? b.pricing.?address:
await b.network.request.sendAccount(peer, Account(address: address))
@ -468,30 +571,41 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
wantsBlocks.sort(SortOrder.Descending)
proc localLookup(e: WantListEntry): Future[?!BlockDelivery] {.async.} =
trace "Handling lookup for entry", address = e.address
if e.address.leaf:
(await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(blkAndProof: (Block, MerkleProof)) =>
BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some)
)
else:
(await b.localStore.getBlock(e.address.cid)).map(
(blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: MerkleProof.none)
)
let
blockFuts = await allFinished(wantsBlocks.mapIt(
b.localStore.getBlock(it.cid)
))
blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup))
# Extract successfully received blocks
let
blocks = blockFuts
blocksDelivery = blocksDeliveryFut
.filterIt(it.completed and it.read.isOk)
.mapIt(it.read.get)
if blocks.len > 0:
trace "Sending blocks to peer", peer = task.id, blocks = blocks.len
await b.network.request.sendBlocks(
if blocksDelivery.len > 0:
trace "Sending blocks to peer", peer = task.id, blocks = blocksDelivery.len
await b.network.request.sendBlocksDelivery(
task.id,
blocks)
blocksDelivery
)
codexBlockExchangeBlocksSent.inc(blocks.len.int64)
codexBlockExchangeBlocksSent.inc(blocksDelivery.len.int64)
trace "About to remove entries from peerWants", blocks = blocks.len, items = task.peerWants.len
trace "About to remove entries from peerWants", blocks = blocksDelivery.len, items = task.peerWants.len
# Remove successfully sent blocks
task.peerWants.keepIf(
proc(e: Entry): bool =
not blocks.anyIt( it.cid == e.cid )
proc(e: WantListEntry): bool =
not blocksDelivery.anyIt( it.address == e.address )
)
trace "Removed entries from peerWants", items = task.peerWants.len
@ -547,7 +661,7 @@ proc new*(
proc blockWantListHandler(
peer: PeerId,
wantList: Wantlist): Future[void] {.gcsafe.} =
wantList: WantList): Future[void] {.gcsafe.} =
engine.wantListHandler(peer, wantList)
proc blockPresenceHandler(
@ -555,10 +669,10 @@ proc new*(
presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
engine.blockPresenceHandler(peer, presence)
proc blocksHandler(
proc blocksDeliveryHandler(
peer: PeerId,
blocks: seq[bt.Block]): Future[void] {.gcsafe.} =
engine.blocksHandler(peer, blocks)
blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
engine.blocksDeliveryHandler(peer, blocksDelivery)
proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} =
engine.accountHandler(peer, account)
@ -566,11 +680,18 @@ proc new*(
proc paymentHandler(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
engine.paymentHandler(peer, payment)
proc onTree(tree: MerkleTree): Future[void] {.gcsafe, async.} =
if err =? (await engine.onTreeHandler(tree)).errorOption:
echo "Error handling a tree" & err.msg # TODO
# error "Error handling a tree", msg = err.msg
network.handlers = BlockExcHandlers(
onWantList: blockWantListHandler,
onBlocks: blocksHandler,
onBlocksDelivery: blocksDeliveryHandler,
onPresence: blockPresenceHandler,
onAccount: accountHandler,
onPayment: paymentHandler)
pendingBlocks.onTree = onTree
return engine

View File

@ -14,12 +14,19 @@ import pkg/upraises
push: {.upraises: [].}
import ../../blocktype
import pkg/chronicles
import pkg/questionable
import pkg/questionable/options
import pkg/questionable/results
import pkg/chronos
import pkg/libp2p
import pkg/metrics
import ../../blocktype
import ../protobuf/blockexc
import ../../merkletree
import ../../utils
logScope:
topics = "codex pendingblocks"
@ -36,12 +43,121 @@ type
inFlight*: bool
startTime*: int64
LeafReq* = object
case delivered*: bool
of false:
handle*: Future[Block]
inFlight*: bool
of true:
leaf: MultiHash
blkCid*: Cid
TreeReq* = ref object
leaves*: Table[Natural, LeafReq]
deliveredCount*: Natural
leavesCount*: ?Natural
treeRoot*: MultiHash
treeCid*: Cid
TreeHandler* = proc(tree: MerkleTree): Future[void] {.gcsafe.}
PendingBlocksManager* = ref object of RootObj
blocks*: Table[Cid, BlockReq] # pending Block requests
trees*: Table[Cid, TreeReq]
onTree*: TreeHandler
proc updatePendingBlockGauge(p: PendingBlocksManager) =
codexBlockExchangePendingBlockRequests.set(p.blocks.len.int64)
type
BlockHandleOrCid = object
case resolved*: bool
of true:
cid*: Cid
else:
handle*: Future[Block]
proc buildTree(treeReq: TreeReq): ?!MerkleTree =
  ## Rebuild the full merkle tree from the delivered leaf hashes.
  ## Fails when the leaf count is unknown, any leaf is missing or
  ## undelivered, or the rebuilt root differs from `treeRoot`.
  trace "Building a merkle tree from leaves", treeCid = treeReq.treeCid, leavesCount = treeReq.leavesCount

  without leavesCount =? treeReq.leavesCount:
    return failure("Leaves count is none, cannot build a tree")

  # leaves must be added in index order for the root to match
  var builder = ? MerkleTreeBuilder.init(treeReq.treeRoot.mcodec)
  for i in 0..<leavesCount:
    treeReq.leaves.withValue(i, leafReq):
      if leafReq.delivered:
        ? builder.addLeaf(leafReq.leaf)
      else:
        return failure("Expected all leaves to be delivered but leaf with index " & $i & " was not")
    do:
      return failure("Missing a leaf with index " & $i)

  let tree = ? builder.build()

  if tree.root != treeReq.treeRoot:
    return failure("Reconstructed tree root doesn't match the original tree root, tree cid is " & $treeReq.treeCid)

  return success(tree)
proc checkIfAllDelivered(p: PendingBlocksManager, treeReq: TreeReq): void =
  ## When every expected leaf has been delivered, rebuild the tree,
  ## remove the pending request and hand the tree to the `onTree`
  ## callback; no-op while leaves are still outstanding.
  if treeReq.deliveredCount.some == treeReq.leavesCount:
    without tree =? buildTree(treeReq), err:
      error "Error building a tree", msg = err.msg
      p.trees.del(treeReq.treeCid)
      return
    p.trees.del(treeReq.treeCid)
    try:
      # don't let a failing callback spawn propagate out of resolve
      asyncSpawn p.onTree(tree)
    except Exception as err:
      error "Exception when handling tree", msg = err.msg
proc getWantHandleOrCid*(
  treeReq: TreeReq,
  index: Natural,
  timeout = DefaultBlockTimeout
): BlockHandleOrCid =
  ## Return the block cid when the leaf at `index` was already
  ## delivered, otherwise a future completing when the block arrives.
  ## Creates a fresh pending leaf entry on first request for `index`.
  # NOTE(review): `timeout` is not referenced in this body — confirm
  # whether the handle is expected to time out elsewhere.
  treeReq.leaves.withValue(index, leafReq):
    if not leafReq.delivered:
      return BlockHandleOrCid(resolved: false, handle: leafReq.handle)
    else:
      return BlockHandleOrCid(resolved: true, cid: leafReq.blkCid)
  do:
    let leafReq = LeafReq(
      delivered: false,
      handle: newFuture[Block]("pendingBlocks.getWantHandleOrCid"),
      inFlight: false
    )
    treeReq.leaves[index] = leafReq
    return BlockHandleOrCid(resolved: false, handle: leafReq.handle)
proc getOrPutTreeReq*(
  p: PendingBlocksManager,
  treeCid: Cid,
  leavesCount = Natural.none, # has value when all leaves are expected to be delivered
  treeRoot: MultiHash
): ?!TreeReq =
  ## Look up the pending tree request for `treeCid`, creating it when
  ## absent. Fails when an existing request was registered with a
  ## different merkle root.
  p.trees.withValue(treeCid, treeReq):
    if treeReq.treeRoot != treeRoot:
      return failure("Unexpected root for tree with cid " & $treeCid)

    if leavesCount == treeReq.leavesCount:
      return success(treeReq[])
    else:
      # adopt the caller's leaf count if we didn't have one yet
      treeReq.leavesCount = treeReq.leavesCount.orElse(leavesCount)
      let res = success(treeReq[])
      # learning the count may mean every leaf is already delivered
      p.checkIfAllDelivered(treeReq[])
      return res
  do:
    let treeReq = TreeReq(
      deliveredCount: 0,
      leavesCount: leavesCount,
      treeRoot: treeRoot,
      treeCid: treeCid
    )
    p.trees[treeCid] = treeReq
    return success(treeReq)
proc getWantHandle*(
p: PendingBlocksManager,
cid: Cid,
@ -73,36 +189,100 @@ proc getWantHandle*(
p.blocks.del(cid)
p.updatePendingBlockGauge()
proc resolve*(p: PendingBlocksManager,
blocks: seq[Block]) =
proc getOrComputeLeaf(mcodec: MultiCodec, blk: Block): ?!MultiHash =
  ## Return the block's own multihash when it already uses `mcodec`;
  ## otherwise (including when the cid hash can't be read) digest the
  ## block data with `mcodec`.
  without mhash =? blk.cid.mhash.mapFailure, err:
    return MultiHash.digest($mcodec, blk.data).mapFailure

  if mhash.mcodec == mcodec:
    return success(mhash)
  else:
    return MultiHash.digest($mcodec, blk.data).mapFailure
proc resolve*(
p: PendingBlocksManager,
blocksDelivery: seq[BlockDelivery]
) {.gcsafe, raises: [].} =
## Resolve pending blocks
##
for blk in blocks:
for bd in blocksDelivery:
if not bd.address.leaf:
if bd.address.cid == bd.blk.cid:
p.blocks.withValue(bd.blk.cid, pending):
if not pending.handle.completed:
trace "Resolving block", cid = bd.blk.cid
pending.handle.complete(bd.blk)
let
startTime = pending[].startTime
stopTime = getMonoTime().ticks
retrievalDurationUs = (stopTime - startTime) div 1000
codexBlockExchangeRetrievalTimeUs.set(retrievalDurationUs)
trace "Block retrieval time", retrievalDurationUs
else:
warn "Delivery cid doesn't match block cid", deliveryCid = bd.address.cid, blockCid = bd.blk.cid
# resolve any pending blocks
p.blocks.withValue(blk.cid, pending):
if not pending[].handle.completed:
trace "Resolving block", cid = blk.cid
pending[].handle.complete(blk)
let
startTime = pending[].startTime
stopTime = getMonoTime().ticks
retrievalDurationUs = (stopTime - startTime) div 1000
codexBlockExchangeRetrievalTimeUs.set(retrievalDurationUs)
trace "Block retrieval time", retrievalDurationUs
if bd.address.leaf:
p.trees.withValue(bd.address.treeCid, treeReq):
treeReq.leaves.withValue(bd.address.index, leafReq):
if not leafReq.delivered:
if proof =? bd.proof:
if not proof.index == bd.address.index:
warn "Proof index doesn't match leaf index", address = bd.address, proofIndex = proof.index
continue
without mhash =? bd.blk.cid.mhash.mapFailure, err:
error "Unable to get mhash from cid for block", address = bd.address, msg = err.msg
continue
without verifySuccess =? proof.verifyLeaf(mhash, treeReq.treeRoot), err:
error "Unable to verify proof for block", address = bd.address, msg = err.msg
continue
if verifySuccess:
without leaf =? getOrComputeLeaf(treeReq.treeRoot.mcodec, bd.blk), err:
error "Unable to get or calculate hash for block", address = bd.address
continue
leafReq.handle.complete(bd.blk)
leafReq[] = LeafReq(delivered: true, blkCid: bd.blk.cid, leaf: leaf)
inc treeReq.deliveredCount
p.checkIfAllDelivered(treeReq[])
else:
warn "Invalid proof provided for a block", address = bd.address
else:
warn "Missing proof for a block", address = bd.address
else:
trace "Ignore veryfing proof for already delivered block", address = bd.address
proc setInFlight*(p: PendingBlocksManager,
cid: Cid,
inFlight = true) =
p.blocks.withValue(cid, pending):
pending[].inFlight = inFlight
trace "Setting inflight", cid, inFlight = pending[].inFlight
pending.inFlight = inFlight
trace "Setting inflight", cid, inFlight = pending.inFlight
proc trySetInFlight*(treeReq: TreeReq,
                     index: Natural,
                     inFlight = true) =
  ## Mark the undelivered leaf at `index` as (not) in flight;
  ## no-op for unknown or already delivered leaves.
  treeReq.leaves.withValue(index, leafReq):
    if not leafReq.delivered:
      leafReq.inFlight = inFlight
      trace "Setting inflight", treeCid = treeReq.treeCid, index, inFlight = inFlight
proc isInFlight*(treeReq: TreeReq,
                 index: Natural
                ): bool =
  ## True when the leaf at `index` exists, is undelivered and has an
  ## outstanding request; false for unknown indices.
  treeReq.leaves.withValue(index, leafReq):
    return (not leafReq.delivered) and leafReq.inFlight
  do:
    return false
proc isInFlight*(p: PendingBlocksManager,
cid: Cid
): bool =
p.blocks.withValue(cid, pending):
result = pending[].inFlight
result = pending.inFlight
trace "Getting inflight", cid, inFlight = result
proc pending*(p: PendingBlocksManager, cid: Cid): bool =
@ -111,14 +291,34 @@ proc pending*(p: PendingBlocksManager, cid: Cid): bool =
proc contains*(p: PendingBlocksManager, cid: Cid): bool =
p.pending(cid)
iterator wantList*(p: PendingBlocksManager): Cid =
iterator wantList*(p: PendingBlocksManager): BlockAddress =
  ## All wanted block addresses: plain pending block cids plus every
  ## still-undelivered tree leaf.
  for k in p.blocks.keys:
    yield BlockAddress(leaf: false, cid: k)

  for treeCid, treeReq in p.trees.pairs:
    for index, leafReq in treeReq.leaves.pairs:
      if not leafReq.delivered:
        yield BlockAddress(leaf: true, treeCid: treeCid, index: index)
iterator wantListBlockCids*(p: PendingBlocksManager): Cid =
  ## Cids of pending non-leaf block requests only.
  for k in p.blocks.keys:
    yield k
iterator wantListCids*(p: PendingBlocksManager): Cid =
  ## Cids of pending block requests followed by pending tree cids.
  for k in p.blocks.keys:
    yield k

  for k in p.trees.keys:
    yield k
iterator wantHandles*(p: PendingBlocksManager): Future[Block] =
for v in p.blocks.values:
yield v.handle
proc wantListLen*(p: PendingBlocksManager): int =
  ## Number of pending block requests plus pending tree requests.
  p.blocks.len + p.trees.len
func len*(p: PendingBlocksManager): int =
p.blocks.len

View File

@ -34,14 +34,14 @@ const
MaxInflight* = 100
type
WantListHandler* = proc(peer: PeerId, wantList: Wantlist): Future[void] {.gcsafe.}
BlocksHandler* = proc(peer: PeerId, blocks: seq[bt.Block]): Future[void] {.gcsafe.}
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
WantListSender* = proc(
id: PeerId,
cids: seq[Cid],
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
@ -50,19 +50,19 @@ type
BlockExcHandlers* = object
onWantList*: WantListHandler
onBlocks*: BlocksHandler
onBlocksDelivery*: BlocksDeliveryHandler
onPresence*: BlockPresenceHandler
onAccount*: AccountHandler
onPayment*: PaymentHandler
BlocksSender* = proc(peer: PeerId, presence: seq[bt.Block]): Future[void] {.gcsafe.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
BlockExcRequest* = object
sendWantList*: WantListSender
sendBlocks*: BlocksSender
sendBlocksDelivery*: BlocksDeliverySender
sendPresence*: PresenceSender
sendAccount*: AccountSender
sendPayment*: PaymentSender
@ -94,7 +94,7 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
proc handleWantList(
b: BlockExcNetwork,
peer: NetworkPeer,
list: Wantlist) {.async.} =
list: WantList) {.async.} =
## Handle incoming want list
##
@ -102,32 +102,10 @@ proc handleWantList(
trace "Handling want list for peer", peer = peer.id, items = list.entries.len
await b.handlers.onWantList(peer.id, list)
# TODO: make into a template
proc makeWantList*(
cids: seq[Cid],
priority: int = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false
): Wantlist =
## make list of wanted entries
##
Wantlist(
entries: cids.mapIt(
Entry(
`block`: it.data.buffer,
priority: priority.int32,
cancel: cancel,
wantType: wantType,
sendDontHave: sendDontHave) ),
full: full)
proc sendWantList*(
b: BlockExcNetwork,
id: PeerId,
cids: seq[Cid],
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
@ -137,58 +115,40 @@ proc sendWantList*(
## Send a want message to peer
##
trace "Sending want list to peer", peer = id, `type` = $wantType, items = cids.len
let msg = makeWantList(
cids,
priority,
cancel,
wantType,
full,
sendDontHave)
trace "Sending want list to peer", peer = id, `type` = $wantType, items = addresses.len
let msg = WantList(
entries: addresses.mapIt(
WantListEntry(
address: it,
priority: priority,
cancel: cancel,
wantType: wantType,
sendDontHave: sendDontHave) ),
full: full)
b.send(id, Message(wantlist: msg))
proc handleBlocks(
proc handleBlocksDelivery(
b: BlockExcNetwork,
peer: NetworkPeer,
blocks: seq[pb.Block]
blocksDelivery: seq[BlockDelivery]
) {.async.} =
## Handle incoming blocks
##
if not b.handlers.onBlocks.isNil:
trace "Handling blocks for peer", peer = peer.id, items = blocks.len
if not b.handlers.onBlocksDelivery.isNil:
trace "Handling blocks for peer", peer = peer.id, items = blocksDelivery.len
await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)
var blks: seq[bt.Block]
for blob in blocks:
without cid =? Cid.init(blob.prefix):
trace "Unable to initialize Cid from protobuf message"
without blk =? bt.Block.new(cid, blob.data, verify = true):
trace "Unable to initialize Block from data"
blks.add(blk)
await b.handlers.onBlocks(peer.id, blks)
template makeBlocks*(blocks: seq[bt.Block]): seq[pb.Block] =
var blks: seq[pb.Block]
for blk in blocks:
blks.add(pb.Block(
prefix: blk.cid.data.buffer,
data: blk.data
))
blks
proc sendBlocks*(
proc sendBlocksDelivery*(
b: BlockExcNetwork,
id: PeerId,
blocks: seq[bt.Block]): Future[void] =
blocksDelivery: seq[BlockDelivery]): Future[void] =
## Send blocks to remote
##
b.send(id, pb.Message(payload: makeBlocks(blocks)))
b.send(id, pb.Message(payload: blocksDelivery))
proc handleBlockPresence(
b: BlockExcNetwork,
@ -260,11 +220,11 @@ proc rpcHandler(
## handle rpc messages
##
try:
if msg.wantlist.entries.len > 0:
asyncSpawn b.handleWantList(peer, msg.wantlist)
if msg.wantList.entries.len > 0:
asyncSpawn b.handleWantList(peer, msg.wantList)
if msg.payload.len > 0:
asyncSpawn b.handleBlocks(peer, msg.payload)
asyncSpawn b.handleBlocksDelivery(peer, msg.payload)
if msg.blockPresences.len > 0:
asyncSpawn b.handleBlockPresence(peer, msg.blockPresences)
@ -359,7 +319,7 @@ proc new*(
proc sendWantList(
id: PeerId,
cids: seq[Cid],
cids: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
@ -369,8 +329,8 @@ proc new*(
id, cids, priority, cancel,
wantType, full, sendDontHave)
proc sendBlocks(id: PeerId, blocks: seq[bt.Block]): Future[void] {.gcsafe.} =
self.sendBlocks(id, blocks)
proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
self.sendBlocksDelivery(id, blocksDelivery)
proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
self.sendBlockPresence(id, presence)
@ -383,7 +343,7 @@ proc new*(
self.request = BlockExcRequest(
sendWantList: sendWantList,
sendBlocks: sendBlocks,
sendBlocksDelivery: sendBlocksDelivery,
sendPresence: sendPresence,
sendAccount: sendAccount,
sendPayment: sendPayment)

View File

@ -9,6 +9,8 @@
import std/sequtils
import std/tables
import std/sugar
import std/sets
import pkg/chronicles
import pkg/libp2p
@ -20,6 +22,8 @@ import ../protobuf/blockexc
import ../protobuf/payments
import ../protobuf/presence
import ../../blocktype
export payments, nitro
logScope:
@ -28,33 +32,39 @@ logScope:
type
BlockExcPeerCtx* = ref object of RootObj
id*: PeerId
blocks*: Table[Cid, Presence] # remote peer have list including price
peerWants*: seq[Entry] # remote peers want lists
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
peerWants*: seq[WantListEntry] # remote peers want lists
exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us
account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id
proc peerHave*(self: BlockExcPeerCtx): seq[Cid] =
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
toSeq(self.blocks.keys)
proc contains*(self: BlockExcPeerCtx, cid: Cid): bool =
cid in self.blocks
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
self.blocks[presence.cid] = presence
self.blocks[presence.address] = presence
func cleanPresence*(self: BlockExcPeerCtx, cids: seq[Cid]) =
for cid in cids:
self.blocks.del(cid)
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
for a in addresses:
self.blocks.del(a)
func cleanPresence*(self: BlockExcPeerCtx, cid: Cid) =
self.cleanPresence(@[cid])
func cleanPresence*(self: BlockExcPeerCtx, address: BlockAddress) =
self.cleanPresence(@[address])
func price*(self: BlockExcPeerCtx, cids: seq[Cid]): UInt256 =
func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
var price = 0.u256
for cid in cids:
self.blocks.withValue(cid, precense):
for a in addresses:
self.blocks.withValue(a, precense):
price += precense[].price
trace "Blocks price", price

View File

@ -20,6 +20,7 @@ import pkg/chronicles
import pkg/libp2p
import ../protobuf/blockexc
import ../../blocktype
import ./peercontext
export peercontext
@ -59,24 +60,32 @@ func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx =
func len*(self: PeerCtxStore): int =
self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == address ) )
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == cid ) )
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it.cidOrTreeCid == cid ) )
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it == address ) )
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.cid == cid ) )
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.address.cidOrTreeCid == cid ) )
func selectCheapest*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
var peers = self.peersHave(cid)
func selectCheapest*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
# assume that the price for all leaves in a tree is the same
let rootAddress = BlockAddress(leaf: false, cid: address.cidOrTreeCid)
var peers = self.peersHave(rootAddress)
func cmp(a, b: BlockExcPeerCtx): int =
var
priceA = 0.u256
priceB = 0.u256
a.blocks.withValue(cid, precense):
a.blocks.withValue(rootAddress, precense):
priceA = precense[].price
b.blocks.withValue(cid, precense):
b.blocks.withValue(rootAddress, precense):
priceB = precense[].price
if priceA == priceB:

View File

@ -10,46 +10,42 @@
import std/hashes
import std/sequtils
import pkg/libp2p
import pkg/stew/endians2
import message
import ../../blocktype
export Message, protobufEncode, protobufDecode
export Wantlist, WantType, Entry
export Block, BlockPresenceType, BlockPresence
export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate
proc hash*(e: Entry): Hash =
hash(e.`block`)
proc hash*(e: WantListEntry): Hash =
if e.address.leaf:
let data = e.address.treeCid.data.buffer & @(e.address.index.uint64.toBytesBE)
hash(data)
else:
hash(e.address.cid.data.buffer)
proc cid*(e: Entry): Cid =
## Helper to convert raw bytes to Cid
##
Cid.init(e.`block`).get()
proc contains*(a: openArray[Entry], b: Cid): bool =
proc contains*(a: openArray[WantListEntry], b: BlockAddress): bool =
## Convenience method to check for peer precense
##
a.filterIt( it.cid == b ).len > 0
a.anyIt(it.address == b)
proc `==`*(a: Entry, cid: Cid): bool =
return a.cid == cid
proc `==`*(a: WantListEntry, b: BlockAddress): bool =
return a.address == b
proc `<`*(a, b: Entry): bool =
proc `<`*(a, b: WantListEntry): bool =
a.priority < b.priority
proc cid*(e: BlockPresence): Cid =
## Helper to convert raw bytes to Cid
##
Cid.init(e.cid).get()
proc `==`*(a: BlockPresence, b: BlockAddress): bool =
return a.address == b
proc `==`*(a: BlockPresence, cid: Cid): bool =
return cid(a) == cid
proc contains*(a: openArray[BlockPresence], b: Cid): bool =
proc contains*(a: openArray[BlockPresence], b: BlockAddress): bool =
## Convenience method to check for peer precense
##
a.filterIt( cid(it) == b ).len > 0
a.anyIt(it.address == b)

View File

@ -2,11 +2,18 @@
# and Protobuf encoder/decoder for these messages.
#
# Eventually all this code should be auto-generated from message.proto.
import std/sugar
import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/cid
import pkg/questionable
import ../../units
import ../../merkletree
import ../../blocktype
const
MaxBlockSize* = 100.MiBs.uint
MaxMessageSize* = 100.MiBs.uint
@ -16,27 +23,28 @@ type
WantBlock = 0,
WantHave = 1
Entry* = object
`block`*: seq[byte] # The block cid
WantListEntry* = object
address*: BlockAddress
priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false
Wantlist* = object
entries*: seq[Entry] # A list of wantlist entries
full*: bool # Whether this is the full wantlist. default to false
WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries
full*: bool # Whether this is the full wantList. default to false
Block* = object
prefix*: seq[byte] # CID prefix (cid version, multicodec and multihash prefix (type + length)
data*: seq[byte]
BlockDelivery* = object
blk*: Block
address*: BlockAddress
proof*: ?MerkleProof # Present only if `address.leaf` is true
BlockPresenceType* = enum
Have = 0,
DontHave = 1
BlockPresence* = object
cid*: seq[byte] # The block cid
address*: BlockAddress
`type`*: BlockPresenceType
price*: seq[byte] # Amount of assets to pay for the block (UInt256)
@ -47,8 +55,8 @@ type
update*: seq[byte] # Signed Nitro state, serialized as JSON
Message* = object
wantlist*: Wantlist
payload*: seq[Block]
wantList*: WantList
payload*: seq[BlockDelivery]
blockPresences*: seq[BlockPresence]
pendingBytes*: uint
account*: AccountMessage
@ -58,9 +66,20 @@ type
# Encoding Message into seq[byte] in Protobuf format
#
proc write*(pb: var ProtoBuffer, field: int, value: Entry) =
proc write*(pb: var ProtoBuffer, field: int, value: BlockAddress) =
var ipb = initProtoBuffer()
ipb.write(1, value.`block`)
ipb.write(1, value.leaf.uint)
if value.leaf:
ipb.write(2, value.treeCid.data.buffer)
ipb.write(3, value.index.uint64)
else:
ipb.write(4, value.cid.data.buffer)
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: WantListEntry) =
var ipb = initProtoBuffer()
ipb.write(1, value.address)
ipb.write(2, value.priority.uint64)
ipb.write(3, value.cancel.uint)
ipb.write(4, value.wantType.uint)
@ -68,7 +87,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: Entry) =
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: Wantlist) =
proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
var ipb = initProtoBuffer()
for v in value.entries:
ipb.write(1, v)
@ -76,16 +95,20 @@ proc write*(pb: var ProtoBuffer, field: int, value: Wantlist) =
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: Block) =
proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
var ipb = initProtoBuffer(maxSize = MaxBlockSize)
ipb.write(1, value.prefix)
ipb.write(2, value.data)
ipb.write(1, value.blk.cid.data.buffer)
ipb.write(2, value.blk.data)
ipb.write(3, value.address)
if value.address.leaf:
if proof =? value.proof:
ipb.write(4, proof.encode())
ipb.finish()
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockPresence) =
var ipb = initProtoBuffer()
ipb.write(1, value.cid)
ipb.write(1, value.address)
ipb.write(2, value.`type`.uint)
ipb.write(3, value.price)
ipb.finish()
@ -105,7 +128,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer(maxSize = MaxMessageSize)
ipb.write(1, value.wantlist)
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v)
for v in value.blockPresences:
@ -120,12 +143,41 @@ proc protobufEncode*(value: Message): seq[byte] =
#
# Decoding Message from seq[byte] in Protobuf format
#
proc decode*(_: type Entry, pb: ProtoBuffer): ProtoResult[Entry] =
proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] =
var
value = Entry()
value: BlockAddress
leaf: bool
field: uint64
discard ? pb.getField(1, value.`block`)
cidBuf = newSeq[byte]()
if ? pb.getField(1, field):
leaf = bool(field)
if leaf:
var
treeCid: Cid
index: Natural
if ? pb.getField(2, cidBuf):
treeCid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ? pb.getField(3, field):
index = field
value = BlockAddress(leaf: true, treeCid: treeCid, index: index)
else:
var cid: Cid
if ? pb.getField(4, cidBuf):
cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
value = BlockAddress(leaf: false, cid: cid)
ok(value)
proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry] =
var
value = WantListEntry()
field: uint64
ipb: ProtoBuffer
buf = newSeq[byte]()
if ? pb.getField(1, ipb):
value.address = ? BlockAddress.decode(ipb)
if ? pb.getField(2, field):
value.priority = int32(field)
if ? pb.getField(3, field):
@ -136,30 +188,53 @@ proc decode*(_: type Entry, pb: ProtoBuffer): ProtoResult[Entry] =
value.sendDontHave = bool(field)
ok(value)
proc decode*(_: type Wantlist, pb: ProtoBuffer): ProtoResult[Wantlist] =
proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] =
var
value = Wantlist()
value = WantList()
field: uint64
sublist: seq[seq[byte]]
if ? pb.getRepeatedField(1, sublist):
for item in sublist:
value.entries.add(? Entry.decode(initProtoBuffer(item)))
value.entries.add(? WantListEntry.decode(initProtoBuffer(item)))
if ? pb.getField(2, field):
value.full = bool(field)
ok(value)
proc decode*(_: type Block, pb: ProtoBuffer): ProtoResult[Block] =
proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery] =
var
value = Block()
discard ? pb.getField(1, value.prefix)
discard ? pb.getField(2, value.data)
value = BlockDelivery()
field: uint64
dataBuf = newSeq[byte]()
cidBuf = newSeq[byte]()
cid: Cid
ipb: ProtoBuffer
if ? pb.getField(1, cidBuf):
cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ? pb.getField(2, dataBuf):
value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
if ? pb.getField(3, ipb):
value.address = ? BlockAddress.decode(ipb)
if value.address.leaf:
var proofBuf = newSeq[byte]()
if ? pb.getField(4, proofBuf):
let proof = ? MerkleProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
value.proof = proof.some
else:
value.proof = MerkleProof.none
else:
value.proof = MerkleProof.none
ok(value)
proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence] =
var
value = BlockPresence()
field: uint64
discard ? pb.getField(1, value.cid)
ipb: ProtoBuffer
if ? pb.getField(1, ipb):
value.address = ? BlockAddress.decode(ipb)
if ? pb.getField(2, field):
value.`type` = BlockPresenceType(field)
discard ? pb.getField(3, value.price)
@ -184,10 +259,10 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
ipb: ProtoBuffer
sublist: seq[seq[byte]]
if ? pb.getField(1, ipb):
value.wantlist = ? Wantlist.decode(ipb)
value.wantList = ? WantList.decode(ipb)
if ? pb.getRepeatedField(3, sublist):
for item in sublist:
value.payload.add(? Block.decode(initProtoBuffer(item, maxSize = MaxBlockSize)))
value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)))
if ? pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(? BlockPresence.decode(initProtoBuffer(item)))

View File

@ -5,6 +5,8 @@ import pkg/questionable/results
import pkg/upraises
import ./blockexc
import ../../blocktype
export questionable
export stint
export BlockPresenceType
@ -14,7 +16,7 @@ upraises.push: {.upraises: [].}
type
PresenceMessage* = blockexc.BlockPresence
Presence* = object
cid*: Cid
address*: BlockAddress
have*: bool
price*: UInt256
@ -24,19 +26,18 @@ func parse(_: type UInt256, bytes: seq[byte]): ?UInt256 =
UInt256.fromBytesBE(bytes).some
func init*(_: type Presence, message: PresenceMessage): ?Presence =
without cid =? Cid.init(message.cid) and
price =? UInt256.parse(message.price):
without price =? UInt256.parse(message.price):
return none Presence
some Presence(
cid: cid,
address: message.address,
have: message.`type` == BlockPresenceType.Have,
price: price
)
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
PresenceMessage(
cid: presence.cid.data.buffer,
address: presence.address,
`type`: if presence.have:
BlockPresenceType.Have
else:

View File

@ -8,17 +8,19 @@
## those terms.
import std/tables
import std/sugar
export tables
import pkg/upraises
push: {.upraises: [].}
import pkg/libp2p/[cid, multicodec]
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils
import pkg/questionable
import pkg/questionable/results
import pkg/chronicles
import pkg/json_serialization
import ./units
import ./utils
@ -37,91 +39,44 @@ type
cid*: Cid
data*: seq[byte]
template EmptyCid*: untyped =
var
EmptyCid {.global, threadvar.}:
array[CIDv0..CIDv1, Table[MultiCodec, Cid]]
BlockAddress* = object
case leaf*: bool
of true:
treeCid*: Cid
index*: Natural
else:
cid*: Cid
once:
EmptyCid = [
CIDv0: {
multiCodec("sha2-256"): Cid
.init("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
.get()
}.toTable,
CIDv1: {
multiCodec("sha2-256"): Cid
.init("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")
.get()
}.toTable,
]
EmptyCid
proc `==`*(a, b: BlockAddress): bool =
a.leaf == b.leaf and
(
if a.leaf:
a.treeCid == b.treeCid and a.index == b.index
else:
a.cid == b.cid
)
template EmptyDigests*: untyped =
var
EmptyDigests {.global, threadvar.}:
array[CIDv0..CIDv1, Table[MultiCodec, MultiHash]]
proc `$`*(a: BlockAddress): string =
if a.leaf:
"treeCid: " & $a.treeCid & ", index: " & $a.index
else:
"cid: " & $a.cid
once:
EmptyDigests = [
CIDv0: {
multiCodec("sha2-256"): EmptyCid[CIDv0]
.catch
.get()[multiCodec("sha2-256")]
.catch
.get()
.mhash
.get()
}.toTable,
CIDv1: {
multiCodec("sha2-256"): EmptyCid[CIDv1]
.catch
.get()[multiCodec("sha2-256")]
.catch
.get()
.mhash
.get()
}.toTable,
]
proc writeValue*(
writer: var JsonWriter,
value: Cid
) {.upraises:[IOError].} =
writer.writeValue($value)
EmptyDigests
proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf:
a.treeCid
else:
a.cid
template EmptyBlock*: untyped =
var
EmptyBlock {.global, threadvar.}:
array[CIDv0..CIDv1, Table[MultiCodec, Block]]
once:
EmptyBlock = [
CIDv0: {
multiCodec("sha2-256"): Block(
cid: EmptyCid[CIDv0][multiCodec("sha2-256")])
}.toTable,
CIDv1: {
multiCodec("sha2-256"): Block(
cid: EmptyCid[CIDv1][multiCodec("sha2-256")])
}.toTable,
]
EmptyBlock
proc isEmpty*(cid: Cid): bool =
cid == EmptyCid[cid.cidver]
.catch
.get()[cid.mhash.get().mcodec]
.catch
.get()
proc isEmpty*(blk: Block): bool =
blk.cid.isEmpty
proc emptyBlock*(cid: Cid): Block =
EmptyBlock[cid.cidver]
.catch
.get()[cid.mhash.get().mcodec]
.catch
.get()
proc address*(b: Block): BlockAddress =
BlockAddress(leaf: false, cid: b.cid)
proc `$`*(b: Block): string =
result &= "cid: " & $b.cid
@ -154,17 +109,58 @@ func new*(
verify: bool = true
): ?!Block =
## creates a new block for both storage and network IO
##
if verify:
let
mhash = ? cid.mhash.mapFailure
computedMhash = ? MultiHash.digest($mhash.mcodec, data).mapFailure
computedCid = ? Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure
if computedCid != cid:
return "Cid doesn't match the data".failure
return Block(
cid: cid,
data: @data
).success
proc emptyCid*(version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec): ?!Cid =
## Returns cid representing empty content, given cid version, hash codec and data codec
##
let
mhash = ? cid.mhash.mapFailure
b = ? Block.new(
data = @data,
version = cid.cidver,
codec = cid.mcodec,
mcodec = mhash.mcodec)
const
Sha256 = multiCodec("sha2-256")
Raw = multiCodec("raw")
DagPB = multiCodec("dag-pb")
DagJson = multiCodec("dag-json")
if verify and cid != b.cid:
return "Cid and content don't match!".failure
var index {.global, threadvar.}: Table[(CIDv0, Sha256, DagPB), Result[Cid, CidError]]
once:
index = {
# source https://ipld.io/specs/codecs/dag-pb/fixtures/cross-codec/#dagpb_empty
(CIDv0, Sha256, DagPB): Cid.init("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n"),
(CIDv1, Sha256, DagPB): Cid.init("zdj7Wkkhxcu2rsiN6GUyHCLsSLL47kdUNfjbFqBUUhMFTZKBi"), # base36: bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku
(CIDv1, Sha256, DagJson): Cid.init("z4EBG9jGUWMVxX9deANWX7iPyExLswe2akyF7xkNAaYgugvnhmP"), # base36: baguqeera6mfu3g6n722vx7dbitpnbiyqnwah4ddy4b5c3rwzxc5pntqcupta
(CIDv1, Sha256, Raw): Cid.init("zb2rhmy65F3REf8SZp7De11gxtECBGgUKaLdiDj7MCGCHxbDW"),
}.toTable
success b
index[(version, hcodec, dcodec)].catch.flatMap((a: Result[Cid, CidError]) => a.mapFailure)
proc emptyDigest*(version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec): ?!MultiHash =
emptyCid(version, hcodec, dcodec)
.flatMap((cid: Cid) => cid.mhash.mapFailure)
proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block =
emptyCid(version, hcodec, multiCodec("raw"))
.flatMap((cid: Cid) => Block.new(cid = cid, data = @[]))
proc emptyBlock*(cid: Cid): ?!Block =
cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
emptyBlock(cid.cidver, mhash.mcodec))
proc isEmpty*(cid: Cid): bool =
success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
emptyCid(cid.cidver, mhash.mcodec, cid.mcodec))
proc isEmpty*(blk: Block): bool =
blk.cid.isEmpty

View File

@ -231,6 +231,8 @@ proc new*(
wallet = WalletRef.new(EthPrivateKey.random())
network = BlockExcNetwork.new(switch)
treeReader = TreeReader.new()
repoData = case config.repoKind
of repoFS: Datastore(FSDatastore.new($config.dataDir, depth = 5)
.expect("Should create repo file data store!"))

View File

@ -12,15 +12,19 @@ import pkg/upraises
push: {.upraises: [].}
import std/sequtils
import std/options
import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/libp2p/[multicodec, cid, multibase, multihash]
import pkg/libp2p/protobuf/minprotobuf
import ../manifest
import ../merkletree
import ../stores
import ../blocktype as bt
import ../utils
import pkg/stew/byteutils
import ./backend
@ -64,259 +68,142 @@ type
decoderProvider*: DecoderProvider
store*: BlockStore
GetNext = proc(): Future[?(bt.Block, int)] {.upraises: [], gcsafe, closure.}
PendingBlocksIter* = ref object
finished*: bool
next*: GetNext
func indexToPos(self: Erasure, encoded: Manifest, idx, step: int): int {.inline.} =
## Convert an index to a position in the encoded
## dataset
## `idx` - the index to convert
## `step` - the current step
## `pos` - the position in the encoded dataset
proc encode*(
self: Erasure,
manifest: Manifest,
blocks: int,
parity: int
): Future[?!Manifest] {.async.} =
## Encode a manifest into one that is erasure protected.
##
(idx - step) div encoded.steps
iterator items*(blocks: PendingBlocksIter): Future[?(bt.Block, int)] =
while not blocks.finished:
yield blocks.next()
proc getPendingBlocks(
self: Erasure,
manifest: Manifest,
start, stop, steps: int): ?!PendingBlocksIter =
## Get pending blocks iterator
## `manifest` - the original manifest to be encoded
## `blocks` - the number of blocks to be encoded - K
## `parity` - the number of parity blocks to generate - M
##
var
# calculate block indexes to retrieve
blockIdx = toSeq(countup(start, stop, steps))
# request all blocks from the store
pendingBlocks = blockIdx.mapIt(
self.store.getBlock(manifest[it]) # Get the data blocks (first K)
)
indices = pendingBlocks # needed so we can track the block indices
iter = PendingBlocksIter(finished: false)
trace "Requesting blocks", pendingBlocks = pendingBlocks.len
proc next(): Future[?(bt.Block, int)] {.async.} =
if iter.finished:
trace "No more blocks"
return none (bt.Block, int)
if pendingBlocks.len == 0:
iter.finished = true
trace "No more blocks - finished"
return none (bt.Block, int)
let
done = await one(pendingBlocks)
idx = indices.find(done)
logScope:
idx = idx
blockIdx = blockIdx[idx]
manifest = manifest[blockIdx[idx]]
pendingBlocks.del(pendingBlocks.find(done))
without blk =? (await done), error:
trace "Failed retrieving block", err = $error.msg
return none (bt.Block, int)
trace "Retrieved block"
some (blk, blockIdx[idx])
iter.next = next
success iter
proc prepareEncodingData(
self: Erasure,
encoded: Manifest,
step: int,
data: ref seq[seq[byte]],
emptyBlock: seq[byte]): Future[?!int] {.async.} =
## Prepare data for encoding
##
without pendingBlocksIter =?
self.getPendingBlocks(
encoded,
step,
encoded.rounded - 1, encoded.steps), err:
trace "Unable to get pending blocks", error = err.msg
return failure(err)
var resolved = 0
for blkFut in pendingBlocksIter:
if (blk, idx) =? (await blkFut):
let
pos = self.indexToPos(encoded, idx, step)
if blk.isEmpty:
trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock)
else:
trace "Encoding block", cid = blk.cid, idx
shallowCopy(data[pos], blk.data)
resolved.inc()
success resolved
proc prepareDecodingData(
self: Erasure,
encoded: Manifest,
step: int,
data: ref seq[seq[byte]],
parityData: ref seq[seq[byte]],
emptyBlock: seq[byte]): Future[?!(int, int)] {.async.} =
## Prepare data for decoding
## `encoded` - the encoded manifest
## `step` - the current step
## `data` - the data to be prepared
## `parityData` - the parityData to be prepared
## `emptyBlock` - the empty block to be used for padding
##
without pendingBlocksIter =?
self.getPendingBlocks(
encoded,
step,
encoded.len - 1, encoded.steps), err:
trace "Unable to get pending blocks", error = err.msg
return failure(err)
var
dataPieces = 0
parityPieces = 0
resolved = 0
for blkFut in pendingBlocksIter:
# Continue to receive blocks until we have just enough for decoding
# or no more blocks can arrive
if resolved >= encoded.ecK:
break
if (blk, idx) =? (await blkFut):
let
pos = self.indexToPos(encoded, idx, step)
logScope:
cid = blk.cid
idx = idx
pos = pos
step = step
empty = blk.isEmpty
if idx >= encoded.rounded:
trace "Retrieved parity block"
shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
parityPieces.inc
else:
trace "Retrieved data block"
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
dataPieces.inc
resolved.inc
return success (dataPieces, parityPieces)
proc prepareManifest(
self: Erasure,
manifest: Manifest,
blocks: int,
parity: int): ?!Manifest =
logScope:
original_cid = manifest.cid.get()
original_len = manifest.len
original_len = manifest.blocksCount
blocks = blocks
parity = parity
if blocks > manifest.len:
trace "Unable to encode manifest, not enough blocks", blocks = blocks, len = manifest.len
return failure("Not enough blocks to encode")
trace "Erasure coding manifest", blocks, parity
trace "Preparing erasure coded manifest", blocks, parity
without var encoded =? Manifest.new(manifest, blocks, parity), error:
trace "Unable to create manifest", msg = error.msg
return error.failure
without tree =? await self.store.getTree(manifest.treeCid), err:
return err.failure
let leaves = tree.leaves
let
rounded = roundUp(manifest.blocksCount, blocks)
steps = divUp(manifest.blocksCount, blocks)
blocksCount = rounded + (steps * parity)
var cids = newSeq[Cid](blocksCount)
# copy original manifest blocks
for i in 0..<rounded:
if i < manifest.blocksCount:
without cid =? Cid.init(manifest.version, manifest.codec, leaves[i]).mapFailure, err:
return err.failure
cids[i] = cid
else:
without cid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err:
return err.failure
cids[i] = cid
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.len
trace "Erasure coded manifest prepared"
success encoded
proc encodeData(
self: Erasure,
manifest: Manifest): Future[?!void] {.async.} =
## Encode blocks pointed to by the protected manifest
##
## `manifest` - the manifest to encode
##
steps = steps
rounded_blocks = rounded
new_manifest = blocksCount
var
encoded = manifest
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.len
protected = encoded.protected
ecK = encoded.ecK
ecM = encoded.ecM
if not encoded.protected:
trace "Manifest is not erasure protected"
return failure("Manifest is not erasure protected")
var
encoder = self.encoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
emptyBlock = newSeq[byte](encoded.blockSize.int)
encoder = self.encoderProvider(manifest.blockSize.int, blocks, parity)
var toadd = 0
var tocount = 0
var maxidx = 0
try:
for step in 0..<encoded.steps:
for i in 0..<steps:
# TODO: Don't allocate a new seq every time, allocate once and zero out
var
data = seq[seq[byte]].new() # number of blocks to encode
parityData = newSeqWith[seq[byte]](encoded.ecM, newSeq[byte](encoded.blockSize.int))
data = newSeq[seq[byte]](blocks) # number of blocks to encode
parityData = newSeqWith[seq[byte]](parity, newSeq[byte](manifest.blockSize.int))
# calculate block indexes to retrieve
blockIdx = toSeq(countup(i, rounded - 1, steps))
# request all blocks from the store
dataBlocks = await allFinished(
blockIdx.mapIt( self.store.getBlock(cids[it]) ))
data[].setLen(encoded.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
without resolved =?
(await self.prepareEncodingData(encoded, step, data, emptyBlock)), err:
trace "Unable to prepare data", error = err.msg
return failure(err)
for j in 0..<blocks:
let idx = blockIdx[j]
if idx < manifest.blocksCount:
without blk =? (await dataBlocks[j]), error:
trace "Unable to retrieve block", error = error.msg
return failure error
trace "Erasure coding data", data = data[].len, parity = parityData.len
trace "Encoding block", cid = blk.cid, pos = idx
shallowCopy(data[j], blk.data)
else:
trace "Padding with empty block", pos = idx
data[j] = newSeq[byte](manifest.blockSize.int)
if (
let res = encoder.encode(data[], parityData);
res.isErr):
trace "Erasure coding data", data = data.len, parity = parityData.len
let res = encoder.encode(data, parityData);
if res.isErr:
trace "Unable to encode manifest!", error = $res.error
return res.mapFailure
return failure($res.error)
var idx = encoded.rounded + step
for j in 0..<encoded.ecM:
for j in 0..<parity:
let idx = rounded + blockIdx[j]
without blk =? bt.Block.new(parityData[j]), error:
trace "Unable to create parity block", err = error.msg
return failure(error)
trace "Adding parity block", cid = blk.cid, idx
encoded[idx] = blk.cid
trace "Adding parity block", cid = blk.cid, pos = idx
cids[idx] = blk.cid
maxidx = max(maxidx, idx)
toadd = toadd + blk.data.len
tocount.inc
if isErr (await self.store.putBlock(blk)):
trace "Unable to store block!", cid = blk.cid
return failure("Unable to store block!")
idx.inc(encoded.steps)
without var builder =? MerkleTreeBuilder.init(manifest.hcodec), err:
return failure(err)
for cid in cids:
without mhash =? cid.mhash.mapFailure, err:
return err.failure
if err =? builder.addLeaf(mhash).errorOption:
return failure(err)
without tree =? builder.build(), err:
return failure(err)
without treeBlk =? bt.Block.new(tree.encode()), err:
return failure(err)
if err =? (await self.store.putBlock(treeBlk)).errorOption:
return failure("Unable to store merkle tree block " & $treeBlk.cid & ", nested err: " & err.msg)
let encoded = Manifest.new(
manifest = manifest,
treeCid = treeBlk.cid,
treeRoot = tree.root,
datasetSize = (manifest.blockSize.int * blocksCount).NBytes,
ecK = blocks,
ecM = parity
)
return encoded.success
except CancelledError as exc:
trace "Erasure coding encoding cancelled"
raise exc # cancellation needs to be propagated
@ -326,104 +213,96 @@ proc encodeData(
finally:
encoder.release()
return success()
proc encode*(
self: Erasure,
manifest: Manifest,
blocks: int,
parity: int): Future[?!Manifest] {.async.} =
## Encode a manifest into one that is erasure protected.
##
## `manifest` - the original manifest to be encoded
## `blocks` - the number of blocks to be encoded - K
## `parity` - the number of parity blocks to generate - M
##
without var encoded =? self.prepareManifest(manifest, blocks, parity), error:
trace "Unable to prepare manifest", error = error.msg
return failure error
if err =? (await self.encodeData(encoded)).errorOption:
trace "Unable to encode data", error = err.msg
return failure err
return success encoded
proc decode*(
self: Erasure,
encoded: Manifest,
all = true): Future[?!Manifest] {.async.} =
self: Erasure,
encoded: Manifest
): Future[?!Manifest] {.async.} =
## Decode a protected manifest into it's original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
## `all` - if true, all blocks will be recovered,
## including parity
##
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
new_manifest = encoded.len
protected = encoded.protected
ecK = encoded.ecK
ecM = encoded.ecM
if not encoded.protected:
trace "Manifest is not erasure protected"
return failure "Manifest is not erasure protected"
new_manifest = encoded.blocksCount
var
decoder = self.decoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
emptyBlock = newSeq[byte](encoded.blockSize.int)
hasParity = false
trace "Decoding erasure coded manifest"
try:
for step in 0..<encoded.steps:
for i in 0..<encoded.steps:
# TODO: Don't allocate a new seq every time, allocate once and zero out
let
# calculate block indexes to retrieve
blockIdx = toSeq(countup(i, encoded.blocksCount - 1, encoded.steps))
# request all blocks from the store
pendingBlocks = blockIdx.mapIt(
self.store.getBlock(encoded.treeCid, it, encoded.treeRoot) # Get the data blocks (first K)
)
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
var
data = seq[seq[byte]].new()
# newSeq[seq[byte]](encoded.ecK) # number of blocks to encode
parityData = seq[seq[byte]].new()
data = newSeq[seq[byte]](encoded.ecK) # number of blocks to encode
parityData = newSeq[seq[byte]](encoded.ecM)
recovered = newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int))
idxPendingBlocks = pendingBlocks # copy futures to make using with `one` easier
emptyBlock = newSeq[byte](encoded.blockSize.int)
resolved = 0
data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M
while true:
# Continue to receive blocks until we have just enough for decoding
# or no more blocks can arrive
if (resolved >= encoded.ecK) or (idxPendingBlocks.len == 0):
break
without (dataPieces, parityPieces) =?
(await self.prepareDecodingData(encoded, step, data, parityData, emptyBlock)), err:
trace "Unable to prepare data", error = err.msg
return failure(err)
let
done = await one(idxPendingBlocks)
idx = pendingBlocks.find(done)
idxPendingBlocks.del(idxPendingBlocks.find(done))
without blk =? (await done), error:
trace "Failed retrieving block", error = error.msg
continue
if idx >= encoded.ecK:
trace "Retrieved parity block", cid = blk.cid, idx
shallowCopy(parityData[idx - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
else:
trace "Retrieved data block", cid = blk.cid, idx
shallowCopy(data[idx], if blk.isEmpty: emptyBlock else: blk.data)
resolved.inc
let
dataPieces = data.filterIt( it.len > 0 ).len
parityPieces = parityData.filterIt( it.len > 0 ).len
if dataPieces >= encoded.ecK:
trace "Retrieved all the required data blocks"
trace "Retrieved all the required data blocks", data = dataPieces, parity = parityPieces
continue
trace "Erasure decoding data"
trace "Erasure decoding data", data = dataPieces, parity = parityPieces
if (
let err = decoder.decode(data[], parityData[], recovered);
let err = decoder.decode(data, parityData, recovered);
err.isErr):
trace "Unable to decode data!", err = $err.error
trace "Unable to decode manifest!", err = $err.error
return failure($err.error)
for i in 0..<encoded.ecK:
if data[i].len <= 0 and not encoded.blocks[i].isEmpty:
if data[i].len <= 0:
without blk =? bt.Block.new(recovered[i]), error:
trace "Unable to create block!", exc = error.msg
return failure(error)
doAssert blk.cid in encoded.blocks,
"Recovered block not in original manifest"
trace "Recovered block", cid = blk.cid, index = i
trace "Recovered block", cid = blk.cid
if isErr (await self.store.putBlock(blk)):
trace "Unable to store block!", cid = blk.cid
return failure("Unable to store block!")
@ -436,8 +315,7 @@ proc decode*(
finally:
decoder.release()
without decoded =? Manifest.new(blocks = encoded.blocks[0..<encoded.originalLen]), error:
return error.failure
let decoded = Manifest.new(encoded)
return decoded.success

View File

@ -34,12 +34,6 @@ proc encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
? manifest.verify()
var pbNode = initProtoBuffer()
for c in manifest.blocks:
var pbLink = initProtoBuffer()
pbLink.write(1, c.data.buffer) # write Cid links
pbLink.finish()
pbNode.write(2, pbLink)
# NOTE: The `Data` field in the the `dag-pb`
# contains the following protobuf `Message`
#
@ -51,37 +45,38 @@ proc encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
# optional uint32 original = 4; # number of original blocks
# }
# Message Header {
# optional bytes rootHash = 1; # the root (tree) hash
# optional uint32 blockSize = 2; # size of a single block
# optional uint32 blocksLen = 3; # total amount of blocks
# optional ErasureInfo erasure = 4; # erasure coding info
# optional uint64 originalBytes = 5;# exact file size
# optional bytes treeCid = 1; # the cid of the tree
# optional bytes treeRoot = 2; # the root hash of the tree
# optional uint32 blockSize = 3; # size of a single block
# optional uint64 originalBytes = 4;# exact file size
# optional ErasureInfo erasure = 5; # erasure coding info
# }
# ```
#
let cid = ? manifest.cid
# var treeRootVBuf = initVBuffer()
var header = initProtoBuffer()
header.write(1, cid.data.buffer)
header.write(2, manifest.blockSize.uint32)
header.write(3, manifest.len.uint32)
header.write(5, manifest.originalBytes.uint64)
header.write(1, manifest.treeCid.data.buffer)
# treeRootVBuf.write(manifest.treeRoot)
header.write(2, manifest.treeRoot.data.buffer)
header.write(3, manifest.blockSize.uint32)
header.write(4, manifest.datasetSize.uint32)
if manifest.protected:
var erasureInfo = initProtoBuffer()
erasureInfo.write(1, manifest.ecK.uint32)
erasureInfo.write(2, manifest.ecM.uint32)
erasureInfo.write(3, manifest.originalCid.data.buffer)
erasureInfo.write(4, manifest.originalLen.uint32)
erasureInfo.write(4, manifest.originalTreeRoot.data.buffer)
erasureInfo.write(5, manifest.originalDatasetSize.uint32)
erasureInfo.finish()
header.write(4, erasureInfo)
header.write(5, erasureInfo)
pbNode.write(1, header) # set the rootHash Cid as the data field
pbNode.write(1, header) # set the treeCid as the data field
pbNode.finish()
return pbNode.buffer.success
func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
proc decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
## Decode a manifest from a data blob
##
@ -89,86 +84,93 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
pbNode = initProtoBuffer(data)
pbHeader: ProtoBuffer
pbErasureInfo: ProtoBuffer
rootHash: seq[byte]
originalCid: seq[byte]
originalBytes: uint64
treeCidBuf: seq[byte]
treeRootBuf: seq[byte]
originalTreeCid: seq[byte]
originalTreeRootBuf: seq[byte]
datasetSize: uint32
blockSize: uint32
blocksLen: uint32
originalLen: uint32
originalDatasetSize: uint32
ecK, ecM: uint32
blocks: seq[Cid]
# Decode `Header` message
if pbNode.getField(1, pbHeader).isErr:
return failure("Unable to decode `Header` from dag-pb manifest!")
# Decode `Header` contents
if pbHeader.getField(1, rootHash).isErr:
return failure("Unable to decode `rootHash` from manifest!")
if pbHeader.getField(1, treeCidBuf).isErr:
return failure("Unable to decode `treeCid` from manifest!")
if pbHeader.getField(2, blockSize).isErr:
if pbHeader.getField(2, treeRootBuf).isErr:
return failure("Unable to decode `treeRoot` from manifest!")
if pbHeader.getField(3, blockSize).isErr:
return failure("Unable to decode `blockSize` from manifest!")
if pbHeader.getField(3, blocksLen).isErr:
return failure("Unable to decode `blocksLen` from manifest!")
if pbHeader.getField(4, datasetSize).isErr:
return failure("Unable to decode `datasetSize` from manifest!")
if pbHeader.getField(5, originalBytes).isErr:
return failure("Unable to decode `originalBytes` from manifest!")
if pbHeader.getField(4, pbErasureInfo).isErr:
if pbHeader.getField(5, pbErasureInfo).isErr:
return failure("Unable to decode `erasureInfo` from manifest!")
if pbErasureInfo.buffer.len > 0:
let protected = pbErasureInfo.buffer.len > 0
if protected:
if pbErasureInfo.getField(1, ecK).isErr:
return failure("Unable to decode `K` from manifest!")
if pbErasureInfo.getField(2, ecM).isErr:
return failure("Unable to decode `M` from manifest!")
if pbErasureInfo.getField(3, originalCid).isErr:
return failure("Unable to decode `originalCid` from manifest!")
if pbErasureInfo.getField(3, originalTreeCid).isErr:
return failure("Unable to decode `originalTreeCid` from manifest!")
if pbErasureInfo.getField(4, originalTreeRootBuf).isErr:
return failure("Unable to decode `originalTreeRoot` from manifest!")
if pbErasureInfo.getField(4, originalLen).isErr:
return failure("Unable to decode `originalLen` from manifest!")
if pbErasureInfo.getField(5, originalDatasetSize).isErr:
return failure("Unable to decode `originalDatasetSize` from manifest!")
let rootHashCid = ? Cid.init(rootHash).mapFailure
var linksBuf: seq[seq[byte]]
if pbNode.getRepeatedField(2, linksBuf).isOk:
for pbLinkBuf in linksBuf:
var
blockBuf: seq[byte]
pbLink = initProtoBuffer(pbLinkBuf)
var
treeRoot: MultiHash
originalTreeRoot: MultiHash
if pbLink.getField(1, blockBuf).isOk:
blocks.add(? Cid.init(blockBuf).mapFailure)
let
treeCid = ? Cid.init(treeCidBuf).mapFailure
treeRootRes = ? MultiHash.decode(treeRootBuf, treeRoot).mapFailure
if blocksLen.int != blocks.len:
return failure("Total blocks and length of blocks in header don't match!")
if treeRootRes != treeRootBuf.len:
return failure("Error decoding `treeRoot` as MultiHash")
if protected:
let originalTreeRootRes = ? MultiHash.decode(originalTreeRootBuf, originalTreeRoot).mapFailure
if originalTreeRootRes != originalTreeRootBuf.len:
return failure("Error decoding `originalTreeRoot` as MultiHash")
let
self = if pbErasureInfo.buffer.len > 0:
self = if protected:
Manifest.new(
rootHash = rootHashCid,
originalBytes = originalBytes.NBytes,
treeCid = treeCid,
treeRoot = treeRoot,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
blocks = blocks,
version = rootHashCid.cidver,
hcodec = (? rootHashCid.mhash.mapFailure).mcodec,
codec = rootHashCid.mcodec,
version = treeCid.cidver,
hcodec = (? treeCid.mhash.mapFailure).mcodec,
codec = treeCid.mcodec,
ecK = ecK.int,
ecM = ecM.int,
originalCid = ? Cid.init(originalCid).mapFailure,
originalLen = originalLen.int
originalTreeCid = ? Cid.init(originalTreeCid).mapFailure,
originalTreeRoot = originalTreeRoot,
originalDatasetSize = originalDatasetSize.NBytes
)
else:
Manifest.new(
rootHash = rootHashCid,
originalBytes = originalBytes.NBytes,
treeCid = treeCid,
treeRoot = treeRoot,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
blocks = blocks,
version = rootHashCid.cidver,
hcodec = (? rootHashCid.mhash.mapFailure).mcodec,
codec = rootHashCid.mcodec
version = treeCid.cidver,
hcodec = (? treeCid.mhash.mapFailure).mcodec,
codec = treeCid.mcodec
)
? self.verify()

View File

@ -29,19 +29,20 @@ export types
type
Manifest* = ref object of RootObj
rootHash: ?Cid # Root (tree) hash of the contained data set
originalBytes*: NBytes # Exact size of the original (uploaded) file
blockSize: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
blocks: seq[Cid] # Block Cid
version: CidVersion # Cid version
hcodec: MultiCodec # Multihash codec
codec: MultiCodec # Data set codec
case protected: bool # Protected datasets have erasure coded info
treeCid: Cid # Cid of the merkle tree
treeRoot: MultiHash # Root hash of the merkle tree
datasetSize: NBytes # Total size of all blocks
blockSize: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
version: CidVersion # Cid version
hcodec: MultiCodec # Multihash codec
codec: MultiCodec # Data set codec
case protected: bool # Protected datasets have erasure coded info
of true:
ecK: int # Number of blocks to encode
ecM: int # Number of resulting parity blocks
originalCid: Cid # The original Cid of the dataset being erasure coded
originalLen: int # The length of the original manifest
ecK: int # Number of blocks to encode
ecM: int # Number of resulting parity blocks
originalTreeCid: Cid # The original Cid of the dataset being erasure coded
originalTreeRoot: MultiHash
originalDatasetSize: NBytes
else:
discard
@ -52,8 +53,8 @@ type
proc blockSize*(self: Manifest): NBytes =
self.blockSize
proc blocks*(self: Manifest): seq[Cid] =
self.blocks
proc datasetSize*(self: Manifest): NBytes =
self.datasetSize
proc version*(self: Manifest): CidVersion =
self.version
@ -74,32 +75,30 @@ proc ecM*(self: Manifest): int =
self.ecM
proc originalCid*(self: Manifest): Cid =
self.originalCid
self.originalTreeCid
proc originalLen*(self: Manifest): int =
self.originalLen
proc originalBlocksCount*(self: Manifest): int =
divUp(self.originalDatasetSize.int, self.blockSize.int)
proc originalTreeRoot*(self: Manifest): MultiHash =
self.originalTreeRoot
proc originalDatasetSize*(self: Manifest): NBytes =
self.originalDatasetSize
proc treeCid*(self: Manifest): Cid =
self.treeCid
proc treeRoot*(self: Manifest): MultiHash =
self.treeRoot
proc blocksCount*(self: Manifest): int =
divUp(self.datasetSize.int, self.blockSize.int)
############################################################
# Operations on block list
############################################################
func len*(self: Manifest): int =
self.blocks.len
func `[]`*(self: Manifest, i: Natural): Cid =
self.blocks[i]
func `[]=`*(self: var Manifest, i: Natural, item: Cid) =
self.rootHash = Cid.none
self.blocks[i] = item
func `[]`*(self: Manifest, i: BackwardsIndex): Cid =
self.blocks[self.len - i.int]
func `[]=`*(self: Manifest, i: BackwardsIndex, item: Cid) =
self.rootHash = Cid.none
self.blocks[self.len - i.int] = item
func isManifest*(cid: Cid): ?!bool =
let res = ?cid.contentType().mapFailure(CodexError)
($(res) in ManifestContainers).success
@ -107,25 +106,6 @@ func isManifest*(cid: Cid): ?!bool =
func isManifest*(mc: MultiCodec): ?!bool =
($mc in ManifestContainers).success
proc add*(self: Manifest, cid: Cid) =
assert not self.protected # we expect that protected manifests are created with properly-sized self.blocks
self.rootHash = Cid.none
trace "Adding cid to manifest", cid
self.blocks.add(cid)
self.originalBytes = self.blocks.len.NBytes * self.blockSize
iterator items*(self: Manifest): Cid =
for b in self.blocks:
yield b
iterator pairs*(self: Manifest): tuple[key: int, val: Cid] =
for pair in self.blocks.pairs():
yield pair
func contains*(self: Manifest, cid: Cid): bool =
cid in self.blocks
############################################################
# Various sizes and verification
############################################################
@ -133,79 +113,65 @@ func contains*(self: Manifest, cid: Cid): bool =
func bytes*(self: Manifest, pad = true): NBytes =
## Compute how many bytes corresponding StoreStream(Manifest, pad) will return
if pad or self.protected:
self.len.NBytes * self.blockSize
self.blocksCount.NBytes * self.blockSize
else:
self.originalBytes
self.datasetSize
func rounded*(self: Manifest): int =
## Number of data blocks in *protected* manifest including padding at the end
roundUp(self.originalLen, self.ecK)
roundUp(self.originalBlocksCount, self.ecK)
func steps*(self: Manifest): int =
## Number of EC groups in *protected* manifest
divUp(self.originalLen, self.ecK)
divUp(self.originalBlocksCount, self.ecK)
func verify*(self: Manifest): ?!void =
## Check manifest correctness
##
let originalLen = (if self.protected: self.originalLen else: self.len)
if divUp(self.originalBytes, self.blockSize) != originalLen:
return failure newException(CodexError, "Broken manifest: wrong originalBytes")
if self.protected and (self.len != self.steps * (self.ecK + self.ecM)):
return failure newException(CodexError, "Broken manifest: wrong originalLen")
if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
return success()
proc cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} =
self.treeCid.success
############################################################
# Cid computation
############################################################
template hashBytes(mh: MultiHash): seq[byte] =
## get the hash bytes of a multihash object
##
mh.data.buffer[mh.dpos..(mh.dpos + mh.size - 1)]
proc makeRoot*(self: Manifest): ?!void =
## Create a tree hash root of the contained
## block hashes
##
var
stack: seq[MultiHash]
for cid in self:
stack.add(? cid.mhash.mapFailure)
while stack.len > 1:
let
(b1, b2) = (stack.pop(), stack.pop())
mh = ? MultiHash.digest(
$self.hcodec,
(b1.hashBytes() & b2.hashBytes()))
.mapFailure
stack.add(mh)
if stack.len == 1:
let digest = ? EmptyDigests[self.version][self.hcodec].catch
let cid = ? Cid.init(self.version, self.codec, digest).mapFailure
self.rootHash = cid.some
success()
proc cid*(self: Manifest): ?!Cid =
## Generate a root hash using the treehash algorithm
##
if self.rootHash.isNone:
? self.makeRoot()
(!self.rootHash).success
proc `==`*(a, b: Manifest): bool =
(a.treeCid == b.treeCid) and
(a.treeRoot == b.treeRoot) and
(a.datasetSize == b.datasetSize) and
(a.blockSize == b.blockSize) and
(a.version == b.version) and
(a.hcodec == b.hcodec) and
(a.codec == b.codec) and
(a.protected == b.protected) and
(if a.protected:
(a.ecK == b.ecK) and
(a.ecM == b.ecM) and
(a.originalTreeCid == b.originalTreeCid) and
(a.originalTreeRoot == b.originalTreeRoot) and
(a.originalDatasetSize == b.originalDatasetSize)
else:
true)
proc `$`*(self: Manifest): string =
"treeCid: " & $self.treeCid &
", treeRoot: " & $self.treeRoot &
", datasetSize: " & $self.datasetSize &
", blockSize: " & $self.blockSize &
", version: " & $self.version &
", hcodec: " & $self.hcodec &
", codec: " & $self.codec &
", protected: " & $self.protected &
(if self.protected:
", ecK: " & $self.ecK &
", ecM: " & $self.ecM &
", originalTreeCid: " & $self.originalTreeCid &
", originalTreeRoot: " & $self.originalTreeRoot &
", originalDatasetSize: " & $self.originalDatasetSize
else:
"")
############################################################
# Constructors
@ -213,67 +179,67 @@ proc cid*(self: Manifest): ?!Cid =
proc new*(
T: type Manifest,
blocks: openArray[Cid] = [],
protected = false,
version = CIDv1,
treeCid: Cid,
treeRoot: MultiHash,
blockSize: NBytes,
datasetSize: NBytes,
version: CidVersion = CIDv1,
hcodec = multiCodec("sha2-256"),
codec = multiCodec("raw"),
blockSize = DefaultBlockSize
): ?!Manifest =
## Create a manifest using an array of `Cid`s
##
if hcodec notin EmptyDigests[version]:
return failure("Unsupported manifest hash codec!")
protected = false,
): Manifest =
T(
blocks: @blocks,
treeCid: treeCid,
treeRoot: treeRoot,
blockSize: blockSize,
datasetSize: datasetSize,
version: version,
codec: codec,
hcodec: hcodec,
blockSize: blockSize,
originalBytes: blocks.len.NBytes * blockSize,
protected: protected).success
protected: protected)
proc new*(
T: type Manifest,
manifest: Manifest,
treeCid: Cid,
treeRoot: MultiHash,
datasetSize: NBytes,
ecK, ecM: int
): ?!Manifest =
): Manifest =
## Create an erasure protected dataset from an
## un-protected one
## unprotected one
##
Manifest(
treeCid: treeCid,
treeRoot: treeRoot,
datasetSize: datasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: true,
ecK: ecK, ecM: ecM,
originalTreeCid: manifest.treeCid,
originalTreeRoot: manifest.treeRoot,
originalDatasetSize: manifest.datasetSize)
var
self = Manifest(
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
originalBytes: manifest.originalBytes,
blockSize: manifest.blockSize,
protected: true,
ecK: ecK, ecM: ecM,
originalCid: ? manifest.cid,
originalLen: manifest.len)
let
encodedLen = self.rounded + (self.steps * ecM)
self.blocks = newSeq[Cid](encodedLen)
# copy original manifest blocks
for i in 0..<self.rounded:
if i < manifest.len:
self.blocks[i] = manifest[i]
else:
self.blocks[i] = EmptyCid[manifest.version]
.catch
.get()[manifest.hcodec]
.catch
.get()
? self.verify()
self.success
proc new*(
T: type Manifest,
manifest: Manifest
): Manifest =
## Create an unprotected dataset from an
## erasure protected one
##
Manifest(
treeCid: manifest.originalCid,
treeRoot: manifest.originalTreeRoot,
datasetSize: manifest.originalDatasetSize,
version: manifest.version,
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: false)
proc new*(
T: type Manifest,
@ -286,50 +252,31 @@ proc new*(
proc new*(
T: type Manifest,
rootHash: Cid,
originalBytes: NBytes,
treeCid: Cid,
treeRoot: MultiHash,
datasetSize: NBytes,
blockSize: NBytes,
blocks: seq[Cid],
version: CidVersion,
hcodec: MultiCodec,
codec: MultiCodec,
ecK: int,
ecM: int,
originalCid: Cid,
originalLen: int
originalTreeCid: Cid,
originalTreeRoot: MultiHash,
originalDatasetSize: NBytes
): Manifest =
Manifest(
rootHash: rootHash.some,
originalBytes: originalBytes,
treeCid: treeCid,
treeRoot: treeRoot,
datasetSize: datasetSize,
blockSize: blockSize,
blocks: blocks,
version: version,
hcodec: hcodec,
codec: codec,
protected: true,
ecK: ecK,
ecM: ecM,
originalCid: originalCid,
originalLen: originalLen
)
proc new*(
T: type Manifest,
rootHash: Cid,
originalBytes: NBytes,
blockSize: NBytes,
blocks: seq[Cid],
version: CidVersion,
hcodec: MultiCodec,
codec: MultiCodec
): Manifest =
Manifest(
rootHash: rootHash.some,
originalBytes: originalBytes,
blockSize: blockSize,
blocks: blocks,
version: version,
hcodec: hcodec,
codec: codec,
protected: false,
originalTreeCid: originalTreeCid,
originalTreeRoot: originalTreeRoot,
originalDatasetSize: originalDatasetSize
)

4
codex/merkletree.nim Normal file
View File

@ -0,0 +1,4 @@
import ./merkletree/merkletree
import ./merkletree/coders
export merkletree, coders

View File

@ -0,0 +1,75 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import ./merkletree
import ../units
import ../errors
const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint
proc encode*(self: MerkleTree): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
pb.write(1, self.mcodec.uint64)
pb.write(2, self.digestSize.uint64)
pb.write(3, self.leavesCount.uint64)
pb.write(4, self.nodesBuffer)
pb.finish
pb.buffer
proc decode*(_: type MerkleTree, data: seq[byte]): ?!MerkleTree =
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
var mcodecCode: uint64
var digestSize: uint64
var leavesCount: uint64
discard ? pb.getField(1, mcodecCode).mapFailure
discard ? pb.getField(2, digestSize).mapFailure
discard ? pb.getField(3, leavesCount).mapFailure
let mcodec = MultiCodec.codec(cast[int](mcodecCode))
if mcodec == InvalidMultiCodec:
return failure("Invalid MultiCodec code " & $cast[int](mcodec))
var nodesBuffer = newSeq[byte]()
discard ? pb.getField(4, nodesBuffer).mapFailure
let tree = ? MerkleTree.init(mcodec, digestSize, leavesCount, nodesBuffer)
success(tree)
proc encode*(self: MerkleProof): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
pb.write(1, self.mcodec.uint64)
pb.write(2, self.digestSize.uint64)
pb.write(3, self.index.uint64)
pb.write(4, self.nodesBuffer)
pb.finish
pb.buffer
proc decode*(_: type MerkleProof, data: seq[byte]): ?!MerkleProof =
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
var mcodecCode: uint64
var digestSize: uint64
var index: uint64
discard ? pb.getField(1, mcodecCode).mapFailure
discard ? pb.getField(2, digestSize).mapFailure
discard ? pb.getField(3, index).mapFailure
let mcodec = MultiCodec.codec(cast[int](mcodecCode))
if mcodec == InvalidMultiCodec:
return failure("Invalid MultiCodec code " & $cast[int](mcodec))
var nodesBuffer = newSeq[byte]()
discard ? pb.getField(4, nodesBuffer).mapFailure
let proof = ? MerkleProof.init(mcodec, digestSize, index, nodesBuffer)
success(proof)

View File

@ -1,5 +1,5 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
@ -7,41 +7,42 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import std/math
import std/bitops
import std/sequtils
import std/sugar
import std/algorithm
import pkg/libp2p
import pkg/stew/byteutils
import pkg/questionable
import pkg/chronicles
import pkg/questionable/results
import pkg/nimcrypto/sha2
import pkg/libp2p/[multicodec, multihash, vbuffer]
import pkg/stew/byteutils
import ../errors
logScope:
topics = "codex merkletree"
type
MerkleHash* = MultiHash
MerkleTree* = object
leavesCount: int
nodes: seq[MerkleHash]
mcodec: MultiCodec
digestSize: Natural
leavesCount: Natural
nodesBuffer*: seq[byte]
MerkleProof* = object
index: int
path: seq[MerkleHash]
# Tree constructed from leaves H0..H2 is
# H5=H(H3 & H4)
# / \
# H3=H(H0 & H1) H4=H(H2 & H2)
# / \ /
# H0=H(A) H1=H(B) H2=H(C)
# | | |
# A B C
#
# Memory layout is [H0, H1, H2, H3, H4, H5]
#
# Proofs of inclusion are
# - [H1, H4] for A
# - [H0, H4] for B
# - [H2, H3] for C
mcodec: MultiCodec
digestSize: Natural
index: Natural
nodesBuffer*: seq[byte]
MerkleTreeBuilder* = object
mcodec: MultiCodec
digestSize: Natural
buffer: seq[byte]
###########################################################
# Helper functions
###########################################################
func computeTreeHeight(leavesCount: int): int =
if isPowerOfTwo(leavesCount):
@ -49,141 +50,325 @@ func computeTreeHeight(leavesCount: int): int =
else:
fastLog2(leavesCount) + 2
func getLowHigh(leavesCount, level: int): (int, int) =
var width = leavesCount
var low = 0
for _ in 0..<level:
low += width
width = (width + 1) div 2
(low, low + width - 1)
func getLowHigh(self: MerkleTree, level: int): (int, int) =
getLowHigh(self.leavesCount, level)
func computeTotalSize(leavesCount: int): int =
func computeLevels(leavesCount: int): seq[tuple[offset: int, width: int, index: int]] =
let height = computeTreeHeight(leavesCount)
getLowHigh(leavesCount, height - 1)[1] + 1
var levels = newSeq[tuple[offset: int, width: int, index: int]](height)
proc getWidth(self: MerkleTree, level: int): int =
let (low, high) = self.getLowHigh(level)
high - low + 1
levels[0].offset = 0
levels[0].width = leavesCount
levels[0].index = 0
for i in 1..<height:
levels[i].offset = levels[i - 1].offset + levels[i - 1].width
levels[i].width = (levels[i - 1].width + 1) div 2
levels[i].index = i
levels
func getChildren(self: MerkleTree, level, i: int): (MerkleHash, MerkleHash) =
let (low, high) = self.getLowHigh(level - 1)
let leftIdx = low + 2 * i
let rightIdx = min(leftIdx + 1, high)
proc digestFn(mcodec: MultiCodec, dst: var openArray[byte], dstPos: int, data: openArray[byte]): ?!void =
var mhash = ? MultiHash.digest($mcodec, data).mapFailure
if (dstPos + mhash.size) > dst.len:
return failure("Not enough space in a destination buffer")
dst[dstPos..<dstPos + mhash.size] = mhash.data.buffer[mhash.dpos..<mhash.dpos + mhash.size]
success()
(self.nodes[leftIdx], self.nodes[rightIdx])
###########################################################
# MerkleTreeBuilder
###########################################################
func getSibling(self: MerkleTree, level, i: int): MerkleHash =
let (low, high) = self.getLowHigh(level)
if i mod 2 == 0:
self.nodes[min(low + i + 1, high)]
else:
self.nodes[low + i - 1]
proc init*(
T: type MerkleTreeBuilder,
mcodec: MultiCodec = multiCodec("sha2-256")
): ?!MerkleTreeBuilder =
let mhash = ? MultiHash.digest($mcodec, "".toBytes).mapFailure
success(MerkleTreeBuilder(mcodec: mcodec, digestSize: mhash.size, buffer: newSeq[byte]()))
proc setNode(self: var MerkleTree, level, i: int, value: MerkleHash): void =
let (low, _) = self.getLowHigh(level)
self.nodes[low + i] = value
proc addDataBlock*(self: var MerkleTreeBuilder, dataBlock: openArray[byte]): ?!void =
## Hashes the data block and adds the result of hashing to a buffer
##
let oldLen = self.buffer.len
self.buffer.setLen(oldLen + self.digestSize)
digestFn(self.mcodec, self.buffer, oldLen, dataBlock)
proc root*(self: MerkleTree): MerkleHash =
self.nodes[^1]
proc addLeaf*(self: var MerkleTreeBuilder, leaf: MultiHash): ?!void =
if leaf.mcodec != self.mcodec or leaf.size != self.digestSize:
return failure("Expected mcodec to be " & $self.mcodec & " and digest size to be " &
$self.digestSize & " but was " & $leaf.mcodec & " and " & $leaf.size)
let oldLen = self.buffer.len
self.buffer.setLen(oldLen + self.digestSize)
self.buffer[oldLen..<oldLen + self.digestSize] = leaf.data.buffer[leaf.dpos..<leaf.dpos + self.digestSize]
success()
proc len*(self: MerkleTree): int =
self.nodes.len
proc build*(self: MerkleTreeBuilder): ?!MerkleTree =
## Builds a tree from previously added data blocks
##
## Tree built from data blocks A, B and C is
## H5=H(H3 & H4)
## / \
## H3=H(H0 & H1) H4=H(H2 & 0x00)
## / \ /
## H0=H(A) H1=H(B) H2=H(C)
## | | |
## A B C
##
## Memory layout is [H0, H1, H2, H3, H4, H5]
##
let
mcodec = self.mcodec
digestSize = self.digestSize
leavesCount = self.buffer.len div self.digestSize
proc leaves*(self: MerkleTree): seq[MerkleHash] =
self.nodes[0..<self.leavesCount]
if leavesCount == 0:
return failure("At least one data block is required")
proc nodes*(self: MerkleTree): seq[MerkleHash] =
self.nodes
proc height*(self: MerkleTree): int =
computeTreeHeight(self.leavesCount)
proc `$`*(self: MerkleTree): string =
result &= "leavesCount: " & $self.leavesCount
result &= "\nnodes: " & $self.nodes
proc getProof*(self: MerkleTree, index: int): ?!MerkleProof =
if index >= self.leavesCount or index < 0:
return failure("Index " & $index & " out of range [0.." & $self.leaves.high & "]" )
var path = newSeq[MerkleHash](self.height - 1)
for level in 0..<path.len:
let i = index div (1 shl level)
path[level] = self.getSibling(level, i)
success(MerkleProof(index: index, path: path))
proc initTreeFromLeaves(leaves: openArray[MerkleHash]): ?!MerkleTree =
without mcodec =? leaves.?[0].?mcodec and
digestSize =? leaves.?[0].?size:
return failure("At least one leaf is required")
if not leaves.allIt(it.mcodec == mcodec):
return failure("All leaves must use the same codec")
let totalSize = computeTotalSize(leaves.len)
var tree = MerkleTree(leavesCount: leaves.len, nodes: newSeq[MerkleHash](totalSize))
var buf = newSeq[byte](digestSize * 2)
proc combine(l, r: MerkleHash): ?!MerkleHash =
copyMem(addr buf[0], unsafeAddr l.data.buffer[0], digestSize)
copyMem(addr buf[digestSize], unsafeAddr r.data.buffer[0], digestSize)
MultiHash.digest($mcodec, buf).mapErr(
c => newException(CatchableError, "Error calculating hash using codec " & $mcodec & ": " & $c)
)
let levels = computeLevels(leavesCount)
let totalNodes = levels[^1].offset + 1
var tree = MerkleTree(mcodec: mcodec, digestSize: digestSize, leavesCount: leavesCount, nodesBuffer: newSeq[byte](totalNodes * digestSize))
# copy leaves
for i in 0..<tree.getWidth(0):
tree.setNode(0, i, leaves[i])
tree.nodesBuffer[0..<leavesCount * digestSize] = self.buffer[0..<leavesCount * digestSize]
# calculate intermediate nodes
for level in 1..<tree.height:
for i in 0..<tree.getWidth(level):
let (left, right) = tree.getChildren(level, i)
without mhash =? combine(left, right), error:
return failure(error)
tree.setNode(level, i, mhash)
var zero = newSeq[byte](digestSize)
var one = newSeq[byte](digestSize)
one[^1] = 0x01
success(tree)
var
concatBuf = newSeq[byte](2 * digestSize)
prevLevel = levels[0]
for level in levels[1..^1]:
for i in 0..<level.width:
let parentIndex = level.offset + i
let leftChildIndex = prevLevel.offset + 2 * i
let rightChildIndex = leftChildIndex + 1
concatBuf[0..<digestSize] = tree.nodesBuffer[leftChildIndex * digestSize..<(leftChildIndex + 1) * digestSize]
var dummyValue = if prevLevel.index == 0: zero else: one
if rightChildIndex < prevLevel.offset + prevLevel.width:
concatBuf[digestSize..^1] = tree.nodesBuffer[rightChildIndex * digestSize..<(rightChildIndex + 1) * digestSize]
else:
concatBuf[digestSize..^1] = dummyValue
? digestFn(mcodec, tree.nodesBuffer, parentIndex * digestSize, concatBuf)
prevLevel = level
return success(tree)
###########################################################
# MerkleTree
###########################################################
proc nodeBufferToMultiHash(self: (MerkleTree | MerkleProof), index: int): MultiHash =
var buf = newSeq[byte](self.digestSize)
let offset = index * self.digestSize
buf[0..^1] = self.nodesBuffer[offset..<(offset + self.digestSize)]
{.noSideEffect.}:
without mhash =? MultiHash.init($self.mcodec, buf).mapFailure, errx:
error "Error converting bytes to hash", msg = errx.msg
mhash
proc len*(self: (MerkleTree | MerkleProof)): Natural =
self.nodesBuffer.len div self.digestSize
proc nodes*(self: (MerkleTree | MerkleProof)): seq[MultiHash] {.noSideEffect.} =
toSeq(0..<self.len).map(i => self.nodeBufferToMultiHash(i))
proc mcodec*(self: (MerkleTree | MerkleProof)): MultiCodec =
self.mcodec
proc digestSize*(self: (MerkleTree | MerkleProof)): Natural =
self.digestSize
proc root*(self: MerkleTree): MultiHash =
let rootIndex = self.len - 1
self.nodeBufferToMultiHash(rootIndex)
proc leaves*(self: MerkleTree): seq[MultiHash] =
toSeq(0..<self.leavesCount).map(i => self.nodeBufferToMultiHash(i))
proc leavesCount*(self: MerkleTree): Natural =
self.leavesCount
proc getLeaf*(self: MerkleTree, index: Natural): ?!MultiHash =
if index >= self.leavesCount:
return failure("Index " & $index & " out of range [0.." & $(self.leavesCount - 1) & "]" )
success(self.nodeBufferToMultiHash(index))
proc height*(self: MerkleTree): Natural =
computeTreeHeight(self.leavesCount)
proc getProof*(self: MerkleTree, index: Natural): ?!MerkleProof =
  ## Extracts proof from a tree for a given index
  ##
  ## Given a tree built from data blocks A, B and C
  ##        H5
  ##      /    \
  ##     H3     H4
  ##    / \    /
  ##  H0   H1 H2
  ##   |   |   |
  ##   A   B   C
  ##
  ## Proofs of inclusion (index and path) are
  ## - 0,[H1, H4] for data block A
  ## - 1,[H0, H4] for data block B
  ## - 2,[0x00, H3] for data block C
  ##
  if index >= self.leavesCount:
    return failure("Index " & $index & " out of range [0.." & $(self.leavesCount - 1) & "]" )

  # Padding digests used when a node has no sibling on a level:
  # all-zero bytes on the leaf level, 0x00..01 on upper levels.
  var zero = newSeq[byte](self.digestSize)
  var one = newSeq[byte](self.digestSize)
  one[^1] = 0x01

  let levels = computeLevels(self.leavesCount)
  # One sibling digest per level, root level excluded.
  var proofNodesBuffer = newSeq[byte]((levels.len - 1) * self.digestSize)
  for level in levels[0..^2]:
    # Position of the ancestor of `index` within this level.
    let lr = index shr level.index
    # Sibling is the right neighbour for even positions, left for odd.
    let siblingIndex = if lr mod 2 == 0:
      level.offset + lr + 1
    else:
      level.offset + lr - 1

    var dummyValue = if level.index == 0: zero else: one

    if siblingIndex < level.offset + level.width:
      proofNodesBuffer[level.index * self.digestSize..<(level.index + 1) * self.digestSize] =
        self.nodesBuffer[siblingIndex * self.digestSize..<(siblingIndex + 1) * self.digestSize]
    else:
      # No sibling on this level (odd-width level) — substitute the padding digest.
      proofNodesBuffer[level.index * self.digestSize..<(level.index + 1) * self.digestSize] = dummyValue

  success(MerkleProof(mcodec: self.mcodec, digestSize: self.digestSize, index: index, nodesBuffer: proofNodesBuffer))
proc `$`*(self: MerkleTree): string {.noSideEffect.} =
  ## Human-readable summary: codec, digest size, leaf count and nodes.
  result = "mcodec:" & $self.mcodec
  result.add ", digestSize: " & $self.digestSize
  result.add ", leavesCount: " & $self.leavesCount
  result.add ", nodes: " & $self.nodes
proc `==`*(a, b: MerkleTree): bool =
  ## Structural equality: same codec, digest size, leaf count and node bytes.
  if a.mcodec != b.mcodec:
    return false
  if a.digestSize != b.digestSize:
    return false
  if a.leavesCount != b.leavesCount:
    return false
  a.nodesBuffer == b.nodesBuffer
func init*(
  T: type MerkleTree,
  root: MerkleHash,
  leavesCount: int
): MerkleTree =
  ## Builds a tree that knows only its root: every other node slot is a
  ## zero-initialized placeholder.
  ## NOTE(review): this constructor populates a `nodes` field while the
  ## other procs in this file read `nodesBuffer` — looks like a leftover
  ## from an older representation; confirm which field `MerkleTree`
  ## actually declares.
  let totalSize = computeTotalSize(leavesCount)
  var nodes = newSeq[MerkleHash](totalSize)
  nodes[^1] = root
  MerkleTree(nodes: nodes, leavesCount: leavesCount)
proc init*(
T: type MerkleTree,
leaves: openArray[MerkleHash]
mcodec: MultiCodec,
digestSize: Natural,
leavesCount: Natural,
nodesBuffer: seq[byte]
): ?!MerkleTree =
initTreeFromLeaves(leaves)
let levels = computeLevels(leavesCount)
let totalNodes = levels[^1].offset + 1
if totalNodes * digestSize == nodesBuffer.len:
success(
MerkleTree(
mcodec: mcodec,
digestSize: digestSize,
leavesCount: leavesCount,
nodesBuffer: nodesBuffer
)
)
else:
failure("Expected nodesBuffer len to be " & $(totalNodes * digestSize) & " but was " & $nodesBuffer.len)
proc index*(self: MerkleProof): int =
###########################################################
# MerkleProof
###########################################################
proc verifyLeaf*(self: MerkleProof, leaf: MultiHash, treeRoot: MultiHash): ?!bool =
  ## Recomputes the tree root by hashing `leaf` with the proof's sibling
  ## digests bottom-up, then compares the result against `treeRoot`.
  ##
  ## Returns success(true/false) with the comparison outcome, or a failure
  ## when either hash uses a codec different from the proof's.
  if leaf.mcodec != self.mcodec:
    return failure("Leaf mcodec was " & $leaf.mcodec & ", but " & $self.mcodec & " expected")

  # Fix: the original rechecked `leaf.mcodec` here, so the tree root's
  # codec was never validated — and the message reported the wrong
  # "expected" codec ($treeRoot.mcodec instead of $self.mcodec).
  if treeRoot.mcodec != self.mcodec:
    return failure("Tree root mcodec was " & $treeRoot.mcodec & ", but " & $self.mcodec & " expected")

  # Start from the raw digest bytes of the leaf.
  var digestBuf = newSeq[byte](self.digestSize)
  digestBuf[0..^1] = leaf.data.buffer[leaf.dpos..<(leaf.dpos + self.digestSize)]

  let proofLen = self.nodesBuffer.len div self.digestSize
  var concatBuf = newSeq[byte](2 * self.digestSize)
  for i in 0..<proofLen:
    let offset = i * self.digestSize
    # Bit i of `index` says whether the current node is a left (even)
    # or right (odd) child, which fixes the concatenation order.
    let lr = self.index shr i
    if lr mod 2 == 0:
      concatBuf[0..^1] = digestBuf & self.nodesBuffer[offset..<(offset + self.digestSize)]
    else:
      concatBuf[0..^1] = self.nodesBuffer[offset..<(offset + self.digestSize)] & digestBuf
    ? digestFn(self.mcodec, digestBuf, 0, concatBuf)

  let computedRoot = ? MultiHash.init(self.mcodec, digestBuf).mapFailure
  success(computedRoot == treeRoot)
proc verifyDataBlock*(self: MerkleProof, dataBlock: openArray[byte], treeRoot: MultiHash): ?!bool =
  ## Hashes `dataBlock` with the proof's codec and verifies the resulting
  ## leaf hash against `treeRoot` via `verifyLeaf`.
  var leafDigest = newSeq[byte](self.digestSize)
  ? digestFn(self.mcodec, leafDigest, 0, dataBlock)
  let leafHash = ? MultiHash.init(self.mcodec, leafDigest).mapFailure
  self.verifyLeaf(leafHash, treeRoot)
proc index*(self: MerkleProof): Natural =
  ## Zero-based index of the leaf this proof attests to.
  self.index
proc path*(self: MerkleProof): seq[MerkleHash] =
  ## Sibling hashes of the proof.
  ## NOTE(review): reads a `path` field while the rest of this file stores
  ## nodes in `nodesBuffer` — likely a leftover accessor from the old
  ## representation; confirm `MerkleProof` still declares `path`.
  self.path
proc `$`*(self: MerkleProof): string =
result &= "index: " & $self.index
result &= "\npath: " & $self.path
"mcodec:" & $self.mcodec &
", digestSize: " & $self.digestSize &
", index: " & $self.index &
", nodes: " & $self.nodes
func `==`*(a, b: MerkleProof): bool =
(a.index == b.index) and (a.path == b.path)
(a.index == b.index) and
(a.mcodec == b.mcodec) and
(a.digestSize == b.digestSize) and
(a.nodesBuffer == b.nodesBuffer)
proc init*(
T: type MerkleProof,
index: int,
path: seq[MerkleHash]
): MerkleProof =
MerkleProof(index: index, path: path)
index: Natural,
nodes: seq[MultiHash]
): ?!MerkleProof =
if nodes.len == 0:
return failure("At least one node is required")
let
mcodec = nodes[0].mcodec
digestSize = nodes[0].size
var nodesBuffer = newSeq[byte](nodes.len * digestSize)
for nodeIndex, node in nodes:
nodesBuffer[nodeIndex * digestSize..<(nodeIndex + 1) * digestSize] = node.data.buffer[node.dpos..<node.dpos + digestSize]
success(MerkleProof(mcodec: mcodec, digestSize: digestSize, index: index, nodesBuffer: nodesBuffer))
func init*(
  T: type MerkleProof,
  mcodec: MultiCodec,
  digestSize: Natural,
  index: Natural,
  nodesBuffer: seq[byte]
): ?!MerkleProof =
  ## Constructs a proof directly from a flat buffer of sibling digests.
  ##
  ## Fails when the buffer is not a whole number of digests, or when
  ## `index` cannot address a leaf of a tree this proof could belong to.
  if nodesBuffer.len mod digestSize != 0:
    return failure("nodesBuffer len is not a multiple of digestSize")
  # A proof with p sibling digests belongs to a tree of height p + 1.
  let treeHeight = (nodesBuffer.len div digestSize) + 1
  # NOTE(review): a tree of height h has at most 2^(h - 1) leaves;
  # `1 shl treeHeight` is one power of two larger, so this bound is
  # permissive — confirm whether the looser check is intentional.
  let maxLeavesCount = 1 shl treeHeight
  if index < maxLeavesCount:
    return success(
      MerkleProof(
        mcodec: mcodec,
        digestSize: digestSize,
        index: index,
        nodesBuffer: nodesBuffer
      )
    )
  else:
    return failure("index higher than max leaves count")

View File

@ -11,13 +11,14 @@ import std/options
import std/tables
import std/sequtils
import std/strformat
import std/sugar
import pkg/questionable
import pkg/questionable/results
import pkg/chronicles
import pkg/chronos
import pkg/libp2p/switch
import pkg/libp2p/[switch, multicodec, multihash]
import pkg/libp2p/stream/bufferstream
# TODO: remove once exported by libp2p
@ -27,6 +28,7 @@ import pkg/libp2p/signed_envelope
import ./chunker
import ./blocktype as bt
import ./manifest
import ./merkletree
import ./stores/blockstore
import ./blockexchange
import ./streams
@ -34,6 +36,7 @@ import ./erasure
import ./discovery
import ./contracts
import ./node/batch
import ./utils
export batch
@ -106,18 +109,26 @@ proc fetchBatched*(
onBatch: BatchProc = nil): Future[?!void] {.async, gcsafe.} =
## Fetch manifest in batches of `batchSize`
##
let
batches =
(manifest.blocks.len div batchSize) +
(manifest.blocks.len mod batchSize)
let batchCount = divUp(manifest.blocksCount, batchSize)
trace "Fetching blocks in batches of", size = batchSize
for blks in manifest.blocks.distribute(max(1, batches), true):
try:
let
blocks = blks.mapIt(node.blockStore.getBlock( it ))
without iter =? await node.blockStore.getBlocks(manifest.treeCid, manifest.blocksCount, manifest.treeRoot), err:
return failure(err)
for batchNum in 0..<batchCount:
let blocks = collect:
for i in 0..<batchSize:
if not iter.finished:
iter.next()
# let
# indexRange = (batchNum * batchSize)..<(min((batchNum + 1) * batchSize, manifest.blocksCount))
# blocks = indexRange.mapIt(node.blockStore.getBlock(manifest.treeCid, it))
try:
await allFuturesThrowing(allFinished(blocks))
if not onBatch.isNil:
await onBatch(blocks.mapIt( it.read.get ))
@ -179,11 +190,13 @@ proc store*(
##
trace "Storing data"
without var blockManifest =? Manifest.new(blockSize = blockSize):
return failure("Unable to create Block Set")
let
hcodec = multiCodec("sha2-256")
dataCodec = multiCodec("raw")
chunker = LPStreamChunker.new(stream, chunkSize = blockSize)
# Manifest and chunker should use the same blockSize
let chunker = LPStreamChunker.new(stream, chunkSize = blockSize)
without var treeBuilder =? MerkleTreeBuilder.init(hcodec), err:
return failure(err)
try:
while (
@ -191,10 +204,19 @@ proc store*(
chunk.len > 0):
trace "Got data from stream", len = chunk.len
without blk =? bt.Block.new(chunk):
return failure("Unable to init block from chunk!")
blockManifest.add(blk.cid)
without mhash =? MultiHash.digest($hcodec, chunk).mapFailure, err:
return failure(err)
without cid =? Cid.init(CIDv1, dataCodec, mhash).mapFailure, err:
return failure(err)
without blk =? bt.Block.new(cid, chunk, verify = false):
return failure("Unable to init block from chunk!")
if err =? treeBuilder.addLeaf(mhash).errorOption:
return failure(err)
if err =? (await self.blockStore.putBlock(blk)).errorOption:
trace "Unable to store block", cid = blk.cid, err = err.msg
return failure(&"Unable to store block {blk.cid}")
@ -206,34 +228,47 @@ proc store*(
finally:
await stream.close()
without tree =? treeBuilder.build(), err:
return failure(err)
without treeBlk =? bt.Block.new(tree.encode()), err:
return failure(err)
if err =? (await self.blockStore.putBlock(treeBlk)).errorOption:
return failure("Unable to store merkle tree block " & $treeBlk.cid & ", nested err: " & err.msg)
let manifest = Manifest.new(
treeCid = treeBlk.cid,
treeRoot = tree.root,
blockSize = blockSize,
datasetSize = NBytes(chunker.offset),
version = CIDv1,
hcodec = hcodec,
codec = dataCodec
)
# Generate manifest
blockManifest.originalBytes = NBytes(chunker.offset) # store the exact file size
without data =? blockManifest.encode():
without data =? manifest.encode(), err:
return failure(
newException(CodexError, "Could not generate dataset manifest!"))
newException(CodexError, "Error encoding manifest: " & err.msg))
# Store as a dag-pb block
without manifest =? bt.Block.new(data = data, codec = DagPBCodec):
without manifestBlk =? bt.Block.new(data = data, codec = DagPBCodec):
trace "Unable to init block from manifest data!"
return failure("Unable to init block from manifest data!")
if isErr (await self.blockStore.putBlock(manifest)):
trace "Unable to store manifest", cid = manifest.cid
return failure("Unable to store manifest " & $manifest.cid)
if isErr (await self.blockStore.putBlock(manifestBlk)):
trace "Unable to store manifest", cid = manifestBlk.cid
return failure("Unable to store manifest " & $manifestBlk.cid)
without cid =? blockManifest.cid, error:
trace "Unable to generate manifest Cid!", exc = error.msg
return failure(error.msg)
trace "Stored data", manifestCid = manifest.cid,
contentCid = cid,
blocks = blockManifest.len,
size=blockManifest.originalBytes
info "Stored data", manifestCid = manifestBlk.cid,
treeCid = treeBlk.cid,
blocks = manifest.blocksCount,
datasetSize = manifest.datasetSize
# Announce manifest
await self.discovery.provide(manifest.cid)
await self.discovery.provide(manifestBlk.cid)
return manifest.cid.success
return manifestBlk.cid.success
proc requestStorage*(
self: CodexNodeRef,
@ -290,7 +325,7 @@ proc requestStorage*(
# because the slotSize is used to determine the amount of bytes to reserve
# in a Reservations
# TODO: slotSize: (encoded.blockSize.int * encoded.steps).u256,
slotSize: (encoded.blockSize.int * encoded.blocks.len).u256,
slotSize: (encoded.blockSize.int * encoded.blocksCount).u256,
duration: duration,
proofProbability: proofProbability,
reward: reward,
@ -300,7 +335,7 @@ proc requestStorage*(
content: StorageContent(
cid: $encodedBlk.cid,
erasure: StorageErasure(
totalChunks: encoded.len.uint64,
totalChunks: encoded.blocksCount.uint64,
),
por: StoragePoR(
u: @[], # TODO: PoR setup

View File

@ -69,7 +69,7 @@ proc setupProofs*(
let
(spk, ssk) = keyGen()
por = await PoR.init(
StoreStream.new(self.store, manifest),
SeekableStoreStream.new(self.store, manifest),
ssk,
spk,
manifest.blockSize)

View File

@ -4,5 +4,6 @@ import ./stores/networkstore
import ./stores/repostore
import ./stores/maintenance
import ./stores/keyutils
import ./stores/treereader
export cachestore, blockstore, networkstore, repostore, maintenance, keyutils
export cachestore, blockstore, networkstore, repostore, maintenance, keyutils, treereader

View File

@ -17,6 +17,8 @@ import pkg/questionable
import pkg/questionable/results
import ../blocktype
import ../merkletree
import ../utils
export blocktype
@ -26,24 +28,38 @@ type
BlockType* {.pure.} = enum
Manifest, Block, Both
GetNext* = proc(): Future[?Cid] {.upraises: [], gcsafe, closure.}
BlocksIter* = ref object
finished*: bool
next*: GetNext
BlockStore* = ref object of RootObj
iterator items*(self: BlocksIter): Future[?Cid] =
while not self.finished:
yield self.next()
method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base.} =
## Get a block from the blockstore
##
raiseAssert("Not implemented!")
method getTree*(self: BlockStore, treeCid: Cid): Future[?!MerkleTree] {.base.} =
## Get a merkle tree by Cid
##
raiseAssert("Not implemented!")
method getBlock*(self: BlockStore, treeCid: Cid, index: Natural, merkleRoot: MultiHash): Future[?!Block] {.base.} =
## Get a block by Cid of a merkle tree and an index of a leaf in a tree, validate inclusion using merkle root
##
raiseAssert("Not implemented!")
method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, MerkleProof)] {.base.} =
## Get a block and associated inclusion proof by Cid of a merkle tree and an index of a leaf in a tree
##
raiseAssert("Not implemented!")
method getBlocks*(self: BlockStore, treeCid: Cid, leavesCount: Natural, merkleRoot: MultiHash): Future[?!AsyncIter[?!Block]] {.base.} =
## Get all blocks in range [0..<leavesCount] by Cid of a merkle tree, validate inclusion using merkle root
##
raiseAssert("Not implemented!")
method putBlock*(
self: BlockStore,
blk: Block,
@ -60,15 +76,27 @@ method delBlock*(self: BlockStore, cid: Cid): Future[?!void] {.base.} =
raiseAssert("Not implemented!")
method delBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!void] {.base.} =
## Delete a block from the blockstore
##
raiseAssert("Not implemented!")
method hasBlock*(self: BlockStore, cid: Cid): Future[?!bool] {.base.} =
## Check if the block exists in the blockstore
##
raiseAssert("Not implemented!")
method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {.base.} =
## Check if the block exists in the blockstore
##
raiseAssert("Not implemented!")
method listBlocks*(
self: BlockStore,
blockType = BlockType.Manifest): Future[?!BlocksIter] {.base.} =
blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] {.base.} =
## Get the list of blocks in the BlockStore. This is an intensive operation
##
@ -87,3 +115,9 @@ proc contains*(self: BlockStore, blk: Cid): Future[bool] {.async.} =
##
return (await self.hasBlock(blk)) |? false
proc contains*(self: BlockStore, address: BlockAddress): Future[bool] {.async.} =
return if address.leaf:
(await self.hasBlock(address.treeCid, address.index)) |? false
else:
(await self.hasBlock(address.cid)) |? false

View File

@ -21,10 +21,13 @@ import pkg/questionable
import pkg/questionable/results
import ./blockstore
import ./treereader
import ../units
import ../chunker
import ../errors
import ../manifest
import ../merkletree
import ../utils
export blockstore
@ -33,6 +36,7 @@ logScope:
type
CacheStore* = ref object of BlockStore
treeReader: TreeReader
currentSize*: NBytes
size*: NBytes
cache: LruCache[Cid, Block]
@ -50,10 +54,10 @@ method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} =
if cid.isEmpty:
trace "Empty block, ignoring"
return success cid.emptyBlock
return cid.emptyBlock
if cid notin self.cache:
return failure (ref BlockNotFoundError)(msg: "Block not in cache")
return failure (ref BlockNotFoundError)(msg: "Block not in cache " & $cid)
try:
return success self.cache[cid]
@ -61,6 +65,18 @@ method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} =
trace "Error requesting block from cache", cid, error = exc.msg
return failure exc
method getTree*(self: CacheStore, treeCid: Cid): Future[?!MerkleTree] =
self.treeReader.getTree(treeCid)
method getBlock*(self: CacheStore, treeCid: Cid, index: Natural, merkleRoot: MultiHash): Future[?!Block] =
self.treeReader.getBlock(treeCid, index)
method getBlocks*(self: CacheStore, treeCid: Cid, leavesCount: Natural, merkleRoot: MultiHash): Future[?!AsyncIter[?!Block]] =
self.treeReader.getBlocks(treeCid, leavesCount)
method getBlockAndProof*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!(Block, MerkleProof)] =
self.treeReader.getBlockAndProof(treeCid, index)
method hasBlock*(self: CacheStore, cid: Cid): Future[?!bool] {.async.} =
## Check if the block exists in the blockstore
##
@ -72,6 +88,15 @@ method hasBlock*(self: CacheStore, cid: Cid): Future[?!bool] {.async.} =
return (cid in self.cache).success
method hasBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!bool] {.async.} =
## Check if the block exists in the blockstore
##
without cid =? await self.treeReader.getBlockCid(treeCid, index), err:
return failure(err)
await self.hasBlock(cid)
func cids(self: CacheStore): (iterator: Cid {.gcsafe.}) =
return iterator(): Cid =
for cid in self.cache.keys:
@ -80,12 +105,12 @@ func cids(self: CacheStore): (iterator: Cid {.gcsafe.}) =
method listBlocks*(
self: CacheStore,
blockType = BlockType.Manifest
): Future[?!BlocksIter] {.async.} =
): Future[?!AsyncIter[?Cid]] {.async.} =
## Get the list of blocks in the BlockStore. This is an intensive operation
##
var
iter = BlocksIter()
iter = AsyncIter[?Cid]()
let
cids = self.cids()
@ -101,7 +126,7 @@ method listBlocks*(
cid = cids()
if finished(cids):
iter.finished = true
iter.finish
return Cid.none
without isManifest =? cid.isManifest, err:
@ -182,6 +207,12 @@ method delBlock*(self: CacheStore, cid: Cid): Future[?!void] {.async.} =
return success()
method delBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!void] {.async.} =
without cid =? await self.treeReader.getBlockCid(treeCid, index), err:
return failure(err)
return await self.delBlock(cid)
method close*(self: CacheStore): Future[void] {.async.} =
## Close the blockstore, a no-op for this implementation
##
@ -202,15 +233,22 @@ proc new*(
if cacheSize < chunkSize:
raise newException(ValueError, "cacheSize cannot be less than chunkSize")
var treeReader = TreeReader.new()
let
currentSize = 0'nb
size = int(cacheSize div chunkSize)
cache = newLruCache[Cid, Block](size)
store = CacheStore(
treeReader: treeReader,
cache: cache,
currentSize: currentSize,
size: cacheSize)
proc getBlockFromStore(cid: Cid): Future[?!Block] = store.getBlock(cid)
treeReader.getBlockFromStore = getBlockFromStore
for blk in blocks:
discard store.putBlockSync(blk)

View File

@ -17,6 +17,7 @@ import pkg/questionable/results
import ./repostore
import ../utils/timer
import ../utils/asynciter
import ../clock
import ../systemclock

View File

@ -11,21 +11,27 @@ import pkg/upraises
push: {.upraises: [].}
import std/sugar
import pkg/chronicles
import pkg/chronos
import pkg/libp2p
import ../blocktype as bt
import ../utils/asyncheapqueue
import ../utils/asynciter
import ./blockstore
import ../blockexchange
import ../merkletree
export blockstore, blockexchange, asyncheapqueue
logScope:
topics = "codex networkstore"
const BlockPrefetchAmount = 5
type
NetworkStore* = ref object of BlockStore
engine*: BlockExcEngine # blockexc decision engine
@ -46,6 +52,35 @@ method getBlock*(self: NetworkStore, cid: Cid): Future[?!bt.Block] {.async.} =
return success blk
method getBlock*(self: NetworkStore, treeCid: Cid, index: Natural, merkleRoot: MultiHash): Future[?!Block] {.async.} =
without localBlock =? await self.localStore.getBlock(treeCid, index, merkleRoot), err:
if err of BlockNotFoundError:
trace "Requesting block from the network engine", treeCid, index
try:
let networkBlock = await self.engine.requestBlock(treeCid, index, merkleRoot)
return success(networkBlock)
except CatchableError as err:
return failure(err)
else:
failure(err)
return success(localBlock)
method getBlocks*(self: NetworkStore, treeCid: Cid, leavesCount: Natural, merkleRoot: MultiHash): Future[?!AsyncIter[?!Block]] {.async.} =
without localIter =? await self.localStore.getBlocks(treeCid, leavesCount, merkleRoot), err:
if err of BlockNotFoundError:
trace "Requesting blocks from the network engine", treeCid, leavesCount
without var networkIter =? self.engine.requestBlocks(treeCid, leavesCount, merkleRoot), err:
failure(err)
let iter = networkIter
.prefetch(BlockPrefetchAmount)
.map(proc (fut: Future[Block]): Future[?!Block] {.async.} = catch: (await fut))
return success(iter)
else:
return failure(err)
return success(localIter)
method putBlock*(
self: NetworkStore,
blk: bt.Block,

View File

@ -12,8 +12,10 @@ import pkg/upraises
push: {.upraises: [].}
import pkg/chronos
import pkg/chronos/futures
import pkg/chronicles
import pkg/libp2p/cid
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/lrucache
import pkg/metrics
import pkg/questionable
import pkg/questionable/results
@ -22,9 +24,12 @@ import pkg/stew/endians2
import ./blockstore
import ./keyutils
import ./treereader
import ../blocktype
import ../clock
import ../systemclock
import ../merkletree
import ../utils
export blocktype, cid
@ -54,20 +59,12 @@ type
quotaReservedBytes*: uint # bytes reserved by the repo
blockTtl*: Duration
started*: bool
treeReader*: TreeReader
BlockExpiration* = object
cid*: Cid
expiration*: SecondsSince1970
GetNext = proc(): Future[?BlockExpiration] {.upraises: [], gcsafe, closure.}
BlockExpirationIter* = ref object
finished*: bool
next*: GetNext
iterator items*(q: BlockExpirationIter): Future[?BlockExpiration] =
while not q.finished:
yield q.next()
proc updateMetrics(self: RepoStore) =
codexRepostoreBlocks.set(self.totalBlocks.int64)
codexRepostoreBytesUsed.set(self.quotaUsedBytes.int64)
@ -91,7 +88,7 @@ method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} =
if cid.isEmpty:
trace "Empty block, ignoring"
return success cid.emptyBlock
return cid.emptyBlock
without key =? makePrefixKey(self.postFixLen, cid), err:
trace "Error getting key from provider", err = err.msg
@ -105,7 +102,19 @@ method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} =
return failure(newException(BlockNotFoundError, err.msg))
trace "Got block for cid", cid
return Block.new(cid, data)
return Block.new(cid, data, verify = true)
method getTree*(self: RepoStore, treeCid: Cid): Future[?!MerkleTree] =
self.treeReader.getTree(treeCid)
method getBlock*(self: RepoStore, treeCid: Cid, index: Natural, merkleRoot: MultiHash): Future[?!Block] =
self.treeReader.getBlock(treeCid, index)
method getBlocks*(self: RepoStore, treeCid: Cid, leavesCount: Natural, merkleRoot: MultiHash): Future[?!AsyncIter[?!Block]] =
self.treeReader.getBlocks(treeCid, leavesCount)
method getBlockAndProof*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!(Block, MerkleProof)] =
self.treeReader.getBlockAndProof(treeCid, index)
proc getBlockExpirationTimestamp(self: RepoStore, ttl: ?Duration): SecondsSince1970 =
let duration = ttl |? self.blockTtl
@ -252,6 +261,12 @@ method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} =
self.updateMetrics()
return success()
method delBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!void] {.async.} =
without cid =? await self.treeReader.getBlockCid(treeCid, index), err:
return failure(err)
await self.delBlock(cid)
method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} =
## Check if the block exists in the blockstore
##
@ -269,15 +284,22 @@ method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} =
return await self.repoDs.has(key)
method hasBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!bool] {.async.} =
without cid =? await self.treeReader.getBlockCid(treeCid, index), err:
return failure(err)
await self.hasBlock(cid)
method listBlocks*(
self: RepoStore,
blockType = BlockType.Manifest): Future[?!BlocksIter] {.async.} =
blockType = BlockType.Manifest
): Future[?!AsyncIter[?Cid]] {.async.} =
## Get the list of blocks in the RepoStore.
## This is an intensive operation
##
var
iter = BlocksIter()
iter = AsyncIter[?Cid]()
let key =
case blockType:
@ -311,8 +333,8 @@ proc createBlockExpirationQuery(maxNumber: int, offset: int): ?!Query =
method getBlockExpirations*(
self: RepoStore,
maxNumber: int,
offset: int): Future[?!BlockExpirationIter] {.async, base.} =
## Get block expirations from the given RepoStore
offset: int): Future[?!AsyncIter[?BlockExpiration]] {.async, base.} =
## Get block expirations from the given RepoStore
##
without query =? createBlockExpirationQuery(maxNumber, offset), err:
@ -323,7 +345,7 @@ method getBlockExpirations*(
trace "Unable to execute block expirations query"
return failure(err)
var iter = BlockExpirationIter()
var iter = AsyncIter[?BlockExpiration]()
proc next(): Future[?BlockExpiration] {.async.} =
if not queryIter.finished:
@ -338,7 +360,7 @@ method getBlockExpirations*(
).some
else:
discard await queryIter.dispose()
iter.finished = true
iter.finish
return BlockExpiration.none
iter.next = next
@ -467,17 +489,25 @@ func new*(
T: type RepoStore,
repoDs: Datastore,
metaDs: Datastore,
treeReader: TreeReader = TreeReader.new(),
clock: Clock = SystemClock.new(),
postFixLen = 2,
quotaMaxBytes = DefaultQuotaBytes,
blockTtl = DefaultBlockTtl
blockTtl = DefaultBlockTtl,
treeCacheCapacity = DefaultTreeCacheCapacity
): RepoStore =
## Create new instance of a RepoStore
##
RepoStore(
let store = RepoStore(
repoDs: repoDs,
metaDs: metaDs,
treeReader: treeReader,
clock: clock,
postFixLen: postFixLen,
quotaMaxBytes: quotaMaxBytes,
blockTtl: blockTtl)
proc getBlockFromStore(cid: Cid): Future[?!Block] = store.getBlock(cid)
treeReader.getBlockFromStore = getBlockFromStore
store

120
codex/stores/treereader.nim Normal file
View File

@ -0,0 +1,120 @@
import pkg/upraises
import pkg/chronos
import pkg/chronos/futures
import pkg/chronicles
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/lrucache
import pkg/questionable
import pkg/questionable/results
import ../blocktype
import ../merkletree
import ../utils
const DefaultTreeCacheCapacity* = 10 # Max number of trees stored in memory
type
GetBlock = proc (cid: Cid): Future[?!Block] {.upraises: [], gcsafe, closure.}
DelBlock = proc (cid: Cid): Future[?!void] {.upraises: [], gcsafe, closure.}
TreeReader* = ref object of RootObj
getBlockFromStore*: GetBlock
treeCache*: LruCache[Cid, MerkleTree]
method getTree*(self: TreeReader, cid: Cid): Future[?!MerkleTree] {.async.} =
  ## Returns the merkle tree for `cid`, serving it from the LRU cache when
  ## present; otherwise fetches the tree block via `getBlockFromStore`,
  ## decodes it, and caches the result.
  if tree =? self.treeCache.getOption(cid):
    return success(tree)
  else:
    without treeBlk =? await self.getBlockFromStore(cid), err:
      return failure(err)
    without tree =? MerkleTree.decode(treeBlk.data), err:
      return failure("Error decoding a merkle tree with cid " & $cid & ". Nested error is: " & err.msg)
    # Cache only after a successful decode so bad blocks are never cached.
    self.treeCache[cid] = tree
    trace "Got merkle tree for cid", cid
    return success(tree)
method getBlockCidAndProof*(self: TreeReader, treeCid: Cid, index: Natural): Future[?!(Cid, MerkleProof)] {.async.} =
without tree =? await self.getTree(treeCid), err:
return failure(err)
without proof =? tree.getProof(index), err:
return failure(err)
without leaf =? tree.getLeaf(index), err:
return failure(err)
without leafCid =? Cid.init(treeCid.cidver, treeCid.mcodec, leaf).mapFailure, err:
return failure(err)
return success((leafCid, proof))
method getBlockCid*(self: TreeReader, treeCid: Cid, index: Natural): Future[?!Cid] {.async.} =
  ## Resolves the Cid of leaf `index` in the merkle tree identified by
  ## `treeCid`, reusing the tree's CID version and codec for the leaf.
  without merkleTree =? await self.getTree(treeCid), err:
    return failure(err)
  without leafHash =? merkleTree.getLeaf(index), err:
    return failure(err)
  without blockCid =? Cid.init(treeCid.cidver, treeCid.mcodec, leafHash).mapFailure, err:
    return failure(err)
  return success(blockCid)
method getBlock*(self: TreeReader, treeCid: Cid, index: Natural): Future[?!Block] {.async.} =
  ## Fetches the block stored at leaf `index` of the tree `treeCid`:
  ## first resolves the leaf's Cid, then reads the block from the store.
  without blockCid =? await self.getBlockCid(treeCid, index), err:
    return failure(err)
  without fetched =? await self.getBlockFromStore(blockCid), err:
    return failure(err)
  return success(fetched)
method getBlockAndProof*(self: TreeReader, treeCid: Cid, index: Natural): Future[?!(Block, MerkleProof)] {.async.} =
without (leafCid, proof) =? await self.getBlockCidAndProof(treeCid, index), err:
return failure(err)
without blk =? await self.getBlockFromStore(leafCid), err:
return failure(err)
return success((blk, proof))
method getBlocks*(self: TreeReader, treeCid: Cid, leavesCount: Natural): Future[?!AsyncIter[?!Block]] {.async.} =
without tree =? await self.getTree(treeCid), err:
return failure(err)
var iter = AsyncIter[?!Block]()
proc checkLen(index: Natural): void =
if index >= leavesCount:
iter.finish
checkLen(0)
var index = 0
proc next(): Future[?!Block] {.async.} =
if not iter.finished:
without leaf =? tree.getLeaf(index), err:
inc index
checkLen(index)
return failure(err)
inc index
checkLen(index)
without leafCid =? Cid.init(treeCid.cidver, treeCid.mcodec, leaf).mapFailure, err:
return failure(err)
without blk =? await self.getBlockFromStore(leafCid), err:
return failure(err)
return success(blk)
else:
return failure("No more elements for tree with cid " & $treeCid)
iter.next = next
return success(iter)
proc new*(T: type TreeReader, treeCacheCap = DefaultTreeCacheCapacity): TreeReader =
  ## Creates a TreeReader with an LRU cache holding up to `treeCacheCap`
  ## decoded merkle trees.
  ## NOTE(review): `getBlockFromStore` is left unset here — the stores
  ## (CacheStore/RepoStore) assign it after construction; confirm no code
  ## path calls the reader before that wiring happens.
  TreeReader(treeCache: newLruCache[Cid, MerkleTree](treeCacheCap))

View File

@ -1,5 +1,6 @@
import ./streams/seekablestream
import ./streams/storestream
import ./streams/seekablestorestream
import ./streams/asyncstreamwrapper
export seekablestream, storestream, asyncstreamwrapper
export seekablestream, storestream, seekablestorestream, asyncstreamwrapper

View File

@ -0,0 +1,123 @@
## Nim-Dagger
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/options
import pkg/upraises
push: {.upraises: [].}
import pkg/chronos
import pkg/chronicles
import pkg/stew/ptrops
import ../stores
import ../manifest
import ../blocktype
import ../utils
import ./seekablestream
export stores, blocktype, manifest, chronos
logScope:
topics = "codex storestream"
const
SeekableStoreStreamTrackerName* = "SeekableStoreStream"
type
# Make SeekableStream from a sequence of blocks stored in Manifest
# (only original file data - see StoreStream.size)
SeekableStoreStream* = ref object of SeekableStream
store*: BlockStore # Store where to lookup block contents
manifest*: Manifest # List of block CIDs
pad*: bool # Pad last block to manifest.blockSize?
method initStream*(s: SeekableStoreStream) =
if s.objName.len == 0:
s.objName = SeekableStoreStreamTrackerName
procCall SeekableStream(s).initStream()
proc new*(
T: type SeekableStoreStream,
store: BlockStore,
manifest: Manifest,
pad = true
): SeekableStoreStream =
## Create a new SeekableStoreStream instance for a given store and manifest
##
result = SeekableStoreStream(
store: store,
manifest: manifest,
pad: pad,
offset: 0)
result.initStream()
method `size`*(self: SeekableStoreStream): int =
bytes(self.manifest, self.pad).int
proc `size=`*(self: SeekableStoreStream, size: int)
{.error: "Setting the size is forbidden".} =
discard
method atEof*(self: SeekableStoreStream): bool =
self.offset >= self.size
method readOnce*(
self: SeekableStoreStream,
pbytes: pointer,
nbytes: int
): Future[int] {.async.} =
## Read `nbytes` from current position in the SeekableStoreStream into output buffer pointed by `pbytes`.
## Return how many bytes were actually read before EOF was encountered.
## Raise exception if we are already at EOF.
##
trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.blocksCount
if self.atEof:
raise newLPStreamEOFError()
# The loop iterates over blocks in the SeekableStoreStream,
# reading them and copying their data into outbuf
var read = 0 # Bytes read so far, and thus write offset in the outbuf
while read < nbytes and not self.atEof:
# Compute from the current stream position `self.offset` the block num/offset to read
# Compute how many bytes to read from this block
let
blockNum = self.offset div self.manifest.blockSize.int
blockOffset = self.offset mod self.manifest.blockSize.int
readBytes = min([self.size - self.offset,
nbytes - read,
self.manifest.blockSize.int - blockOffset])
# Read contents of block `blockNum`
without blk =? await self.store.getBlock(self.manifest.treeCid, blockNum, self.manifest.treeRoot), error:
raise newLPStreamReadError(error)
trace "Reading bytes from store stream", blockNum, cid = blk.cid, bytes = readBytes, blockOffset
# Copy `readBytes` bytes starting at `blockOffset` from the block into the outbuf
if blk.isEmpty:
zeroMem(pbytes.offset(read), readBytes)
else:
copyMem(pbytes.offset(read), blk.data[blockOffset].addr, readBytes)
# Update current positions in the stream and outbuf
self.offset += readBytes
read += readBytes
return read
method closeImpl*(self: SeekableStoreStream) {.async.} =
trace "Closing SeekableStoreStream"
self.offset = self.size # set Eof
await procCall LPStream(self).closeImpl()

View File

@ -20,6 +20,7 @@ import pkg/stew/ptrops
import ../stores
import ../manifest
import ../blocktype
import ../utils
import ./seekablestream
@ -32,18 +33,20 @@ const
StoreStreamTrackerName* = "StoreStream"
type
# Make SeekableStream from a sequence of blocks stored in Manifest
# (only original file data - see StoreStream.size)
StoreStream* = ref object of SeekableStream
StoreStream* = ref object of LPStream
store*: BlockStore # Store where to lookup block contents
manifest*: Manifest # List of block CIDs
pad*: bool # Pad last block to manifest.blockSize?
iter*: AsyncIter[?!Block]
lastBlock: Block
lastIndex: int
offset: int
method initStream*(s: StoreStream) =
if s.objName.len == 0:
s.objName = StoreStreamTrackerName
procCall SeekableStream(s).initStream()
procCall LPStream(s).initStream()
proc new*(
T: type StoreStream,
@ -57,6 +60,7 @@ proc new*(
store: store,
manifest: manifest,
pad: pad,
lastIndex: -1,
offset: 0)
result.initStream()
@ -81,34 +85,37 @@ method readOnce*(
## Raise exception if we are already at EOF.
##
trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.len
trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.blocksCount
if self.atEof:
raise newLPStreamEOFError()
# Initialize a block iterator
if self.lastIndex < 0:
without iter =? await self.store.getBlocks(self.manifest.treeCid, self.manifest.blocksCount, self.manifest.treeRoot), err:
raise newLPStreamReadError(err)
self.iter = iter
# The loop iterates over blocks in the StoreStream,
# reading them and copying their data into outbuf
var read = 0 # Bytes read so far, and thus write offset in the outbuf
while read < nbytes and not self.atEof:
# Compute from the current stream position `self.offset` the block num/offset to read
if self.offset >= (self.lastIndex + 1) * self.manifest.blockSize.int:
if not self.iter.finished:
without lastBlock =? await self.iter.next(), err:
raise newLPStreamReadError(err)
self.lastBlock = lastBlock
inc self.lastIndex
else:
raise newLPStreamReadError(newException(CodexError, "Block iterator finished prematurely"))
# Compute how many bytes to read from this block
let
blockNum = self.offset div self.manifest.blockSize.int
blockOffset = self.offset mod self.manifest.blockSize.int
readBytes = min([self.size - self.offset,
nbytes - read,
self.manifest.blockSize.int - blockOffset])
# Read contents of block `blockNum`
without blk =? await self.store.getBlock(self.manifest[blockNum]), error:
raise newLPStreamReadError(error)
trace "Reading bytes from store stream", blockNum, cid = blk.cid, bytes = readBytes, blockOffset
# Copy `readBytes` bytes starting at `blockOffset` from the block into the outbuf
if blk.isEmpty:
if self.lastBlock.isEmpty:
zeroMem(pbytes.offset(read), readBytes)
else:
copyMem(pbytes.offset(read), blk.data[blockOffset].addr, readBytes)
copyMem(pbytes.offset(read), self.lastBlock.data[blockOffset].addr, readBytes)
# Update current positions in the stream and outbuf
self.offset += readBytes

View File

@ -9,13 +9,15 @@
##
import std/parseutils
import std/options
import pkg/chronos
import ./utils/asyncheapqueue
import ./utils/fileutils
import ./utils/asynciter
export asyncheapqueue, fileutils
export asyncheapqueue, fileutils, asynciter
func divUp*[T: SomeInteger](a, b : T): T =
@ -27,6 +29,20 @@ func roundUp*[T](a, b : T): T =
## Round up 'a' to the next value divisible by 'b'
divUp(a,b) * b
proc orElse*[A](a, b: Option[A]): Option[A] =
  ## Return `a` when it holds a value, otherwise fall back to `b`.
  if a.isNone():
    b
  else:
    a
template `..<`*(a, b: untyped): untyped =
  ## A shortcut for `a .. pred(b)`.
  ## ```
  ## for i in 5 ..< 9:
  ##   echo i # => 5; 6; 7; 8
  ## ```
  # For a BackwardsIndex upper bound (e.g. `^1`), the exclusive end lies one
  # element further from the end, hence `succ` instead of `pred`.
  a .. (when b is BackwardsIndex: succ(b) else: pred(b))
when not declared(parseDuration): # Odd code formatting to minimize diff v. mainLine
const Whitespace = {' ', '\t', '\v', '\r', '\l', '\f'}

85
codex/utils/asynciter.nim Normal file
View File

@ -0,0 +1,85 @@
import pkg/questionable
import pkg/chronos
import pkg/upraises
type
  # Callback applied to each element produced by a wrapped iterator.
  MapItem*[T, U] = proc(fut: T): U {.upraises: [CatchableError], gcsafe, closure.}
  # Producer callback returning the next element of an iterator.
  NextItem*[T] = proc(): T {.upraises: [CatchableError], gcsafe, closure.}

  # Pull-based iterator: call `next` repeatedly until `finished` is true.
  Iter*[T] = ref object
    finished*: bool
    next*: NextItem[T]

  # Iterator over futures, i.e. an asynchronous stream of `T`.
  AsyncIter*[T] = Iter[Future[T]]
proc finish*[T](self: Iter[T]): void =
  ## Mark the iterator as exhausted; `finished` returns true afterwards.
  self.finished = true
proc finished*[T](self: Iter[T]): bool =
  ## True once the iterator has been marked finished (no more elements).
  self.finished
iterator items*[T](self: Iter[T]): T =
  ## Drain the iterator, yielding elements until it reports finished.
  while not self.finished:
    yield self.next()
proc map*[T, U](wrappedIter: Iter[T], mapItem: MapItem[T, U]): Iter[U] =
  ## Lazily transform `wrappedIter`, applying `mapItem` to every element.
  ## The returned iterator finishes exactly when the wrapped one does.
  var iter = Iter[U]()

  # Propagate the wrapped iterator's finished state to the outer one.
  proc checkFinish(): void =
    if wrappedIter.finished:
      iter.finish

  # The wrapped iterator may already be exhausted before the first pull.
  checkFinish()

  proc next(): U {.upraises: [CatchableError].} =
    if not iter.finished:
      let fut = wrappedIter.next()
      # Mark finished before mapping, so the outer iterator reports the
      # final state as soon as the last element is pulled.
      checkFinish()
      return mapItem(fut)
    else:
      raise newException(CatchableError, "Iterator finished, but next element was requested")

  iter.next = next
  return iter
proc prefetch*[T](wrappedIter: Iter[T], n: Positive): Iter[T] =
  ## Wrap `wrappedIter` so that up to `n` elements are pulled ahead of the
  ## consumer and held in a ring buffer. With an AsyncIter this keeps up to
  ## `n` futures in flight at once — presumably the intended use; confirm
  ## against callers.
  var ringBuf = newSeq[T](n)
  # Total element count of the wrapped iterator; int.high means "unknown"
  # until the source reports finished.
  var wrappedLen = int.high
  var iter = Iter[T]()

  # Pull element `i` from the source into ring slot `i mod n`, recording the
  # final length once the source is exhausted.
  proc tryFetch(i: int): void =
    if not wrappedIter.finished:
      let res = wrappedIter.next()
      ringBuf[i mod n] = res
      if wrappedIter.finished:
        wrappedLen = min(i + 1, wrappedLen)
    else:
      # Source was empty from the very start.
      if i == 0:
        wrappedLen = 0

  # Finish the outer iterator once the consumer index reaches the known
  # source length.
  proc checkLen(i: int): void =
    if i >= wrappedLen:
      iter.finish

  # initialize buf with n prefetched values
  for i in 0..<n:
    tryFetch(i)

  checkLen(0)

  var i = 0
  proc next(): T {.upraises: [CatchableError].} =
    if not iter.finished:
      let fut = ringBuf[i mod n]
      # prefetch a value
      tryFetch(i + n)
      inc i
      checkLen(i)
      return fut
    else:
      raise newException(CatchableError, "Iterator finished, but next element was requested")

  iter.next = next
  return iter

View File

@ -12,6 +12,7 @@ import pkg/codex/stores
import pkg/codex/blockexchange
import pkg/codex/chunker
import pkg/codex/manifest
import pkg/codex/merkletree
import pkg/codex/blocktype as bt
import ../../helpers/mockdiscovery
@ -25,6 +26,7 @@ asyncchecksuite "Block Advertising and Discovery":
var
blocks: seq[bt.Block]
manifest: Manifest
tree: MerkleTree
manifestBlock: bt.Block
switch: Switch
peerStore: PeerCtxStore
@ -52,7 +54,7 @@ asyncchecksuite "Block Advertising and Discovery":
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
manifest = Manifest.new( blocks.mapIt( it.cid ) ).tryGet()
(manifest, tree) = makeManifestAndTree(blocks).tryGet()
manifestBlock = bt.Block.new(
manifest.encode().tryGet(), codec = DagPBCodec).tryGet()
@ -151,7 +153,7 @@ asyncchecksuite "Block Advertising and Discovery":
peerId = PeerId.example
haves = collect(initTable()):
for blk in blocks:
{ blk.cid: Presence(cid: blk.cid, price: 0.u256) }
{ blk.address: Presence(address: blk.address, price: 0.u256) }
engine.peers.add(
BlockExcPeerCtx(
@ -164,7 +166,7 @@ asyncchecksuite "Block Advertising and Discovery":
check false
await engine.start() # fire up discovery loop
engine.pendingBlocks.resolve(blocks)
engine.pendingBlocks.resolve(blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
await allFuturesThrowing(
allFinished(pendingBlocks))
@ -240,9 +242,9 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
advertised[cid] = switch[3].peerInfo.signedPeerRecord
await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5])
await blockexc[2].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[4..10])
await blockexc[3].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[10..15])
await blockexc[1].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[0..5].mapIt(BlockDelivery(blk: it, address: it.address)))
await blockexc[2].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[4..10].mapIt(BlockDelivery(blk: it, address: it.address)))
await blockexc[3].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[10..15].mapIt(BlockDelivery(blk: it, address: it.address)))
MockDiscovery(blockexc[0].engine.discovery.discovery)
.findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid):
@ -282,9 +284,9 @@ asyncchecksuite "E2E - Multiple Nodes Discovery":
.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
advertised[cid] = switch[3].peerInfo.signedPeerRecord
await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5])
await blockexc[2].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[4..10])
await blockexc[3].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[10..15])
await blockexc[1].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[0..5].mapIt(BlockDelivery(blk: it, address: it.address)))
await blockexc[2].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[4..10].mapIt(BlockDelivery(blk: it, address: it.address)))
await blockexc[3].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, blocks[10..15].mapIt(BlockDelivery(blk: it, address: it.address)))
MockDiscovery(blockexc[0].engine.discovery.discovery)
.findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid):

View File

@ -57,7 +57,7 @@ asyncchecksuite "Test Discovery Engine":
blockDiscovery.findBlockProvidersHandler =
proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, gcsafe.} =
pendingBlocks.resolve(blocks.filterIt( it.cid == cid))
pendingBlocks.resolve(blocks.filterIt( it.cid == cid).mapIt(BlockDelivery(blk: it, address: it.address)))
await discoveryEngine.start()
await allFuturesThrowing(allFinished(wants)).wait(1.seconds)
@ -154,7 +154,9 @@ asyncchecksuite "Test Discovery Engine":
var
peerCtx = BlockExcPeerCtx(id: PeerId.example)
peerCtx.blocks[cid] = Presence(cid: cid, price: 0.u256)
let address = BlockAddress(leaf: false, cid: cid)
peerCtx.blocks[address] = Presence(address: address, price: 0.u256)
peerStore.add(peerCtx)
want.fire()

View File

@ -122,8 +122,8 @@ asyncchecksuite "NetworkStore engine - 2 nodes":
let blk = bt.Block.new("Block 1".toBytes).tryGet()
(await nodeCmps2.localStore.putBlock(blk)).tryGet()
let entry = Entry(
`block`: blk.cid.data.buffer,
let entry = WantListEntry(
address: blk.address,
priority: 1,
cancel: false,
wantType: WantType.WantBlock,

View File

@ -14,7 +14,7 @@ import pkg/codex/blockexchange
import pkg/codex/stores
import pkg/codex/chunker
import pkg/codex/discovery
import pkg/codex/blocktype as bt
import pkg/codex/blocktype
import pkg/codex/utils/asyncheapqueue
import ../../helpers
@ -30,7 +30,7 @@ asyncchecksuite "NetworkStore engine basic":
blockDiscovery: Discovery
peerStore: PeerCtxStore
pendingBlocks: PendingBlocksManager
blocks: seq[bt.Block]
blocks: seq[Block]
done: Future[void]
setup:
@ -48,20 +48,20 @@ asyncchecksuite "NetworkStore engine basic":
if chunk.len <= 0:
break
blocks.add(bt.Block.new(chunk).tryGet())
blocks.add(Block.new(chunk).tryGet())
done = newFuture[void]()
test "Should send want list to new peers":
proc sendWantList(
id: PeerId,
cids: seq[Cid],
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false) {.gcsafe, async.} =
check cids.mapIt($it).sorted == blocks.mapIt( $it.cid ).sorted
check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt( $it.cid ).sorted
done.complete()
let
@ -140,7 +140,7 @@ asyncchecksuite "NetworkStore engine handlers":
discovery: DiscoveryEngine
peerCtx: BlockExcPeerCtx
localStore: BlockStore
blocks: seq[bt.Block]
blocks: seq[Block]
setup:
rng = Rng.instance()
@ -151,7 +151,7 @@ asyncchecksuite "NetworkStore engine handlers":
if chunk.len <= 0:
break
blocks.add(bt.Block.new(chunk).tryGet())
blocks.add(Block.new(chunk).tryGet())
seckey = PrivateKey.random(rng[]).tryGet()
peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
@ -193,7 +193,7 @@ asyncchecksuite "NetworkStore engine handlers":
let ctx = await engine.taskQueue.pop()
check ctx.id == peerId
# only `wantBlock` scheduled
check ctx.peerWants.mapIt( it.cid ) == blocks.mapIt( it.cid )
check ctx.peerWants.mapIt( it.address.cidOrTreeCid ) == blocks.mapIt( it.cid )
let done = handler()
await engine.wantListHandler(peerId, wantList)
@ -205,7 +205,7 @@ asyncchecksuite "NetworkStore engine handlers":
wantList = makeWantList(blocks.mapIt( it.cid ))
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
check presence.mapIt( it.cid ) == wantList.entries.mapIt( it.`block` )
check presence.mapIt( it.address ) == wantList.entries.mapIt( it.address )
done.complete()
engine.network = BlockExcNetwork(
@ -227,7 +227,7 @@ asyncchecksuite "NetworkStore engine handlers":
sendDontHave = true)
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
check presence.mapIt( it.cid ) == wantList.entries.mapIt( it.`block` )
check presence.mapIt( it.address ) == wantList.entries.mapIt( it.address )
for p in presence:
check:
p.`type` == BlockPresenceType.DontHave
@ -249,12 +249,8 @@ asyncchecksuite "NetworkStore engine handlers":
sendDontHave = true)
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
let
cid1Buf = blocks[0].cid.data.buffer
cid2Buf = blocks[1].cid.data.buffer
for p in presence:
if p.cid != cid1Buf and p.cid != cid2Buf:
if p.address.cidOrTreeCid != blocks[0].cid and p.address.cidOrTreeCid != blocks[1].cid:
check p.`type` == BlockPresenceType.DontHave
else:
check p.`type` == BlockPresenceType.Have
@ -277,7 +273,9 @@ asyncchecksuite "NetworkStore engine handlers":
engine.pendingBlocks.getWantHandle( it.cid )
)
await engine.blocksHandler(peerId, blocks)
let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address))
await engine.blocksDeliveryHandler(peerId, blocksDelivery)
let resolved = await allFinished(pending)
check resolved.mapIt( it.read ) == blocks
for b in blocks:
@ -292,7 +290,7 @@ asyncchecksuite "NetworkStore engine handlers":
peerContext.account = account.some
peerContext.blocks = blocks.mapIt(
(it.cid, Presence(cid: it.cid, price: rand(uint16).u256))
(it.address, Presence(address: it.address, price: rand(uint16).u256))
).toTable
engine.network = BlockExcNetwork(
@ -301,7 +299,7 @@ asyncchecksuite "NetworkStore engine handlers":
let
amount =
blocks.mapIt(
peerContext.blocks[it.cid].price
peerContext.blocks[it.address].price
).foldl(a + b)
balances = !payment.state.outcome.balances(Asset)
@ -311,22 +309,24 @@ asyncchecksuite "NetworkStore engine handlers":
done.complete()
))
await engine.blocksHandler(peerId, blocks)
await engine.blocksDeliveryHandler(peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
await done.wait(100.millis)
test "Should handle block presence":
var
handles: Table[Cid, Future[bt.Block]]
handles: Table[Cid, Future[Block]]
proc sendWantList(
id: PeerId,
cids: seq[Cid],
addresses: seq[BlockAddress],
priority: int32 = 0,
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false) {.gcsafe, async.} =
engine.pendingBlocks.resolve(blocks.filterIt( it.cid in cids ))
engine.pendingBlocks.resolve(blocks
.filterIt( it.address in addresses )
.mapIt(BlockDelivery(blk: it, address: it.address)))
engine.network = BlockExcNetwork(
request: BlockExcRequest(
@ -343,14 +343,14 @@ asyncchecksuite "NetworkStore engine handlers":
blocks.mapIt(
PresenceMessage.init(
Presence(
cid: it.cid,
address: it.address,
have: true,
price: price
))))
for cid in blocks.mapIt(it.cid):
check cid in peerCtx.peerHave
check peerCtx.blocks[cid].price == price
for a in blocks.mapIt(it.address):
check a in peerCtx.peerHave
check peerCtx.blocks[a].price == price
asyncchecksuite "Task Handler":
var
@ -369,7 +369,7 @@ asyncchecksuite "Task Handler":
peersCtx: seq[BlockExcPeerCtx]
peers: seq[PeerId]
blocks: seq[bt.Block]
blocks: seq[Block]
setup:
rng = Rng.instance()
@ -379,7 +379,7 @@ asyncchecksuite "Task Handler":
if chunk.len <= 0:
break
blocks.add(bt.Block.new(chunk).tryGet())
blocks.add(Block.new(chunk).tryGet())
seckey = PrivateKey.random(rng[]).tryGet()
peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
@ -419,22 +419,22 @@ asyncchecksuite "Task Handler":
engine.pricing = Pricing.example.some
test "Should send want-blocks in priority order":
proc sendBlocks(
proc sendBlocksDelivery(
id: PeerId,
blks: seq[bt.Block]) {.gcsafe, async.} =
check blks.len == 2
blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} =
check blocksDelivery.len == 2
check:
blks[1].cid == blocks[0].cid
blks[0].cid == blocks[1].cid
blocksDelivery[1].address == blocks[0].address
blocksDelivery[0].address == blocks[1].address
for blk in blocks:
(await engine.localStore.putBlock(blk)).tryGet()
engine.network.request.sendBlocks = sendBlocks
engine.network.request.sendBlocksDelivery = sendBlocksDelivery
# second block to send by priority
peersCtx[0].peerWants.add(
Entry(
`block`: blocks[0].cid.data.buffer,
WantListEntry(
address: blocks[0].address,
priority: 49,
cancel: false,
wantType: WantType.WantBlock,
@ -443,8 +443,8 @@ asyncchecksuite "Task Handler":
# first block to send by priority
peersCtx[0].peerWants.add(
Entry(
`block`: blocks[1].cid.data.buffer,
WantListEntry(
address: blocks[1].address,
priority: 50,
cancel: false,
wantType: WantType.WantBlock,
@ -455,14 +455,14 @@ asyncchecksuite "Task Handler":
test "Should send presence":
let present = blocks
let missing = @[bt.Block.new("missing".toBytes).tryGet()]
let missing = @[Block.new("missing".toBytes).tryGet()]
let price = (!engine.pricing).price
proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
check presence.mapIt(!Presence.init(it)) == @[
Presence(cid: present[0].cid, have: true, price: price),
Presence(cid: present[1].cid, have: true, price: price),
Presence(cid: missing[0].cid, have: false)
Presence(address: present[0].address, have: true, price: price),
Presence(address: present[1].address, have: true, price: price),
Presence(address: missing[0].address, have: false)
]
for blk in blocks:
@ -471,8 +471,8 @@ asyncchecksuite "Task Handler":
# have block
peersCtx[0].peerWants.add(
Entry(
`block`: present[0].cid.data.buffer,
WantListEntry(
address: present[0].address,
priority: 1,
cancel: false,
wantType: WantType.WantHave,
@ -481,8 +481,8 @@ asyncchecksuite "Task Handler":
# have block
peersCtx[0].peerWants.add(
Entry(
`block`: present[1].cid.data.buffer,
WantListEntry(
address: present[1].address,
priority: 1,
cancel: false,
wantType: WantType.WantHave,
@ -491,8 +491,8 @@ asyncchecksuite "Task Handler":
# don't have block
peersCtx[0].peerWants.add(
Entry(
`block`: missing[0].cid.data.buffer,
WantListEntry(
address: missing[0].address,
priority: 1,
cancel: false,
wantType: WantType.WantHave,

View File

@ -7,13 +7,12 @@ import ../../helpers
checksuite "block presence protobuf messages":
let cid = Cid.example
let price = UInt256.example
let presence = Presence(cid: cid, have: true, price: price)
let message = PresenceMessage.init(presence)
test "encodes CID":
check message.cid == cid.data.buffer
let
cid = Cid.example
address = BlockAddress(leaf: false, cid: cid)
price = UInt256.example
presence = Presence(address: address, have: true, price: price)
message = PresenceMessage.init(presence)
test "encodes have/donthave":
var presence = presence
@ -26,12 +25,7 @@ checksuite "block presence protobuf messages":
check message.price == @(price.toBytesBE)
test "decodes CID":
check Presence.init(message).?cid == cid.some
test "fails to decode when CID is invalid":
var incorrect = message
incorrect.cid.del(0)
check Presence.init(incorrect).isNone
check Presence.init(message).?address == address.some
test "decodes have/donthave":
var message = message

View File

@ -47,13 +47,13 @@ asyncchecksuite "Network - Handlers":
discard await networkPeer.connect()
test "Want List handler":
proc wantListHandler(peer: PeerId, wantList: Wantlist) {.gcsafe, async.} =
proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
for b in blocks:
check b.cid in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.cid)]
check b.address in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.address)]
check entry.wantType == WantType.WantHave
check entry.priority == 1
check entry.cancel == true
@ -74,13 +74,13 @@ asyncchecksuite "Network - Handlers":
await done.wait(500.millis)
test "Blocks Handler":
proc blocksHandler(peer: PeerId, blks: seq[bt.Block]) {.gcsafe, async.} =
check blks == blocks
proc blocksDeliveryHandler(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} =
check blocks == blocksDelivery.mapIt(it.blk)
done.complete()
network.handlers.onBlocks = blocksHandler
network.handlers.onBlocksDelivery = blocksDeliveryHandler
let msg = Message(payload: makeBlocks(blocks))
let msg = Message(payload: blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await done.wait(500.millis)
@ -88,10 +88,10 @@ asyncchecksuite "Network - Handlers":
test "Presence Handler":
proc presenceHandler(
peer: PeerId,
precense: seq[BlockPresence]) {.gcsafe, async.} =
presence: seq[BlockPresence]) {.gcsafe, async.} =
for b in blocks:
check:
b.cid in precense
b.address in presence
done.complete()
@ -100,7 +100,7 @@ asyncchecksuite "Network - Handlers":
let msg = Message(
blockPresences: blocks.mapIt(
BlockPresence(
cid: it.cid.data.buffer,
address: it.address,
type: BlockPresenceType.Have
)))
await buffer.pushData(lenPrefix(protobufEncode(msg)))
@ -177,13 +177,13 @@ asyncchecksuite "Network - Senders":
switch2.stop())
test "Send want list":
proc wantListHandler(peer: PeerId, wantList: Wantlist) {.gcsafe, async.} =
proc wantListHandler(peer: PeerId, wantList: WantList) {.gcsafe, async.} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
for b in blocks:
check b.cid in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.cid)]
check b.address in wantList.entries
let entry = wantList.entries[wantList.entries.find(b.address)]
check entry.wantType == WantType.WantHave
check entry.priority == 1
check entry.cancel == true
@ -194,21 +194,21 @@ asyncchecksuite "Network - Senders":
network2.handlers.onWantList = wantListHandler
await network1.sendWantList(
switch2.peerInfo.peerId,
blocks.mapIt( it.cid ),
blocks.mapIt( it.address ),
1, true, WantType.WantHave,
true, true)
await done.wait(500.millis)
test "send blocks":
proc blocksHandler(peer: PeerId, blks: seq[bt.Block]) {.gcsafe, async.} =
check blks == blocks
proc blocksDeliveryHandler(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} =
check blocks == blocksDelivery.mapIt(it.blk)
done.complete()
network2.handlers.onBlocks = blocksHandler
await network1.sendBlocks(
network2.handlers.onBlocksDelivery = blocksDeliveryHandler
await network1.sendBlocksDelivery(
switch2.peerInfo.peerId,
blocks)
blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
await done.wait(500.millis)
@ -218,7 +218,7 @@ asyncchecksuite "Network - Senders":
precense: seq[BlockPresence]) {.gcsafe, async.} =
for b in blocks:
check:
b.cid in precense
b.address in precense
done.complete()
@ -228,7 +228,7 @@ asyncchecksuite "Network - Senders":
switch2.peerInfo.peerId,
blocks.mapIt(
BlockPresence(
cid: it.cid.data.buffer,
address: it.address,
type: BlockPresenceType.Have
)))

View File

@ -35,12 +35,12 @@ checksuite "Peer Context Store Peer Selection":
var
store: PeerCtxStore
peerCtxs: seq[BlockExcPeerCtx]
cids: seq[Cid]
addresses: seq[BlockAddress]
setup:
store = PeerCtxStore.new()
cids = collect(newSeq):
for i in 0..<10: Cid.example
addresses = collect(newSeq):
for i in 0..<10: BlockAddress(leaf: false, cid: Cid.example)
peerCtxs = collect(newSeq):
for i in 0..<10: BlockExcPeerCtx.example
@ -50,20 +50,20 @@ checksuite "Peer Context Store Peer Selection":
teardown:
store = nil
cids = @[]
addresses = @[]
peerCtxs = @[]
test "Should select peers that have Cid":
peerCtxs[0].blocks = collect(initTable):
for i, c in cids:
{ c: Presence(cid: c, price: i.u256) }
for i, a in addresses:
{ a: Presence(address: a, price: i.u256) }
peerCtxs[5].blocks = collect(initTable):
for i, c in cids:
{ c: Presence(cid: c, price: i.u256) }
for i, a in addresses:
{ a: Presence(address: a, price: i.u256) }
let
peers = store.peersHave(cids[0])
peers = store.peersHave(addresses[0])
check peers.len == 2
check peerCtxs[0] in peers
@ -71,19 +71,19 @@ checksuite "Peer Context Store Peer Selection":
test "Should select cheapest peers for Cid":
peerCtxs[0].blocks = collect(initTable):
for i, c in cids:
{ c: Presence(cid: c, price: (5 + i).u256) }
for i, a in addresses:
{ a: Presence(address: a, price: (5 + i).u256) }
peerCtxs[5].blocks = collect(initTable):
for i, c in cids:
{ c: Presence(cid: c, price: (2 + i).u256) }
for i, a in addresses:
{ a: Presence(address: a, price: (2 + i).u256) }
peerCtxs[9].blocks = collect(initTable):
for i, c in cids:
{ c: Presence(cid: c, price: i.u256) }
for i, a in addresses:
{ a: Presence(address: a, price: i.u256) }
let
peers = store.selectCheapest(cids[0])
peers = store.selectCheapest(addresses[0])
check peers.len == 3
check peers[0] == peerCtxs[9]
@ -92,9 +92,9 @@ checksuite "Peer Context Store Peer Selection":
test "Should select peers that want Cid":
let
entries = cids.mapIt(
Entry(
`block`: it.data.buffer,
entries = addresses.mapIt(
WantListEntry(
address: it,
priority: 1,
cancel: false,
wantType: WantType.WantBlock,
@ -104,7 +104,7 @@ checksuite "Peer Context Store Peer Selection":
peerCtxs[5].peerWants = entries
let
peers = store.peersWant(cids[4])
peers = store.peersWant(addresses[4])
check peers.len == 2
check peerCtxs[0] in peers

View File

@ -27,7 +27,7 @@ checksuite "Pending Blocks":
handle = pendingBlocks.getWantHandle(blk.cid)
check blk.cid in pendingBlocks
pendingBlocks.resolve(@[blk])
pendingBlocks.resolve(@[blk].mapIt(BlockDelivery(blk: it, address: it.address)))
check (await handle) == blk
check blk.cid notin pendingBlocks
@ -64,7 +64,7 @@ checksuite "Pending Blocks":
check:
blks.mapIt( $it.cid ).sorted(cmp[string]) ==
toSeq(pendingBlocks.wantList).mapIt( $it ).sorted(cmp[string])
toSeq(pendingBlocks.wantListBlockCids).mapIt( $it ).sorted(cmp[string])
test "Should get want handles list":
let
@ -74,7 +74,7 @@ checksuite "Pending Blocks":
wantHandles = toSeq(pendingBlocks.wantHandles)
check wantHandles.len == handles.len
pendingBlocks.resolve(blks)
pendingBlocks.resolve(blks.mapIt(BlockDelivery(blk: it, address: it.address)))
check:
(await allFinished(wantHandles)).mapIt( $it.read.cid ).sorted(cmp[string]) ==

View File

@ -53,6 +53,10 @@ proc example*(_: type BlockExcPeerCtx): BlockExcPeerCtx =
proc example*(_: type Cid): Cid =
bt.Block.example.cid
proc example*(_: type MultiHash, mcodec = multiCodec("sha2-256")): MultiHash =
let bytes = newSeqWith(256, rand(uint8))
MultiHash.digest($mcodec, bytes).tryGet()
proc example*(_: type Availability): Availability =
Availability.init(
size = uint16.example.u256,

View File

@ -1,25 +1,30 @@
import std/sequtils
import pkg/chronos
import pkg/libp2p
import pkg/libp2p/varint
import pkg/codex/blocktype as bt
import pkg/codex/blocktype
import pkg/codex/stores
import pkg/codex/manifest
import pkg/codex/merkletree
import pkg/codex/blockexchange
import pkg/codex/rng
import ./helpers/nodeutils
import ./helpers/randomchunker
import ./helpers/mockchunker
import ./helpers/mockdiscovery
import ./helpers/always
import ../checktest
export randomchunker, nodeutils, mockdiscovery, always, checktest, manifest
export randomchunker, nodeutils, mockdiscovery, mockchunker, always, checktest, manifest
export libp2p except setup, eventually
# NOTE: The meaning of equality for blocks
# is changed here, because blocks are now `ref`
# types. This is only in tests!!!
func `==`*(a, b: bt.Block): bool =
func `==`*(a, b: Block): bool =
(a.cid == b.cid) and (a.data == b.data)
proc lenPrefix*(msg: openArray[byte]): seq[byte] =
@ -33,21 +38,99 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] =
return buf
proc makeManifestAndTree*(blocks: seq[Block]): ?!(Manifest, MerkleTree) =
  ## Build a Merkle tree over `blocks` plus a Manifest whose `treeCid` and
  ## `treeRoot` reference that tree. Fails on an empty list or when the
  ## blocks do not all share the same multihash codec.
  if blocks.len == 0:
    return failure("Blocks list was empty")

  let
    mcodec = (? blocks[0].cid.mhash.mapFailure).mcodec
    datasetSize = blocks.mapIt(it.data.len).foldl(a + b)
    # The largest block is used as the manifest's nominal block size.
    blockSize = blocks.mapIt(it.data.len).foldl(max(a, b))

  var builder = ? MerkleTreeBuilder.init(mcodec)

  for b in blocks:
    let mhash = ? b.cid.mhash.mapFailure
    if mhash.mcodec != mcodec:
      return failure("Blocks are not using the same codec")
    ? builder.addLeaf(mhash)

  let
    tree = ? builder.build()
    treeBlk = ? Block.new(tree.encode())
    manifest = Manifest.new(
      treeCid = treeBlk.cid,
      treeRoot = tree.root,
      blockSize = NBytes(blockSize),
      datasetSize = NBytes(datasetSize),
      version = CIDv1,
      hcodec = mcodec
    )

  return success((manifest, tree))
proc makeWantList*(
  cids: seq[Cid],
  priority: int = 0,
  cancel: bool = false,
  wantType: WantType = WantType.WantHave,
  full: bool = false,
  sendDontHave: bool = false
): WantList =
  ## Build a `WantList` with one non-leaf entry per CID, every entry
  ## sharing the given priority/cancel/wantType/sendDontHave settings.
  var wantEntries: seq[WantListEntry]
  for c in cids:
    wantEntries.add(
      WantListEntry(
        address: BlockAddress(leaf: false, cid: c),
        priority: priority.int32,
        cancel: cancel,
        wantType: wantType,
        sendDontHave: sendDontHave))
  WantList(entries: wantEntries, full: full)
proc storeDataGetManifest*(store: BlockStore, chunker: Chunker): Future[Manifest] {.async.} =
  ## Chunk all data from `chunker` into blocks, put each block plus the
  ## encoded Merkle-tree block into `store`, and return a Manifest that
  ## references the tree.
  ## NOTE(review): blockSize is taken from `chunker.chunkSize`, which assumes
  ## uniformly sized chunks — confirm behaviour for a short last chunk.
  var builder = MerkleTreeBuilder.init().tryGet()

  while (
    let chunk = await chunker.getBytes();
    chunk.len > 0):
    let blk = Block.new(chunk).tryGet()
    # builder.addDataBlock(blk.data).tryGet()
    # Leaves are added from the block CID's multihash rather than raw data.
    let mhash = blk.cid.mhash.mapFailure.tryGet()
    builder.addLeaf(mhash).tryGet()
    (await store.putBlock(blk)).tryGet()

  let
    tree = builder.build().tryGet()
    treeBlk = Block.new(tree.encode()).tryGet()

  let manifest = Manifest.new(
    treeCid = treeBlk.cid,
    treeRoot = tree.root,
    blockSize = NBytes(chunker.chunkSize),
    datasetSize = NBytes(chunker.offset),
  )

  # The encoded tree itself is stored so it can be fetched by treeCid.
  (await store.putBlock(treeBlk)).tryGet()

  return manifest
proc corruptBlocks*(
store: BlockStore,
manifest: Manifest,
blks, bytes: int): Future[seq[int]] {.async.} =
var pos: seq[int]
doAssert blks < manifest.len
doAssert blks < manifest.blocksCount
while pos.len < blks:
let i = Rng.instance.rand(manifest.len - 1)
let i = Rng.instance.rand(manifest.blocksCount - 1)
if pos.find(i) >= 0:
continue
pos.add(i)
var
blk = (await store.getBlock(manifest[i])).tryGet()
blk = (await store.getBlock(manifest.treeCid, i, manifest.treeRoot)).tryGet()
bytePos: seq[int]
doAssert bytes < blk.data.len

View File

@ -0,0 +1,45 @@
import std/sequtils
import pkg/chronos
import pkg/codex/chunker
import pkg/codex/rng
export chunker
type
MockChunker* = Chunker
proc new*(
  T: type MockChunker,
  dataset: openArray[byte],
  chunkSize: int | NBytes,
  pad: bool = false
): MockChunker =
  ## Create a chunker that produces data
  ## from the in-memory `dataset`, `chunkSize` bytes at a time.
  ##
  let
    chunkSize = chunkSize.NBytes
    dataset = @dataset  # copy so the closure below owns the bytes

  # Read position across calls; captured and advanced by `reader`.
  var consumed = 0
  proc reader(data: ChunkBuffer, len: int): Future[int] {.async, gcsafe, raises: [Defect].} =
    if consumed >= dataset.len:
      return 0

    var read = 0
    # Stop at the caller's buffer length, the chunk size, or the end of the
    # dataset — whichever comes first.
    while read < len and
      read < chunkSize.int and
      (consumed + read) < dataset.len:
      data[read] = dataset[consumed + read]
      read.inc
    consumed += read
    return read

  Chunker.new(
    reader = reader,
    pad = pad,
    chunkSize = chunkSize)

View File

@ -13,7 +13,8 @@ import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import codex/stores/repostore
import pkg/codex/stores/repostore
import pkg/codex/utils/asynciter
type
MockRepoStore* = ref object of RepoStore
@ -31,14 +32,14 @@ method delBlock*(self: MockRepoStore, cid: Cid): Future[?!void] {.async.} =
dec self.iteratorIndex
return success()
method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): Future[?!BlockExpirationIter] {.async.} =
method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): Future[?!AsyncIter[?BlockExpiration]] {.async.} =
if self.getBlockExpirationsThrows:
raise new CatchableError
self.getBeMaxNumber = maxNumber
self.getBeOffset = offset
var iter = BlockExpirationIter()
var iter = AsyncIter[?BlockExpiration]()
iter.finished = false
self.iteratorIndex = offset
@ -49,7 +50,7 @@ method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): F
let selectedBlock = self.testBlockExpirations[self.iteratorIndex]
inc self.iteratorIndex
return selectedBlock.some
iter.finished = true
iter.finish
return BlockExpiration.none
iter.next = next

View File

@ -0,0 +1,42 @@
import std/unittest
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/codex/merkletree
import ../helpers
checksuite "merkletree - coders":
const data =
[
"0123456789012345678901234567890123456789".toBytes,
"1234567890123456789012345678901234567890".toBytes,
"2345678901234567890123456789012345678901".toBytes,
"3456789012345678901234567890123456789012".toBytes,
"4567890123456789012345678901234567890123".toBytes,
"5678901234567890123456789012345678901234".toBytes,
"6789012345678901234567890123456789012345".toBytes,
"7890123456789012345678901234567890123456".toBytes,
"8901234567890123456789012345678901234567".toBytes,
"9012345678901234567890123456789012345678".toBytes,
]
test "encoding and decoding a tree yields the same tree":
var builder = MerkleTreeBuilder.init(multiCodec("sha2-256")).tryGet()
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
builder.addDataBlock(data[3]).tryGet()
builder.addDataBlock(data[4]).tryGet()
builder.addDataBlock(data[5]).tryGet()
builder.addDataBlock(data[6]).tryGet()
builder.addDataBlock(data[7]).tryGet()
builder.addDataBlock(data[8]).tryGet()
builder.addDataBlock(data[9]).tryGet()
let tree = builder.build().tryGet()
let encodedBytes = tree.encode()
let decodedTree = MerkleTree.decode(encodedBytes).tryGet()
check:
tree == decodedTree

View File

@ -1,86 +1,165 @@
import std/unittest
import std/bitops
import std/random
import std/sequtils
import pkg/libp2p
import codex/merkletree/merkletree
import ../helpers
import std/tables
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/nimcrypto/sha2
import pkg/codex/merkletree
import ../helpers
checksuite "merkletree":
const data =
[
"0123456789012345678901234567890123456789".toBytes,
"1234567890123456789012345678901234567890".toBytes,
"2345678901234567890123456789012345678901".toBytes,
"3456789012345678901234567890123456789012".toBytes,
"4567890123456789012345678901234567890123".toBytes,
"5678901234567890123456789012345678901234".toBytes,
"6789012345678901234567890123456789012345".toBytes,
"7890123456789012345678901234567890123456".toBytes,
"8901234567890123456789012345678901234567".toBytes,
"9012345678901234567890123456789012345678".toBytes,
]
const sha256 = multiCodec("sha2-256")
const sha512 = multiCodec("sha2-512")
proc randomHash(codec: MultiCodec = sha256): MerkleHash =
var data: array[0..31, byte]
for i in 0..31:
data[i] = rand(uint8)
return MultiHash.digest($codec, data).tryGet()
proc combine(a, b: MerkleHash, codec: MultiCodec = sha256): MerkleHash =
proc combine(a, b: MultiHash, codec: MultiCodec = sha256): MultiHash =
var buf = newSeq[byte](a.size + b.size)
for i in 0..<a.size:
buf[i] = a.data.buffer[i]
for i in 0..<b.size:
buf[i + a.size] = b.data.buffer[i]
copyMem(addr buf[0], unsafeAddr a.data.buffer[a.dpos], a.size)
copyMem(addr buf[a.size], unsafeAddr b.data.buffer[b.dpos], b.size)
return MultiHash.digest($codec, buf).tryGet()
var
leaves: array[0..10, MerkleHash]
var zeroHash: MultiHash
var oneHash: MultiHash
var expectedLeaves: array[data.len, MultiHash]
var builder: MerkleTreeBuilder
setup:
for i in 0..leaves.high:
leaves[i] = randomHash()
for i in 0..<data.len:
expectedLeaves[i] = MultiHash.digest($sha256, data[i]).tryGet()
builder = MerkleTreeBuilder.init(sha256).tryGet()
var zero: array[32, byte]
var one: array[32, byte]
one[^1] = 0x01
zeroHash = MultiHash.init($sha256, zero).tryGet()
oneHash = MultiHash.init($sha256, one).tryGet()
test "tree with one leaf has expected root":
let tree = MerkleTree.init(leaves[0..0]).tryGet()
test "tree with one leaf has expected structure":
builder.addDataBlock(data[0]).tryGet()
let tree = builder.build().tryGet()
check:
tree.leaves == leaves[0..0]
tree.root == leaves[0]
tree.leaves == expectedLeaves[0..0]
tree.root == expectedLeaves[0]
tree.len == 1
test "tree with two leaves has expected root":
let
expectedRoot = combine(leaves[0], leaves[1])
test "tree with two leaves has expected structure":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
let tree = MerkleTree.init(leaves[0..1]).tryGet()
let tree = builder.build().tryGet()
let expectedRoot = combine(expectedLeaves[0], expectedLeaves[1])
check:
tree.leaves == leaves[0..1]
tree.leaves == expectedLeaves[0..1]
tree.len == 3
tree.root == expectedRoot
test "tree with three leaves has expected root":
let
expectedRoot = combine(combine(leaves[0], leaves[1]), combine(leaves[2], leaves[2]))
test "tree with three leaves has expected structure":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
let tree = MerkleTree.init(leaves[0..2]).tryGet()
let tree = builder.build().tryGet()
let
expectedRoot = combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], zeroHash)
)
check:
tree.leaves == leaves[0..2]
tree.leaves == expectedLeaves[0..2]
tree.len == 6
tree.root == expectedRoot
test "tree with two leaves provides expected proofs":
let tree = MerkleTree.init(leaves[0..1]).tryGet()
test "tree with nine leaves has expected structure":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
builder.addDataBlock(data[3]).tryGet()
builder.addDataBlock(data[4]).tryGet()
builder.addDataBlock(data[5]).tryGet()
builder.addDataBlock(data[6]).tryGet()
builder.addDataBlock(data[7]).tryGet()
builder.addDataBlock(data[8]).tryGet()
let tree = builder.build().tryGet()
let
expectedRoot = combine(
combine(
combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], expectedLeaves[3]),
),
combine(
combine(expectedLeaves[4], expectedLeaves[5]),
combine(expectedLeaves[6], expectedLeaves[7])
)
),
combine(
combine(
combine(expectedLeaves[8], zeroHash),
oneHash
),
oneHash
)
)
check:
tree.leaves == expectedLeaves[0..8]
tree.len == 20
tree.root == expectedRoot
test "tree with two leaves provides expected and valid proofs":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
let tree = builder.build().tryGet()
let expectedProofs = [
MerkleProof.init(0, @[leaves[1]]),
MerkleProof.init(1, @[leaves[0]]),
MerkleProof.init(0, @[expectedLeaves[1]]).tryGet(),
MerkleProof.init(1, @[expectedLeaves[0]]).tryGet(),
]
check:
tree.getProof(0).tryGet() == expectedProofs[0]
tree.getProof(1).tryGet() == expectedProofs[1]
check:
tree.getProof(0).tryGet().verifyDataBlock(data[0], tree.root).tryGet()
tree.getProof(1).tryGet().verifyDataBlock(data[1], tree.root).tryGet()
test "tree with three leaves provides expected proofs":
let tree = MerkleTree.init(leaves[0..2]).tryGet()
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
let tree = builder.build().tryGet()
let expectedProofs = [
MerkleProof.init(0, @[leaves[1], combine(leaves[2], leaves[2])]),
MerkleProof.init(1, @[leaves[0], combine(leaves[2], leaves[2])]),
MerkleProof.init(2, @[leaves[2], combine(leaves[0], leaves[1])]),
MerkleProof.init(0, @[expectedLeaves[1], combine(expectedLeaves[2], zeroHash)]).tryGet(),
MerkleProof.init(1, @[expectedLeaves[0], combine(expectedLeaves[2], zeroHash)]).tryGet(),
MerkleProof.init(2, @[zeroHash, combine(expectedLeaves[0], expectedLeaves[1])]).tryGet(),
]
check:
@ -88,21 +167,73 @@ checksuite "merkletree":
tree.getProof(1).tryGet() == expectedProofs[1]
tree.getProof(2).tryGet() == expectedProofs[2]
check:
tree.getProof(0).tryGet().verifyDataBlock(data[0], tree.root).tryGet()
tree.getProof(1).tryGet().verifyDataBlock(data[1], tree.root).tryGet()
tree.getProof(2).tryGet().verifyDataBlock(data[2], tree.root).tryGet()
test "tree with nine leaves provides expected proofs":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
builder.addDataBlock(data[3]).tryGet()
builder.addDataBlock(data[4]).tryGet()
builder.addDataBlock(data[5]).tryGet()
builder.addDataBlock(data[6]).tryGet()
builder.addDataBlock(data[7]).tryGet()
builder.addDataBlock(data[8]).tryGet()
let tree = builder.build().tryGet()
let expectedProofs = {
4:
MerkleProof.init(4, @[
expectedLeaves[5],
combine(expectedLeaves[6], expectedLeaves[7]),
combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], expectedLeaves[3]),
),
combine(
combine(
combine(expectedLeaves[8], zeroHash),
oneHash
),
oneHash
)
]).tryGet(),
8:
MerkleProof.init(8, @[
zeroHash,
oneHash,
oneHash,
combine(
combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], expectedLeaves[3]),
),
combine(
combine(expectedLeaves[4], expectedLeaves[5]),
combine(expectedLeaves[6], expectedLeaves[7])
)
)
]).tryGet(),
}.newTable
check:
tree.getProof(4).tryGet() == expectedProofs[4]
tree.getProof(8).tryGet() == expectedProofs[8]
check:
tree.getProof(4).tryGet().verifyDataBlock(data[4], tree.root).tryGet()
tree.getProof(8).tryGet().verifyDataBlock(data[8], tree.root).tryGet()
test "getProof fails for index out of bounds":
let tree = MerkleTree.init(leaves[0..3]).tryGet()
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
let tree = builder.build().tryGet()
check:
isErr(tree.getProof(-1))
isErr(tree.getProof(4))
test "can create MerkleTree directly from root hash":
let tree = MerkleTree.init(leaves[0], 1)
check:
tree.root == leaves[0]
test "cannot create MerkleTree from leaves with different codec":
let res = MerkleTree.init(@[randomHash(sha256), randomHash(sha512)])
check:
isErr(res)

View File

@ -10,6 +10,7 @@ import pkg/codex/chunker
import pkg/codex/storageproofs
import pkg/codex/discovery
import pkg/codex/manifest
import pkg/codex/merkletree
import pkg/codex/stores
import pkg/codex/storageproofs as st
import pkg/codex/blocktype as bt
@ -21,6 +22,7 @@ import ../helpers
const
BlockSize = 31'nb * 64
DataSetSize = BlockSize * 100
CacheSize = DataSetSize * 2
asyncchecksuite "Storage Proofs Network":
let
@ -42,26 +44,19 @@ asyncchecksuite "Storage Proofs Network":
spk: st.PublicKey
porMsg: PorMessage
cid: Cid
porStream: StoreStream
porStream: SeekableStoreStream
por: PoR
tags: seq[Tag]
setup:
chunker = RandomChunker.new(Rng.instance(), size = DataSetSize.int, chunkSize = BlockSize)
store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize)
manifest = Manifest.new(blockSize = BlockSize).tryGet()
store = CacheStore.new(cacheSize = CacheSize, chunkSize = BlockSize)
(spk, ssk) = st.keyGen()
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
let blk = bt.Block.new(chunk).tryGet()
manifest.add(blk.cid)
(await store.putBlock(blk)).tryGet()
manifest = await storeDataGetManifest(store, chunker)
cid = manifest.cid.tryGet()
porStream = StoreStream.new(store, manifest)
porStream = SeekableStoreStream.new(store, manifest)
por = await PoR.init(
porStream,
ssk, spk,

View File

@ -7,6 +7,7 @@ import pkg/codex/streams
import pkg/codex/storageproofs as st
import pkg/codex/stores
import pkg/codex/manifest
import pkg/codex/merkletree
import pkg/codex/chunker
import pkg/codex/rng
import pkg/codex/blocktype as bt
@ -18,6 +19,7 @@ const
SectorSize = 31'nb
SectorsPerBlock = BlockSize div SectorSize
DataSetSize = BlockSize * 100
CacheSize = DataSetSize * 2
asyncchecksuite "BLS PoR":
var
@ -26,25 +28,18 @@ asyncchecksuite "BLS PoR":
store: BlockStore
ssk: st.SecretKey
spk: st.PublicKey
porStream: StoreStream
proofStream: StoreStream
porStream: SeekableStream
proofStream: SeekableStream
setup:
chunker = RandomChunker.new(Rng.instance(), size = DataSetSize.int, chunkSize = BlockSize)
store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize)
manifest = Manifest.new(blockSize = BlockSize).tryGet()
store = CacheStore.new(cacheSize = CacheSize, chunkSize = BlockSize)
(spk, ssk) = st.keyGen()
porStream = StoreStream.new(store, manifest)
proofStream = StoreStream.new(store, manifest)
manifest = await storeDataGetManifest(store, chunker)
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
let blk = bt.Block.new(chunk).tryGet()
manifest.add(blk.cid)
(await store.putBlock(blk)).tryGet()
porStream = SeekableStoreStream.new(store, manifest)
proofStream = SeekableStoreStream.new(store, manifest)
teardown:
await close(porStream)
@ -92,31 +87,23 @@ asyncchecksuite "Test Serialization":
por: PoR
q: seq[QElement]
proof: Proof
porStream: StoreStream
proofStream: StoreStream
porStream: SeekableStream
proofStream: SeekableStream
setup:
chunker = RandomChunker.new(Rng.instance(), size = DataSetSize.int, chunkSize = BlockSize)
store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize)
manifest = Manifest.new(blockSize = BlockSize).tryGet()
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
let blk = bt.Block.new(chunk).tryGet()
manifest.add(blk.cid)
(await store.putBlock(blk)).tryGet()
store = CacheStore.new(cacheSize = CacheSize, chunkSize = BlockSize)
manifest = await storeDataGetManifest(store, chunker)
(spk, ssk) = st.keyGen()
porStream = StoreStream.new(store, manifest)
porStream = SeekableStoreStream.new(store, manifest)
por = await PoR.init(
porStream,
ssk,
spk,
BlockSize.int)
q = generateQuery(por.tau, 22)
proofStream = StoreStream.new(store, manifest)
proofStream = SeekableStoreStream.new(store, manifest)
proof = await generateProof(
proofStream,
q,

View File

@ -6,6 +6,7 @@ import pkg/asynctest
import pkg/codex/rng
import pkg/codex/streams
import pkg/codex/merkletree
import pkg/codex/storageproofs as st
import pkg/codex/blocktype as bt
@ -14,6 +15,7 @@ import ../helpers
const
BlockSize = 31'nb * 64'nb
DataSetSize = BlockSize * 100
CacheSize = DataSetSize * 2
asyncchecksuite "Test PoR store":
let
@ -27,7 +29,7 @@ asyncchecksuite "Test PoR store":
spk: st.PublicKey
repoDir: string
stpstore: st.StpStore
porStream: StoreStream
porStream: SeekableStream
por: PoR
porMsg: PorMessage
cid: Cid
@ -35,29 +37,20 @@ asyncchecksuite "Test PoR store":
setup:
chunker = RandomChunker.new(Rng.instance(), size = DataSetSize.int, chunkSize = BlockSize)
store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize)
manifest = Manifest.new(blockSize = BlockSize).tryGet()
store = CacheStore.new(cacheSize = CacheSize * 2, chunkSize = BlockSize)
(spk, ssk) = st.keyGen()
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
manifest = await storeDataGetManifest(store, chunker)
let blk = bt.Block.new(chunk).tryGet()
manifest.add(blk.cid)
(await store.putBlock(blk)).tryGet()
cid = manifest.cid.tryGet()
porStream = StoreStream.new(store, manifest)
cid = manifest.treeCid
porStream = SeekableStoreStream.new(store, manifest)
por = await PoR.init(
porStream,
ssk, spk,
BlockSize.int)
porMsg = por.toMessage()
tags = blocks.mapIt(
Tag(idx: it, tag: porMsg.authenticators[it]) )
repoDir = getAppDir() / "stp"
createDir(repoDir)
stpstore = st.StpStore.init(repoDir)

View File

@ -11,6 +11,8 @@ import pkg/questionable/results
import pkg/codex/stores/cachestore
import pkg/codex/chunker
import pkg/codex/manifest
import pkg/codex/merkletree
import pkg/codex/utils
import ../helpers
@ -27,6 +29,8 @@ proc commonBlockStoreTests*(name: string,
asyncchecksuite name & " Store Common":
var
newBlock, newBlock1, newBlock2, newBlock3: Block
manifest: Manifest
tree: MerkleTree
store: BlockStore
setup:
@ -35,6 +39,8 @@ proc commonBlockStoreTests*(name: string,
newBlock2 = Block.new("2".repeat(100).toBytes()).tryGet()
newBlock3 = Block.new("3".repeat(100).toBytes()).tryGet()
(manifest, tree) = makeManifestAndTree(@[newBlock, newBlock1, newBlock2, newBlock3]).tryGet()
if not isNil(before):
await before()
@ -104,10 +110,10 @@ proc commonBlockStoreTests*(name: string,
test "listBlocks Manifest":
let
blocks = @[newBlock1, newBlock2, newBlock3]
manifest = Manifest.new(blocks = blocks.mapIt( it.cid )).tryGet()
manifestBlock = Block.new(manifest.encode().tryGet(), codec = DagPBCodec).tryGet()
treeBlock = Block.new(tree.encode()).tryGet()
putHandles = await allFinished(
(manifestBlock & blocks).mapIt( store.putBlock( it ) ))
(@[treeBlock, manifestBlock] & blocks).mapIt( store.putBlock( it ) ))
for handle in putHandles:
check not handle.failed
@ -128,10 +134,10 @@ proc commonBlockStoreTests*(name: string,
test "listBlocks Both":
let
blocks = @[newBlock1, newBlock2, newBlock3]
manifest = Manifest.new(blocks = blocks.mapIt( it.cid )).tryGet()
manifestBlock = Block.new(manifest.encode().tryGet(), codec = DagPBCodec).tryGet()
treeBlock = Block.new(tree.encode()).tryGet()
putHandles = await allFinished(
(manifestBlock & blocks).mapIt( store.putBlock( it ) ))
(@[treeBlock, manifestBlock] & blocks).mapIt( store.putBlock( it ) ))
for handle in putHandles:
check not handle.failed
@ -146,4 +152,4 @@ proc commonBlockStoreTests*(name: string,
check (await store.hasBlock(cid)).tryGet()
count.inc
check count == 4
check count == 5

View File

@ -70,4 +70,4 @@ checksuite "Cache Store":
commonBlockStoreTests(
"Cache", proc: BlockStore =
BlockStore(CacheStore.new(cacheSize = 500, chunkSize = 1)))
BlockStore(CacheStore.new(cacheSize = 1000, chunkSize = 1)))

View File

@ -16,6 +16,7 @@ import pkg/codex/chunker
import pkg/codex/stores
import pkg/codex/blocktype as bt
import pkg/codex/clock
import pkg/codex/utils/asynciter
import ../helpers
import ../helpers/mockclock
@ -72,7 +73,7 @@ asyncchecksuite "RepoStore":
mockClock = MockClock.new()
mockClock.set(now)
repo = RepoStore.new(repoDs, metaDs, mockClock, quotaMaxBytes = 200)
repo = RepoStore.new(repoDs, metaDs, clock = mockClock, quotaMaxBytes = 200)
teardown:
(await repoDs.close()).tryGet
@ -245,7 +246,7 @@ asyncchecksuite "RepoStore":
response.len == 0
test "Should retrieve block expiration information":
proc unpack(beIter: Future[?!BlockExpirationIter]): Future[seq[BlockExpiration]] {.async.} =
proc unpack(beIter: Future[?!AsyncIter[?BlockExpiration]]): Future[seq[BlockExpiration]] {.async.} =
var expirations = newSeq[BlockExpiration](0)
without iter =? (await beIter), err:
return expirations
@ -285,22 +286,22 @@ asyncchecksuite "RepoStore":
assertExpiration(blockExpirations2[0], blk3)
test "should put empty blocks":
let blk = Cid.example.emptyBlock
let blk = Cid.example.emptyBlock.tryGet()
check (await repo.putBlock(blk)).isOk
test "should get empty blocks":
let blk = Cid.example.emptyBlock
let blk = Cid.example.emptyBlock.tryGet()
let got = await repo.getBlock(blk.cid)
check got.isOk
check got.get.cid == blk.cid
test "should delete empty blocks":
let blk = Cid.example.emptyBlock
let blk = Cid.example.emptyBlock.tryGet()
check (await repo.delBlock(blk.cid)).isOk
test "should have empty block":
let blk = Cid.example.emptyBlock
let blk = Cid.example.emptyBlock.tryGet()
let has = await repo.hasBlock(blk.cid)
check has.isOk
@ -312,7 +313,7 @@ commonBlockStoreTests(
RepoStore.new(
SQLiteDatastore.new(Memory).tryGet(),
SQLiteDatastore.new(Memory).tryGet(),
MockClock.new())))
clock = MockClock.new())))
const
path = currentSourcePath().parentDir / "test"
@ -332,6 +333,6 @@ commonBlockStoreTests(
RepoStore.new(
FSDatastore.new(path, depth).tryGet(),
SQLiteDatastore.new(Memory).tryGet(),
MockClock.new())),
clock = MockClock.new())),
before = before,
after = after)

View File

@ -22,25 +22,13 @@ asyncchecksuite "Erasure encode/decode":
var manifest: Manifest
var store: BlockStore
var erasure: Erasure
var repoDs: Datastore
var metaDs: SQLiteDatastore
setup:
rng = Rng.instance()
chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize)
manifest = !Manifest.new(blockSize = BlockSize)
repoDs = SQLiteDatastore.new(Memory).tryGet()
metaDs = SQLiteDatastore.new(Memory).tryGet()
store = RepoStore.new(repoDs, metaDs)
store = CacheStore.new(cacheSize = (dataSetSize * 4), chunkSize = BlockSize)
erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
let blk = bt.Block.new(chunk).tryGet()
manifest.add(blk.cid)
(await store.putBlock(blk)).tryGet()
manifest = await storeDataGetManifest(store, chunker)
proc encode(buffers, parity: int): Future[Manifest] {.async.} =
let
@ -50,8 +38,8 @@ asyncchecksuite "Erasure encode/decode":
parity)).tryGet()
check:
encoded.len mod (buffers + parity) == 0
encoded.rounded == (manifest.len + (buffers - (manifest.len mod buffers)))
encoded.blocksCount mod (buffers + parity) == 0
encoded.rounded == (manifest.blocksCount + (buffers - (manifest.blocksCount mod buffers)))
encoded.steps == encoded.rounded div buffers
return encoded
@ -64,12 +52,12 @@ asyncchecksuite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
var
column = rng.rand((encoded.len - 1) div encoded.steps) # random column
dropped: seq[Cid]
column = rng.rand((encoded.blocksCount - 1) div encoded.steps) # random column
dropped: seq[int]
for _ in 0..<encoded.ecM:
dropped.add(encoded[column])
(await store.delBlock(encoded[column])).tryGet()
dropped.add(column)
(await store.delBlock(encoded.treeCid, column)).tryGet()
column.inc(encoded.steps - 1)
var
@ -78,10 +66,10 @@ asyncchecksuite "Erasure encode/decode":
check:
decoded.cid.tryGet() == manifest.cid.tryGet()
decoded.cid.tryGet() == encoded.originalCid
decoded.len == encoded.originalLen
decoded.blocksCount == encoded.originalBlocksCount
for d in dropped:
let present = await store.hasBlock(d)
let present = await store.hasBlock(encoded.treeCid, d)
check present.tryGet()
test "Should not tolerate losing more than M data blocks in a single random column":
@ -92,12 +80,12 @@ asyncchecksuite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
var
column = rng.rand((encoded.len - 1) div encoded.steps) # random column
dropped: seq[Cid]
column = rng.rand((encoded.blocksCount - 1) div encoded.steps) # random column
dropped: seq[int]
for _ in 0..<encoded.ecM + 1:
dropped.add(encoded[column])
(await store.delBlock(encoded[column])).tryGet()
dropped.add(column)
(await store.delBlock(encoded.treeCid, column)).tryGet()
column.inc(encoded.steps)
var
@ -107,7 +95,7 @@ asyncchecksuite "Erasure encode/decode":
decoded = (await erasure.decode(encoded)).tryGet()
for d in dropped:
let present = await store.hasBlock(d)
let present = await store.hasBlock(encoded.treeCid, d)
check not present.tryGet()
test "Should tolerate losing M data blocks in M random columns":
@ -123,19 +111,20 @@ asyncchecksuite "Erasure encode/decode":
while offset < encoded.steps - 1:
let
blockIdx = toSeq(countup(offset, encoded.len - 1, encoded.steps))
blockIdx = toSeq(countup(offset, encoded.blocksCount - 1, encoded.steps))
for _ in 0..<encoded.ecM:
blocks.add(rng.sample(blockIdx, blocks))
offset.inc
for idx in blocks:
(await store.delBlock(encoded[idx])).tryGet()
(await store.delBlock(encoded.treeCid, idx)).tryGet()
discard
discard (await erasure.decode(encoded)).tryGet()
for d in manifest:
let present = await store.hasBlock(d)
for d in 0..<manifest.blocksCount:
let present = await store.hasBlock(manifest.treeCid, d)
check present.tryGet()
test "Should not tolerate losing more than M data blocks in M random columns":
@ -151,20 +140,22 @@ asyncchecksuite "Erasure encode/decode":
while offset < encoded.steps - 1:
let
blockIdx = toSeq(countup(offset, encoded.len - 1, encoded.steps))
blockIdx = toSeq(countup(offset, encoded.blocksCount - 1, encoded.steps))
for _ in 0..<encoded.ecM + 1: # NOTE: the +1
var idx: int
while true:
idx = rng.sample(blockIdx, blocks)
if not encoded[idx].isEmpty:
let blk = (await store.getBlock(encoded.treeCid, idx, encoded.treeRoot)).tryGet()
if not blk.isEmpty:
break
blocks.add(idx)
offset.inc
for idx in blocks:
(await store.delBlock(encoded[idx])).tryGet()
(await store.delBlock(encoded.treeCid, idx)).tryGet()
discard
var
decoded: Manifest
@ -179,13 +170,13 @@ asyncchecksuite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
for b in encoded.blocks[0..<encoded.steps * encoded.ecM]:
(await store.delBlock(b)).tryGet()
for b in 0..<encoded.steps * encoded.ecM:
(await store.delBlock(encoded.treeCid, b)).tryGet()
discard (await erasure.decode(encoded)).tryGet()
for d in manifest:
let present = await store.hasBlock(d)
for d in 0..<manifest.blocksCount:
let present = await store.hasBlock(manifest.treeCid, d)
check present.tryGet()
test "Should tolerate losing M (a.k.a row) contiguous parity blocks":
@ -195,13 +186,13 @@ asyncchecksuite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
for b in encoded.blocks[^(encoded.steps * encoded.ecM)..^1]:
(await store.delBlock(b)).tryGet()
for b in (encoded.blocksCount - encoded.steps * encoded.ecM)..<encoded.blocksCount:
(await store.delBlock(encoded.treeCid, b)).tryGet()
discard (await erasure.decode(encoded)).tryGet()
for d in manifest:
let present = await store.hasBlock(d)
for d in 0..<manifest.blocksCount:
let present = await store.hasBlock(manifest.treeCid, d)
check present.tryGet()
test "handles edge case of 0 parity blocks":

View File

@ -10,88 +10,21 @@ import pkg/codex/blocktype as bt
import pkg/codex/manifest
import ./helpers
import ./examples
checksuite "Manifest":
test "Should produce valid tree hash checksum":
var manifest = Manifest.new(
blocks = @[
Block.new("Block 1".toBytes).tryGet().cid,
Block.new("Block 2".toBytes).tryGet().cid,
Block.new("Block 3".toBytes).tryGet().cid,
Block.new("Block 4".toBytes).tryGet().cid,
Block.new("Block 5".toBytes).tryGet().cid,
Block.new("Block 6".toBytes).tryGet().cid,
Block.new("Block 7".toBytes).tryGet().cid,
]).tryGet()
let
encoded = @[byte 18, 32, 227, 176, 196, 66, 152,
252, 28, 20, 154, 251, 244, 200, 153,
111, 185, 36, 39, 174, 65, 228, 100,
155, 147, 76, 164, 149, 153, 27, 120,
82, 184, 85]
var mh: MultiHash
check MultiHash.decode(encoded, mh).tryGet() > 0
let encodedCid = Cid.init(manifest.version, manifest.codec, mh).tryGet()
check:
encodedCid == manifest.cid.tryGet()
test "Should encode/decode to/from manifest":
let
blocks = (0..<1000).mapIt(
Block.new(("Block " & $it).toBytes).tryGet().cid
)
var
manifest = Manifest.new(blocks).tryGet()
manifest = Manifest.new(
treeCid = Cid.example,
treeRoot = MultiHash.example,
blockSize = 1.MiBs,
datasetSize = 100.MiBs)
let
e = manifest.encode().tryGet()
decoded = Manifest.decode(e).tryGet()
check:
decoded.blocks == blocks
decoded.protected == false
decoded == manifest
test "Should produce a protected manifest":
let
blocks = (0..<333).mapIt(
Block.new(("Block " & $it).toBytes).tryGet().cid
)
manifest = Manifest.new(blocks).tryGet()
var
protected = Manifest.new(manifest, 2, 2).tryGet()
check:
protected.originalCid == manifest.cid.tryGet()
protected.blocks[0..<333] == manifest.blocks
protected.protected == true
protected.originalLen == manifest.len
# fill up with empty Cid's
for i in protected.rounded..<protected.len:
protected[i] = EmptyCid[manifest.version]
.catch
.get()[manifest.hcodec]
.catch
.get()
var
encoded = protected.encode().tryGet()
decoded = Manifest.decode(encoded).tryGet()
check:
decoded.protected == true
decoded.originalLen == manifest.len
decoded.ecK == protected.ecK
decoded.ecM == protected.ecM
decoded.originalCid == protected.originalCid
decoded.originalCid == manifest.cid.tryGet()
decoded.blocks == protected.blocks
decoded.blocks[0..<333] == manifest.blocks

View File

@ -1,3 +1,4 @@
import ./merkletree/testmerkletree
import ./merkletree/testcoders
{.warning[UnusedImport]: off.}

View File

@ -41,18 +41,7 @@ asyncchecksuite "Test Node":
proc fetch(T: type Manifest, chunker: Chunker): Future[Manifest] {.async.} =
# Collect blocks from Chunker into Manifest
var
manifest = Manifest.new().tryGet()
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
let blk = bt.Block.new(chunk).tryGet()
(await localStore.putBlock(blk)).tryGet()
manifest.add(blk.cid)
return manifest
await storeDataGetManifest(localStore, chunker)
proc retrieve(cid: Cid): Future[seq[byte]] {.async.} =
# Retrieve an entire file contents by file Cid
@ -114,7 +103,7 @@ asyncchecksuite "Test Node":
check:
fetched.cid == manifest.cid
fetched.blocks == manifest.blocks
# fetched.blocks == manifest.blocks
test "Block Batching":
let
@ -159,7 +148,7 @@ asyncchecksuite "Test Node":
let data = await retrieve(manifestCid)
check:
data.len == localManifest.originalBytes.int
data.len == localManifest.datasetSize.int
data.len == original.len
sha256.digest(data) == sha256.digest(original)

View File

@ -23,32 +23,26 @@ asyncchecksuite "StoreStream":
return true
let
data = [
[byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[byte 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[byte 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
[byte 30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
[byte 40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
[byte 50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
[byte 60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
[byte 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
[byte 80, 81, 82, 83, 84, 85, 86, 87, 88, 89],
[byte 90, 91, 92, 93, 94, 95, 96, 97, 98, 99],
]
data = [byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99]
chunkSize = 10
teardown:
await stream.close()
setup:
store = CacheStore.new()
manifest = Manifest.new(blockSize = 10'nb).tryGet()
manifest = await storeDataGetManifest(store, MockChunker.new(dataset = data, chunkSize = chunkSize))
stream = StoreStream.new(store, manifest)
for d in data:
let blk = bt.Block.new(d).tryGet()
manifest.add(blk.cid)
(await store.putBlock(blk)).tryGet()
test "Read all blocks < blockSize":
var
buf = newSeq[byte](8)

@ -1 +1 @@
Subproject commit 1854dfba9991a25532de5f6a53cf50e66afb3c8b
Subproject commit 230e7276e271ce53bce36fffdbb25a50621c33b9