Rework merkle tree (#654)

* rework merkle tree support

* deps

* rename merkletree -> codexmerkletree

* tree and proof encoding/decoding

* small change to invoke proof verification

* rename merkletree to codexmerkletree

* style

* adding codex merkle and coders tests

* fixup imports

* remove new codecs for now

* bump deps

* adding trace statement

* properly serialize/deserialize manifest block codecs

* use default hash codec

* add more trace logging to aid debugging

* misc

* remove double import

* revert un-needed change

* proof size changed

* bump poseidon2

* add from nodes test

* shorten file names

* remove upraises

* wip poseidon tree

* adjust file names

* misc

* shorten file names

* fix bad `elements` iter

* don't do asserts

* add fromNodes and converters

* root and getProof now return result

* add poseidon2 tree tests

* root now returns result

* misc

* had to make merkletree a ref, because Nim blows up otherwise

* root returns a result

* root returns a result

* import poseidon tests

* bump

* merkle poseidon2 digest

* misc

* add merkle digest tests

* bump

* don't use checksuite

* Update tests/codex/merkletree/generictreetests.nim

Co-authored-by: markspanbroek <mark@spanbroek.net>
Signed-off-by: Dmitriy Ryajov <dryajov@gmail.com>

* Update codex/merkletree/merkletree.nim

Co-authored-by: markspanbroek <mark@spanbroek.net>
Signed-off-by: Dmitriy Ryajov <dryajov@gmail.com>

* Update codex/merkletree/merkletree.nim

Co-authored-by: markspanbroek <mark@spanbroek.net>
Signed-off-by: Dmitriy Ryajov <dryajov@gmail.com>

* Update tests/codex/merkletree/generictreetests.nim

Co-authored-by: markspanbroek <mark@spanbroek.net>
Signed-off-by: Dmitriy Ryajov <dryajov@gmail.com>

* missing return

* make toBool private (it's still needed otherwise comparison won't work)

* added `digestTree` that returns a tree and `digest` for root

* test against both poseidon trees - codex and poseidon2

* shorten merkle tree names

* don't compare trees - it's going to be too slow

* move comparison to merkle helper

* remove merkle utils

---------

Signed-off-by: Dmitriy Ryajov <dryajov@gmail.com>
Co-authored-by: markspanbroek <mark@spanbroek.net>
Dmitriy Ryajov 2023-12-21 00:41:43 -06:00 committed by GitHub
parent 954c8edf76
commit 52c5578c46
46 changed files with 1264 additions and 865 deletions
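
At a high level, the monolithic `MerkleTree`/`MerkleProof` pair is replaced by a generic `MerkleTree[H, K]`/`MerkleProof[H, K]` with two concrete flavours: `CodexTree`/`CodexProof` (multihash-based, used by the stores and block exchange) and `Poseidon2Tree`/`Poseidon2Proof` (Poseidon2 field elements). `root`, `rootCid` and `getProof` now return results. A minimal usage sketch of the reworked API, assuming the module paths and `?`/result helpers resolve as in this repository:

```nim
import pkg/libp2p                 # Cid
import pkg/questionable/results   # ?! results and the `?` operator
import pkg/codex/merkletree       # CodexTree, CodexProof, Poseidon2Tree, ...

proc checkFirstLeaf(cids: seq[Cid]): ?!void =
  let
    tree = ? CodexTree.init(cids)   # build a tree from block CIDs
    proof = ? tree.getProof(0)      # inclusion proof for the first leaf
  # verify now returns ?!void instead of ?!bool
  proof.verify(tree.leaves[0], ? tree.root)
```

The `rootCid` and `getLeafCid` helpers additionally wrap the raw root and leaf hashes into CIDs for use by the stores.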

View File

@ -328,11 +328,9 @@ proc validateBlockDelivery(
without treeRoot =? bd.address.treeCid.mhash.mapFailure, err:
return failure("Unable to get mhash from treeCid for block, nested err: " & err.msg)
without verifyOutcome =? proof.verifyLeaf(leaf, treeRoot), err:
if err =? proof.verify(leaf, treeRoot).errorOption:
return failure("Unable to verify proof for block, nested err: " & err.msg)
if not verifyOutcome:
return failure("Provided inclusion proof is invalid")
else: # not leaf
if bd.address.cid != bd.blk.cid:
return failure("Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid)
@ -537,12 +535,12 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} =
trace "Handling lookup for entry", address = e.address
if e.address.leaf:
(await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map(
(blkAndProof: (Block, MerkleProof)) =>
(blkAndProof: (Block, CodexProof)) =>
BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some)
)
else:
(await b.localStore.getBlock(e.address)).map(
(blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: MerkleProof.none)
(blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none)
)
let

View File

@ -18,11 +18,9 @@ import pkg/chronicles
import pkg/chronos
import pkg/libp2p
import pkg/metrics
import pkg/questionable/results
import ../protobuf/blockexc
import ../../blocktype
import ../../merkletree
logScope:
topics = "codex pendingblocks"

View File

@ -9,7 +9,6 @@
import std/sequtils
import std/tables
import std/sugar
import std/sets
import pkg/chronicles

View File

@ -9,7 +9,6 @@
import std/hashes
import std/sequtils
import pkg/libp2p
import pkg/stew/endians2
import message

View File

@ -37,7 +37,7 @@ type
BlockDelivery* = object
blk*: Block
address*: BlockAddress
proof*: ?MerkleProof # Present only if `address.leaf` is true
proof*: ?CodexProof # Present only if `address.leaf` is true
BlockPresenceType* = enum
Have = 0,
@ -152,7 +152,7 @@ proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] =
if ? pb.getField(1, field):
leaf = bool(field)
if leaf:
var
treeCid: Cid
@ -215,16 +215,16 @@ proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery]
value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
if ? pb.getField(3, ipb):
value.address = ? BlockAddress.decode(ipb)
if value.address.leaf:
var proofBuf = newSeq[byte]()
if ? pb.getField(4, proofBuf):
let proof = ? MerkleProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
let proof = ? CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob)
value.proof = proof.some
else:
value.proof = MerkleProof.none
value.proof = CodexProof.none
else:
value.proof = MerkleProof.none
value.proof = CodexProof.none
ok(value)

View File

@ -34,6 +34,14 @@ const
# should be divisible by 31 for PoR and by 64 for Leopard ECC
DefaultBlockSize* = NBytes 31 * 64 * 33
# hashes
Sha256Hash* = multiCodec("sha2-256")
# CIDs
Raw = multiCodec("raw")
DagPB* = multiCodec("dag-pb")
DagJson* = multiCodec("dag-json")
type
Block* = ref object of RootObj
cid*: Cid

View File

@ -40,7 +40,12 @@ import ./units
import ./utils
export units
export net, DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval, DefaultNumberOfBlocksToMaintainPerInterval
export net
export
DefaultQuotaBytes,
DefaultBlockTtl,
DefaultBlockMaintenanceInterval,
DefaultNumberOfBlocksToMaintainPerInterval
const
codex_enable_api_debug_peers* {.booldefine.} = false

View File

@ -97,7 +97,9 @@ proc getPendingBlocks(
var
# request blocks from the store
pendingBlocks = indicies.map( (i: int) =>
self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
self.store.getBlock(
BlockAddress.init(manifest.treeCid, i)
).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
)
proc isFinished(): bool = pendingBlocks.len == 0
@ -291,7 +293,7 @@ proc encodeData(
return failure("Unable to store block!")
idx.inc(params.steps)
without tree =? MerkleTree.init(cids[]), err:
without tree =? CodexTree.init(cids[]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
@ -308,6 +310,7 @@ proc encodeData(
ecM = params.ecM
)
trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
return encodedManifest.success
except CancelledError as exc:
trace "Erasure coding encoding cancelled"
@ -415,7 +418,7 @@ proc decode*(
finally:
decoder.release()
without tree =? MerkleTree.init(cids[0..<encoded.originalBlocksCount]), err:
without tree =? CodexTree.init(cids[0..<encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:

View File

@ -54,7 +54,10 @@ proc encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
# optional bytes treeCid = 1; # cid (root) of the tree
# optional uint32 blockSize = 2; # size of a single block
# optional uint64 datasetSize = 3; # size of the dataset
# optional ErasureInfo erasure = 4; # erasure coding info
# optional codec: MultiCodec = 4; # Dataset codec
# optional hcodec: MultiCodec = 5 # Multihash codec
# optional version: CidVersion = 6; # Cid version
# optional ErasureInfo erasure = 7; # erasure coding info
# }
# ```
#
@ -63,6 +66,9 @@ proc encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
header.write(1, manifest.treeCid.data.buffer)
header.write(2, manifest.blockSize.uint32)
header.write(3, manifest.datasetSize.uint32)
header.write(4, manifest.codec.uint32)
header.write(5, manifest.hcodec.uint32)
header.write(6, manifest.version.uint32)
if manifest.protected:
var erasureInfo = initProtoBuffer()
erasureInfo.write(1, manifest.ecK.uint32)
@ -78,7 +84,7 @@ proc encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
erasureInfo.write(5, verificationInfo)
erasureInfo.finish()
header.write(4, erasureInfo)
header.write(7, erasureInfo)
pbNode.write(1, header) # set the treeCid as the data field
pbNode.finish()
@ -97,6 +103,9 @@ proc decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
treeCidBuf: seq[byte]
originalTreeCid: seq[byte]
datasetSize: uint32
codec: uint32
hcodec: uint32
version: uint32
blockSize: uint32
originalDatasetSize: uint32
ecK, ecM: uint32
@ -117,7 +126,16 @@ proc decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
if pbHeader.getField(3, datasetSize).isErr:
return failure("Unable to decode `datasetSize` from manifest!")
if pbHeader.getField(4, pbErasureInfo).isErr:
if pbHeader.getField(4, codec).isErr:
return failure("Unable to decode `codec` from manifest!")
if pbHeader.getField(5, hcodec).isErr:
return failure("Unable to decode `hcodec` from manifest!")
if pbHeader.getField(6, version).isErr:
return failure("Unable to decode `version` from manifest!")
if pbHeader.getField(7, pbErasureInfo).isErr:
return failure("Unable to decode `erasureInfo` from manifest!")
let protected = pbErasureInfo.buffer.len > 0
@ -155,23 +173,21 @@ proc decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
version = treeCid.cidver,
hcodec = (? treeCid.mhash.mapFailure).mcodec,
codec = treeCid.mcodec,
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec,
ecK = ecK.int,
ecM = ecM.int,
originalTreeCid = ? Cid.init(originalTreeCid).mapFailure,
originalDatasetSize = originalDatasetSize.NBytes
)
originalDatasetSize = originalDatasetSize.NBytes)
else:
Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
blockSize = blockSize.NBytes,
version = treeCid.cidver,
hcodec = (? treeCid.mhash.mapFailure).mcodec,
codec = treeCid.mcodec
)
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec)
? self.verify()

View File

@ -33,9 +33,9 @@ type
treeCid {.serialize.}: Cid # Root of the merkle tree
datasetSize {.serialize.}: NBytes # Total size of all blocks
blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
version: CidVersion # Cid version
codec: MultiCodec # Dataset codec
hcodec: MultiCodec # Multihash codec
codec: MultiCodec # Data set codec
version: CidVersion # Cid version
case protected {.serialize.}: bool # Protected datasets have erasure coded info
of true:
ecK: int # Number of blocks to encode
@ -194,15 +194,14 @@ proc `$`*(self: Manifest): string =
############################################################
proc new*(
T: type Manifest,
treeCid: Cid,
blockSize: NBytes,
datasetSize: NBytes,
version: CidVersion = CIDv1,
hcodec = multiCodec("sha2-256"),
codec = multiCodec("raw"),
protected = false,
): Manifest =
T: type Manifest,
treeCid: Cid,
blockSize: NBytes,
datasetSize: NBytes,
version: CidVersion = CIDv1,
hcodec = multiCodec("sha2-256"),
codec = multiCodec("raw"),
protected = false): Manifest =
T(
treeCid: treeCid,
@ -214,15 +213,15 @@ proc new*(
protected: protected)
proc new*(
T: type Manifest,
manifest: Manifest,
treeCid: Cid,
datasetSize: NBytes,
ecK, ecM: int
): Manifest =
T: type Manifest,
manifest: Manifest,
treeCid: Cid,
datasetSize: NBytes,
ecK, ecM: int): Manifest =
## Create an erasure protected dataset from an
## unprotected one
##
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
@ -236,9 +235,8 @@ proc new*(
originalDatasetSize: manifest.datasetSize)
proc new*(
T: type Manifest,
manifest: Manifest
): Manifest =
T: type Manifest,
manifest: Manifest): Manifest =
## Create an unprotected dataset from an
## erasure protected one
##
@ -254,10 +252,10 @@ proc new*(
proc new*(
T: type Manifest,
data: openArray[byte],
decoder = ManifestContainers[$DagPBCodec]
): ?!Manifest =
decoder = ManifestContainers[$DagPBCodec]): ?!Manifest =
## Create a manifest instance from given data
##
Manifest.decode(data, decoder)
proc new*(
@ -271,8 +269,8 @@ proc new*(
ecK: int,
ecM: int,
originalTreeCid: Cid,
originalDatasetSize: NBytes
): Manifest =
originalDatasetSize: NBytes): Manifest =
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
@ -288,11 +286,10 @@ proc new*(
)
proc new*(
T: type Manifest,
manifest: Manifest,
verificationRoot: Cid,
slotRoots: seq[Cid]
): ?!Manifest =
T: type Manifest,
manifest: Manifest,
verificationRoot: Cid,
slotRoots: seq[Cid]): ?!Manifest =
## Create a verifiable dataset from a
## protected one
##
@ -313,5 +310,4 @@ proc new*(
originalDatasetSize: manifest.originalDatasetSize,
verifiable: true,
verificationRoot: verificationRoot,
slotRoots: slotRoots
))
slotRoots: slotRoots))

View File

@ -10,14 +10,12 @@
# This module defines Manifest and all related types
import std/tables
import std/strutils
import pkg/libp2p
import ../units
export units
const
BlockCodec* = multiCodec("raw")
DagPBCodec* = multiCodec("dag-pb")
type

View File

@ -1,4 +1,5 @@
import ./merkletree/merkletree
import ./merkletree/coders
import ./merkletree/codex
import ./merkletree/poseidon2
export merkletree, coders
export codex, poseidon2, merkletree

View File

@ -1,75 +0,0 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import ./merkletree
import ../units
import ../errors
const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint
proc encode*(self: MerkleTree): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
pb.write(1, self.mcodec.uint64)
pb.write(2, self.digestSize.uint64)
pb.write(3, self.leavesCount.uint64)
pb.write(4, self.nodesBuffer)
pb.finish
pb.buffer
proc decode*(_: type MerkleTree, data: seq[byte]): ?!MerkleTree =
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
var mcodecCode: uint64
var digestSize: uint64
var leavesCount: uint64
discard ? pb.getField(1, mcodecCode).mapFailure
discard ? pb.getField(2, digestSize).mapFailure
discard ? pb.getField(3, leavesCount).mapFailure
let mcodec = MultiCodec.codec(cast[int](mcodecCode))
if mcodec == InvalidMultiCodec:
return failure("Invalid MultiCodec code " & $cast[int](mcodec))
var nodesBuffer = newSeq[byte]()
discard ? pb.getField(4, nodesBuffer).mapFailure
let tree = ? MerkleTree.init(mcodec, digestSize, leavesCount, nodesBuffer)
success(tree)
proc encode*(self: MerkleProof): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
pb.write(1, self.mcodec.uint64)
pb.write(2, self.digestSize.uint64)
pb.write(3, self.index.uint64)
pb.write(4, self.nodesBuffer)
pb.finish
pb.buffer
proc decode*(_: type MerkleProof, data: seq[byte]): ?!MerkleProof =
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
var mcodecCode: uint64
var digestSize: uint64
var index: uint64
discard ? pb.getField(1, mcodecCode).mapFailure
discard ? pb.getField(2, digestSize).mapFailure
discard ? pb.getField(3, index).mapFailure
let mcodec = MultiCodec.codec(cast[int](mcodecCode))
if mcodec == InvalidMultiCodec:
return failure("Invalid MultiCodec code " & $cast[int](mcodec))
var nodesBuffer = newSeq[byte]()
discard ? pb.getField(4, nodesBuffer).mapFailure
let proof = ? MerkleProof.init(mcodec, digestSize, index, nodesBuffer)
success(proof)

View File

@ -0,0 +1,4 @@
import ./codex/codex
import ./codex/coders
export codex, coders

View File

@ -0,0 +1,102 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push: {.upraises: [].}
import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import ../../units
import ../../errors
import ./codex
const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint
proc encode*(self: CodexTree): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
pb.write(1, self.mcodec.uint64)
pb.write(2, self.leavesCount.uint64)
for node in self.nodes:
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
nodesPb.write(1, node)
nodesPb.finish()
pb.write(3, nodesPb)
pb.finish
pb.buffer
proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
var mcodecCode: uint64
var leavesCount: uint64
discard ? pb.getField(1, mcodecCode).mapFailure
discard ? pb.getField(2, leavesCount).mapFailure
let mcodec = MultiCodec.codec(mcodecCode.int)
if mcodec == InvalidMultiCodec:
return failure("Invalid MultiCodec code " & $mcodecCode)
var
nodesBuff: seq[seq[byte]]
nodes: seq[ByteHash]
if ? pb.getRepeatedField(3, nodesBuff).mapFailure:
for nodeBuff in nodesBuff:
var node: ByteHash
discard ? initProtoBuffer(nodeBuff).getField(1, node).mapFailure
nodes.add node
CodexTree.fromNodes(mcodec, nodes, leavesCount.int)
proc encode*(self: CodexProof): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
pb.write(1, self.mcodec.uint64)
pb.write(2, self.index.uint64)
pb.write(3, self.nleaves.uint64)
for node in self.path:
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
nodesPb.write(1, node)
nodesPb.finish()
pb.write(4, nodesPb)
pb.finish
pb.buffer
proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
var mcodecCode: uint64
var index: uint64
var nleaves: uint64
discard ? pb.getField(1, mcodecCode).mapFailure
let mcodec = MultiCodec.codec(mcodecCode.int)
if mcodec == InvalidMultiCodec:
return failure("Invalid MultiCodec code " & $mcodecCode)
discard ? pb.getField(2, index).mapFailure
discard ? pb.getField(3, nleaves).mapFailure
var
nodesBuff: seq[seq[byte]]
nodes: seq[ByteHash]
if ? pb.getRepeatedField(4, nodesBuff).mapFailure:
for nodeBuff in nodesBuff:
var node: ByteHash
let nodePb = initProtoBuffer(nodeBuff)
discard ? nodePb.getField(1, node).mapFailure
nodes.add node
CodexProof.init(mcodec, index.int, nleaves.int, nodes)
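
A round-trip sketch for the coders above, mirroring the test near the bottom of this diff (leaf length must equal the digest size of the chosen codec, 32 bytes for sha2-256):

```nim
import pkg/libp2p                 # multiCodec
import pkg/stew/byteutils
import pkg/questionable/results
import pkg/codex/merkletree

let
  leaves = @[
    "00000000000000000000000000000001".toBytes,
    "00000000000000000000000000000002".toBytes]
  tree = CodexTree.init(multiCodec("sha2-256"), leaves).tryGet()
  proof = tree.getProof(1).tryGet()
  decodedTree = CodexTree.decode(tree.encode()).tryGet()
  decodedProof = CodexProof.decode(proof.encode()).tryGet()

# the decoded proof still verifies against the decoded tree's root
assert decodedProof.verify(decodedTree.leaves[1], decodedTree.root.tryGet()).isOk
```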

View File

@ -0,0 +1,270 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push: {.upraises: [].}
import std/bitops
import std/sequtils
import pkg/questionable
import pkg/questionable/results
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils
import ../../utils
import ../../rng
import ../../errors
import ../../blocktype
import ../merkletree
export merkletree
logScope:
topics = "codex merkletree"
const
DatasetRootCodec* = multiCodec("codex-root") # TODO: move to blocktype
BlockCodec* = multiCodec("raw") # TODO: fix multicodec to `codex-block` and move to blocktype
type
ByteTreeKey* {.pure.} = enum
KeyNone = 0x0.byte
KeyBottomLayer = 0x1.byte
KeyOdd = 0x2.byte
KeyOddAndBottomLayer = 0x3.byte
ByteHash* = seq[byte]
ByteTree* = MerkleTree[ByteHash, ByteTreeKey]
ByteTreeProof* = MerkleProof[ByteHash, ByteTreeKey]
CodexTree* = ref object of ByteTree
mhash: MHash
CodexProof* = ref object of ByteTreeProof
mhash: MHash
func getMhash*(mcodec: MultiCodec): ?!MHash =
let
mhash = CodeHashes.getOrDefault(mcodec)
if isNil(mhash.coder):
return failure "Invalid multihash codec"
success mhash
func digestSize*(self: (CodexTree or CodexProof)): int =
## Digest size in bytes
##
self.mhash.size
func mcodec*(self: (CodexTree or CodexProof)): MultiCodec =
## Multicodec
##
self.mhash.mcodec
func bytes*(mhash: MultiHash): seq[byte] =
## Extract hash bytes
##
mhash.data.buffer[mhash.dpos..<mhash.dpos + mhash.size]
func getProof*(self: CodexTree, index: int): ?!CodexProof =
var
proof = CodexProof(mhash: self.mhash)
? self.getProof(index, proof)
success proof
func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!void =
## Verify an inclusion proof for a leaf against a tree root
##
let
rootBytes = root.bytes
leafBytes = leaf.bytes
if self.mcodec != root.mcodec or
self.mcodec != leaf.mcodec:
return failure "Hash codec mismatch"
if rootBytes.len != root.size and
leafBytes.len != leaf.size:
return failure "Invalid hash length"
? self.verify(leafBytes, rootBytes)
success()
func verify*(self: CodexProof, leaf: Cid, root: Cid): ?!void =
self.verify(? leaf.mhash.mapFailure, ? root.mhash.mapFailure)
proc rootCid*(
self: CodexTree,
version = CIDv1,
dataCodec = DatasetRootCodec): ?!Cid =
if (? self.root).len == 0:
return failure "Empty root"
let
mhash = ? MultiHash.init(self.mcodec, ? self.root).mapFailure
Cid.init(version, DatasetRootCodec, mhash).mapFailure
func getLeafCid*(
self: CodexTree,
i: Natural,
version = CIDv1,
dataCodec = BlockCodec): ?!Cid =
if i >= self.leavesCount:
return failure "Invalid leaf index " & $i
let
leaf = self.leaves[i]
mhash = ? MultiHash.init($self.mcodec, leaf).mapFailure
Cid.init(version, dataCodec, mhash).mapFailure
proc `$`*(self: CodexTree): string =
"CodexTree( mcodec: " &
$self.mcodec &
", leavesCount: " &
$self.leavesCount & " )"
proc `$`*(self: CodexProof): string =
"CodexProof( mcodec: " &
$self.mcodec & ", nleaves: " &
$self.nleaves & ", index: " &
$self.index & " )"
func compress*(
x, y: openArray[byte],
key: ByteTreeKey,
mhash: MHash): ?!ByteHash =
## Compress two hashes
##
var digest = newSeq[byte](mhash.size)
mhash.coder(@x & @y & @[ key.byte ], digest)
success digest
func init*(
_: type CodexTree,
mcodec: MultiCodec = multiCodec("sha2-256"),
leaves: openArray[ByteHash]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mhash = ? mcodec.getMhash()
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
Zero: ByteHash = newSeq[byte](mhash.size)
if mhash.size != leaves[0].len:
return failure "Invalid hash length"
var
self = CodexTree(mhash: mhash, compress: compressor, zero: Zero)
self.layers = ? merkleTreeWorker(self, leaves, isBottomLayer = true)
success self
func init*(
_: type CodexTree,
leaves: openArray[MultiHash]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mcodec = leaves[0].mcodec
leaves = leaves.mapIt( it.bytes )
CodexTree.init(mcodec, leaves)
func init*(
_: type CodexTree,
leaves: openArray[Cid]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mcodec = (? leaves[0].mhash.mapFailure).mcodec
leaves = leaves.mapIt( (? it.mhash.mapFailure).bytes )
CodexTree.init(mcodec, leaves)
proc fromNodes*(
_: type CodexTree,
mcodec: MultiCodec = multiCodec("sha2-256"),
nodes: openArray[ByteHash],
nleaves: int): ?!CodexTree =
if nodes.len == 0:
return failure "Empty nodes"
let
mhash = ? mcodec.getMhash()
Zero = newSeq[byte](mhash.size)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
if mhash.size != nodes[0].len:
return failure "Invalid hash length"
var
self = CodexTree(compress: compressor, zero: Zero, mhash: mhash)
layer = nleaves
pos = 0
while pos < nodes.len:
self.layers.add( nodes[pos..<(pos + layer)] )
pos += layer
layer = divUp(layer, 2)
let
index = Rng.instance.rand(nleaves - 1)
proof = ? self.getProof(index)
? proof.verify(self.leaves[index], ? self.root) # sanity check
success self
func init*(
_: type CodexProof,
mcodec: MultiCodec = multiCodec("sha2-256"),
index: int,
nleaves: int,
nodes: openArray[ByteHash]): ?!CodexProof =
if nodes.len == 0:
return failure "Empty nodes"
let
mhash = ? mcodec.getMhash()
Zero = newSeq[byte](mhash.size)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
compress(x, y, key, mhash)
success CodexProof(
compress: compressor,
zero: Zero,
mhash: mhash,
index: index,
nleaves: nleaves,
path: @nodes)
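
`fromNodes` rebuilds a tree from its flattened node list (bottom layer first, root last), which is what the protobuf coders rely on. A hypothetical persistence sketch, with placeholder leaf values:

```nim
import std/sequtils
import pkg/libp2p
import pkg/stew/byteutils
import pkg/questionable/results
import pkg/codex/merkletree

let
  leaves = @[
    "0000000000000000000000000000000a".toBytes,
    "0000000000000000000000000000000b".toBytes,
    "0000000000000000000000000000000c".toBytes]
  tree = CodexTree.init(multiCodec("sha2-256"), leaves).tryGet()
  stored = toSeq(tree.nodes)                 # flat node list, layer by layer
  rebuilt = CodexTree.fromNodes(
    tree.mcodec, stored, tree.leavesCount).tryGet()

assert rebuilt.root.tryGet() == tree.root.tryGet()
```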

View File

@ -7,412 +7,157 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/math
import std/bitops
import std/sequtils
import std/sugar
import std/algorithm
{.push raises: [].}
import std/bitops
import pkg/chronicles
import pkg/questionable
import pkg/questionable/results
import pkg/nimcrypto/sha2
import pkg/libp2p/[cid, multicodec, multihash, vbuffer]
import pkg/stew/byteutils
import ../errors
logScope:
topics = "codex merkletree"
type
MerkleTree* = object
mcodec: MultiCodec
digestSize: Natural
leavesCount: Natural
nodesBuffer*: seq[byte]
MerkleProof* = object
mcodec: MultiCodec
digestSize: Natural
index: Natural
nodesBuffer*: seq[byte]
MerkleTreeBuilder* = object
mcodec: MultiCodec
digestSize: Natural
buffer: seq[byte]
PutFn*[H] = proc(i: Natural, x: H): ?!void {.noSideEffect, raises: [].}
GetFn*[H] = proc(i: Natural): ?!H {.noSideEffect, raises: [].}
###########################################################
# Helper functions
###########################################################
StoreBackend*[H] = object
put: PutFn[H]
get: GetFn[H]
func computeTreeHeight(leavesCount: int): int =
if isPowerOfTwo(leavesCount):
fastLog2(leavesCount) + 1
else:
fastLog2(leavesCount) + 2
CompressFn*[H, K] = proc (x, y: H, key: K): ?!H {.noSideEffect, raises: [].}
func computeLevels(leavesCount: int): seq[tuple[offset: int, width: int, index: int]] =
let height = computeTreeHeight(leavesCount)
var levels = newSeq[tuple[offset: int, width: int, index: int]](height)
MerkleTree*[H, K] = ref object of RootObj
layers* : seq[seq[H]]
compress*: CompressFn[H, K]
zero* : H
levels[0].offset = 0
levels[0].width = leavesCount
levels[0].index = 0
for i in 1..<height:
levels[i].offset = levels[i - 1].offset + levels[i - 1].width
levels[i].width = (levels[i - 1].width + 1) div 2
levels[i].index = i
levels
MerkleProof*[H, K] = ref object of RootObj
index* : int # linear index of the leaf, starting from 0
path* : seq[H] # order: from the bottom to the top
nleaves* : int # number of leaves in the tree (=size of input)
compress*: CompressFn[H, K] # compress function
zero* : H # zero value
func depth*[H, K](self: MerkleTree[H, K]): int =
return self.layers.len - 1
func leavesCount*[H, K](self: MerkleTree[H, K]): int =
return self.layers[0].len
func levels*[H, K](self: MerkleTree[H, K]): int =
return self.layers.len
func leaves*[H, K](self: MerkleTree[H, K]): seq[H] =
return self.layers[0]
iterator layers*[H, K](self: MerkleTree[H, K]): seq[H] =
for layer in self.layers:
yield layer
iterator nodes*[H, K](self: MerkleTree[H, K]): H =
for layer in self.layers:
for node in layer:
yield node
func root*[H, K](self: MerkleTree[H, K]): ?!H =
let last = self.layers[^1]
if last.len != 1:
return failure "invalid tree"
return success last[0]
func getProof*[H, K](
self: MerkleTree[H, K],
index: int,
proof: MerkleProof[H, K]): ?!void =
let depth = self.depth
let nleaves = self.leavesCount
if not (index >= 0 and index < nleaves):
return failure "index out of bounds"
var path : seq[H] = newSeq[H](depth)
var k = index
var m = nleaves
for i in 0..<depth:
let j = k xor 1
path[i] = if (j < m): self.layers[i][j] else: self.zero
k = k shr 1
m = (m + 1) shr 1
proof.index = index
proof.path = path
proof.nleaves = nleaves
proof.compress = self.compress
proc digestFn(mcodec: MultiCodec, dst: var openArray[byte], dstPos: int, data: openArray[byte]): ?!void =
var mhash = ? MultiHash.digest($mcodec, data).mapFailure
if (dstPos + mhash.size) > dst.len:
return failure("Not enough space in a destination buffer")
dst[dstPos..<dstPos + mhash.size] = mhash.data.buffer[mhash.dpos..<mhash.dpos + mhash.size]
success()
###########################################################
# MerkleTreeBuilder
###########################################################
proc init*(
T: type MerkleTreeBuilder,
mcodec: MultiCodec = multiCodec("sha2-256")
): ?!MerkleTreeBuilder =
let mhash = ? MultiHash.digest($mcodec, "".toBytes).mapFailure
success(MerkleTreeBuilder(mcodec: mcodec, digestSize: mhash.size, buffer: newSeq[byte]()))
proc addDataBlock*(self: var MerkleTreeBuilder, dataBlock: openArray[byte]): ?!void =
## Hashes the data block and adds the result of hashing to a buffer
##
let oldLen = self.buffer.len
self.buffer.setLen(oldLen + self.digestSize)
digestFn(self.mcodec, self.buffer, oldLen, dataBlock)
proc addLeaf*(self: var MerkleTreeBuilder, leaf: MultiHash): ?!void =
if leaf.mcodec != self.mcodec or leaf.size != self.digestSize:
return failure("Expected mcodec to be " & $self.mcodec & " and digest size to be " &
$self.digestSize & " but was " & $leaf.mcodec & " and " & $leaf.size)
let oldLen = self.buffer.len
self.buffer.setLen(oldLen + self.digestSize)
self.buffer[oldLen..<oldLen + self.digestSize] = leaf.data.buffer[leaf.dpos..<leaf.dpos + self.digestSize]
success()
proc build*(self: MerkleTreeBuilder): ?!MerkleTree =
## Builds a tree from previously added data blocks
##
## Tree built from data blocks A, B and C is
## H5=H(H3 & H4)
## / \
## H3=H(H0 & H1) H4=H(H2 & 0x00)
## / \ /
## H0=H(A) H1=H(B) H2=H(C)
## | | |
## A B C
##
## Memory layout is [H0, H1, H2, H3, H4, H5]
##
let
mcodec = self.mcodec
digestSize = self.digestSize
leavesCount = self.buffer.len div self.digestSize
if leavesCount == 0:
return failure("At least one data block is required")
let levels = computeLevels(leavesCount)
let totalNodes = levels[^1].offset + 1
var tree = MerkleTree(mcodec: mcodec, digestSize: digestSize, leavesCount: leavesCount, nodesBuffer: newSeq[byte](totalNodes * digestSize))
# copy leaves
tree.nodesBuffer[0..<leavesCount * digestSize] = self.buffer[0..<leavesCount * digestSize]
# calculate intermediate nodes
var zero = newSeq[byte](digestSize)
var one = newSeq[byte](digestSize)
one[^1] = 0x01
func getProof*[H, K](self: MerkleTree[H, K], index: int): ?!MerkleProof[H, K] =
var
concatBuf = newSeq[byte](2 * digestSize)
prevLevel = levels[0]
for level in levels[1..^1]:
for i in 0..<level.width:
let parentIndex = level.offset + i
let leftChildIndex = prevLevel.offset + 2 * i
let rightChildIndex = leftChildIndex + 1
proof = MerkleProof[H, K]()
concatBuf[0..<digestSize] = tree.nodesBuffer[leftChildIndex * digestSize..<(leftChildIndex + 1) * digestSize]
? self.getProof(index, proof)
var dummyValue = if prevLevel.index == 0: zero else: one
success proof
if rightChildIndex < prevLevel.offset + prevLevel.width:
concatBuf[digestSize..^1] = tree.nodesBuffer[rightChildIndex * digestSize..<(rightChildIndex + 1) * digestSize]
func reconstructRoot*[H, K](proof: MerkleProof[H, K], leaf: H): ?!H =
var
m = proof.nleaves
j = proof.index
h = leaf
bottomFlag = K.KeyBottomLayer
for p in proof.path:
let oddIndex : bool = (bitand(j,1) != 0)
if oddIndex:
# the index of the child is odd, so the node itself can't be odd (a bit counterintuitive, yeah :)
h = ? proof.compress( p, h, bottomFlag )
else:
if j == m - 1:
# single child => odd node
h = ? proof.compress( h, p, K(bottomFlag.ord + 2) )
else:
concatBuf[digestSize..^1] = dummyValue
# even node
h = ? proof.compress( h , p, bottomFlag )
bottomFlag = K.KeyNone
j = j shr 1
m = (m+1) shr 1
? digestFn(mcodec, tree.nodesBuffer, parentIndex * digestSize, concatBuf)
prevLevel = level
return success h
return success(tree)
###########################################################
# MerkleTree
###########################################################
proc nodeBufferToMultiHash(self: (MerkleTree | MerkleProof), index: int): MultiHash =
var buf = newSeq[byte](self.digestSize)
let offset = index * self.digestSize
buf[0..^1] = self.nodesBuffer[offset..<(offset + self.digestSize)]
{.noSideEffect.}:
without mhash =? MultiHash.init($self.mcodec, buf).mapFailure, errx:
error "Error converting bytes to hash", msg = errx.msg
mhash
proc len*(self: (MerkleTree | MerkleProof)): Natural =
self.nodesBuffer.len div self.digestSize
proc nodes*(self: (MerkleTree | MerkleProof)): seq[MultiHash] {.noSideEffect.} =
toSeq(0..<self.len).map(i => self.nodeBufferToMultiHash(i))
proc mcodec*(self: (MerkleTree | MerkleProof)): MultiCodec =
self.mcodec
proc digestSize*(self: (MerkleTree | MerkleProof)): Natural =
self.digestSize
proc root*(self: MerkleTree): MultiHash =
let rootIndex = self.len - 1
self.nodeBufferToMultiHash(rootIndex)
proc rootCid*(self: MerkleTree, version = CIDv1, dataCodec = multiCodec("raw")): ?!Cid =
Cid.init(version, dataCodec, self.root).mapFailure
iterator leaves*(self: MerkleTree): MultiHash =
for i in 0..<self.leavesCount:
yield self.nodeBufferToMultiHash(i)
iterator leavesCids*(self: MerkleTree, version = CIDv1, dataCodec = multiCodec("raw")): ?!Cid =
for leaf in self.leaves:
yield Cid.init(version, dataCodec, leaf).mapFailure
proc leavesCount*(self: MerkleTree): Natural =
self.leavesCount
proc getLeaf*(self: MerkleTree, index: Natural): ?!MultiHash =
if index >= self.leavesCount:
return failure("Index " & $index & " out of range [0.." & $(self.leavesCount - 1) & "]" )
success(self.nodeBufferToMultiHash(index))
proc getLeafCid*(self: MerkleTree, index: Natural, version = CIDv1, dataCodec = multiCodec("raw")): ?!Cid =
let leaf = ? self.getLeaf(index)
Cid.init(version, dataCodec, leaf).mapFailure
proc height*(self: MerkleTree): Natural =
computeTreeHeight(self.leavesCount)
proc getProof*(self: MerkleTree, index: Natural): ?!MerkleProof =
## Extracts proof from a tree for a given index
##
## Given a tree built from data blocks A, B and C
## H5
## / \
## H3 H4
## / \ /
## H0 H1 H2
## | | |
## A B C
##
## Proofs of inclusion (index and path) are
## - 0,[H1, H4] for data block A
## - 1,[H0, H4] for data block B
## - 2,[0x00, H3] for data block C
##
if index >= self.leavesCount:
return failure("Index " & $index & " out of range [0.." & $(self.leavesCount - 1) & "]" )
var zero = newSeq[byte](self.digestSize)
var one = newSeq[byte](self.digestSize)
one[^1] = 0x01
let levels = computeLevels(self.leavesCount)
var proofNodesBuffer = newSeq[byte]((levels.len - 1) * self.digestSize)
for level in levels[0..^2]:
let lr = index shr level.index
let siblingIndex = if lr mod 2 == 0:
level.offset + lr + 1
func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!void =
return if bool(root == ? proof.reconstructRoot(leaf)):
success()
else:
level.offset + lr - 1
failure("invalid proof")
var dummyValue = if level.index == 0: zero else: one
func merkleTreeWorker*[H, K](
self: MerkleTree[H, K],
xs: openArray[H],
isBottomLayer: static bool): ?!seq[seq[H]] =
if siblingIndex < level.offset + level.width:
proofNodesBuffer[level.index * self.digestSize..<(level.index + 1) * self.digestSize] =
self.nodesBuffer[siblingIndex * self.digestSize..<(siblingIndex + 1) * self.digestSize]
else:
proofNodesBuffer[level.index * self.digestSize..<(level.index + 1) * self.digestSize] = dummyValue
let a = low(xs)
let b = high(xs)
let m = b - a + 1
success(MerkleProof(mcodec: self.mcodec, digestSize: self.digestSize, index: index, nodesBuffer: proofNodesBuffer))
when not isBottomLayer:
if m == 1:
return success @[ @xs ]
proc `$`*(self: MerkleTree): string {.noSideEffect.} =
"mcodec:" & $self.mcodec &
", digestSize: " & $self.digestSize &
", leavesCount: " & $self.leavesCount &
", nodes: " & $self.nodes
let halfn: int = m div 2
let n : int = 2 * halfn
let isOdd: bool = (n != m)
proc `==`*(a, b: MerkleTree): bool =
(a.mcodec == b.mcodec) and
(a.digestSize == b.digestSize) and
(a.leavesCount == b.leavesCount) and
(a.nodesBuffer == b.nodesBuffer)
proc init*(
T: type MerkleTree,
mcodec: MultiCodec,
digestSize: Natural,
leavesCount: Natural,
nodesBuffer: seq[byte]
): ?!MerkleTree =
let levels = computeLevels(leavesCount)
let totalNodes = levels[^1].offset + 1
if totalNodes * digestSize == nodesBuffer.len:
success(
MerkleTree(
mcodec: mcodec,
digestSize: digestSize,
leavesCount: leavesCount,
nodesBuffer: nodesBuffer
)
)
var ys: seq[H]
if not isOdd:
ys = newSeq[H](halfn)
else:
failure("Expected nodesBuffer len to be " & $(totalNodes * digestSize) & " but was " & $nodesBuffer.len)
ys = newSeq[H](halfn + 1)
proc init*(
T: type MerkleTree,
leaves: openArray[MultiHash]
): ?!MerkleTree =
without leaf =? leaves.?[0]:
return failure("At least one leaf is required")
for i in 0..<halfn:
const key = when isBottomLayer: K.KeyBottomLayer else: K.KeyNone
ys[i] = ? self.compress( xs[a + 2 * i], xs[a + 2 * i + 1], key = key )
if isOdd:
const key = when isBottomLayer: K.KeyOddAndBottomLayer else: K.KeyOdd
ys[halfn] = ? self.compress( xs[n], self.zero, key = key )
var builder = ? MerkleTreeBuilder.init(mcodec = leaf.mcodec)
for l in leaves:
let res = builder.addLeaf(l)
if res.isErr:
return failure(res.error)
builder.build()
proc init*(
T: type MerkleTree,
cids: openArray[Cid]
): ?!MerkleTree =
var leaves = newSeq[MultiHash]()
for cid in cids:
let res = cid.mhash.mapFailure
if res.isErr:
return failure(res.error)
else:
leaves.add(res.value)
MerkleTree.init(leaves)
###########################################################
# MerkleProof
###########################################################
proc verifyLeaf*(self: MerkleProof, leaf: MultiHash, treeRoot: MultiHash): ?!bool =
if leaf.mcodec != self.mcodec:
return failure("Leaf mcodec was " & $leaf.mcodec & ", but " & $self.mcodec & " expected")
if leaf.mcodec != self.mcodec:
return failure("Tree root mcodec was " & $treeRoot.mcodec & ", but " & $treeRoot.mcodec & " expected")
var digestBuf = newSeq[byte](self.digestSize)
digestBuf[0..^1] = leaf.data.buffer[leaf.dpos..<(leaf.dpos + self.digestSize)]
let proofLen = self.nodesBuffer.len div self.digestSize
var concatBuf = newSeq[byte](2 * self.digestSize)
for i in 0..<proofLen:
let offset = i * self.digestSize
let lr = self.index shr i
if lr mod 2 == 0:
concatBuf[0..^1] = digestBuf & self.nodesBuffer[offset..<(offset + self.digestSize)]
else:
concatBuf[0..^1] = self.nodesBuffer[offset..<(offset + self.digestSize)] & digestBuf
? digestFn(self.mcodec, digestBuf, 0, concatBuf)
let computedRoot = ? MultiHash.init(self.mcodec, digestBuf).mapFailure
success(computedRoot == treeRoot)
proc verifyDataBlock*(self: MerkleProof, dataBlock: openArray[byte], treeRoot: MultiHash): ?!bool =
var digestBuf = newSeq[byte](self.digestSize)
? digestFn(self.mcodec, digestBuf, 0, dataBlock)
let leaf = ? MultiHash.init(self.mcodec, digestBuf).mapFailure
self.verifyLeaf(leaf, treeRoot)
proc index*(self: MerkleProof): Natural =
self.index
proc `$`*(self: MerkleProof): string =
"mcodec:" & $self.mcodec &
", digestSize: " & $self.digestSize &
", index: " & $self.index &
", nodes: " & $self.nodes
func `==`*(a, b: MerkleProof): bool =
(a.index == b.index) and
(a.mcodec == b.mcodec) and
(a.digestSize == b.digestSize) and
(a.nodesBuffer == b.nodesBuffer)
proc init*(
T: type MerkleProof,
index: Natural,
nodes: seq[MultiHash]
): ?!MerkleProof =
if nodes.len == 0:
return failure("At least one node is required")
let
mcodec = nodes[0].mcodec
digestSize = nodes[0].size
var nodesBuffer = newSeq[byte](nodes.len * digestSize)
for nodeIndex, node in nodes:
nodesBuffer[nodeIndex * digestSize..<(nodeIndex + 1) * digestSize] = node.data.buffer[node.dpos..<node.dpos + digestSize]
success(MerkleProof(mcodec: mcodec, digestSize: digestSize, index: index, nodesBuffer: nodesBuffer))
func init*(
T: type MerkleProof,
mcodec: MultiCodec,
digestSize: Natural,
index: Natural,
nodesBuffer: seq[byte]
): ?!MerkleProof =
if nodesBuffer.len mod digestSize != 0:
return failure("nodesBuffer len is not a multiple of digestSize")
let treeHeight = (nodesBuffer.len div digestSize) + 1
let maxLeavesCount = 1 shl treeHeight
if index < maxLeavesCount:
return success(
MerkleProof(
mcodec: mcodec,
digestSize: digestSize,
index: index,
nodesBuffer: nodesBuffer
)
)
else:
return failure("index higher than max leaves count")
success @[ @xs ] & ? self.merkleTreeWorker(ys, isBottomLayer = false)

View File

@ -0,0 +1,104 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import pkg/poseidon2
import pkg/constantine/math/io/io_fields
import pkg/constantine/platforms/abstractions
import pkg/questionable/results
import ../utils
import ../rng
import ./merkletree
export merkletree, poseidon2
const
KeyNoneF = F.fromhex("0x0")
KeyBottomLayerF = F.fromhex("0x1")
KeyOddF = F.fromhex("0x2")
KeyOddAndBottomLayerF = F.fromhex("0x3")
type
Poseidon2Hash* = F
PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
KeyNone
KeyBottomLayer
KeyOdd
KeyOddAndBottomLayer
Poseidon2Tree* = MerkleTree[Poseidon2Hash, PoseidonKeysEnum]
Poseidon2Proof* = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]
converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
case key:
of KeyNone: KeyNoneF
of KeyBottomLayer: KeyBottomLayerF
of KeyOdd: KeyOddF
of KeyOddAndBottomLayer: KeyOddAndBottomLayerF
func init*(
_: type Poseidon2Tree,
leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
if leaves.len == 0:
return failure "Empty leaves"
let
compressor = proc(
x, y: Poseidon2Hash,
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
success compress( x, y, key.toKey )
var
self = Poseidon2Tree(compress: compressor, zero: zero)
self.layers = ? merkleTreeWorker(self, leaves, isBottomLayer = true)
success self
func init*(
_: type Poseidon2Tree,
leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
Poseidon2Tree.init(
leaves.mapIt( Poseidon2Hash.fromBytes(it) ))
proc fromNodes*(
_: type Poseidon2Tree,
nodes: openArray[Poseidon2Hash],
nleaves: int): ?!Poseidon2Tree =
if nodes.len == 0:
return failure "Empty nodes"
let
compressor = proc(
x, y: Poseidon2Hash,
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
success compress( x, y, key.toKey )
var
self = Poseidon2Tree(compress: compressor, zero: zero)
layer = nleaves
pos = 0
while pos < nodes.len:
self.layers.add( nodes[pos..<(pos + layer)] )
pos += layer
layer = divUp(layer, 2)
let
index = Rng.instance.rand(nleaves - 1)
proof = ? self.getProof(index)
? proof.verify(self.leaves[index], ? self.root) # sanity check
success self
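
A small sketch of the Poseidon2 flavour, building a tree from 31-byte leaves (the leaf contents below are arbitrary placeholders):

```nim
import pkg/questionable/results
import pkg/codex/merkletree    # exports Poseidon2Tree and poseidon2

var leaves: seq[array[31, byte]]
for i in 0 ..< 4:
  var chunk: array[31, byte]
  chunk[0] = i.byte            # arbitrary leaf contents
  leaves.add chunk

let
  tree = Poseidon2Tree.init(leaves).tryGet()
  proof = tree.getProof(2).tryGet()

assert proof.verify(tree.leaves[2], tree.root.tryGet()).isOk
```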

View File

@ -243,7 +243,7 @@ proc store*(
finally:
await stream.close()
without tree =? MerkleTree.init(cids), err:
without tree =? CodexTree.init(cids), err:
return failure(err)
without treeCid =? tree.rootCid(CIDv1, dataCodec), err:

View File

@ -2,7 +2,6 @@ import pkg/chronicles
import ../salesagent
import ../statemachine
import ./errorhandling
import ./errored
logScope:
topics = "marketplace sales cancelled"

View File

@ -6,4 +6,11 @@ import ./stores/maintenance
import ./stores/keyutils
import ./stores/treehelper
export cachestore, blockstore, networkstore, repostore, maintenance, keyutils, treehelper
export
cachestore,
blockstore,
networkstore,
repostore,
keyutils,
treehelper,
maintenance

View File

@ -49,7 +49,7 @@ method getBlock*(self: BlockStore, address: BlockAddress): Future[?!Block] {.bas
raiseAssert("getBlock by addr not implemented!")
method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, MerkleProof)] {.base.} =
method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.base.} =
## Get a block and associated inclusion proof by Cid of a merkle tree and an index of a leaf in a tree
##
@ -70,7 +70,7 @@ method putBlockCidAndProof*(
treeCid: Cid,
index: Natural,
blockCid: Cid,
proof: MerkleProof
proof: CodexProof
): Future[?!void] {.base.} =
## Put a block to the blockstore
##

View File

@ -39,7 +39,7 @@ type
currentSize*: NBytes
size*: NBytes
cache: LruCache[Cid, Block]
cidAndProofCache: LruCache[(Cid, Natural), (Cid, MerkleProof)]
cidAndProofCache: LruCache[(Cid, Natural), (Cid, CodexProof)]
InvalidBlockSize* = object of CodexError
@ -65,7 +65,7 @@ method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} =
trace "Error requesting block from cache", cid, error = exc.msg
return failure exc
proc getCidAndProof(self: CacheStore, treeCid: Cid, index: Natural): ?!(Cid, MerkleProof) =
proc getCidAndProof(self: CacheStore, treeCid: Cid, index: Natural): ?!(Cid, CodexProof) =
if cidAndProof =? self.cidAndProofCache.getOption((treeCid, index)):
success(cidAndProof)
else:
@ -77,7 +77,7 @@ method getBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!Block
await self.getBlock(cidAndProof[0])
method getBlockAndProof*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!(Block, MerkleProof)] {.async.} =
method getBlockAndProof*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.async.} =
without cidAndProof =? self.getCidAndProof(treeCid, index), err:
return failure(err)
@ -215,7 +215,7 @@ method putBlockCidAndProof*(
treeCid: Cid,
index: Natural,
blockCid: Cid,
proof: MerkleProof
proof: CodexProof
): Future[?!void] {.async.} =
self.cidAndProofCache[(treeCid, index)] = (blockCid, proof)
success()
@ -288,7 +288,7 @@ proc new*(
currentSize = 0'nb
size = int(cacheSize div chunkSize)
cache = newLruCache[Cid, Block](size)
cidAndProofCache = newLruCache[(Cid, Natural), (Cid, MerkleProof)](size)
cidAndProofCache = newLruCache[(Cid, Natural), (Cid, CodexProof)](size)
store = CacheStore(
cache: cache,
cidAndProofCache: cidAndProofCache,

View File

@ -47,4 +47,3 @@ proc createBlockExpirationMetadataQueryKey*(): ?!Key =
proc createBlockCidAndProofMetadataKey*(treeCid: Cid, index: Natural): ?!Key =
(BlockProofKey / $treeCid).flatMap((k: Key) => k / $index)

View File

@ -11,8 +11,6 @@ import pkg/upraises
push: {.upraises: [].}
import std/sugar
import pkg/chronicles
import pkg/chronos
import pkg/libp2p
@ -87,7 +85,7 @@ method putBlockCidAndProof*(
treeCid: Cid,
index: Natural,
blockCid: Cid,
proof: MerkleProof): Future[?!void] =
proof: CodexProof): Future[?!void] =
self.localStore.putBlockCidAndProof(treeCid, index, blockCid, proof)
method ensureExpiry*(

View File

@ -77,7 +77,7 @@ func available*(self: RepoStore): uint =
func available*(self: RepoStore, bytes: uint): bool =
return bytes < self.available()
proc encode(cidAndProof: (Cid, MerkleProof)): seq[byte] =
proc encode(cidAndProof: (Cid, CodexProof)): seq[byte] =
## Encodes a tuple of cid and merkle proof in the following format:
## | 8-bytes | n-bytes | remaining bytes |
## | n | cid | proof |
@ -93,14 +93,14 @@ proc encode(cidAndProof: (Cid, MerkleProof)): seq[byte] =
@nBytes & cidBytes & proofBytes
proc decode(_: type (Cid, MerkleProof), data: seq[byte]): ?!(Cid, MerkleProof) =
proc decode(_: type (Cid, CodexProof), data: seq[byte]): ?!(Cid, CodexProof) =
let
n = uint64.fromBytesBE(data[0..<sizeof(uint64)]).int
cid = ? Cid.init(data[sizeof(uint64)..<sizeof(uint64) + n]).mapFailure
proof = ? MerkleProof.decode(data[sizeof(uint64) + n..^1])
proof = ? CodexProof.decode(data[sizeof(uint64) + n..^1])
success((cid, proof))
proc decodeCid(_: type (Cid, MerkleProof), data: seq[byte]): ?!Cid =
proc decodeCid(_: type (Cid, CodexProof), data: seq[byte]): ?!Cid =
let
n = uint64.fromBytesBE(data[0..<sizeof(uint64)]).int
cid = ? Cid.init(data[sizeof(uint64)..<sizeof(uint64) + n]).mapFailure
@ -111,7 +111,7 @@ method putBlockCidAndProof*(
treeCid: Cid,
index: Natural,
blockCid: Cid,
proof: MerkleProof
proof: CodexProof
): Future[?!void] {.async.} =
## Put a block to the blockstore
##
@ -119,6 +119,8 @@ method putBlockCidAndProof*(
without key =? createBlockCidAndProofMetadataKey(treeCid, index), err:
return failure(err)
trace "Storing block cid and proof with key", key
let value = (blockCid, proof).encode()
await self.metaDs.put(key, value)
@ -127,7 +129,7 @@ proc getCidAndProof(
self: RepoStore,
treeCid: Cid,
index: Natural
): Future[?!(Cid, MerkleProof)] {.async.} =
): Future[?!(Cid, CodexProof)] {.async.} =
without key =? createBlockCidAndProofMetadataKey(treeCid, index), err:
return failure(err)
@ -137,23 +139,29 @@ proc getCidAndProof(
else:
return failure(err)
return (Cid, MerkleProof).decode(value)
without (cid, proof) =? (Cid, CodexProof).decode(value), err:
trace "Unable to decode cid and proof", err = err.msg
return failure(err)
trace "Got cid and proof for block", cid, proof = $proof
return success (cid, proof)
proc getCid(
self: RepoStore,
treeCid: Cid,
index: Natural
): Future[?!Cid] {.async.} =
index: Natural): Future[?!Cid] {.async.} =
without key =? createBlockCidAndProofMetadataKey(treeCid, index), err:
return failure(err)
without value =? await self.metaDs.get(key), err:
if err of DatastoreKeyNotFound:
trace "Cid not found", treeCid, index
return failure(newException(BlockNotFoundError, err.msg))
else:
trace "Error getting cid from datastore", err = err.msg, key
return failure(err)
return (Cid, MerkleProof).decodeCid(value)
return (Cid, CodexProof).decodeCid(value)
method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} =
## Get a block from the blockstore
@ -181,7 +189,7 @@ method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} =
return Block.new(cid, data, verify = true)
method getBlockAndProof*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!(Block, MerkleProof)] {.async.} =
method getBlockAndProof*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.async.} =
without cidAndProof =? await self.getCidAndProof(treeCid, index), err:
return failure(err)
@ -409,15 +417,17 @@ method delBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!void]
without key =? createBlockCidAndProofMetadataKey(treeCid, index), err:
return failure(err)
trace "Fetching proof", key
without value =? await self.metaDs.get(key), err:
if err of DatastoreKeyNotFound:
return success()
else:
return failure(err)
without cid =? (Cid, MerkleProof).decodeCid(value), err:
without cid =? (Cid, CodexProof).decodeCid(value), err:
return failure(err)
trace "Deleting block", cid
if err =? (await self.delBlock(cid)).errorOption:
return failure(err)

View File

@ -22,7 +22,7 @@ import ./blockstore
import ../utils/asynciter
import ../merkletree
proc putSomeProofs*(store: BlockStore, tree: MerkleTree, iter: Iter[int]): Future[?!void] {.async.} =
proc putSomeProofs*(store: BlockStore, tree: CodexTree, iter: Iter[int]): Future[?!void] {.async.} =
without treeCid =? tree.rootCid, err:
return failure(err)
@ -40,13 +40,11 @@ proc putSomeProofs*(store: BlockStore, tree: MerkleTree, iter: Iter[int]): Futur
if err =? res.errorOption:
return failure(err)
success()
proc putSomeProofs*(store: BlockStore, tree: MerkleTree, iter: Iter[Natural]): Future[?!void] =
proc putSomeProofs*(store: BlockStore, tree: CodexTree, iter: Iter[Natural]): Future[?!void] =
store.putSomeProofs(tree, iter.map((i: Natural) => i.ord))
proc putAllProofs*(store: BlockStore, tree: MerkleTree): Future[?!void] =
proc putAllProofs*(store: BlockStore, tree: CodexTree): Future[?!void] =
store.putSomeProofs(tree, Iter.fromSlice(0..<tree.leavesCount))

View File

@ -6,7 +6,7 @@
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
##
##
import std/parseutils
import std/options
@ -30,12 +30,11 @@ func roundUp*[T](a, b : T): T =
divUp(a,b) * b
proc orElse*[A](a, b: Option[A]): Option[A] =
if (a.isSome()):
a
else:
if (a.isSome()):
a
else:
b
when not declared(parseDuration): # Odd code formatting to minimize diff v. mainLine
const Whitespace = {' ', '\t', '\v', '\r', '\l', '\f'}

View File

@ -145,4 +145,3 @@ proc prefetch*[T](iter: Iter[T], n: Positive): Iter[T] =
tryFetch(j)
Iter.new(genNext, isFinished)

codex/utils/digest.nim (new file, 40 lines)
View File

@ -0,0 +1,40 @@
## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/poseidon2
import pkg/poseidon2/io
import pkg/questionable/results
import ../merkletree
func digestTree*(
_: type Poseidon2Tree,
bytes: openArray[byte], chunkSize: int): ?!Poseidon2Tree =
## Hashes chunks of data with a sponge of rate 2, and combines the
## resulting chunk hashes in a merkle root.
##
var index = 0
var leaves: seq[Poseidon2Hash]
while index < bytes.len:
let start = index
let finish = min(index + chunkSize, bytes.len)
let digest = Sponge.digest(bytes.toOpenArray(start, finish - 1), rate = 2)
leaves.add(digest)
index += chunkSize
return Poseidon2Tree.init(leaves)
func digest*(
_: type Poseidon2Tree,
bytes: openArray[byte], chunkSize: int): ?!Poseidon2Hash =
## Hashes chunks of data with a sponge of rate 2, and combines the
## resulting chunk hashes in a merkle root.
##
(? Poseidon2Tree.digestTree(bytes, chunkSize)).root
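
A usage sketch for the helpers above; the 31-byte chunk size and the `pkg/codex/utils/digest` module path are assumptions taken from the file layout in this diff:

```nim
import pkg/questionable/results
import pkg/codex/merkletree
import pkg/codex/utils/digest

let
  bytes = newSeq[byte](31 * 4)   # four zeroed 31-byte chunks, for illustration
  tree = Poseidon2Tree.digestTree(bytes, chunkSize = 31).tryGet()
  root = Poseidon2Tree.digest(bytes, chunkSize = 31).tryGet()
  proof = tree.getProof(0).tryGet()

# `digest` is just the root of `digestTree`, so a proof from the tree
# verifies against it
assert proof.verify(tree.leaves[0], root).isOk
```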

View File

@ -10,9 +10,12 @@
## Timer
## Used to execute a callback in a loop
import pkg/upraises
push: {.upraises: [].}
import pkg/chronos
import pkg/chronicles
import pkg/upraises
type
TimerCallback* = proc(): Future[void] {.gcsafe, upraises:[].}

View File

@ -26,7 +26,7 @@ asyncchecksuite "Block Advertising and Discovery":
var
blocks: seq[bt.Block]
manifest: Manifest
tree: MerkleTree
tree: CodexTree
manifestBlock: bt.Block
switch: Switch
peerStore: PeerCtxStore

View File

@ -38,23 +38,20 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] =
return buf
proc makeManifestAndTree*(blocks: seq[Block]): ?!(Manifest, MerkleTree) =
proc makeManifestAndTree*(blocks: seq[Block]): ?!(Manifest, CodexTree) =
if blocks.len == 0:
return failure("Blocks list was empty")
let
let
datasetSize = blocks.mapIt(it.data.len).foldl(a + b)
blockSize = blocks.mapIt(it.data.len).foldl(max(a, b))
tree = ? MerkleTree.init(blocks.mapIt(it.cid))
tree = ? CodexTree.init(blocks.mapIt(it.cid))
treeCid = ? tree.rootCid
manifest = Manifest.new(
treeCid = treeCid,
blockSize = NBytes(blockSize),
datasetSize = NBytes(datasetSize),
version = CIDv1,
hcodec = tree.mcodec
)
datasetSize = NBytes(datasetSize))
return success((manifest, tree))
@ -87,14 +84,13 @@ proc storeDataGetManifest*(store: BlockStore, chunker: Chunker): Future[Manifest
cids.add(blk.cid)
(await store.putBlock(blk)).tryGet()
let
tree = MerkleTree.init(cids).tryGet()
let
tree = CodexTree.init(cids).tryGet()
treeCid = tree.rootCid.tryGet()
manifest = Manifest.new(
treeCid = treeCid,
blockSize = NBytes(chunker.chunkSize),
datasetSize = NBytes(chunker.offset),
)
datasetSize = NBytes(chunker.offset))
for i in 0..<tree.leavesCount:
let proof = tree.getProof(i).tryGet()

View File

@ -0,0 +1,130 @@
import std/unittest
import std/sequtils
import pkg/codex/merkletree
proc testGenericTree*[H, K, U](
name: string,
data: openArray[H],
zero: H,
compress: proc(z, y: H, key: K): H,
makeTree: proc(data: seq[H]): U) =
let
data = @data
suite "Correctness tests - " & name:
test "Should build correct tree for even bottom layer":
let
expectedRoot = compress(
compress(
compress(data[0], data[1], K.KeyBottomLayer),
compress(data[2], data[3], K.KeyBottomLayer),
K.KeyNone
),
compress(
compress(data[4], data[5], K.KeyBottomLayer),
compress(data[6], data[7], K.KeyBottomLayer),
K.KeyNone
),
K.KeyNone
)
let
tree = makeTree( data[0..7] )
check:
tree.root.tryGet == expectedRoot
test "Should build correct tree for odd bottom layer":
let
expectedRoot = compress(
compress(
compress(data[0], data[1], K.KeyBottomLayer),
compress(data[2], data[3], K.KeyBottomLayer),
K.KeyNone
),
compress(
compress(data[4], data[5], K.KeyBottomLayer),
compress(data[6], zero, K.KeyOddAndBottomLayer),
K.KeyNone
),
K.KeyNone
)
let
tree = makeTree( data[0..6] )
check:
tree.root.tryGet == expectedRoot
test "Should build correct tree for even bottom and odd upper layers":
let
expectedRoot = compress(
compress(
compress(
compress(data[0], data[1], K.KeyBottomLayer),
compress(data[2], data[3], K.KeyBottomLayer),
K.KeyNone
),
compress(
compress(data[4], data[5], K.KeyBottomLayer),
compress(data[6], data[7], K.KeyBottomLayer),
K.KeyNone
),
K.KeyNone
),
compress(
compress(
compress(data[8], data[9], K.KeyBottomLayer),
zero,
K.KeyOdd
),
zero,
K.KeyOdd
),
K.KeyNone
)
let
tree = makeTree( data[0..9] )
check:
tree.root.tryGet == expectedRoot
test "Should get and validate correct proofs":
let
expectedRoot = compress(
compress(
compress(
compress(data[0], data[1], K.KeyBottomLayer),
compress(data[2], data[3], K.KeyBottomLayer),
K.KeyNone
),
compress(
compress(data[4], data[5], K.KeyBottomLayer),
compress(data[6], data[7], K.KeyBottomLayer),
K.KeyNone
),
K.KeyNone
),
compress(
compress(
compress(data[8], data[9], K.KeyBottomLayer),
zero,
K.KeyOdd
),
zero,
K.KeyOdd
),
K.KeyNone
)
let
tree = makeTree( data )
for i in 0..<data.len:
let proof = tree.getProof(i).tryGet
check:
proof.verify(tree.leaves[i], expectedRoot).isOk
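
These cases pin down the keyed padding rule: an odd bottom layer is padded with `zero` under `KeyOddAndBottomLayer`, odd upper layers under `KeyOdd`. As a compact restatement (a sketch in terms of the same generic `compress`, `zero` and `K` parameters; `a`, `b`, `c` are hypothetical leaves), a three-leaf tree is expected to reduce to:

# Sketch only: expected root for leaves a, b, c under the keyed scheme above.
let threeLeafRoot = compress(
  compress(a, b, K.KeyBottomLayer),
  compress(c, zero, K.KeyOddAndBottomLayer),
  K.KeyNone)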


@ -0,0 +1,31 @@
import pkg/constantine/platforms/abstractions
import pkg/codex/merkletree
import ../helpers
export merkletree, helpers
converter toBool*(x: CtBool): bool =
bool(x)
proc `==`*(a, b: Poseidon2Tree): bool =
(a.leavesCount == b.leavesCount) and
(a.levels == b.levels) and
(a.layers == b.layers)
proc `==`*(a, b: Poseidon2Proof): bool =
(a.nleaves == b.nleaves) and
(a.index == b.index) and
(a.path.len == b.path.len) and
(a.path == b.path)
proc `==`*(a, b: CodexTree): bool =
(a.mcodec == b.mcodec) and
(a.leavesCount == b.leavesCount) and
(a.levels == b.levels)
proc `==`*(a, b: CodexProof): bool =
(a.mcodec == b.mcodec) and
(a.nleaves == b.nleaves) and
(a.path == b.path) and
(a.index == b.index)
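
The `toBool` converter is here so that constantine's constant-time comparison results (`CtBool`) can flow into contexts that expect a plain `bool`, e.g. the element-wise `==` behind the `path` and `layers` comparisons above. A minimal illustration, with `x` and `y` as hypothetical in-scope `Poseidon2Hash` values:

# `x == y` on field elements is constant-time (CtBool); the converter above
# lets it be used where a bool is expected.
let equal: bool = x == y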


@ -1,42 +0,0 @@
import std/unittest
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/codex/merkletree
import ../helpers
checksuite "merkletree - coders":
const data =
[
"0123456789012345678901234567890123456789".toBytes,
"1234567890123456789012345678901234567890".toBytes,
"2345678901234567890123456789012345678901".toBytes,
"3456789012345678901234567890123456789012".toBytes,
"4567890123456789012345678901234567890123".toBytes,
"5678901234567890123456789012345678901234".toBytes,
"6789012345678901234567890123456789012345".toBytes,
"7890123456789012345678901234567890123456".toBytes,
"8901234567890123456789012345678901234567".toBytes,
"9012345678901234567890123456789012345678".toBytes,
]
test "encoding and decoding a tree yields the same tree":
var builder = MerkleTreeBuilder.init(multiCodec("sha2-256")).tryGet()
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
builder.addDataBlock(data[3]).tryGet()
builder.addDataBlock(data[4]).tryGet()
builder.addDataBlock(data[5]).tryGet()
builder.addDataBlock(data[6]).tryGet()
builder.addDataBlock(data[7]).tryGet()
builder.addDataBlock(data[8]).tryGet()
builder.addDataBlock(data[9]).tryGet()
let tree = builder.build().tryGet()
let encodedBytes = tree.encode()
let decodedTree = MerkleTree.decode(encodedBytes).tryGet()
check:
tree == decodedTree


@ -0,0 +1,48 @@
import std/unittest
import std/sequtils
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/codex/merkletree
import ./helpers
const data =
[
"00000000000000000000000000000001".toBytes,
"00000000000000000000000000000002".toBytes,
"00000000000000000000000000000003".toBytes,
"00000000000000000000000000000004".toBytes,
"00000000000000000000000000000005".toBytes,
"00000000000000000000000000000006".toBytes,
"00000000000000000000000000000007".toBytes,
"00000000000000000000000000000008".toBytes,
"00000000000000000000000000000009".toBytes,
"00000000000000000000000000000010".toBytes,
]
checksuite "merkletree - coders":
test "encoding and decoding a tree yields the same tree":
let
tree = CodexTree.init(multiCodec("sha2-256"), data).tryGet()
encodedBytes = tree.encode()
decodedTree = CodexTree.decode(encodedBytes).tryGet()
check:
tree == decodedTree
test "encoding and decoding a proof yields the same proof":
let
tree = CodexTree.init(multiCodec("sha2-256"), data).tryGet()
proof = tree.getProof(4).tryGet()
check:
proof.verify(tree.leaves[4], tree.root.tryGet).isOk
let
encodedBytes = proof.encode()
decodedProof = CodexProof.decode(encodedBytes).tryGet()
check:
proof == decodedProof
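
A small follow-on sketch (using only the API exercised above): a decoded proof should still verify against the original tree's root, which is the property the byte encoding ultimately has to preserve.

# Assumes this file's imports; round-trip the proof and re-verify it.
let
  roundTree = CodexTree.init(multiCodec("sha2-256"), data).tryGet()
  roundProof = CodexProof.decode(roundTree.getProof(4).tryGet().encode()).tryGet()
assert roundProof.verify(roundTree.leaves[4], roundTree.root.tryGet).isOk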


@ -0,0 +1,106 @@
import std/unittest
import std/sequtils
import std/tables
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/nimcrypto/sha2
import pkg/libp2p
import pkg/codex/merkletree
import ./helpers
import ./generictreetests
# TODO: Generalize to other hashes
const
data =
[
"00000000000000000000000000000001".toBytes,
"00000000000000000000000000000002".toBytes,
"00000000000000000000000000000003".toBytes,
"00000000000000000000000000000004".toBytes,
"00000000000000000000000000000005".toBytes,
"00000000000000000000000000000006".toBytes,
"00000000000000000000000000000007".toBytes,
"00000000000000000000000000000008".toBytes,
"00000000000000000000000000000009".toBytes,
"00000000000000000000000000000010".toBytes,
]
sha256 = multiCodec("sha2-256")
suite "Test CodexTree":
test "Cannot init tree without any multihash leaves":
check:
CodexTree.init(leaves = newSeq[MultiHash]()).isErr
test "Cannot init tree without any cid leaves":
check:
CodexTree.init(leaves = newSeq[Cid]()).isErr
test "Cannot init tree without any byte leaves":
check:
CodexTree.init(sha256, leaves = newSeq[ByteHash]()).isErr
test "Should build tree from multihash leaves":
var
expectedLeaves = data.mapIt( MultiHash.digest($sha256, it).tryGet() )
var tree = CodexTree.init(leaves = expectedLeaves)
check:
tree.isOk
tree.get().leaves == expectedLeaves.mapIt( it.bytes )
tree.get().mcodec == sha256
test "Should build tree from cid leaves":
var
expectedLeaves = data.mapIt(
Cid.init(
CidVersion.CIDv1,
BlockCodec,
MultiHash.digest($sha256, it).tryGet
).tryGet )
let
tree = CodexTree.init(leaves = expectedLeaves)
check:
tree.isOk
tree.get().leaves == expectedLeaves.mapIt( it.mhash.tryGet.bytes )
tree.get().mcodec == sha256
test "Should build from raw bytes (should not hash leaves)":
let
tree = CodexTree.init(sha256, leaves = data).tryGet
check:
tree.mcodec == sha256
tree.leaves == data
test "Should build from nodes":
let
tree = CodexTree.init(sha256, leaves = data).tryGet
fromNodes = CodexTree.fromNodes(
nodes = toSeq(tree.nodes),
nleaves = tree.leavesCount).tryGet
check:
tree.mcodec == sha256
tree == fromNodes
let
mhash = sha256.getMhash().tryGet
zero: seq[byte] = newSeq[byte](mhash.size)
compress = proc(x, y: seq[byte], key: ByteTreeKey): seq[byte] =
compress(x, y, key, mhash).tryGet
makeTree = proc(data: seq[seq[byte]]): CodexTree =
CodexTree.init(sha256, leaves = data).tryGet
testGenericTree(
"CodexTree",
@data,
zero,
compress,
makeTree)
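
For the Cid-based path used by the store helpers earlier, a short sketch (assuming this file's imports, with `Cid` coming from pkg/libp2p, and the `rootCid` accessor those helpers rely on):

# Build a tree over Cid leaves and expose its root as a Cid.
let
  cidLeaves = data.mapIt(
    Cid.init(CidVersion.CIDv1, BlockCodec, MultiHash.digest($sha256, it).tryGet).tryGet)
  cidTree = CodexTree.init(leaves = cidLeaves).tryGet
  treeCid = cidTree.rootCid.tryGet  # usable e.g. as a manifest's treeCid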


@ -0,0 +1,62 @@
import std/unittest
import std/sequtils
import std/random
import pkg/constantine/math/arithmetic
import pkg/poseidon2
import pkg/poseidon2/io
import pkg/poseidon2/sponge
import pkg/questionable/results
import pkg/codex/merkletree
import pkg/codex/utils/digest
import ./helpers
suite "Digest - MerkleTree":
const KB = 1024
test "Hashes chunks of data with sponge, and combines them in merkle root":
let bytes = newSeqWith(64*KB, rand(byte))
var leaves: seq[Poseidon2Hash]
for i in 0..<32:
let
chunk = bytes[(i*2*KB)..<((i+1)*2*KB)]
digest = Sponge.digest(chunk, rate = 2)
leaves.add(digest)
let
digest = Poseidon2Tree.digest(bytes, chunkSize = 2*KB).tryGet
spongeDigest = SpongeMerkle.digest(bytes, chunkSize = 2*KB)
codexPosTree = Poseidon2Tree.init(leaves).tryGet
rootDigest = codexPosTree.root.tryGet
check:
bool( digest == spongeDigest )
bool( digest == rootDigest )
test "Handles partial chunk at the end":
let bytes = newSeqWith(63*KB, rand(byte))
var leaves: seq[Poseidon2Hash]
for i in 0..<31:
let
chunk = bytes[(i*2*KB)..<((i+1)*2*KB)]
digest = Sponge.digest(chunk, rate = 2)
leaves.add(digest)
let partialChunk = bytes[(62*KB)..<(63*KB)]
leaves.add(Sponge.digest(partialChunk, rate = 2))
let
digest = Poseidon2Tree.digest(bytes, chunkSize = 2*KB).tryGet
spongeDigest = SpongeMerkle.digest(bytes, chunkSize = 2*KB)
codexPosTree = Poseidon2Tree.init(leaves).tryGet
rootDigest = codexPosTree.root.tryGet
check:
bool( digest == spongeDigest )
bool( digest == rootDigest )
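
Both cases reduce to the same property (a sketch using only the `Sponge` and `Poseidon2Tree` calls above): the digest of the whole buffer should equal the root of a tree whose leaves are sponge digests of consecutive `chunkSize` slices, with a shorter final slice when the length is not a multiple of `chunkSize`.

proc manualDigest(bytes: seq[byte], chunkSize: int): Poseidon2Hash =
  ## Recompute the digest by hand from per-chunk sponge leaves (assumes bytes.len > 0).
  var leaves: seq[Poseidon2Hash]
  var i = 0
  while i < bytes.len:
    let last = min(i + chunkSize, bytes.len)
    leaves.add Sponge.digest(bytes[i..<last], rate = 2)
    i = last
  Poseidon2Tree.init(leaves).tryGet.root.tryGet

# e.g. check: bool( manualDigest(bytes, 2*KB) == Poseidon2Tree.digest(bytes, chunkSize = 2*KB).tryGet )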


@ -1,245 +0,0 @@
import std/unittest
import std/sequtils
import std/tables
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/nimcrypto/sha2
import pkg/codex/merkletree
import ../helpers
checksuite "merkletree":
const data =
[
"0123456789012345678901234567890123456789".toBytes,
"1234567890123456789012345678901234567890".toBytes,
"2345678901234567890123456789012345678901".toBytes,
"3456789012345678901234567890123456789012".toBytes,
"4567890123456789012345678901234567890123".toBytes,
"5678901234567890123456789012345678901234".toBytes,
"6789012345678901234567890123456789012345".toBytes,
"7890123456789012345678901234567890123456".toBytes,
"8901234567890123456789012345678901234567".toBytes,
"9012345678901234567890123456789012345678".toBytes,
]
const sha256 = multiCodec("sha2-256")
const sha512 = multiCodec("sha2-512")
proc combine(a, b: MultiHash, codec: MultiCodec = sha256): MultiHash =
var buf = newSeq[byte](a.size + b.size)
copyMem(addr buf[0], unsafeAddr a.data.buffer[a.dpos], a.size)
copyMem(addr buf[a.size], unsafeAddr b.data.buffer[b.dpos], b.size)
return MultiHash.digest($codec, buf).tryGet()
var zeroHash: MultiHash
var oneHash: MultiHash
var expectedLeaves: array[data.len, MultiHash]
var builder: MerkleTreeBuilder
setup:
for i in 0..<data.len:
expectedLeaves[i] = MultiHash.digest($sha256, data[i]).tryGet()
builder = MerkleTreeBuilder.init(sha256).tryGet()
var zero: array[32, byte]
var one: array[32, byte]
one[^1] = 0x01
zeroHash = MultiHash.init($sha256, zero).tryGet()
oneHash = MultiHash.init($sha256, one).tryGet()
test "cannot init tree without any leafs":
let treeOrErr = MerkleTree.init(newSeq[MultiHash]())
check:
treeOrErr.isErr
test "tree with one leaf has expected structure":
builder.addDataBlock(data[0]).tryGet()
let tree = builder.build().tryGet()
check:
tree.leaves.toSeq == expectedLeaves[0..0]
tree.root == expectedLeaves[0]
tree.len == 1
test "tree with two leaves has expected structure":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
let tree = builder.build().tryGet()
let expectedRoot = combine(expectedLeaves[0], expectedLeaves[1])
check:
tree.leaves.toSeq == expectedLeaves[0..1]
tree.len == 3
tree.root == expectedRoot
test "tree with three leaves has expected structure":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
let tree = builder.build().tryGet()
let
expectedRoot = combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], zeroHash)
)
check:
tree.leaves.toSeq == expectedLeaves[0..2]
tree.len == 6
tree.root == expectedRoot
test "tree with nine leaves has expected structure":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
builder.addDataBlock(data[3]).tryGet()
builder.addDataBlock(data[4]).tryGet()
builder.addDataBlock(data[5]).tryGet()
builder.addDataBlock(data[6]).tryGet()
builder.addDataBlock(data[7]).tryGet()
builder.addDataBlock(data[8]).tryGet()
let tree = builder.build().tryGet()
let
expectedRoot = combine(
combine(
combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], expectedLeaves[3]),
),
combine(
combine(expectedLeaves[4], expectedLeaves[5]),
combine(expectedLeaves[6], expectedLeaves[7])
)
),
combine(
combine(
combine(expectedLeaves[8], zeroHash),
oneHash
),
oneHash
)
)
check:
tree.leaves.toSeq == expectedLeaves[0..8]
tree.len == 20
tree.root == expectedRoot
test "tree with two leaves provides expected and valid proofs":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
let tree = builder.build().tryGet()
let expectedProofs = [
MerkleProof.init(0, @[expectedLeaves[1]]).tryGet(),
MerkleProof.init(1, @[expectedLeaves[0]]).tryGet(),
]
check:
tree.getProof(0).tryGet() == expectedProofs[0]
tree.getProof(1).tryGet() == expectedProofs[1]
check:
tree.getProof(0).tryGet().verifyDataBlock(data[0], tree.root).tryGet()
tree.getProof(1).tryGet().verifyDataBlock(data[1], tree.root).tryGet()
test "tree with three leaves provides expected proofs":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
let tree = builder.build().tryGet()
let expectedProofs = [
MerkleProof.init(0, @[expectedLeaves[1], combine(expectedLeaves[2], zeroHash)]).tryGet(),
MerkleProof.init(1, @[expectedLeaves[0], combine(expectedLeaves[2], zeroHash)]).tryGet(),
MerkleProof.init(2, @[zeroHash, combine(expectedLeaves[0], expectedLeaves[1])]).tryGet(),
]
check:
tree.getProof(0).tryGet() == expectedProofs[0]
tree.getProof(1).tryGet() == expectedProofs[1]
tree.getProof(2).tryGet() == expectedProofs[2]
check:
tree.getProof(0).tryGet().verifyDataBlock(data[0], tree.root).tryGet()
tree.getProof(1).tryGet().verifyDataBlock(data[1], tree.root).tryGet()
tree.getProof(2).tryGet().verifyDataBlock(data[2], tree.root).tryGet()
test "tree with nine leaves provides expected proofs":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
builder.addDataBlock(data[3]).tryGet()
builder.addDataBlock(data[4]).tryGet()
builder.addDataBlock(data[5]).tryGet()
builder.addDataBlock(data[6]).tryGet()
builder.addDataBlock(data[7]).tryGet()
builder.addDataBlock(data[8]).tryGet()
let tree = builder.build().tryGet()
let expectedProofs = {
4:
MerkleProof.init(4, @[
expectedLeaves[5],
combine(expectedLeaves[6], expectedLeaves[7]),
combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], expectedLeaves[3]),
),
combine(
combine(
combine(expectedLeaves[8], zeroHash),
oneHash
),
oneHash
)
]).tryGet(),
8:
MerkleProof.init(8, @[
zeroHash,
oneHash,
oneHash,
combine(
combine(
combine(expectedLeaves[0], expectedLeaves[1]),
combine(expectedLeaves[2], expectedLeaves[3]),
),
combine(
combine(expectedLeaves[4], expectedLeaves[5]),
combine(expectedLeaves[6], expectedLeaves[7])
)
)
]).tryGet(),
}.newTable
check:
tree.getProof(4).tryGet() == expectedProofs[4]
tree.getProof(8).tryGet() == expectedProofs[8]
check:
tree.getProof(4).tryGet().verifyDataBlock(data[4], tree.root).tryGet()
tree.getProof(8).tryGet().verifyDataBlock(data[8], tree.root).tryGet()
test "getProof fails for index out of bounds":
builder.addDataBlock(data[0]).tryGet()
builder.addDataBlock(data[1]).tryGet()
builder.addDataBlock(data[2]).tryGet()
let tree = builder.build().tryGet()
check:
isErr(tree.getProof(4))


@ -0,0 +1,88 @@
import std/unittest
import std/sequtils
import std/sugar
import pkg/poseidon2
import pkg/poseidon2/io
import pkg/questionable/results
import pkg/results
import pkg/stew/byteutils
import pkg/stew/arrayops
import constantine/math/arithmetic
import constantine/math/io/io_bigints
import pkg/constantine/math/io/io_fields
import pkg/constantine/platforms/abstractions
import pkg/codex/merkletree
import ./generictreetests
import ./helpers
const
data =
[
"0000000000000000000000000000001".toBytes,
"0000000000000000000000000000002".toBytes,
"0000000000000000000000000000003".toBytes,
"0000000000000000000000000000004".toBytes,
"0000000000000000000000000000005".toBytes,
"0000000000000000000000000000006".toBytes,
"0000000000000000000000000000007".toBytes,
"0000000000000000000000000000008".toBytes,
"0000000000000000000000000000009".toBytes,
"0000000000000000000000000000010".toBytes,
]
suite "Test CodexTree":
var
expectedLeaves: seq[Poseidon2Hash]
setup:
expectedLeaves = toSeq( data.concat().elements(Poseidon2Hash) )
test "Should fail init tree from empty leaves":
check:
Poseidon2Tree.init( leaves = newSeq[Poseidon2Hash](0) ).isErr
test "Init tree from poseidon2 leaves":
let
tree = Poseidon2Tree.init( leaves = expectedLeaves ).tryGet
check:
tree.leaves == expectedLeaves
test "Init tree from byte leaves":
let
tree = Poseidon2Tree.init(
leaves = data.mapIt(
array[31, byte].initCopyFrom( it )
)).tryGet
check:
tree.leaves == expectedLeaves
test "Should build from nodes":
let
tree = Poseidon2Tree.init(leaves = expectedLeaves).tryGet
fromNodes = Poseidon2Tree.fromNodes(
nodes = toSeq(tree.nodes),
nleaves = tree.leavesCount).tryGet
check:
tree == fromNodes
let
compressor = proc(
x, y: Poseidon2Hash,
key: PoseidonKeysEnum): Poseidon2Hash {.noSideEffect.} =
compress(x, y, key.toKey)
makeTree = proc(data: seq[Poseidon2Hash]): Poseidon2Tree =
Poseidon2Tree.init(leaves = data).tryGet
testGenericTree(
"Poseidon2Tree",
toSeq( data.concat().elements(Poseidon2Hash) ),
zero,
compressor,
makeTree)
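
A proof-side usage sketch for this tree (assuming the getProof/verify surface that the generic tests above exercise on Poseidon2Tree):

# Assumes this file's imports and the `expectedLeaves` from the setup above.
let
  posTree = Poseidon2Tree.init(leaves = expectedLeaves).tryGet
  posProof = posTree.getProof(3).tryGet
assert posProof.verify(posTree.leaves[3], posTree.root.tryGet).isOk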


@ -30,7 +30,7 @@ proc commonBlockStoreTests*(name: string,
var
newBlock, newBlock1, newBlock2, newBlock3: Block
manifest: Manifest
tree: MerkleTree
tree: CodexTree
store: BlockStore
setup:


@ -1,4 +1,6 @@
import ./merkletree/testmerkletree
import ./merkletree/testcoders
import ./merkletree/testcodextree
import ./merkletree/testposeidon2tree
import ./merkletree/testcodexcoders
import ./merkletree/testmerkledigest
{.warning[UnusedImport]: off.}


@ -51,7 +51,7 @@ twonodessuite "Integration tests", debug1 = false, debug2 = false:
check:
space.totalBlocks == 2.uint
space.quotaMaxBytes == 8589934592.uint
space.quotaUsedBytes == 65518.uint
space.quotaUsedBytes == 65526.uint
space.quotaReservedBytes == 12.uint
test "node allows local file downloads":

vendor/nim-libp2p

@ -1 +1 @@
Subproject commit 440461b24b9e66542b34d26a0b908c17f6549d05
Subproject commit 4f2259e1cef65085d092b2b713bb67f5aac55626

@ -1 +1 @@
Subproject commit 9be7b0c134e64e3d57a38520a32af93a55a37c44
Subproject commit 0cfecf7d780b8c3295d11251d64a49f3c7258fbf