Mirror of https://github.com/logos-storage/logos-storage-nim.git
Synced 2026-01-13 10:53:07 +00:00
chore: remove poseidon2 trees and other EC and prover leftovers
This commit is contained in:
parent 3d9f3716ab
commit e5a43740ef
@@ -26,34 +26,15 @@ export tables
const
  # Size of blocks for storage / network exchange,
  DefaultBlockSize* = NBytes 1024 * 64
  DefaultCellSize* = NBytes 2048

  # Proving defaults
  DefaultMaxSlotDepth* = 32
  DefaultMaxDatasetDepth* = 8
  DefaultBlockDepth* = 5
  DefaultCellElms* = 67
  DefaultSamplesNum* = 5

  # hashes
  Sha256HashCodec* = multiCodec("sha2-256")
  Sha512HashCodec* = multiCodec("sha2-512")
  Pos2Bn128SpngCodec* = multiCodec("poseidon2-alt_bn_128-sponge-r2")
  Pos2Bn128MrklCodec* = multiCodec("poseidon2-alt_bn_128-merkle-2kb")

  ManifestCodec* = multiCodec("codex-manifest")
  DatasetRootCodec* = multiCodec("codex-root")
  BlockCodec* = multiCodec("codex-block")
  SlotRootCodec* = multiCodec("codex-slot-root")
  SlotProvingRootCodec* = multiCodec("codex-proving-root")
  CodexSlotCellCodec* = multiCodec("codex-slot-cell")

  CodexHashesCodecs* = [Sha256HashCodec, Pos2Bn128SpngCodec, Pos2Bn128MrklCodec]

  CodexPrimitivesCodecs* = [
    ManifestCodec, DatasetRootCodec, BlockCodec, SlotRootCodec, SlotProvingRootCodec,
    CodexSlotCellCodec,
  ]
  CodexPrimitivesCodecs* = [ManifestCodec, DatasetRootCodec, BlockCodec]

proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
  ## Initialize padding blocks table
@@ -66,8 +47,7 @@ proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
  let
    emptyData: seq[byte] = @[]
    PadHashes = {
      Sha256HashCodec: ?MultiHash.digest($Sha256HashCodec, emptyData).mapFailure,
      Sha512HashCodec: ?MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
      Sha256HashCodec: ?MultiHash.digest($Sha256HashCodec, emptyData).mapFailure
    }.toTable

  var table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()
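For orientation, the removed proving defaults are tied to the size constants above by simple arithmetic; a minimal sketch, illustration only, with plain ints standing in for the NBytes wrapper:

let
  blockSize = 1024 * 64 # DefaultBlockSize: 64 KiB
  cellSize = 2048 # DefaultCellSize
assert blockSize div cellSize == 32 # cells per block
# A BN254 field element safely packs 31 bytes, so one 2 KiB cell needs
# ceil(2048 / 31) = 67 field elements, matching DefaultCellElms:
assert (cellSize + 30) div 31 == 67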
@@ -1,8 +1,2 @@
const ContentIdsExts = [
  multiCodec("codex-root"),
  multiCodec("codex-manifest"),
  multiCodec("codex-block"),
  multiCodec("codex-slot-root"),
  multiCodec("codex-proving-root"),
  multiCodec("codex-slot-cell"),
]
const ContentIdsExts =
  [multiCodec("codex-root"), multiCodec("codex-manifest"), multiCodec("codex-block")]
@@ -1,126 +0,0 @@
import ./errors
import ./utils
import ./utils/asynciter

{.push raises: [].}

type
  StrategyType* = enum
    # Simplest approach:
    # 0 => 0, 1, 2
    # 1 => 3, 4, 5
    # 2 => 6, 7, 8
    LinearStrategy

    # Stepped indexing:
    # 0 => 0, 3, 6
    # 1 => 1, 4, 7
    # 2 => 2, 5, 8
    SteppedStrategy

  # Represents a strategy for grouping indices (of blocks usually).
  # Given an iteration count as input, will produce a seq of
  # selected indices.
  IndexingError* = object of CodexError
  IndexingWrongIndexError* = object of IndexingError
  IndexingWrongIterationsError* = object of IndexingError
  IndexingWrongGroupCountError* = object of IndexingError
  IndexingWrongPadBlockCountError* = object of IndexingError

  IndexingStrategy* = object
    strategyType*: StrategyType # Indexing strategy algorithm
    firstIndex*: int # Lowest index that can be returned
    lastIndex*: int # Highest index that can be returned
    iterations*: int # Number of iteration steps (0 ..< iterations)
    step*: int # Step size between generated indices
    groupCount*: int # Number of groups to partition indices into
    padBlockCount*: int # Number of padding blocks to append per group

func checkIteration(
    self: IndexingStrategy, iteration: int
): void {.raises: [IndexingError].} =
  if iteration >= self.iterations:
    raise newException(
      IndexingError, "Indexing iteration can't be greater than or equal to iterations."
    )

func getIter(first, last, step: int): Iter[int] =
  {.cast(noSideEffect).}:
    Iter[int].new(first, last, step)

func getLinearIndices(self: IndexingStrategy, iteration: int): Iter[int] =
  let
    first = self.firstIndex + iteration * self.step
    last = min(first + self.step - 1, self.lastIndex)

  getIter(first, last, 1)

func getSteppedIndices(self: IndexingStrategy, iteration: int): Iter[int] =
  let
    first = self.firstIndex + iteration
    last = self.lastIndex

  getIter(first, last, self.iterations)

func getStrategyIndices(self: IndexingStrategy, iteration: int): Iter[int] =
  case self.strategyType
  of StrategyType.LinearStrategy:
    self.getLinearIndices(iteration)
  of StrategyType.SteppedStrategy:
    self.getSteppedIndices(iteration)

func getIndices*(
    self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
  self.checkIteration(iteration)
  {.cast(noSideEffect).}:
    Iter[int].new(
      iterator (): int {.gcsafe.} =
        for value in self.getStrategyIndices(iteration):
          yield value

        for i in 0 ..< self.padBlockCount:
          yield self.lastIndex + (iteration + 1) + i * self.groupCount

    )

func init*(
    strategy: StrategyType,
    firstIndex, lastIndex, iterations: int,
    groupCount = 0,
    padBlockCount = 0,
): IndexingStrategy {.raises: [IndexingError].} =
  if firstIndex > lastIndex:
    raise newException(
      IndexingWrongIndexError,
      "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex &
        ")",
    )

  if iterations <= 0:
    raise newException(
      IndexingWrongIterationsError,
      "iterations (" & $iterations & ") must be greater than zero.",
    )

  if padBlockCount < 0:
    raise newException(
      IndexingWrongPadBlockCountError,
      "padBlockCount (" & $padBlockCount & ") must be equal to or greater than zero.",
    )

  if padBlockCount > 0 and groupCount <= 0:
    raise newException(
      IndexingWrongGroupCountError,
      "groupCount (" & $groupCount & ") must be greater than zero.",
    )

  IndexingStrategy(
    strategyType: strategy,
    firstIndex: firstIndex,
    lastIndex: lastIndex,
    iterations: iterations,
    step: divUp((lastIndex - firstIndex + 1), iterations),
    groupCount: groupCount,
    padBlockCount: padBlockCount,
  )
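To record what the removed module did: both strategies partition the inclusive range [firstIndex, lastIndex] over `iterations` steps, either as contiguous runs or strided, exactly as the doc comments above show. A minimal sketch, assuming the pre-removal import path:

import std/sequtils
import pkg/codex/indexingstrategy

let
  linear = LinearStrategy.init(0, 8, 3) # step = divUp(9, 3) = 3
  stepped = SteppedStrategy.init(0, 8, 3)

echo toSeq(linear.getIndices(0)) # @[0, 1, 2], a contiguous run
echo toSeq(stepped.getIndices(1)) # @[1, 4, 7], strided by `iterations`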
@@ -25,7 +25,6 @@ import ./manifest
import ../errors
import ../blocktype
import ../logutils
import ../indexingstrategy

proc encode*(manifest: Manifest): ?!seq[byte] =
  ## Encode the manifest into a ``ManifestCodec``
@@ -20,7 +20,6 @@ import ../utils
import ../utils/json
import ../units
import ../blocktype
import ../indexingstrategy
import ../logutils

# TODO: Manifest should be reworked to more concrete types,
@@ -1,10 +1,4 @@
import ./merkletree/merkletree
import ./merkletree/codex
import ./merkletree/poseidon2

export codex, poseidon2, merkletree

type
  SomeMerkleTree* = ByteTree | CodexTree | Poseidon2Tree
  SomeMerkleProof* = ByteProof | CodexProof | Poseidon2Proof
  SomeMerkleHash* = ByteHash | Poseidon2Hash
export codex, merkletree
@@ -1,130 +0,0 @@
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [].}

import std/sequtils

import pkg/poseidon2
import pkg/constantine/math/io/io_fields
import pkg/constantine/platforms/abstractions
import pkg/questionable/results

import ../utils
import ../rng

import ./merkletree

export merkletree, poseidon2

const
  KeyNoneF = F.fromHex("0x0")
  KeyBottomLayerF = F.fromHex("0x1")
  KeyOddF = F.fromHex("0x2")
  KeyOddAndBottomLayerF = F.fromHex("0x3")

  Poseidon2Zero* = zero

type
  Bn254Fr* = F
  Poseidon2Hash* = Bn254Fr

  PoseidonKeysEnum* = enum # can't use non-ordinals as enum values
    KeyNone
    KeyBottomLayer
    KeyOdd
    KeyOddAndBottomLayer

  Poseidon2Tree* = MerkleTree[Poseidon2Hash, PoseidonKeysEnum]
  Poseidon2Proof* = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]

proc `$`*(self: Poseidon2Tree): string =
  let root = if self.root.isOk: self.root.get.toHex else: "none"
  "Poseidon2Tree(" & " root: " & root & ", leavesCount: " & $self.leavesCount &
    ", levels: " & $self.levels & " )"

proc `$`*(self: Poseidon2Proof): string =
  "Poseidon2Proof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index &
    ", path: " & $self.path.mapIt(it.toHex) & " )"

func toArray32*(bytes: openArray[byte]): array[32, byte] =
  result[0 ..< bytes.len] = bytes[0 ..< bytes.len]

converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
  case key
  of KeyNone: KeyNoneF
  of KeyBottomLayer: KeyBottomLayerF
  of KeyOdd: KeyOddF
  of KeyOddAndBottomLayer: KeyOddAndBottomLayerF

func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
  if leaves.len == 0:
    return failure "Empty leaves"

  let compressor = proc(
      x, y: Poseidon2Hash, key: PoseidonKeysEnum
  ): ?!Poseidon2Hash {.noSideEffect.} =
    success compress(x, y, key.toKey)

  var self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)

  self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
  success self

func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
  Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it)))

proc fromNodes*(
    _: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int
): ?!Poseidon2Tree =
  if nodes.len == 0:
    return failure "Empty nodes"

  let compressor = proc(
      x, y: Poseidon2Hash, key: PoseidonKeysEnum
  ): ?!Poseidon2Hash {.noSideEffect.} =
    success compress(x, y, key.toKey)

  var
    self = Poseidon2Tree(compress: compressor, zero: zero)
    layer = nleaves
    pos = 0

  while pos < nodes.len:
    self.layers.add(nodes[pos ..< (pos + layer)])
    pos += layer
    layer = divUp(layer, 2)

  let
    index = Rng.instance.rand(nleaves - 1)
    proof = ?self.getProof(index)

  if not ?proof.verify(self.leaves[index], ?self.root): # sanity check
    return failure "Unable to verify tree built from nodes"

  success self

func init*(
    _: type Poseidon2Proof, index: int, nleaves: int, nodes: openArray[Poseidon2Hash]
): ?!Poseidon2Proof =
  if nodes.len == 0:
    return failure "Empty nodes"

  let compressor = proc(
      x, y: Poseidon2Hash, key: PoseidonKeysEnum
  ): ?!Poseidon2Hash {.noSideEffect.} =
    success compress(x, y, key.toKey)

  success Poseidon2Proof(
    compress: compressor,
    zero: Poseidon2Zero,
    index: index,
    nleaves: nleaves,
    path: @nodes,
  )
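A minimal usage sketch against the deleted tree API, with arbitrary illustrative leaf bytes (31-byte leaves so each fits in a Bn254 field element):

import pkg/questionable/results
import pkg/codex/merkletree # as it was before this commit

var leaves: seq[array[31, byte]]
for i in 0 ..< 4:
  var leaf: array[31, byte]
  leaf[0] = byte(i + 1)
  leaves.add(leaf)

let
  tree = Poseidon2Tree.init(leaves).tryGet
  proof = tree.getProof(2).tryGet # inclusion proof for leaf 2
assert proof.verify(tree.leaves[2], tree.root.tryGet).tryGet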
@@ -1,11 +1,2 @@
const CodecExts = [
  ("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge
  ("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize
  ("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress
  ("codex-manifest", 0xCD01),
  ("codex-block", 0xCD02),
  ("codex-root", 0xCD03),
  ("codex-slot-root", 0xCD04),
  ("codex-proving-root", 0xCD05),
  ("codex-slot-cell", 0xCD06),
]
const CodecExts =
  [("codex-manifest", 0xCD01), ("codex-block", 0xCD02), ("codex-root", 0xCD03)]
@@ -1,6 +1,5 @@
import blscurve/bls_public_exports
import pkg/constantine/hashes
import poseidon2

proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
  # Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
@@ -9,16 +8,6 @@ proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]
  let digest = hashes.sha256.hash(data)
  copyMem(addr output[0], addr digest[0], 32)

proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = poseidon2.Sponge.digest(data).toBytes()
    copyMem(addr output[0], addr digest[0], uint(len(output)))

proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) =
  if len(output) > 0:
    var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes()
    copyMem(addr output[0], addr digest[0], uint(len(output)))

const Sha2256MultiHash* = MHash(
  mcodec: multiCodec("sha2-256"),
  size: sha256.sizeDigest,
@@ -26,15 +15,5 @@ const Sha2256MultiHash* = MHash(
)
const HashExts = [
  # override sha2-256 hash function
  Sha2256MultiHash,
  MHash(
    mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"),
    size: 32,
    coder: poseidon2_sponge_rate2,
  ),
  MHash(
    mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"),
    size: 32,
    coder: poseidon2_merkle_2kb_sponge,
  ),
  Sha2256MultiHash
]
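Because these MHash entries were registered as multihash extensions, callers reached the poseidon2 coders through the ordinary digest entry point, just as the padding-table code earlier in this diff does for sha2-256. A hedged sketch, assuming the registration this file performed:

import pkg/libp2p/multihash

let data = @[1'u8, 2, 3]
# Routed to poseidon2_sponge_rate2 via the registered coder:
let mh = MultiHash.digest("poseidon2-alt_bn_128-sponge-r2", data)
if mh.isOk:
  echo $mh.get() # multihash-encoded 32-byte sponge digest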
@@ -36,7 +36,6 @@ import ./stores
import ./blockexchange
import ./streams
import ./discovery
import ./indexingstrategy
import ./utils
import ./errors
import ./logutils
@@ -1,73 +0,0 @@
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/poseidon2
import pkg/questionable/results
import pkg/libp2p/multihash
import pkg/stew/byteutils

import ../merkletree

func spongeDigest*(
    _: type Poseidon2Hash, bytes: openArray[byte], rate: static int = 2
): ?!Poseidon2Hash =
  ## Hashes chunks of data with a sponge of rate 1 or 2.
  ##

  success Sponge.digest(bytes, rate)

func spongeDigest*(
    _: type Poseidon2Hash, bytes: openArray[Bn254Fr], rate: static int = 2
): ?!Poseidon2Hash =
  ## Hashes chunks of elements with a sponge of rate 1 or 2.
  ##

  success Sponge.digest(bytes, rate)

func digestTree*(
    _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int
): ?!Poseidon2Tree =
  ## Hashes chunks of data with a sponge of rate 2, and combines the
  ## resulting chunk hashes in a merkle root.
  ##

  # doAssert not(rate == 1 or rate == 2), "rate can only be 1 or 2"

  if not chunkSize > 0:
    return failure("chunkSize must be greater than 0")

  var index = 0
  var leaves: seq[Poseidon2Hash]
  while index < bytes.len:
    let start = index
    let finish = min(index + chunkSize, bytes.len)
    let digest = ?Poseidon2Hash.spongeDigest(bytes.toOpenArray(start, finish - 1), 2)
    leaves.add(digest)
    index += chunkSize
  return Poseidon2Tree.init(leaves)

func digest*(
    _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int
): ?!Poseidon2Hash =
  ## Hashes chunks of data with a sponge of rate 2, and combines the
  ## resulting chunk hashes in a merkle root.
  ##

  (?Poseidon2Tree.digestTree(bytes, chunkSize)).root

func digestMhash*(
    _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int
): ?!MultiHash =
  ## Hashes chunks of data with a sponge of rate 2 and
  ## returns the multihash of the root
  ##

  let hash = ?Poseidon2Tree.digest(bytes, chunkSize)

  ?MultiHash.init(Pos2Bn128MrklCodec, hash).mapFailure
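The chunking arithmetic deserves a note: with chunkSize = 2048, a 63 KiB input produces 31 full chunks plus one 1 KiB tail, so 32 leaves, which is precisely what the testmerkledigest suite below verifies. A small sketch against the removed module:

import pkg/questionable/results
import pkg/codex/merkletree
import pkg/codex/utils/poseidon2digest # removed by this commit

let bytes = newSeq[byte](63 * 1024) # zero-filled, illustration only
let tree = Poseidon2Tree.digestTree(bytes, chunkSize = 2048).tryGet
assert tree.leavesCount == 32 # 31 full chunks + 1 partial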
@@ -1,20 +1,8 @@
import pkg/constantine/platforms/abstractions

import pkg/codex/merkletree
import ../helpers

export merkletree, helpers

converter toBool*(x: CTBool): bool =
  bool(x)

proc `==`*(a, b: Poseidon2Tree): bool =
  (a.leavesCount == b.leavesCount) and (a.levels == b.levels) and (a.layers == b.layers)

proc `==`*(a, b: Poseidon2Proof): bool =
  (a.nleaves == b.nleaves) and (a.index == b.index) and (a.path.len == b.path.len) and
    (a.path == b.path)

proc `==`*(a, b: CodexTree): bool =
  (a.mcodec == b.mcodec) and (a.leavesCount == b.leavesCount) and (a.levels == b.levels)
@@ -1,57 +0,0 @@
import std/sequtils
import std/random

import pkg/unittest2
import pkg/poseidon2
import pkg/poseidon2/sponge

import pkg/questionable/results

import pkg/codex/merkletree
import pkg/codex/utils/poseidon2digest

import ./helpers

suite "Digest - MerkleTree":
  const KB = 1024

  test "Hashes chunks of data with sponge, and combines them in merkle root":
    let bytes = newSeqWith(64 * KB, rand(byte))
    var leaves: seq[Poseidon2Hash]
    for i in 0 ..< 32:
      let
        chunk = bytes[(i * 2 * KB) ..< ((i + 1) * 2 * KB)]
        digest = Sponge.digest(chunk, rate = 2)
      leaves.add(digest)

    let
      digest = Poseidon2Tree.digest(bytes, chunkSize = 2 * KB).tryGet
      spongeDigest = SpongeMerkle.digest(bytes, chunkSize = 2 * KB)
      codexPosTree = Poseidon2Tree.init(leaves).tryGet
      rootDigest = codexPosTree.root.tryGet

    check:
      bool(digest == spongeDigest)
      bool(digest == rootDigest)

  test "Handles partial chunk at the end":
    let bytes = newSeqWith(63 * KB, rand(byte))
    var leaves: seq[Poseidon2Hash]
    for i in 0 ..< 31:
      let
        chunk = bytes[(i * 2 * KB) ..< ((i + 1) * 2 * KB)]
        digest = Sponge.digest(chunk, rate = 2)
      leaves.add(digest)

    let partialChunk = bytes[(62 * KB) ..< (63 * KB)]
    leaves.add(Sponge.digest(partialChunk, rate = 2))

    let
      digest = Poseidon2Tree.digest(bytes, chunkSize = 2 * KB).tryGet
      spongeDigest = SpongeMerkle.digest(bytes, chunkSize = 2 * KB)
      codexPosTree = Poseidon2Tree.init(leaves).tryGet
      rootDigest = codexPosTree.root.tryGet

    check:
      bool(digest == spongeDigest)
      bool(digest == rootDigest)
@@ -1,78 +0,0 @@
import std/sequtils

import pkg/unittest2
import pkg/poseidon2
import pkg/poseidon2/io
import pkg/questionable/results
import pkg/results
import pkg/stew/byteutils
import pkg/stew/arrayops

import pkg/codex/merkletree

import ./generictreetests
import ./helpers

const data = [
  "0000000000000000000000000000001".toBytes,
  "0000000000000000000000000000002".toBytes,
  "0000000000000000000000000000003".toBytes,
  "0000000000000000000000000000004".toBytes,
  "0000000000000000000000000000005".toBytes,
  "0000000000000000000000000000006".toBytes,
  "0000000000000000000000000000007".toBytes,
  "0000000000000000000000000000008".toBytes,
  "0000000000000000000000000000009".toBytes,
  # note one less to account for padding of field elements
]

suite "Test Poseidon2Tree":
  var expectedLeaves: seq[Poseidon2Hash]

  setup:
    expectedLeaves = toSeq(data.concat().elements(Poseidon2Hash))

  test "Should fail init tree from empty leaves":
    check:
      Poseidon2Tree.init(leaves = newSeq[Poseidon2Hash](0)).isErr

  test "Init tree from poseidon2 leaves":
    let tree = Poseidon2Tree.init(leaves = expectedLeaves).tryGet

    check:
      tree.leaves == expectedLeaves

  test "Init tree from byte leaves":
    let tree = Poseidon2Tree.init(
      leaves = expectedLeaves.mapIt(array[31, byte].initCopyFrom(it.toBytes))
    ).tryGet

    check:
      tree.leaves == expectedLeaves

  test "Should build from nodes":
    let
      tree = Poseidon2Tree.init(leaves = expectedLeaves).tryGet
      fromNodes = Poseidon2Tree.fromNodes(
        nodes = toSeq(tree.nodes), nleaves = tree.leavesCount
      ).tryGet

    check:
      tree == fromNodes

let
  compressor = proc(
      x, y: Poseidon2Hash, key: PoseidonKeysEnum
  ): Poseidon2Hash {.noSideEffect.} =
    compress(x, y, key.toKey)

  makeTree = proc(data: seq[Poseidon2Hash]): Poseidon2Tree =
    Poseidon2Tree.init(leaves = data).tryGet

testGenericTree(
  "Poseidon2Tree",
  toSeq(data.concat().elements(Poseidon2Hash)),
  zero,
  compressor,
  makeTree,
)
@@ -9,7 +9,6 @@ import pkg/codex/merkletree
import pkg/codex/manifest
import pkg/codex/blocktype as bt
import pkg/codex/chunker
import pkg/codex/indexingstrategy
import pkg/codex/rng

import ../helpers
@@ -1,74 +0,0 @@
import std/sequtils
import pkg/chronos

import pkg/codex/utils/asynciter

import ../asynctest
import ./helpers

import pkg/codex/indexingstrategy

for offset in @[0, 1, 2, 100]:
  suite "Indexing strategies (Offset: " & $offset & ")":
    let
      firstIndex = 0 + offset
      lastIndex = 12 + offset
      nIters = 3
      linear = LinearStrategy.init(firstIndex, lastIndex, nIters)
      stepped = SteppedStrategy.init(firstIndex, lastIndex, nIters)

    test "linear":
      check:
        toSeq(linear.getIndices(0)) == @[0, 1, 2, 3, 4].mapIt(it + offset)
        toSeq(linear.getIndices(1)) == @[5, 6, 7, 8, 9].mapIt(it + offset)
        toSeq(linear.getIndices(2)) == @[10, 11, 12].mapIt(it + offset)

    test "stepped":
      check:
        toSeq(stepped.getIndices(0)) == @[0, 3, 6, 9, 12].mapIt(it + offset)
        toSeq(stepped.getIndices(1)) == @[1, 4, 7, 10].mapIt(it + offset)
        toSeq(stepped.getIndices(2)) == @[2, 5, 8, 11].mapIt(it + offset)

suite "Indexing strategies":
  let
    linear = LinearStrategy.init(0, 10, 3)
    stepped = SteppedStrategy.init(0, 10, 3)

  test "smallest range 0":
    let
      l = LinearStrategy.init(0, 0, 1)
      s = SteppedStrategy.init(0, 0, 1)
    check:
      toSeq(l.getIndices(0)) == @[0]
      toSeq(s.getIndices(0)) == @[0]

  test "smallest range 1":
    let
      l = LinearStrategy.init(0, 1, 1)
      s = SteppedStrategy.init(0, 1, 1)
    check:
      toSeq(l.getIndices(0)) == @[0, 1]
      toSeq(s.getIndices(0)) == @[0, 1]

  test "first index must be smaller than last index":
    expect IndexingWrongIndexError:
      discard LinearStrategy.init(10, 0, 1)

  test "iterations must be greater than zero":
    expect IndexingWrongIterationsError:
      discard LinearStrategy.init(0, 10, 0)

  test "should split elements evenly when possible":
    let l = LinearStrategy.init(0, 11, 3)
    check:
      toSeq(l.getIndices(0)) == @[0, 1, 2, 3].mapIt(it)
      toSeq(l.getIndices(1)) == @[4, 5, 6, 7].mapIt(it)
      toSeq(l.getIndices(2)) == @[8, 9, 10, 11].mapIt(it)

  test "linear - oob":
    expect IndexingError:
      discard linear.getIndices(3)

  test "stepped - oob":
    expect IndexingError:
      discard stepped.getIndices(3)
@@ -3,10 +3,8 @@ import pkg/questionable/results
import pkg/codex/chunker
import pkg/codex/blocktype as bt
import pkg/codex/manifest
import pkg/poseidon2

import pkg/codex/merkletree
import pkg/codex/indexingstrategy

import ../asynctest
import ./helpers
@@ -1,6 +1,4 @@
import ./merkletree/testcodextree
import ./merkletree/testposeidon2tree
import ./merkletree/testcodexcoders
import ./merkletree/testmerkledigest

{.warning[UnusedImport]: off.}
@@ -1,6 +1,6 @@
import pkg/chronos

import pkg/codex/[streams, stores, indexingstrategy, manifest, blocktype as bt]
import pkg/codex/[streams, stores, manifest, blocktype as bt]

import ../asynctest
import ./examples
@@ -1,30 +0,0 @@
import std/json
import pkg/ethers
import pkg/chronos

import ./asynctest
import ./checktest

## Unit testing suite that sets up an Ethereum testing environment.
## Injects an `ethProvider` instance and a list of `accounts`.
## Calls the `evm_snapshot` and `evm_revert` methods to ensure that any
## changes to the blockchain do not persist.
template ethersuite*(name, body) =
  asyncchecksuite name:
    var ethProvider {.inject, used.}: JsonRpcProvider
    var accounts {.inject, used.}: seq[Address]
    var snapshot: JsonNode

    setup:
      ethProvider = JsonRpcProvider.new("ws://localhost:8545")
      snapshot = await send(ethProvider, "evm_snapshot")
      accounts = await ethProvider.listAccounts()
    teardown:
      discard await send(ethProvider, "evm_revert", @[snapshot])

      await ethProvider.close()

    body

export asynctest
export ethers except `%`
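A hypothetical use of the removed template, with illustrative names only, to show what it injected:

ethersuite "on-chain sanity":
  test "provider exposes funded accounts":
    check accounts.len > 0 # `accounts` and `ethProvider` are injected by the template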
@@ -3,7 +3,6 @@ import pkg/questionable/results
import pkg/confutils
import pkg/chronicles
import pkg/chronos/asyncproc
import pkg/ethers
import pkg/libp2p
import std/os
import std/strutils