bump nim-ssz-serialization to `3db6cc0f282708aca6c290914488edd832971d61` (#3119)
This updates `nim-ssz-serialization` to `3db6cc0f282708aca6c290914488edd832971d61`. Notable changes:

- Use `uint64` for `GeneralizedIndex`
- Add support for building merkle multiproofs
parent b764f4f0d7
commit aa1b8e4a17
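
The bulk of the diff below is mechanical fallout from the library's reworked `build_proof`: instead of a `void` proc that fills a caller-allocated branch in place, it now returns the branch wrapped in a `Result`-style value that call sites unwrap with `.get`. A minimal before/after sketch of the pattern (identifiers taken from the hunks below; the exact signatures live in `nim-ssz-serialization`):

```nim
# Before: the caller allocates the branch and build_proof fills it in place.
var finality_branch {.noinit.}:
  array[log2trunc(altair.FINALIZED_ROOT_INDEX), Eth2Digest]
state.build_proof(altair.FINALIZED_ROOT_INDEX, finality_branch)

# After: build_proof returns the branch, unwrapped with `.get`;
# GeneralizedIndex is now a plain uint64.
let finality_branch = state.build_proof(altair.FINALIZED_ROOT_INDEX).get
```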
@@ -410,14 +410,9 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 ## Spec helpers
 ```diff
 + build_proof - BeaconState                                   OK
-+ get_branch_indices                                          OK
-+ get_helper_indices                                          OK
-+ get_path_indices                                            OK
 + integer_squareroot                                          OK
-+ is_valid_merkle_branch                                      OK
-+ verify_merkle_multiproof                                    OK
 ```
-OK: 7/7 Fail: 0/7 Skip: 0/7
+OK: 2/2 Fail: 0/2 Skip: 0/2

 ## Specific field types
 ```diff
 + root update                                                 OK

@@ -573,4 +568,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 OK: 9/9 Fail: 0/9 Skip: 0/9

 ---TOTAL---
-OK: 318/323 Fail: 0/323 Skip: 5/323
+OK: 313/318 Fail: 0/318 Skip: 5/318

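The five-test drop in the totals lines up with the multiproof-related unit tests (`get_branch_indices`, `get_path_indices`, `get_helper_indices`, `is_valid_merkle_branch`, `verify_merkle_multiproof`) moving out of this repository together with their implementation; see the helpers and "Spec helpers" test hunks below.
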
@@ -720,7 +720,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,

       # The only allowed flag right now is verifyFinalization, as the others all
       # allow skipping some validation.
-      updateFlags: {verifyFinalization} * updateFlags,
+      updateFlags: {verifyFinalization, enableTestFeatures} * updateFlags,
       cfg: cfg,

       vanityLogs: vanityLogs,

@@ -158,18 +158,15 @@ proc cacheLightClientData(

   ## Cache data for a given block and its post-state to speed up creating future
   ## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this
   ## block and state.
-  var cachedData {.noinit.}: CachedLightClientData
-  state.data.build_proof(
-    altair.CURRENT_SYNC_COMMITTEE_INDEX,
-    cachedData.current_sync_committee_branch)
-  state.data.build_proof(
-    altair.NEXT_SYNC_COMMITTEE_INDEX,
-    cachedData.next_sync_committee_branch)
-  cachedData.finalized_slot =
-    state.data.finalized_checkpoint.epoch.start_slot
-  state.data.build_proof(
-    altair.FINALIZED_ROOT_INDEX,
-    cachedData.finality_branch)
+  let cachedData = CachedLightClientData(
+    current_sync_committee_branch:
+      state.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_INDEX).get,
+    next_sync_committee_branch:
+      state.data.build_proof(altair.NEXT_SYNC_COMMITTEE_INDEX).get,
+    finalized_slot:
+      state.data.finalized_checkpoint.epoch.start_slot,
+    finality_branch:
+      state.data.build_proof(altair.FINALIZED_ROOT_INDEX).get)
   if dag.lcDataStore.cache.data.hasKeyOrPut(bid, cachedData):
     doAssert false, "Redundant `cacheLightClientData` call"

@@ -529,16 +526,14 @@ proc initLightClientBootstrapForPeriod(

       boundarySlot = bid.slot.nextEpochBoundarySlot
     if boundarySlot == nextBoundarySlot and bid.slot >= lowSlot and
         not dag.lcDataStore.cache.bootstrap.hasKey(bid.slot):
-      var cachedBootstrap {.noinit.}: CachedLightClientBootstrap
       if not dag.updateExistingState(
           tmpState[], bid.atSlot, save = false, tmpCache):
         dag.handleUnexpectedLightClientError(bid.slot)
         continue
-      withState(tmpState[]):
+      var cachedBootstrap {.noinit.}: CachedLightClientBootstrap
+      cachedBootstrap.current_sync_committee_branch = withState(tmpState[]):
         when stateFork >= BeaconStateFork.Altair:
-          state.data.build_proof(
-            altair.CURRENT_SYNC_COMMITTEE_INDEX,
-            cachedBootstrap.current_sync_committee_branch)
+          state.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_INDEX).get
         else: raiseAssert "Unreachable"
       dag.lcDataStore.cache.bootstrap[bid.slot] = cachedBootstrap

@@ -679,15 +674,13 @@ proc initLightClientUpdateForPeriod(

       when stateFork >= BeaconStateFork.Altair:
         update.attested_header = blck.toBeaconBlockHeader()
         update.next_sync_committee = state.data.next_sync_committee
-        state.data.build_proof(
-          altair.NEXT_SYNC_COMMITTEE_INDEX,
-          update.next_sync_committee_branch)
+        update.next_sync_committee_branch =
+          state.data.build_proof(altair.NEXT_SYNC_COMMITTEE_INDEX).get
         if finalizedBid.slot == FAR_FUTURE_SLOT:
           update.finality_branch.reset()
         else:
-          state.data.build_proof(
-            altair.FINALIZED_ROOT_INDEX,
-            update.finality_branch)
+          update.finality_branch =
+            state.data.build_proof(altair.FINALIZED_ROOT_INDEX).get
       else: raiseAssert "Unreachable"
   do:
     dag.handleUnexpectedLightClientError(attestedBid.slot)

@@ -819,11 +812,9 @@ proc getLightClientBootstrap*(

     let bsi = ? dag.getExistingBlockIdAtSlot(slot)
     var tmpState = assignClone(dag.headState)
     dag.withUpdatedExistingState(tmpState[], bsi) do:
-      withState(state):
+      cachedBootstrap.current_sync_committee_branch = withState(state):
         when stateFork >= BeaconStateFork.Altair:
-          state.data.build_proof(
-            altair.CURRENT_SYNC_COMMITTEE_INDEX,
-            cachedBootstrap.current_sync_committee_branch)
+          state.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_INDEX).get
         else: raiseAssert "Unreachable"
     do: return err()
     dag.lcDataStore.cache.bootstrap[slot] = cachedBootstrap

@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Copyright (c) 2018-2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -10,11 +10,11 @@

 # Import this module to get access to `hash_tree_root` for spec types

 import
-  ssz_serialization/merkleization,
+  ssz_serialization/[merkleization, proofs],
   ./ssz_codec,
   ./datatypes/[phase0, altair]

-export ssz_codec, merkleization
+export ssz_codec, merkleization, proofs

 func hash_tree_root*(x: phase0.HashedBeaconState | altair.HashedBeaconState) {.
   error: "HashedBeaconState should not be hashed".}

@@ -15,10 +15,9 @@

 import
   # Standard lib
-  std/[algorithm, math, sequtils, sets, tables],
+  std/[algorithm, math, sets, tables],
   # Status libraries
   stew/[bitops2, byteutils, endians2, objects],
-  chronicles,
   # Internal
   ./datatypes/[phase0, altair, bellatrix],
   "."/[eth2_merkleization, forks, ssz_codec]

@@ -41,310 +40,6 @@ func integer_squareroot*(n: SomeInteger): SomeInteger =
     y = (x + n div x) div 2
   x

-# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/ssz/merkle-proofs.md#generalized_index_sibling
-template generalized_index_sibling*(
-    index: GeneralizedIndex): GeneralizedIndex =
-  index xor 1.GeneralizedIndex
-
-template generalized_index_sibling_left(
-    index: GeneralizedIndex): GeneralizedIndex =
-  index and not 1.GeneralizedIndex
-
-template generalized_index_sibling_right(
-    index: GeneralizedIndex): GeneralizedIndex =
-  index or 1.GeneralizedIndex
-
-# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/ssz/merkle-proofs.md#generalized_index_parent
-template generalized_index_parent*(
-    index: GeneralizedIndex): GeneralizedIndex =
-  index shr 1
-
-# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/ssz/merkle-proofs.md#merkle-multiproofs
-iterator get_branch_indices*(
-    tree_index: GeneralizedIndex): GeneralizedIndex =
-  ## Get the generalized indices of the sister chunks along the path
-  ## from the chunk with the given tree index to the root.
-  var index = tree_index
-  while index > 1.GeneralizedIndex:
-    yield generalized_index_sibling(index)
-    index = generalized_index_parent(index)
-
-# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/ssz/merkle-proofs.md#merkle-multiproofs
-iterator get_path_indices*(
-    tree_index: GeneralizedIndex): GeneralizedIndex =
-  ## Get the generalized indices of the chunks along the path
-  ## from the chunk with the given tree index to the root.
-  var index = tree_index
-  while index > 1.GeneralizedIndex:
-    yield index
-    index = generalized_index_parent(index)
-
-# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/ssz/merkle-proofs.md#merkle-multiproofs
-func get_helper_indices*(
-    indices: openArray[GeneralizedIndex]): seq[GeneralizedIndex] =
-  ## Get the generalized indices of all "extra" chunks in the tree needed
-  ## to prove the chunks with the given generalized indices. Note that the
-  ## decreasing order is chosen deliberately to ensure equivalence to the order
-  ## of hashes in a regular single-item Merkle proof in the single-item case.
-  var all_helper_indices = initHashSet[GeneralizedIndex]()
-  for index in indices:
-    for idx in get_branch_indices(index):
-      all_helper_indices.incl idx
-  for index in indices:
-    for idx in get_path_indices(index):
-      all_helper_indices.excl idx
-
-  var res = newSeqOfCap[GeneralizedIndex](all_helper_indices.len)
-  for idx in all_helper_indices:
-    res.add idx
-  res.sort(SortOrder.Descending)
-  res
-
-# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/ssz/merkle-proofs.md#merkle-multiproofs
-func check_multiproof_acceptable*(
-    indices: openArray[GeneralizedIndex]): Result[void, string] =
-  # Check that proof verification won't allocate excessive amounts of memory.
-  const max_multiproof_complexity = nextPowerOfTwo(256)
-  if indices.len > max_multiproof_complexity:
-    trace "Max multiproof complexity exceeded",
-      num_indices=indices.len, max_multiproof_complexity
-    return err("Unsupported multiproof complexity (" & $indices.len & ")")
-
-  if indices.len == 0:
-    return err("No indices specified")
-  if indices.anyIt(it == 0.GeneralizedIndex):
-    return err("Invalid index specified")
-  ok()
-
-func calculate_multi_merkle_root_impl(
-    leaves: openArray[Eth2Digest],
-    proof: openArray[Eth2Digest],
-    indices: openArray[GeneralizedIndex],
-    helper_indices: openArray[GeneralizedIndex]): Result[Eth2Digest, string] =
-  # All callers have already verified the checks in check_multiproof_acceptable,
-  # as well as whether lengths of leaves/indices and proof/helper_indices match.
-
-  # Helper to retrieve a value from a table that is statically known to exist.
-  template getExisting[A, B](t: var Table[A, B], key: A): var B =
-    try: t[key]
-    except KeyError: raiseAssert "Unreachable"
-
-  # Populate data structure with all leaves.
-  # This data structure only scales with the number of `leaves`,
-  # in contrast to the spec one that also scales with the number of `proof`
-  # items and the number of all intermediate roots, potentially the entire tree.
-  let capacity = nextPowerOfTwo(leaves.len)
-  var objects = initTable[GeneralizedIndex, Eth2Digest](capacity)
-  for i, index in indices:
-    if objects.mgetOrPut(index, leaves[i]) != leaves[i]:
-      return err("Conflicting roots for same index")
-
-  # Create list with keys of all active nodes that need to be visited.
-  # This list is sorted in descending order, same as `helper_indices`.
-  # Pulling from `objects` instead of from `indices` deduplicates the list.
-  var keys = newSeqOfCap[GeneralizedIndex](objects.len)
-  for index in objects.keys:
-    if index > 1.GeneralizedIndex: # For the root, no work needs to be done.
-      keys.add index
-  keys.sort(SortOrder.Descending)
-
-  # The merkle tree is processed from bottom to top, pulling in helper
-  # indices from `proof` as needed. During processing, the `keys` list
-  # may temporarily end up being split into two parts, sorted individually.
-  # An additional index tracks the current maximum element of the list.
-  var
-    completed = 0        # All key indices before this are fully processed.
-    maxIndex = completed # Index of the list's largest key.
-    helper = 0           # Helper index from `proof` to be pulled next.
-
-  # Processing is done when there are no more keys to process.
-  while completed < keys.len:
-    let
-      k = keys[maxIndex]
-      sibling = generalized_index_sibling(k)
-      left = generalized_index_sibling_left(k)
-      right = generalized_index_sibling_right(k)
-      parent = generalized_index_parent(k)
-      parentRight = generalized_index_sibling_right(parent)
-
-    # Keys need to be processed in descending order to ensure that intermediate
-    # roots remain available until they are no longer needed. This ensures that
-    # conflicting roots are detected in all cases.
-    keys[maxIndex] =
-      if not objects.hasKey(k):
-        # A previous computation did already merge this key with its sibling.
-        0.GeneralizedIndex
-      else:
-        # Compute expected root for parent. This deletes child roots.
-        # Because the list is sorted in descending order, they are not needed.
-        let root = withEth2Hash:
-          if helper < helper_indices.len and helper_indices[helper] == sibling:
-            # The next proof item is required to form the parent hash.
-            if sibling == left:
-              h.update proof[helper].data
-              h.update objects.getExisting(right).data; objects.del right
-            else:
-              h.update objects.getExisting(left).data; objects.del left
-              h.update proof[helper].data
-            inc helper
-          else:
-            # Both siblings are already known.
-            h.update objects.getExisting(left).data; objects.del left
-            h.update objects.getExisting(right).data; objects.del right
-
-        # Store parent root, and replace the current list entry with its parent.
-        if objects.hasKeyOrPut(parent, root):
-          if objects.getExisting(parent) != root:
-            return err("Conflicting roots for same index")
-          0.GeneralizedIndex
-        elif parent > 1.GeneralizedIndex:
-          # Note that the list may contain further nodes that are on a layer
-          # beneath the parent, so this may break the strictly descending order
-          # of the list. For example, given [12, 9], this will lead to [6, 9].
-          # This will resolve itself after the additional nodes are processed,
-          # i.e., [6, 9] -> [6, 4] -> [3, 4] -> [3, 2] -> [1].
-          parent
-        else:
-          0.GeneralizedIndex
-    if keys[maxIndex] != 0.GeneralizedIndex:
-      # The list may have been temporarily split up into two parts that are
-      # individually sorted in descending order. Have to first process further
-      # nodes until the list is sorted once more.
-      inc maxIndex
-
-    # Determine whether descending sort order has been restored.
-    let isSorted =
-      if maxIndex == completed: true
-      else:
-        while maxIndex < keys.len and keys[maxIndex] == 0.GeneralizedIndex:
-          inc maxIndex
-        maxIndex >= keys.len or keys[maxIndex] <= parentRight
-    if isSorted:
-      # List is sorted once more. Reset `maxIndex` to its start.
-      while completed < keys.len and keys[completed] == 0.GeneralizedIndex:
-        inc completed
-      maxIndex = completed
-
-  # Proof is guaranteed to provide all info needed to reach the root.
-  doAssert helper == helper_indices.len
-  doAssert objects.len == 1
-  ok(objects.getExisting(1.GeneralizedIndex))
-
-func calculate_multi_merkle_root*(
-    leaves: openArray[Eth2Digest],
-    proof: openArray[Eth2Digest],
-    indices: openArray[GeneralizedIndex],
-    helper_indices: openArray[GeneralizedIndex]): Result[Eth2Digest, string] =
-  doAssert proof.len == helper_indices.len
-  if leaves.len != indices.len:
-    return err("Length mismatch for leaves and indices")
-  ? check_multiproof_acceptable(indices)
-  calculate_multi_merkle_root_impl(
-    leaves, proof, indices, helper_indices)
-
-func calculate_multi_merkle_root*(
-    leaves: openArray[Eth2Digest],
-    proof: openArray[Eth2Digest],
-    indices: openArray[GeneralizedIndex]): Result[Eth2Digest, string] =
-  if leaves.len != indices.len:
-    return err("Length mismatch for leaves and indices")
-  ? check_multiproof_acceptable(indices)
-  calculate_multi_merkle_root_impl(
-    leaves, proof, indices, get_helper_indices(indices))
-
-# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/ssz/merkle-proofs.md#merkle-multiproofs
-func verify_merkle_multiproof*(
-    leaves: openArray[Eth2Digest],
-    proof: openArray[Eth2Digest],
-    indices: openArray[GeneralizedIndex],
-    helper_indices: openArray[GeneralizedIndex],
-    root: Eth2Digest): bool =
-  let calc = calculate_multi_merkle_root(leaves, proof, indices, helper_indices)
-  if calc.isErr: return false
-  calc.get == root
-
-func verify_merkle_multiproof*(
-    leaves: openArray[Eth2Digest],
-    proof: openArray[Eth2Digest],
-    indices: openArray[GeneralizedIndex],
-    root: Eth2Digest): bool =
-  let calc = calculate_multi_merkle_root(leaves, proof, indices)
-  if calc.isErr: return false
-  calc.get == root
-
-# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/beacon-chain.md#is_valid_merkle_branch
-func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openArray[Eth2Digest],
-                             depth: int, index: uint64,
-                             root: Eth2Digest): bool =
-  ## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and
-  ## ``branch``.
-  var
-    value = leaf
-    buf: array[64, byte]
-
-  for i in 0 ..< depth:
-    if (index div (1'u64 shl i)) mod 2 != 0:
-      buf[0..31] = branch[i].data
-      buf[32..63] = value.data
-    else:
-      buf[0..31] = value.data
-      buf[32..63] = branch[i].data
-    value = eth2digest(buf)
-  value == root
-
-# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/tests/core/pyspec/eth2spec/test/helpers/merkle.py#L4-L21
-func build_proof_impl(anchor: object, leaf_index: uint64,
-                      proof: var openArray[Eth2Digest]) =
-  let
-    bottom_length = nextPow2(typeof(anchor).totalSerializedFields.uint64)
-    tree_depth = log2trunc(bottom_length)
-    parent_index =
-      if leaf_index < bottom_length shl 1:
-        0'u64
-      else:
-        var i = leaf_index
-        while i >= bottom_length shl 1:
-          i = i shr 1
-        i
-
-  var
-    prefix_len = 0
-    proof_len = log2trunc(leaf_index)
-    cache = newSeq[Eth2Digest](bottom_length shl 1)
-  block:
-    var i = bottom_length
-    anchor.enumInstanceSerializedFields(fieldNameVar, fieldVar):
-      if i == parent_index:
-        when fieldVar is object:
-          prefix_len = log2trunc(leaf_index) - tree_depth
-          proof_len -= prefix_len
-          let
-            bottom_bits = leaf_index and not (uint64.high shl prefix_len)
-            prefix_leaf_index = (1'u64 shl prefix_len) + bottom_bits
-          build_proof_impl(fieldVar, prefix_leaf_index, proof)
-        else: raiseAssert "Invalid leaf_index"
-      cache[i] = hash_tree_root(fieldVar)
-      i += 1
-  for i in countdown(bottom_length - 1, 1):
-    cache[i] = withEth2Hash:
-      h.update cache[i shl 1].data
-      h.update cache[i shl 1 + 1].data
-
-  var i = if parent_index != 0: parent_index
-          else: leaf_index
-  doAssert i > 0 and i < bottom_length shl 1
-  for proof_index in prefix_len ..< prefix_len + proof_len:
-    let b = (i and 1) != 0
-    i = i shr 1
-    proof[proof_index] = if b: cache[i shl 1]
-                         else: cache[i shl 1 + 1]
-
-func build_proof*(anchor: object, leaf_index: uint64,
-                  proof: var openArray[Eth2Digest]) =
-  doAssert leaf_index > 0
-  doAssert proof.len == log2trunc(leaf_index)
-  build_proof_impl(anchor, leaf_index, proof)
-
 # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/beacon-chain.md#is_active_validator
 func is_active_validator*(validator: Validator, epoch: Epoch): bool =
   ## Check if ``validator`` is active

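As the doc comment on `get_helper_indices` above points out, the descending order makes the single-leaf case coincide with an ordinary bottom-up Merkle branch. A quick hand-checked illustration (not part of the diff; it assumes the relocated function keeps the semantics shown above):

```nim
# Generalized index 10 is 0b1010. Its sister nodes on the way to the root
# are 11 (0b1011), 4 (0b100) and 3 (0b11), which is exactly the hash order
# of a regular single-item Merkle proof for that leaf.
doAssert get_helper_indices([10.GeneralizedIndex]) ==
  @[11.GeneralizedIndex, 4.GeneralizedIndex, 3.GeneralizedIndex]
```
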
@@ -493,11 +188,6 @@ template is_finality_update*(update: SomeLightClientUpdate): bool =
   else:
     false

-# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/altair/sync-protocol.md#get_subtree_index
-func get_subtree_index*(idx: GeneralizedIndex): uint64 =
-  doAssert idx > 0
-  uint64(idx mod (type(idx)(1) shl log2trunc(idx)))
-
 # https://github.com/ethereum/consensus-specs/blob/vFuture/specs/altair/sync-protocol.md#is_next_sync_committee_known
 template is_next_sync_committee_known*(store: LightClientStore): bool =
   not isZeroMemory(store.next_sync_committee)

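`get_subtree_index` moves out as well; the tests below keep calling it, presumably resolving to the bumped library now. It simply strips the leading depth bit from a generalized index. A worked example, using the index 49 that also appears in the (removed) `is_valid_merkle_branch` test cases further down:

```nim
# 49 = 0b110001 and log2trunc(49) == 5, so the subtree index is
# 49 mod (1 shl 5) == 17: the leaf's position within its own layer.
doAssert get_subtree_index(49.GeneralizedIndex) == 17'u64
```
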
@@ -47,7 +47,7 @@ proc runTest(identifier: string) =
         altair.BeaconState))

   var computedProof = newSeq[Eth2Digest](log2trunc(proof.leaf_index))
-  build_proof(state[], proof.leaf_index, computedProof)
+  build_proof(state[], proof.leaf_index, computedProof).get

   check:
     computedProof == proof.branch.mapIt(Eth2Digest.fromHex(it))

@@ -239,7 +239,6 @@ suite "EF - Altair - Unittests - Sync protocol" & preset():
         default(array[log2trunc(altair.NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest])

     # Finality is unchanged
-    let
       finality_header = BeaconBlockHeader()
       finality_branch =
         default(array[log2trunc(altair.FINALIZED_ROOT_INDEX), Eth2Digest])

@@ -292,13 +291,11 @@ suite "EF - Altair - Unittests - Sync protocol" & preset():

     # Sync committee is updated
     template next_sync_committee(): auto = state.next_sync_committee
-    var next_sync_committee_branch {.noinit.}:
-      array[log2trunc(altair.NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
-    state.build_proof(
-      altair.NEXT_SYNC_COMMITTEE_INDEX, next_sync_committee_branch)
+    let
+      next_sync_committee_branch =
+        state.build_proof(altair.NEXT_SYNC_COMMITTEE_INDEX).get

     # Finality is unchanged
-    let
       finality_header = BeaconBlockHeader()
       finality_branch =
         default(array[log2trunc(altair.FINALIZED_ROOT_INDEX), Eth2Digest])

@@ -364,12 +361,9 @@ suite "EF - Altair - Unittests - Sync protocol" & preset():
     check:
       finalized_header.slot == start_slot(state.finalized_checkpoint.epoch)
       finalized_header.hash_tree_root() == state.finalized_checkpoint.root
-    var finality_branch {.noinit.}:
-      array[log2trunc(altair.FINALIZED_ROOT_INDEX), Eth2Digest]
-    state.build_proof(
-      altair.FINALIZED_ROOT_INDEX, finality_branch)
-
     let
+      finality_branch = state.build_proof(altair.FINALIZED_ROOT_INDEX).get
+
       update = altair.LightClientUpdate(
         attested_header: attested_header,
         next_sync_committee: next_sync_committee,

@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018, 2021 Status Research & Development GmbH
+# Copyright (c) 2018-2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -8,8 +8,6 @@

 {.used.}

 import
-  # Standard library
-  sequtils,
   # Status libraries
   stew/bitops2,
   # Beacon chain internals

@@ -48,9 +46,11 @@ suite "Spec helpers":
     anchor.enumInstanceSerializedFields(fieldNameVar, fieldVar):
       let depth = log2trunc(i)
       var proof = newSeq[Eth2Digest](depth)
-      build_proof(state, i, proof)
-      check: is_valid_merkle_branch(hash_tree_root(fieldVar), proof,
-                                    depth, get_subtree_index(i), root)
+      state.build_proof(i, proof).get
+      check:
+        hash_tree_root(fieldVar) == hash_tree_root(state, i).get
+        is_valid_merkle_branch(hash_tree_root(fieldVar), proof,
+                               depth, get_subtree_index(i), root)
       when fieldVar is object and not (fieldVar is Eth2Digest):
         let
           numChildLeaves = fieldVar.numLeaves

@@ -58,162 +58,3 @@ suite "Spec helpers":
           process(fieldVar, i shl childDepth)
         i += 1
     process(state, state.numLeaves)
-
-  test "get_branch_indices":
-    check:
-      toSeq(get_branch_indices(1.GeneralizedIndex)) == []
-      toSeq(get_branch_indices(0b101010.GeneralizedIndex)) ==
-        [
-          0b101011.GeneralizedIndex,
-          0b10100.GeneralizedIndex,
-          0b1011.GeneralizedIndex,
-          0b100.GeneralizedIndex,
-          0b11.GeneralizedIndex
-        ]
-
-  test "get_path_indices":
-    check:
-      toSeq(get_path_indices(1.GeneralizedIndex)) == []
-      toSeq(get_path_indices(0b101010.GeneralizedIndex)) ==
-        [
-          0b101010.GeneralizedIndex,
-          0b10101.GeneralizedIndex,
-          0b1010.GeneralizedIndex,
-          0b101.GeneralizedIndex,
-          0b10.GeneralizedIndex
-        ]
-
-  test "get_helper_indices":
-    check:
-      get_helper_indices(
-        [
-          8.GeneralizedIndex,
-          9.GeneralizedIndex,
-          14.GeneralizedIndex]) ==
-        [
-          15.GeneralizedIndex,
-          6.GeneralizedIndex,
-          5.GeneralizedIndex
-        ]
-
-  test "verify_merkle_multiproof":
-    var nodes: array[16, Eth2Digest]
-    for i in countdown(15, 8):
-      nodes[i] = eth2digest([i.byte])
-    for i in countdown(7, 1):
-      nodes[i] = withEth2Hash:
-        h.update nodes[2 * i + 0].data
-        h.update nodes[2 * i + 1].data
-
-    proc verify(indices_int: openArray[int]) =
-      let
-        indices = indices_int.mapIt(it.GeneralizedIndex)
-        helper_indices = get_helper_indices(indices)
-        leaves = indices.mapIt(nodes[it])
-        proof = helper_indices.mapIt(nodes[it])
-        root = nodes[1]
-      checkpoint "Verifying " & $indices & "---" & $helper_indices
-      check:
-        verify_merkle_multiproof(leaves, proof, indices, root)
-
-    verify([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
-
-    for a in 1 .. 15:
-      verify([a])
-      for b in 1 .. 15:
-        verify([a, b])
-        for c in 1 .. 15:
-          verify([a, b, c])
-          for d in 8 .. 15:
-            verify([a, b, c, d])
-            for e in 1 .. 7:
-              verify([a, b, c, d, e])
-
-  test "is_valid_merkle_branch":
-    type TestCase = object
-      root: string
-      proof: seq[string]
-      leaf: string
-      index: uint64
-      valid: bool
-
-    let testCases = @[
-      TestCase(
-        root:
-          "2a23ef2b7a7221eaac2ffb3842a506a981c009ca6c2fcbf20adbc595e56f1a93",
-        proof: @[
-          "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
-          "f5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b"
-        ],
-        leaf:
-          "0100000000000000000000000000000000000000000000000000000000000000",
-        index: 4,
-        valid: true
-      ),
-      TestCase(
-        root:
-          "2a23ef2b7a7221eaac2ffb3842a506a981c009ca6c2fcbf20adbc595e56f1a93",
-        proof: @[
-          "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
-          "f5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b"
-        ],
-        leaf:
-          "0100000000000000000000000000000000000000000000000000000000000000",
-        index: 6,
-        valid: false
-      ),
-      TestCase(
-        root:
-          "2a23ef2b7a7221eaac2ffb3842a506a981c009ca6c2fcbf20adbc595e56f1a93",
-        proof: @[
-          "0100000000000000000000000000000000000000000000000000000000000000",
-          "f5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b"
-        ],
-        leaf:
-          "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
-        index: 5,
-        valid: true
-      ),
-      TestCase(
-        root:
-          "f1824b0084956084591ff4c91c11bcc94a40be82da280e5171932b967dd146e9",
-        proof: @[
-          "35210d64853aee79d03f30cf0f29c1398706cbbcacaf05ab9524f00070aec91e",
-          "f38a181470ef1eee90a29f0af0a9dba6b7e5d48af3c93c29b4f91fa11b777582"
-        ],
-        leaf:
-          "0100000000000000000000000000000000000000000000000000000000000000",
-        index: 7,
-        valid: true
-      ),
-      TestCase(
-        root:
-          "f1824b0084956084591ff4c91c11bcc94a40be82da280e5171932b967dd146e9",
-        proof: @[
-          "0000000000000000000000000000000000000000000000000000000000000000",
-          "0000000000000000000000000000000000000000000000000000000000000000",
-          "f5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b",
-          "0100000000000000000000000000000000000000000000000000000000000000",
-          "f38a181470ef1eee90a29f0af0a9dba6b7e5d48af3c93c29b4f91fa11b777582"
-        ],
-        leaf:
-          "6001000000000000000000000000000000000000000000000000000000000000",
-        index: 49,
-        valid: true
-      )
-    ]
-
-    for testCase in testCases:
-      let
-        root = Eth2Digest.fromHex(testCase.root)
-        proof = mapIt(testCase.proof, Eth2Digest.fromHex(it))
-        leaf = Eth2Digest.fromHex(testCase.leaf)
-        index = testCase.index.GeneralizedIndex
-        valid = is_valid_merkle_branch(leaf, proof,
-                                       log2trunc(index),
-                                       get_subtree_index(index),
-                                       root)
-      if testCase.valid:
-        check valid
-      else:
-        check (not valid)

@@ -1 +1 @@
-Subproject commit 07f96da6a6ec4a899c33072c368890bc7ff3536a
+Subproject commit 3db6cc0f282708aca6c290914488edd832971d61