Merge branch 'dev' into hwwhww/strict-uint64-2

Hsiao-Wei Wang 2020-07-15 23:02:51 +08:00
commit 6e3a7ad8f2
19 changed files with 1208 additions and 321 deletions

View File

@ -35,39 +35,39 @@ commands:
description: "Restore the cache with pyspec keys" description: "Restore the cache with pyspec keys"
steps: steps:
- restore_cached_venv: - restore_cached_venv:
venv_name: v19-pyspec venv_name: v22-pyspec
reqs_checksum: cache-{{ checksum "setup.py" }} reqs_checksum: cache-{{ checksum "setup.py" }}
save_pyspec_cached_venv: save_pyspec_cached_venv:
description: "Save a venv into a cache with pyspec keys" description: "Save a venv into a cache with pyspec keys"
steps: steps:
- save_cached_venv: - save_cached_venv:
venv_name: v19-pyspec venv_name: v22-pyspec
reqs_checksum: cache-{{ checksum "setup.py" }} reqs_checksum: cache-{{ checksum "setup.py" }}
venv_path: ./venv venv_path: ./venv
restore_deposit_contract_compiler_cached_venv: restore_deposit_contract_compiler_cached_venv:
description: "Restore the venv from cache for the deposit contract compiler" description: "Restore the venv from cache for the deposit contract compiler"
steps: steps:
- restore_cached_venv: - restore_cached_venv:
venv_name: v18-deposit-contract-compiler venv_name: v23-deposit-contract-compiler
reqs_checksum: cache-{{ checksum "deposit_contract/compiler/requirements.txt" }} reqs_checksum: cache-{{ checksum "deposit_contract/compiler/requirements.txt" }}
save_deposit_contract_compiler_cached_venv: save_deposit_contract_compiler_cached_venv:
description: "Save the venv to cache for later use of the deposit contract compiler" description: "Save the venv to cache for later use of the deposit contract compiler"
steps: steps:
- save_cached_venv: - save_cached_venv:
venv_name: v18-deposit-contract-compiler venv_name: v23-deposit-contract-compiler
reqs_checksum: cache-{{ checksum "deposit_contract/compiler/requirements.txt" }} reqs_checksum: cache-{{ checksum "deposit_contract/compiler/requirements.txt" }}
venv_path: ./deposit_contract/compiler/venv venv_path: ./deposit_contract/compiler/venv
restore_deposit_contract_tester_cached_venv: restore_deposit_contract_tester_cached_venv:
description: "Restore the venv from cache for the deposit contract tester" description: "Restore the venv from cache for the deposit contract tester"
steps: steps:
- restore_cached_venv: - restore_cached_venv:
venv_name: v19-deposit-contract-tester venv_name: v22-deposit-contract-tester
reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "deposit_contract/tester/requirements.txt" }} reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "deposit_contract/tester/requirements.txt" }}
save_deposit_contract_tester_cached_venv: save_deposit_contract_tester_cached_venv:
description: "Save the venv to cache for later use of the deposit contract tester" description: "Save the venv to cache for later use of the deposit contract tester"
steps: steps:
- save_cached_venv: - save_cached_venv:
venv_name: v19-deposit-contract-tester venv_name: v22-deposit-contract-tester
reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "deposit_contract/tester/requirements.txt" }} reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "deposit_contract/tester/requirements.txt" }}
venv_path: ./deposit_contract/tester/venv venv_path: ./deposit_contract/tester/venv
jobs: jobs:

View File

@ -216,11 +216,21 @@ get_matching_head_attestations = cache_this(
_get_attesting_indices = get_attesting_indices _get_attesting_indices = get_attesting_indices
get_attesting_indices = cache_this( get_attesting_indices = cache_this(
lambda state, data, bits: (state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()), lambda state, data, bits: (
state.randao_mixes.hash_tree_root(),
state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()
),
_get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)''' _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''
PHASE1_SUNDRY_FUNCTIONS = ''' PHASE1_SUNDRY_FUNCTIONS = '''
def get_block_data_merkle_root(data: ByteList) -> Root:
# To get the Merkle root of the block data, we need the Merkle root without the length mix-in
# The below implements this in the Remerkleable framework
return Root(data.get_backing().get_left().merkle_root())
_get_start_shard = get_start_shard _get_start_shard = get_start_shard
get_start_shard = cache_this( get_start_shard = cache_this(
lambda state, slot: (state.validators.hash_tree_root(), slot), lambda state, slot: (state.validators.hash_tree_root(), slot),

File diff suppressed because it is too large

View File

@ -307,7 +307,7 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Da
valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider] valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider]
# Default vote on latest eth1 block data in the period range unless eth1 chain is not live # Default vote on latest eth1 block data in the period range unless eth1 chain is not live
default_vote = votes_to_consider[-1] if any(votes_to_consider) else state.eth1_data default_vote = votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state.eth1_data
return max( return max(
valid_votes, valid_votes,
@ -399,7 +399,7 @@ Set `attestation_data.beacon_block_root = hash_tree_root(head_block)`.
*Note*: `epoch_boundary_block_root` can be looked up in the state using: *Note*: `epoch_boundary_block_root` can be looked up in the state using:
- Let `start_slot = compute_start_slot_at_epoch(get_current_epoch(head_state))`. - Let `start_slot = compute_start_slot_at_epoch(get_current_epoch(head_state))`.
- Let `epoch_boundary_block_root = hash_tree_root(head_block) if start_slot == head_state.slot else get_block_root(state, start_slot)`. - Let `epoch_boundary_block_root = hash_tree_root(head_block) if start_slot == head_state.slot else get_block_root(state, get_current_epoch(head_state))`.
#### Construct attestation #### Construct attestation

View File

@ -1,4 +1,4 @@
# Ethereum 2.0 Phase 1 -- The Beacon Chain for Shards # Ethereum 2.0 Phase 1 -- The Beacon Chain with Shards
**Notice**: This document is a work-in-progress for researchers and implementers. **Notice**: This document is a work-in-progress for researchers and implementers.
@ -532,7 +532,7 @@ def get_active_shard_count(state: BeaconState) -> uint64:
```python ```python
def get_online_validator_indices(state: BeaconState) -> Set[ValidatorIndex]: def get_online_validator_indices(state: BeaconState) -> Set[ValidatorIndex]:
active_validators = get_active_validator_indices(state, get_current_epoch(state)) active_validators = get_active_validator_indices(state, get_current_epoch(state))
return set([i for i in active_validators if state.online_countdown[i] != 0]) return set(i for i in active_validators if state.online_countdown[i] != 0) # non-duplicate
``` ```
#### `get_shard_committee` #### `get_shard_committee`
@ -559,7 +559,7 @@ def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -
```python ```python
def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]: def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
""" """
Return the light client committee of no more than ``TARGET_COMMITTEE_SIZE`` validators. Return the light client committee of no more than ``LIGHT_CLIENT_COMMITTEE_SIZE`` validators.
""" """
source_epoch = compute_committee_source_epoch(epoch, LIGHT_CLIENT_COMMITTEE_PERIOD) source_epoch = compute_committee_source_epoch(epoch, LIGHT_CLIENT_COMMITTEE_PERIOD)
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch) active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
@ -1084,8 +1084,9 @@ def process_light_client_committee_updates(state: BeaconState) -> None:
""" """
Update light client committees. Update light client committees.
""" """
if get_current_epoch(state) % LIGHT_CLIENT_COMMITTEE_PERIOD == 0: next_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
if next_epoch % LIGHT_CLIENT_COMMITTEE_PERIOD == 0:
state.current_light_committee = state.next_light_committee state.current_light_committee = state.next_light_committee
new_committee = get_light_client_committee(state, get_current_epoch(state) + LIGHT_CLIENT_COMMITTEE_PERIOD) new_committee = get_light_client_committee(state, next_epoch + LIGHT_CLIENT_COMMITTEE_PERIOD)
state.next_light_committee = committee_to_compact_committee(state, new_committee) state.next_light_committee = committee_to_compact_committee(state, new_committee)
``` ```

View File

@ -25,6 +25,7 @@
- [`CustodyKeyReveal`](#custodykeyreveal) - [`CustodyKeyReveal`](#custodykeyreveal)
- [`EarlyDerivedSecretReveal`](#earlyderivedsecretreveal) - [`EarlyDerivedSecretReveal`](#earlyderivedsecretreveal)
- [Helpers](#helpers) - [Helpers](#helpers)
- [`get_block_data_merkle_root`](#get_block_data_merkle_root)
- [`replace_empty_or_append`](#replace_empty_or_append) - [`replace_empty_or_append`](#replace_empty_or_append)
- [`legendre_bit`](#legendre_bit) - [`legendre_bit`](#legendre_bit)
- [`get_custody_atoms`](#get_custody_atoms) - [`get_custody_atoms`](#get_custody_atoms)
@ -182,6 +183,10 @@ class EarlyDerivedSecretReveal(Container):
## Helpers ## Helpers
### `get_block_data_merkle_root`
`get_block_data_merkle_root(data: ByteList) -> Root` is the function that returns the Merkle root of the block data without the length mix-in.
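As a non-normative illustration, the relationship between this root and the regular SSZ `hash_tree_root` can be sketched as follows; the 32-byte little-endian length encoding and the `hash` function follow the SSZ merkleization rules, and `get_block_data_merkle_root` is assumed to behave as described above.
```python
from hashlib import sha256

def hash(x: bytes) -> bytes:
    return sha256(x).digest()

def mix_in_length(root: bytes, length: int) -> bytes:
    # SSZ length mix-in: hash(root || uint256-little-endian(length))
    return hash(root + length.to_bytes(32, 'little'))

# For any ByteList value `data`, the two roots are expected to relate as:
#   hash_tree_root(data) == mix_in_length(get_block_data_merkle_root(data), len(data))
```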
### `replace_empty_or_append` ### `replace_empty_or_append`
```python ```python
@ -229,9 +234,12 @@ Given one set of data, return the custody atoms: each atom will be combined with
```python ```python
def get_custody_atoms(bytez: bytes) -> Sequence[bytes]: def get_custody_atoms(bytez: bytes) -> Sequence[bytes]:
bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_ATOM) # right-padding length_remainder = len(bytez) % BYTES_PER_CUSTODY_ATOM
return [bytez[i:i + BYTES_PER_CUSTODY_ATOM] bytez += b'\x00' * ((BYTES_PER_CUSTODY_ATOM - length_remainder) % BYTES_PER_CUSTODY_ATOM) # right-padding
for i in range(0, len(bytez), BYTES_PER_CUSTODY_ATOM)] return [
bytez[i:i + BYTES_PER_CUSTODY_ATOM]
for i in range(0, len(bytez), BYTES_PER_CUSTODY_ATOM)
]
``` ```
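For clarity (not spec code), the rewritten padding amount is arithmetically equivalent to the previous `-len(bytez) % BYTES_PER_CUSTODY_ATOM` expression, assuming `BYTES_PER_CUSTODY_ATOM = 32` as in the phase 1 configuration:
```python
BYTES_PER_CUSTODY_ATOM = 32  # assumed configuration value

def padding(n: int) -> int:
    # New formulation: pad up to the next multiple, adding nothing when already aligned.
    remainder = n % BYTES_PER_CUSTODY_ATOM
    return (BYTES_PER_CUSTODY_ATOM - remainder) % BYTES_PER_CUSTODY_ATOM

assert padding(0) == 0
assert padding(1) == 31
assert padding(32) == 0
assert padding(33) == 31
# Matches the old expression for every length:
assert all(padding(n) == -n % BYTES_PER_CUSTODY_ATOM for n in range(0, 256))
```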
### `get_custody_secrets` ### `get_custody_secrets`
@ -283,7 +291,7 @@ def get_randao_epoch_for_custody_period(period: uint64, validator_index: Validat
### `get_custody_period_for_validator` ### `get_custody_period_for_validator`
```python ```python
def get_custody_period_for_validator(validator_index: ValidatorIndex, epoch: Epoch) -> int: def get_custody_period_for_validator(validator_index: ValidatorIndex, epoch: Epoch) -> uint64:
''' '''
Return the reveal period for a given validator. Return the reveal period for a given validator.
''' '''
@ -515,7 +523,7 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
assert hash_tree_root(shard_transition) == attestation.data.shard_transition_root assert hash_tree_root(shard_transition) == attestation.data.shard_transition_root
# Verify that the provided data matches the shard-transition # Verify that the provided data matches the shard-transition
assert ( assert (
custody_slashing.data.get_backing().get_left().merkle_root() get_block_data_merkle_root(custody_slashing.data)
== shard_transition.shard_data_roots[custody_slashing.data_index] == shard_transition.shard_data_roots[custody_slashing.data_index]
) )
assert len(custody_slashing.data) == shard_transition.shard_block_lengths[custody_slashing.data_index] assert len(custody_slashing.data) == shard_transition.shard_block_lengths[custody_slashing.data_index]
@ -552,7 +560,6 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
slash_validator(state, custody_slashing.whistleblower_index) slash_validator(state, custody_slashing.whistleblower_index)
``` ```
## Per-epoch processing ## Per-epoch processing
### Handling of reveal deadlines ### Handling of reveal deadlines
@ -584,7 +591,7 @@ def process_custody_final_updates(state: BeaconState) -> None:
# Reset withdrawable epochs if challenge records are empty # Reset withdrawable epochs if challenge records are empty
records = state.custody_chunk_challenge_records records = state.custody_chunk_challenge_records
validator_indices_in_records = set([record.responder_index for record in records]) validator_indices_in_records = set(record.responder_index for record in records) # non-duplicate
for index, validator in enumerate(state.validators): for index, validator in enumerate(state.validators):
if validator.exit_epoch != FAR_FUTURE_EPOCH: if validator.exit_epoch != FAR_FUTURE_EPOCH:
not_all_secrets_are_revealed = validator.all_custody_secrets_revealed_epoch == FAR_FUTURE_EPOCH not_all_secrets_are_revealed = validator.all_custody_secrets_revealed_epoch == FAR_FUTURE_EPOCH

View File

@ -174,9 +174,7 @@ def on_shard_block(store: Store, shard_store: ShardStore, signed_shard_block: Si
# Check the block is valid and compute the post-state # Check the block is valid and compute the post-state
shard_state = shard_parent_state.copy() shard_state = shard_parent_state.copy()
shard_state_transition( shard_state_transition(shard_state, signed_shard_block, beacon_parent_state, validate_result=True)
shard_state, signed_shard_block,
validate=True, beacon_parent_state=beacon_parent_state)
# Add new block to the store # Add new block to the store
# Note: storing `SignedShardBlock` format for computing `ShardTransition.proposer_signature_aggregate` # Note: storing `SignedShardBlock` format for computing `ShardTransition.proposer_signature_aggregate`

View File

@ -11,7 +11,9 @@
- [Introduction](#introduction) - [Introduction](#introduction)
- [Helper functions](#helper-functions) - [Helper functions](#helper-functions)
- [Shard block verification functions](#shard-block-verification-functions) - [Shard block verification functions](#shard-block-verification-functions)
- [Shard state transition](#shard-state-transition) - [`verify_shard_block_message`](#verify_shard_block_message)
- [`verify_shard_block_signature`](#verify_shard_block_signature)
- [Shard state transition function](#shard-state-transition-function)
- [Fraud proofs](#fraud-proofs) - [Fraud proofs](#fraud-proofs)
- [Verifying the proof](#verifying-the-proof) - [Verifying the proof](#verifying-the-proof)
@ -25,6 +27,8 @@ This document describes the shard transition function and fraud proofs as part o
### Shard block verification functions ### Shard block verification functions
#### `verify_shard_block_message`
```python ```python
def verify_shard_block_message(beacon_parent_state: BeaconState, def verify_shard_block_message(beacon_parent_state: BeaconState,
shard_parent_state: ShardState, shard_parent_state: ShardState,
@ -49,6 +53,8 @@ def verify_shard_block_message(beacon_parent_state: BeaconState,
return True return True
``` ```
#### `verify_shard_block_signature`
```python ```python
def verify_shard_block_signature(beacon_parent_state: BeaconState, def verify_shard_block_signature(beacon_parent_state: BeaconState,
signed_block: SignedShardBlock) -> bool: signed_block: SignedShardBlock) -> bool:
@ -58,16 +64,18 @@ def verify_shard_block_signature(beacon_parent_state: BeaconState,
return bls.Verify(proposer.pubkey, signing_root, signed_block.signature) return bls.Verify(proposer.pubkey, signing_root, signed_block.signature)
``` ```
## Shard state transition ## Shard state transition function
The post-state corresponding to a pre-state `shard_state` and a signed block `signed_block` is defined as `shard_state_transition(shard_state, signed_block, beacon_parent_state)`, where `beacon_parent_state` is the parent beacon state of the `signed_block`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid.
```python ```python
def shard_state_transition(shard_state: ShardState, def shard_state_transition(shard_state: ShardState,
signed_block: SignedShardBlock, signed_block: SignedShardBlock,
validate: bool = True, beacon_parent_state: BeaconState,
beacon_parent_state: Optional[BeaconState] = None) -> ShardState: validate_result: bool = True) -> ShardState:
if validate: assert verify_shard_block_message(beacon_parent_state, shard_state, signed_block.message)
assert beacon_parent_state is not None
assert verify_shard_block_message(beacon_parent_state, shard_state, signed_block.message) if validate_result:
assert verify_shard_block_signature(beacon_parent_state, signed_block) assert verify_shard_block_signature(beacon_parent_state, signed_block)
process_shard_block(shard_state, signed_block.message) process_shard_block(shard_state, signed_block.message)
@ -84,11 +92,8 @@ def process_shard_block(shard_state: ShardState,
prev_gasprice = shard_state.gasprice prev_gasprice = shard_state.gasprice
shard_block_length = len(block.body) shard_block_length = len(block.body)
shard_state.gasprice = compute_updated_gasprice(prev_gasprice, uint64(shard_block_length)) shard_state.gasprice = compute_updated_gasprice(prev_gasprice, uint64(shard_block_length))
if shard_block_length == 0: if shard_block_length != 0:
latest_block_root = shard_state.latest_block_root shard_state.latest_block_root = hash_tree_root(block)
else:
latest_block_root = hash_tree_root(block)
shard_state.latest_block_root = latest_block_root
``` ```
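A hedged usage sketch of the new signature (`shard_state`, `signed_block`, and `beacon_parent_state` are placeholders): the fork choice runs the full transition, while callers that only need the state updates, such as fraud-proof verification, call `process_shard_block` directly.
```python
# Full transition: verifies the block message, optionally the signature, then applies it.
post_state = shard_state.copy()
shard_state_transition(post_state, signed_block, beacon_parent_state, validate_result=True)

# State-update-only path (no message or signature verification), as used by fraud proofs:
post_state_only = shard_state.copy()
process_shard_block(post_state_only, signed_block.message)
```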
## Fraud proofs ## Fraud proofs
@ -128,7 +133,7 @@ def is_valid_fraud_proof(beacon_state: BeaconState,
else: else:
shard_state = transition.shard_states[offset_index - 1] # Not doing the actual state updates here. shard_state = transition.shard_states[offset_index - 1] # Not doing the actual state updates here.
shard_state_transition(shard_state, block, validate=False) process_shard_block(shard_state, block)
if shard_state != transition.shard_states[offset_index]: if shard_state != transition.shard_states[offset_index]:
return True return True

View File

@ -40,7 +40,7 @@
- [`SignedAggregateAndProof`](#signedaggregateandproof) - [`SignedAggregateAndProof`](#signedaggregateandproof)
- [Light client committee](#light-client-committee) - [Light client committee](#light-client-committee)
- [Preparation](#preparation) - [Preparation](#preparation)
- [Light clent vote](#light-clent-vote) - [Light client vote](#light-client-vote)
- [Light client vote data](#light-client-vote-data) - [Light client vote data](#light-client-vote-data)
- [`LightClientVoteData`](#lightclientvotedata) - [`LightClientVoteData`](#lightclientvotedata)
- [Construct vote](#construct-vote) - [Construct vote](#construct-vote)
@ -162,7 +162,7 @@ def get_shard_winning_roots(state: BeaconState,
committee = get_beacon_committee(state, on_time_attestation_slot, committee_index) committee = get_beacon_committee(state, on_time_attestation_slot, committee_index)
# Loop over all shard transition roots, looking for a winning root # Loop over all shard transition roots, looking for a winning root
shard_transition_roots = set([a.data.shard_transition_root for a in shard_attestations]) shard_transition_roots = set(a.data.shard_transition_root for a in shard_attestations) # non-duplicate
for shard_transition_root in sorted(shard_transition_roots): for shard_transition_root in sorted(shard_transition_roots):
transition_attestations = [ transition_attestations = [
a for a in shard_attestations a for a in shard_attestations
@ -294,12 +294,12 @@ def get_shard_transition_fields(
for slot in offset_slots: for slot in offset_slots:
if slot in shard_block_slots: if slot in shard_block_slots:
shard_block = shard_blocks[shard_block_slots.index(slot)] shard_block = shard_blocks[shard_block_slots.index(slot)]
shard_data_roots.append(hash_tree_root(shard_block.message.body)) shard_data_roots.append(get_block_data_merkle_root(shard_block.message.body))
else: else:
shard_block = SignedShardBlock(message=ShardBlock(slot=slot, shard=shard)) shard_block = SignedShardBlock(message=ShardBlock(slot=slot, shard=shard))
shard_data_roots.append(Root()) shard_data_roots.append(Root())
shard_state = shard_state.copy() shard_state = shard_state.copy()
shard_state_transition(shard_state, shard_block, validate=False) process_shard_block(shard_state, shard_block.message)
shard_states.append(shard_state) shard_states.append(shard_state)
shard_block_lengths.append(len(shard_block.message.body)) shard_block_lengths.append(len(shard_block.message.body))
@ -390,11 +390,11 @@ def is_in_next_light_client_committee(state: BeaconState, index: ValidatorIndex)
return index in next_committee return index in next_committee
``` ```
#### Light clent vote #### Light client vote
During a period of epochs that the validator is a part of the light client committee (`validator_index in get_light_client_committee(state, epoch)`), the validator creates and broadcasts a `LightClientVote` at each slot. During a period of epochs that the validator is a part of the light client committee (`validator_index in get_light_client_committee(state, epoch)`), the validator creates and broadcasts a `LightClientVote` at each slot.
A validator should create and broadcast the `light_client_vote` to the `light_client_votes` pubsub topic when either (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) two-thirds of the `slot` have transpired (`SECONDS_PER_SLOT / 3` seconds after the start of `slot`) -- whichever comes _first_. A validator should create and broadcast the `light_client_vote` to the `light_client_votes` pubsub topic when either (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the `slot` has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of `slot`) -- whichever comes _first_.
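A rough, non-normative timing sketch of this rule (`slot_start_time` in seconds and `SECONDS_PER_SLOT = 12` are assumptions for illustration):
```python
import time

SECONDS_PER_SLOT = 12  # assumed configuration value

def should_broadcast_light_client_vote(slot_start_time: float, received_valid_block: bool) -> bool:
    # Broadcast once a valid block from the expected proposer is seen,
    # or once one-third of the slot has transpired -- whichever comes first.
    one_third_elapsed = time.time() >= slot_start_time + SECONDS_PER_SLOT / 3
    return received_valid_block or one_third_elapsed
```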
- Let `light_client_committee = get_light_client_committee(state, compute_epoch_at_slot(slot))` - Let `light_client_committee = get_light_client_committee(state, compute_epoch_at_slot(slot))`

View File

@ -26,25 +26,34 @@
## Helper functions ## Helper functions
```python ```python
def get_next_power_of_two(x: int) -> int: def get_power_of_two_ceil(x: int) -> int:
""" """
Get next power of 2 >= the input. Get the power of 2 for a given input, or the closest higher power of 2 if the input is not a power of 2.
Commonly used for "how many nodes do I need for a bottom tree layer fitting x elements?"
Example: 0->1, 1->1, 2->2, 3->4, 4->4, 5->8, 6->8, 7->8, 8->8, 9->16.
""" """
if x <= 2: if x <= 1:
return x return 1
elif x == 2:
return 2
else: else:
return 2 * get_next_power_of_two((x + 1) // 2) return 2 * get_power_of_two_ceil((x + 1) // 2)
``` ```
```python ```python
def get_previous_power_of_two(x: int) -> int: def get_power_of_two_floor(x: int) -> int:
""" """
Get the previous power of 2 >= the input. Get the power of 2 for a given input, or the closest lower power of 2 if the input is not a power of 2.
The zero case is a placeholder and not used for math with generalized indices.
Commonly used for "what power of two makes up the root bit of the generalized index?"
Example: 0->1, 1->1, 2->2, 3->2, 4->4, 5->4, 6->4, 7->4, 8->8, 9->8
""" """
if x <= 2: if x <= 1:
return 1
if x == 2:
return x return x
else: else:
return 2 * get_previous_power_of_two(x // 2) return 2 * get_power_of_two_floor(x // 2)
``` ```
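As an informal cross-check (not part of the spec), both helpers agree with a `bit_length`-based formulation over small inputs:
```python
def power_of_two_ceil_alt(x: int) -> int:
    # Smallest power of two >= x (with 0 and 1 both mapping to 1).
    return 1 if x <= 1 else 1 << (x - 1).bit_length()

def power_of_two_floor_alt(x: int) -> int:
    # Largest power of two <= x (with 0 mapping to 1 as a placeholder).
    return 1 if x <= 1 else 1 << (x.bit_length() - 1)

assert all(power_of_two_ceil_alt(x) == get_power_of_two_ceil(x) for x in range(0, 1024))
assert all(power_of_two_floor_alt(x) == get_power_of_two_floor(x) for x in range(0, 1024))
```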
## Generalized Merkle tree index ## Generalized Merkle tree index
@ -62,9 +71,14 @@ Note that the generalized index has the convenient property that the two childre
```python ```python
def merkle_tree(leaves: Sequence[Bytes32]) -> Sequence[Bytes32]: def merkle_tree(leaves: Sequence[Bytes32]) -> Sequence[Bytes32]:
padded_length = get_next_power_of_two(len(leaves)) """
o = [Bytes32()] * padded_length + list(leaves) + [Bytes32()] * (padded_length - len(leaves)) Return an array representing the tree nodes by generalized index:
for i in range(padded_length - 1, 0, -1): [0, 1, 2, 3, 4, 5, 6, 7], where each layer is a power of 2. The 0 index is ignored. The 1 index is the root.
The result will be twice the size as the padded bottom layer for the input leaves.
"""
bottom_length = get_power_of_two_ceil(len(leaves))
o = [Bytes32()] * bottom_length + list(leaves) + [Bytes32()] * (bottom_length - len(leaves))
for i in range(bottom_length - 1, 0, -1):
o[i] = hash(o[i * 2] + o[i * 2 + 1]) o[i] = hash(o[i * 2] + o[i * 2 + 1])
return o return o
``` ```
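A hedged usage example of the generalized-index layout the docstring describes (assuming the `Bytes32` and `hash` helpers used elsewhere in this document):
```python
a, b, c = Bytes32(b'\xaa' * 32), Bytes32(b'\xbb' * 32), Bytes32(b'\xcc' * 32)
tree = merkle_tree([a, b, c])     # 3 leaves are padded to a bottom layer of 4
assert len(tree) == 8             # twice the padded bottom layer
assert tree[4:7] == [a, b, c] and tree[7] == Bytes32()  # leaves occupy indices 4..7
root = tree[1]                    # generalized index 1 is the root; index 0 is ignored
assert root == hash(hash(tree[4] + tree[5]) + hash(tree[6] + tree[7]))
```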
@ -169,7 +183,7 @@ def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableNam
else: else:
pos, _, _ = get_item_position(typ, p) pos, _, _ = get_item_position(typ, p)
base_index = (GeneralizedIndex(2) if issubclass(typ, (List, ByteList)) else GeneralizedIndex(1)) base_index = (GeneralizedIndex(2) if issubclass(typ, (List, ByteList)) else GeneralizedIndex(1))
root = GeneralizedIndex(root * base_index * get_next_power_of_two(chunk_count(typ)) + pos) root = GeneralizedIndex(root * base_index * get_power_of_two_ceil(chunk_count(typ)) + pos)
typ = get_elem_type(typ, p) typ = get_elem_type(typ, p)
return root return root
``` ```
@ -188,7 +202,7 @@ def concat_generalized_indices(*indices: GeneralizedIndex) -> GeneralizedIndex:
""" """
o = GeneralizedIndex(1) o = GeneralizedIndex(1)
for i in indices: for i in indices:
o = GeneralizedIndex(o * get_previous_power_of_two(i) + (i - get_previous_power_of_two(i))) o = GeneralizedIndex(o * get_power_of_two_floor(i) + (i - get_power_of_two_floor(i)))
return o return o
``` ```

View File

@ -213,13 +213,17 @@ We first define helper functions:
* `List[B, N]` and `Vector[B, N]`, where `B` is a basic type: `(N * size_of(B) + 31) // 32` (dividing by chunk size, rounding up) * `List[B, N]` and `Vector[B, N]`, where `B` is a basic type: `(N * size_of(B) + 31) // 32` (dividing by chunk size, rounding up)
* `List[C, N]` and `Vector[C, N]`, where `C` is a composite type: `N` * `List[C, N]` and `Vector[C, N]`, where `C` is a composite type: `N`
* containers: `len(fields)` * containers: `len(fields)`
* `pack(value)`: given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks. * `pack(values)`: Given ordered objects of the same basic type:
* `pack_bits(bits)`: Given the `bits` of bitlist or bitvector, get `bitfield_bytes` by packing them in bytes and aligning to the start. The length-delimiting bit for bitlists is excluded. And then pack `bitfield_bytes` into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks. 1. Serialize `values` into bytes.
2. If not aligned to a multiple of `BYTES_PER_CHUNK` bytes, right-pad with zeroes to the next multiple.
3. Partition the bytes into `BYTES_PER_CHUNK`-byte chunks.
4. Return the chunks.
* `pack_bits(bits)`: Given the bits of bitlist or bitvector, get `bitfield_bytes` by packing them in bytes and aligning to the start. The length-delimiting bit for bitlists is excluded. Then return `pack(bitfield_bytes)`.
* `next_pow_of_two(i)`: get the next power of 2 of `i`, if not already a power of 2, with 0 mapping to 1. Examples: `0->1, 1->1, 2->2, 3->4, 4->4, 6->8, 9->16` * `next_pow_of_two(i)`: get the next power of 2 of `i`, if not already a power of 2, with 0 mapping to 1. Examples: `0->1, 1->1, 2->2, 3->4, 4->4, 6->8, 9->16`
* `merkleize(chunks, limit=None)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, merkleize the chunks, and return the root: * `merkleize(chunks, limit=None)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, merkleize the chunks, and return the root:
* The merkleization depends on the effective input, which can be padded/limited: * The merkleization depends on the effective input, which must be padded/limited:
- if no limit: pad the `chunks` with zeroed chunks to `next_pow_of_two(len(chunks))` (virtually for memory efficiency). - if no limit: pad the `chunks` with zeroed chunks to `next_pow_of_two(len(chunks))` (virtually for memory efficiency).
- if `limit > len(chunks)`, pad the `chunks` with zeroed chunks to `next_pow_of_two(limit)` (virtually for memory efficiency). - if `limit >= len(chunks)`, pad the `chunks` with zeroed chunks to `next_pow_of_two(limit)` (virtually for memory efficiency).
- if `limit < len(chunks)`: do not merkleize, input exceeds limit. Raise an error instead. - if `limit < len(chunks)`: do not merkleize, input exceeds limit. Raise an error instead.
* Then, merkleize the chunks (empty input is padded to 1 zero chunk): * Then, merkleize the chunks (empty input is padded to 1 zero chunk):
- If `1` chunk: the root is the chunk itself. - If `1` chunk: the root is the chunk itself.
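A non-normative sketch of `pack` for `uint64` values, following the numbered steps above (`BYTES_PER_CHUNK = 32` assumed):
```python
from typing import List, Sequence

BYTES_PER_CHUNK = 32  # assumed constant

def pack_uint64(values: Sequence[int]) -> List[bytes]:
    # 1. Serialize the values (each uint64 -> 8 bytes, little-endian).
    serialized = b''.join(v.to_bytes(8, 'little') for v in values)
    # 2. Right-pad with zeroes to the next multiple of BYTES_PER_CHUNK.
    serialized += b'\x00' * (-len(serialized) % BYTES_PER_CHUNK)
    # 3. & 4. Partition into BYTES_PER_CHUNK-byte chunks and return them.
    return [serialized[i:i + BYTES_PER_CHUNK] for i in range(0, len(serialized), BYTES_PER_CHUNK)]

assert pack_uint64([]) == []                   # nothing to serialize -> no chunks
assert len(pack_uint64([1, 2, 3, 4])) == 1     # 32 bytes -> exactly one chunk
assert len(pack_uint64([1, 2, 3, 4, 5])) == 2  # 40 bytes -> padded to two chunks
```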

View File

@ -136,17 +136,16 @@ def build_proof(anchor, leaf_index):
return list(reversed(proof)) return list(reversed(proof))
def get_block_data_merkle_root(data_as_bytelist): def get_valid_custody_chunk_response(spec, state, chunk_challenge, challenge_index,
# To get the Merkle root of the block data, we need the Merkle root without the length Mixing block_length_or_custody_data,
# The below implements this in the Remerkleable framework
return data_as_bytelist.get_backing().get_left().merkle_root()
def get_valid_custody_chunk_response(spec, state, chunk_challenge, block_length, challenge_index,
invalid_chunk_data=False): invalid_chunk_data=False):
custody_data = get_custody_test_vector(block_length) if isinstance(block_length_or_custody_data, int):
custody_data = get_custody_test_vector(block_length_or_custody_data)
else:
custody_data = block_length_or_custody_data
custody_data_block = ByteList[spec.MAX_SHARD_BLOCK_SIZE](custody_data) custody_data_block = ByteList[spec.MAX_SHARD_BLOCK_SIZE](custody_data)
chunks = custody_chunkify(spec, custody_data) chunks = custody_chunkify(spec, custody_data_block)
chunk_index = chunk_challenge.chunk_index chunk_index = chunk_challenge.chunk_index
@ -166,7 +165,7 @@ def get_custody_test_vector(bytelength, offset=0):
def get_sample_shard_transition(spec, start_slot, block_lengths): def get_sample_shard_transition(spec, start_slot, block_lengths):
b = [get_block_data_merkle_root(ByteList[spec.MAX_SHARD_BLOCK_SIZE](get_custody_test_vector(x))) b = [spec.get_block_data_merkle_root(ByteList[spec.MAX_SHARD_BLOCK_SIZE](get_custody_test_vector(x)))
for x in block_lengths] for x in block_lengths]
shard_transition = spec.ShardTransition( shard_transition = spec.ShardTransition(
start_slot=start_slot, start_slot=start_slot,
@ -192,7 +191,7 @@ def get_custody_slashable_test_vector(spec, custody_secret, length, slashable=Tr
offset = 0 offset = 0
while spec.compute_custody_bit(custody_secret, test_vector) != slashable: while spec.compute_custody_bit(custody_secret, test_vector) != slashable:
offset += 1 offset += 1
test_vector = test_vector = get_custody_test_vector(length, offset) test_vector = get_custody_test_vector(length, offset)
return test_vector return test_vector
@ -201,5 +200,5 @@ def get_custody_slashable_shard_transition(spec, start_slot, block_lengths, cust
slashable_test_vector = get_custody_slashable_test_vector(spec, custody_secret, slashable_test_vector = get_custody_slashable_test_vector(spec, custody_secret,
block_lengths[0], slashable=slashable) block_lengths[0], slashable=slashable)
block_data = ByteList[spec.MAX_SHARD_BLOCK_SIZE](slashable_test_vector) block_data = ByteList[spec.MAX_SHARD_BLOCK_SIZE](slashable_test_vector)
shard_transition.shard_data_roots[0] = get_block_data_merkle_root(block_data) shard_transition.shard_data_roots[0] = spec.get_block_data_merkle_root(block_data)
return shard_transition, slashable_test_vector return shard_transition, slashable_test_vector

View File

@ -35,3 +35,8 @@ def get_shard_transition_of_committee(spec, state, committee_index, shard_blocks
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot) shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
shard_transition = spec.get_shard_transition(state, shard, shard_blocks=shard_blocks) shard_transition = spec.get_shard_transition(state, shard, shard_blocks=shard_blocks)
return shard_transition return shard_transition
def is_full_crosslink(spec, state):
epoch = spec.compute_epoch_at_slot(state.slot)
return spec.get_committee_count_per_slot(state, epoch) >= spec.get_active_shard_count(state)

View File

@ -242,7 +242,8 @@ def test_custody_response(spec, state):
chunk_challenge_index = state.custody_chunk_challenge_index - 1 chunk_challenge_index = state.custody_chunk_challenge_index - 1
custody_response = get_valid_custody_chunk_response(spec, state, challenge, 2**15 // 3, chunk_challenge_index) custody_response = get_valid_custody_chunk_response(
spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3)
yield from run_custody_chunk_response_processing(spec, state, custody_response) yield from run_custody_chunk_response_processing(spec, state, custody_response)
@ -270,7 +271,8 @@ def test_custody_response_multiple_epochs(spec, state):
chunk_challenge_index = state.custody_chunk_challenge_index - 1 chunk_challenge_index = state.custody_chunk_challenge_index - 1
custody_response = get_valid_custody_chunk_response(spec, state, challenge, 2**15 // 3, chunk_challenge_index) custody_response = get_valid_custody_chunk_response(
spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3)
yield from run_custody_chunk_response_processing(spec, state, custody_response) yield from run_custody_chunk_response_processing(spec, state, custody_response)
@ -298,6 +300,7 @@ def test_custody_response_many_epochs(spec, state):
chunk_challenge_index = state.custody_chunk_challenge_index - 1 chunk_challenge_index = state.custody_chunk_challenge_index - 1
custody_response = get_valid_custody_chunk_response(spec, state, challenge, 2**15 // 3, chunk_challenge_index) custody_response = get_valid_custody_chunk_response(
spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3)
yield from run_custody_chunk_response_processing(spec, state, custody_response) yield from run_custody_chunk_response_processing(spec, state, custody_response)

View File

@ -8,7 +8,10 @@ from eth2spec.test.helpers.attestations import (
get_valid_on_time_attestation, get_valid_on_time_attestation,
run_attestation_processing, run_attestation_processing,
) )
from eth2spec.test.helpers.shard_transitions import run_shard_transitions_processing from eth2spec.test.helpers.shard_transitions import (
run_shard_transitions_processing,
is_full_crosslink,
)
from eth2spec.test.helpers.shard_block import ( from eth2spec.test.helpers.shard_block import (
build_shard_block, build_shard_block,
get_shard_transitions, get_shard_transitions,
@ -42,11 +45,6 @@ def get_attestations_and_shard_transitions(spec, state, shard_block_dict):
return attestations, shard_transitions return attestations, shard_transitions
def is_full_crosslink(spec, state):
epoch = spec.compute_epoch_at_slot(state.slot)
return spec.get_committee_count_per_slot(state, epoch) >= spec.get_active_shard_count(state)
def run_successful_crosslink_tests(spec, state, target_len_offset_slot): def run_successful_crosslink_tests(spec, state, target_len_offset_slot):
state, shard, target_shard_slot = get_initial_env(spec, state, target_len_offset_slot) state, shard, target_shard_slot = get_initial_env(spec, state, target_len_offset_slot)
init_slot = state.slot init_slot = state.slot

View File

@ -159,7 +159,8 @@ def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state)
assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
chunk_challenge_index = state.custody_chunk_challenge_index - 1 chunk_challenge_index = state.custody_chunk_challenge_index - 1
custody_response = get_valid_custody_chunk_response(spec, state, challenge, 2**15 // 3, chunk_challenge_index) custody_response = get_valid_custody_chunk_response(
spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3)
_, _, _ = run_custody_chunk_response_processing(spec, state, custody_response) _, _, _ = run_custody_chunk_response_processing(spec, state, custody_response)

View File

@ -7,17 +7,48 @@ from eth2spec.test.context import (
) )
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
from eth2spec.test.helpers.block import build_empty_block from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.custody import (
get_custody_secret,
get_custody_slashable_test_vector,
get_valid_chunk_challenge,
get_valid_custody_chunk_response,
get_valid_custody_key_reveal,
get_valid_custody_slashing,
get_valid_early_derived_secret_reveal,
)
from eth2spec.test.helpers.shard_block import ( from eth2spec.test.helpers.shard_block import (
build_shard_block, build_shard_block,
get_committee_index_of_shard,
get_sample_shard_block_body,
get_shard_transitions, get_shard_transitions,
) )
from eth2spec.test.helpers.shard_transitions import is_full_crosslink
from eth2spec.test.helpers.state import state_transition_and_sign_block, transition_to_valid_shard_slot, transition_to from eth2spec.test.helpers.state import state_transition_and_sign_block, transition_to_valid_shard_slot, transition_to
def run_beacon_block(spec, state, block, valid=True):
yield 'pre', state.copy()
if not valid:
signed_beacon_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
yield 'block', signed_beacon_block
yield 'post', None
return
signed_beacon_block = state_transition_and_sign_block(spec, state, block)
yield 'block', signed_beacon_block
yield 'post', state
#
# Beacon block with non-empty shard transitions
#
def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard, valid=True): def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard, valid=True):
transition_to(spec, state, state.slot + target_len_offset_slot) transition_to(spec, state, state.slot + target_len_offset_slot)
body = b'\x56' * spec.MAX_SHARD_BLOCK_SIZE body = get_sample_shard_block_body(spec, is_max=True)
shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True) shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True)
shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]} shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
@ -40,14 +71,17 @@ def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, comm
pre_gasprice = state.shard_states[shard].gasprice pre_gasprice = state.shard_states[shard].gasprice
pre_shard_states = state.shard_states.copy() pre_shard_states = state.shard_states.copy()
yield 'pre', state.copy() yield 'pre', state.copy()
yield 'block', beacon_block
state_transition_and_sign_block(spec, state, beacon_block) if not valid:
if valid: state_transition_and_sign_block(spec, state, beacon_block, expect_fail=True)
yield 'post', state yield 'block', beacon_block
else:
yield 'post', None yield 'post', None
return return
signed_beacon_block = state_transition_and_sign_block(spec, state, beacon_block)
yield 'block', signed_beacon_block
yield 'post', state
for shard in range(spec.get_active_shard_count(state)): for shard in range(spec.get_active_shard_count(state)):
post_shard_state = state.shard_states[shard] post_shard_state = state.shard_states[shard]
if shard in shard_block_dict: if shard in shard_block_dict:
@ -67,6 +101,10 @@ def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, comm
@spec_state_test @spec_state_test
def test_process_beacon_block_with_normal_shard_transition(spec, state): def test_process_beacon_block_with_normal_shard_transition(spec, state):
# NOTE: this test is only for full crosslink (minimal config), not for mainnet # NOTE: this test is only for full crosslink (minimal config), not for mainnet
if not is_full_crosslink(spec, state):
# skip
return
state = transition_to_valid_shard_slot(spec, state) state = transition_to_valid_shard_slot(spec, state)
target_len_offset_slot = 1 target_len_offset_slot = 1
@ -81,6 +119,10 @@ def test_process_beacon_block_with_normal_shard_transition(spec, state):
@spec_state_test @spec_state_test
def test_process_beacon_block_with_empty_proposal_transition(spec, state): def test_process_beacon_block_with_empty_proposal_transition(spec, state):
# NOTE: this test is only for full crosslink (minimal config), not for mainnet # NOTE: this test is only for full crosslink (minimal config), not for mainnet
if not is_full_crosslink(spec, state):
# skip
return
state = transition_to_valid_shard_slot(spec, state) state = transition_to_valid_shard_slot(spec, state)
target_len_offset_slot = 1 target_len_offset_slot = 1
@ -89,3 +131,112 @@ def test_process_beacon_block_with_empty_proposal_transition(spec, state):
assert state.shard_states[shard].slot == state.slot - 1 assert state.shard_states[shard].slot == state.slot - 1
yield from run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard) yield from run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard)
#
# Beacon block with custody operations
#
@with_all_phases_except([PHASE0])
@spec_state_test
def test_with_shard_transition_with_custody_challenge_and_response(spec, state):
# NOTE: this test is only for full crosslink (minimal config), not for mainnet
if not is_full_crosslink(spec, state):
# skip
return
state = transition_to_valid_shard_slot(spec, state)
# build shard block
shard = 0
committee_index = get_committee_index_of_shard(spec, state, state.slot, shard)
body = get_sample_shard_block_body(spec)
shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True)
shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
shard_transitions = get_shard_transitions(spec, state, shard_block_dict)
attestation = get_valid_on_time_attestation(
spec, state, index=committee_index,
shard_transition=shard_transitions[shard], signed=True,
)
block = build_empty_block(spec, state, slot=state.slot + 1)
block.body.attestations = [attestation]
block.body.shard_transitions = shard_transitions
# CustodyChunkChallenge operation
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transitions[shard])
block.body.chunk_challenges = [challenge]
# CustodyChunkResponse operation
chunk_challenge_index = state.custody_chunk_challenge_index
custody_response = get_valid_custody_chunk_response(
spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=body)
block.body.chunk_challenge_responses = [custody_response]
yield from run_beacon_block(spec, state, block)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_custody_key_reveal(spec, state):
state = transition_to_valid_shard_slot(spec, state)
transition_to(spec, state, state.slot + spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH)
block = build_empty_block(spec, state, slot=state.slot + 1)
custody_key_reveal = get_valid_custody_key_reveal(spec, state)
block.body.custody_key_reveals = [custody_key_reveal]
yield from run_beacon_block(spec, state, block)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_early_derived_secret_reveal(spec, state):
state = transition_to_valid_shard_slot(spec, state)
block = build_empty_block(spec, state, slot=state.slot + 1)
early_derived_secret_reveal = get_valid_early_derived_secret_reveal(spec, state)
block.body.early_derived_secret_reveals = [early_derived_secret_reveal]
yield from run_beacon_block(spec, state, block)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_custody_slashing(spec, state):
# NOTE: this test is only for full crosslink (minimal config), not for mainnet
if not is_full_crosslink(spec, state):
# skip
return
state = transition_to_valid_shard_slot(spec, state)
# Build shard block
shard = 0
committee_index = get_committee_index_of_shard(spec, state, state.slot, shard)
# Create slashable shard block body
validator_index = spec.get_beacon_committee(state, state.slot, committee_index)[0]
custody_secret = get_custody_secret(spec, state, validator_index)
slashable_body = get_custody_slashable_test_vector(spec, custody_secret, length=100, slashable=True)
shard_block = build_shard_block(spec, state, shard, body=slashable_body, slot=state.slot, signed=True)
shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
shard_transitions = get_shard_transitions(spec, state, shard_block_dict)
attestation = get_valid_on_time_attestation(
spec, state, index=committee_index,
shard_transition=shard_transitions[shard], signed=True,
)
block = build_empty_block(spec, state, slot=state.slot + 1)
block.body.attestations = [attestation]
block.body.shard_transitions = shard_transitions
_, _, _ = run_beacon_block(spec, state, block)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1))
block = build_empty_block(spec, state, slot=state.slot + 1)
custody_slashing = get_valid_custody_slashing(
spec, state, attestation, shard_transitions[shard], custody_secret, slashable_body
)
block.body.custody_slashings = [custody_slashing]
yield from run_beacon_block(spec, state, block)

View File

@ -0,0 +1,271 @@
from eth2spec.test.context import (
PHASE0,
always_bls,
expect_assertion_error,
spec_state_test,
with_all_phases_except,
)
from eth2spec.test.helpers.shard_block import (
build_shard_block,
sign_shard_block,
)
from eth2spec.test.helpers.shard_transitions import is_full_crosslink
from eth2spec.test.helpers.state import next_slot, transition_to_valid_shard_slot, transition_to
def run_shard_blocks(spec, shard_state, signed_shard_block, beacon_parent_state, valid=True):
pre_shard_state = shard_state.copy()
yield 'pre', pre_shard_state
yield 'signed_shard_block', signed_shard_block
yield 'beacon_parent_state', beacon_parent_state
if not valid:
expect_assertion_error(
lambda: spec.shard_state_transition(shard_state, signed_shard_block, beacon_parent_state)
)
yield 'post', None
return
spec.shard_state_transition(shard_state, signed_shard_block, beacon_parent_state)
yield 'post', shard_state
# Verify `process_shard_block`
block = signed_shard_block.message
assert shard_state.slot == block.slot
shard_block_length = len(block.body)
assert shard_state.gasprice == spec.compute_updated_gasprice(pre_shard_state.gasprice, shard_block_length)
if shard_block_length != 0:
assert shard_state.latest_block_root == block.hash_tree_root()
else:
assert shard_state.latest_block_root == pre_shard_state.latest_block_root
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_valid_shard_block(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state)
#
# verify_shard_block_message
#
@with_all_phases_except([PHASE0])
@spec_state_test
def test_invalid_shard_parent_root(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
signed_shard_block.message.shard_parent_root = b'\x12' * 32
sign_shard_block(spec, beacon_state, shard, signed_shard_block)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_invalid_beacon_parent_root(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
signed_shard_block.message.beacon_parent_root = b'\x12' * 32
sign_shard_block(spec, beacon_state, shard, signed_shard_block)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_invalid_slot(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
signed_shard_block.message.slot = beacon_state.slot + 1
proposer_index = spec.get_shard_proposer_index(beacon_state, signed_shard_block.message.slot, shard)
sign_shard_block(spec, beacon_state, shard, signed_shard_block, proposer_index=proposer_index)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_invalid_proposer_index(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
active_validator_indices = spec.get_active_validator_indices(beacon_state, spec.get_current_epoch(beacon_state))
proposer_index = (
(spec.get_shard_proposer_index(beacon_state, signed_shard_block.message.slot, shard) + 1)
% len(active_validator_indices)
)
signed_shard_block.message.proposer_index = proposer_index
sign_shard_block(spec, beacon_state, shard, signed_shard_block, proposer_index=proposer_index)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_out_of_bound_offset(spec, state):
# TODO: Handle this edge case properly
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
slot = (
beacon_state.shard_states[shard].slot
+ spec.SHARD_BLOCK_OFFSETS[spec.MAX_SHARD_BLOCKS_PER_ATTESTATION - 1]
+ 1 # out-of-bound
)
transition_to(spec, beacon_state, slot)
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_invalid_offset(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
# 4 is not in `SHARD_BLOCK_OFFSETS`
shard = 0
slot = beacon_state.shard_states[shard].slot + 4
assert slot not in spec.SHARD_BLOCK_OFFSETS
transition_to(spec, beacon_state, slot)
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_empty_block_body(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, body=b'', signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
#
# verify_shard_block_signature
#
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_invalid_signature(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=False)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
#
# Other cases
#
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_max_offset(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
slot = beacon_state.shard_states[shard].slot + spec.SHARD_BLOCK_OFFSETS[spec.MAX_SHARD_BLOCKS_PER_ATTESTATION - 1]
transition_to(spec, beacon_state, slot)
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_pending_shard_parent_block(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
# Block N
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block_1 = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
_, _, _, _ = run_shard_blocks(spec, shard_state, signed_shard_block_1, beacon_state)
# Block N+1
next_slot(spec, beacon_state)
signed_shard_block_2 = build_shard_block(
spec, beacon_state, shard,
slot=beacon_state.slot, shard_parent_state=shard_state, signed=True
)
assert signed_shard_block_2.message.shard_parent_root == shard_state.latest_block_root
assert signed_shard_block_2.message.slot == signed_shard_block_1.message.slot + 1
yield from run_shard_blocks(spec, shard_state, signed_shard_block_2, beacon_state)

View File

@ -0,0 +1,47 @@
import pytest
# Note: these functions are extracted from merkle-proofs.md (deprecated),
# the tests are temporary to show correctness while the document is still there.
def get_power_of_two_ceil(x: int) -> int:
if x <= 1:
return 1
elif x == 2:
return 2
else:
return 2 * get_power_of_two_ceil((x + 1) // 2)
def get_power_of_two_floor(x: int) -> int:
if x <= 1:
return 1
if x == 2:
return x
else:
return 2 * get_power_of_two_floor(x // 2)
power_of_two_ceil_cases = [
(0, 1), (1, 1), (2, 2), (3, 4), (4, 4), (5, 8), (6, 8), (7, 8), (8, 8), (9, 16),
]
power_of_two_floor_cases = [
(0, 1), (1, 1), (2, 2), (3, 2), (4, 4), (5, 4), (6, 4), (7, 4), (8, 8), (9, 8),
]
@pytest.mark.parametrize(
'value,expected',
power_of_two_ceil_cases,
)
def test_get_power_of_two_ceil(value, expected):
assert get_power_of_two_ceil(value) == expected
@pytest.mark.parametrize(
'value,expected',
power_of_two_floor_cases,
)
def test_get_power_of_two_floor(value, expected):
assert get_power_of_two_floor(value) == expected