Merge branch 'dev' into exec_v_spec
* dev:
  delayed_activation_exit_epoch -> compute_activation_exit_epoch
  remove 'int' types from spec typehints. replace with uint64
  'beacon state getters' -> 'beacon state accessors'
  PR feedback
  Cleanup
  Fix bug
  ready for review
  A few more cleanups
  Push draft
  WIP
  WIP2
  WIP
commit 9deb4090b0
@@ -12,7 +12,7 @@ from typing import (
 PHASE0_IMPORTS = '''from typing import (
-    Any, Callable, Dict, Set, Sequence, Tuple,
+    Any, Dict, Set, Sequence, Tuple,
 )

 from dataclasses import (
@@ -38,7 +38,7 @@ from eth2spec.utils.bls import (
 from eth2spec.utils.hash_function import hash
 '''
 PHASE1_IMPORTS = '''from typing import (
-    Any, Callable, Dict, Optional, Set, Sequence, MutableSequence, Tuple,
+    Any, Dict, Optional, Set, Sequence, MutableSequence, Tuple,
 )

 from dataclasses import (
File diff suppressed because it is too large
@@ -69,7 +69,7 @@ class LatestMessage(object):
 ```python
 @dataclass
 class Store(object):
-    time: int
+    time: uint64
     justified_checkpoint: Checkpoint
     finalized_checkpoint: Checkpoint
     blocks: Dict[Hash, BeaconBlock] = field(default_factory=dict)
@@ -124,7 +124,7 @@ def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei:
 def get_head(store: Store) -> Hash:
     # Execute the LMD-GHOST fork choice
     head = store.justified_checkpoint.root
-    justified_slot = get_epoch_start_slot(store.justified_checkpoint.epoch)
+    justified_slot = epoch_start_slot(store.justified_checkpoint.epoch)
     while True:
         children = [
             root for root in store.blocks.keys()
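The rename from `get_epoch_start_slot` to `epoch_start_slot` that runs through this merge is purely cosmetic; the slot arithmetic is untouched. A minimal sketch of the accessor being renamed (the constant value here is illustrative, not a configuration claim):

```python
# Sketch only: the renamed helper maps an epoch number to its first slot.
SLOTS_PER_EPOCH = 64  # illustrative value

def epoch_start_slot(epoch: int) -> int:
    return epoch * SLOTS_PER_EPOCH

assert epoch_start_slot(0) == 0
assert epoch_start_slot(3) == 192
```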
@@ -141,7 +141,7 @@ def get_head(store: Store) -> Hash:
 #### `on_tick`

 ```python
-def on_tick(store: Store, time: int) -> None:
+def on_tick(store: Store, time: uint64) -> None:
     store.time = time
 ```

@@ -162,7 +162,7 @@ def on_block(store: Store, block: BeaconBlock) -> None:
         store.finalized_checkpoint.root
     )
     # Check that block is later than the finalized epoch slot
-    assert block.slot > get_epoch_start_slot(store.finalized_checkpoint.epoch)
+    assert block.slot > epoch_start_slot(store.finalized_checkpoint.epoch)
     # Check the block is valid and compute the post-state
     state = state_transition(pre_state, block)
     # Add new state for this block to the store
@@ -190,11 +190,11 @@ def on_attestation(store: Store, attestation: Attestation) -> None:

     # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
     base_state = store.block_states[target.root].copy()
-    assert store.time >= base_state.genesis_time + get_epoch_start_slot(target.epoch) * SECONDS_PER_SLOT
+    assert store.time >= base_state.genesis_time + epoch_start_slot(target.epoch) * SECONDS_PER_SLOT

     # Store target checkpoint state if not yet seen
     if target not in store.checkpoint_states:
-        process_slots(base_state, get_epoch_start_slot(target.epoch))
+        process_slots(base_state, epoch_start_slot(target.epoch))
         store.checkpoint_states[target] = base_state
     target_state = store.checkpoint_states[target]

@@ -204,7 +204,7 @@ def on_attestation(store: Store, attestation: Attestation) -> None:
     assert store.time >= (attestation_slot + 1) * SECONDS_PER_SLOT

     # Get state at the `target` to validate attestation and calculate the committees
-    indexed_attestation = convert_to_indexed(target_state, attestation)
+    indexed_attestation = get_indexed_attestation(target_state, attestation)
     validate_indexed_attestation(target_state, indexed_attestation)

     # Update latest messages
@@ -259,7 +259,7 @@ class BeaconBlockBody(Container):
 ### `ceillog2`

 ```python
-def ceillog2(x: int) -> int:
+def ceillog2(x: uint64) -> int:
     return x.bit_length()
 ```

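For orientation: `ceillog2` as defined above is just `int.bit_length`, and the custody-response checks further down use it as a Merkle branch depth. A tiny, purely illustrative check (treating the `uint64` argument as a plain Python integer):

```python
# Illustration only: ceillog2 above delegates to int.bit_length().
def ceillog2(x: int) -> int:
    return x.bit_length()

assert ceillog2(1) == 1
assert ceillog2(5) == 3    # e.g. a 5-chunk challenge uses a depth-3 branch
assert ceillog2(100) == 7
```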
@@ -275,7 +275,7 @@ def get_custody_chunk_count(crosslink: Crosslink) -> int:
 ### `get_bit`

 ```python
-def get_bit(serialization: bytes, i: int) -> int:
+def get_bit(serialization: bytes, i: uint64) -> int:
     """
     Extract the bit in ``serialization`` at position ``i``.
     """
@@ -304,7 +304,7 @@ def get_chunk_bits_root(chunk_bits: bytes) -> Bytes32:
 ### `get_randao_epoch_for_custody_period`

 ```python
-def get_randao_epoch_for_custody_period(period: int, validator_index: ValidatorIndex) -> Epoch:
+def get_randao_epoch_for_custody_period(period: uint64, validator_index: ValidatorIndex) -> Epoch:
     next_period_start = (period + 1) * EPOCHS_PER_CUSTODY_PERIOD - validator_index % EPOCHS_PER_CUSTODY_PERIOD
     return Epoch(next_period_start + CUSTODY_PERIOD_TO_RANDAO_PADDING)
 ```
@@ -473,7 +473,7 @@ For each `challenge` in `block.body.custody_chunk_challenges`, run the following
 ```python
 def process_chunk_challenge(state: BeaconState, challenge: CustodyChunkChallenge) -> None:
     # Verify the attestation
-    validate_indexed_attestation(state, convert_to_indexed(state, challenge.attestation))
+    validate_indexed_attestation(state, get_indexed_attestation(state, challenge.attestation))
     # Verify it is not too late to challenge
     assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY
     responder = state.validators[challenge.responder_index]
@@ -526,7 +526,7 @@ def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) ->
     # Verify challenger is slashable
     assert is_slashable_validator(challenger, get_current_epoch(state))
     # Verify attestation
-    validate_indexed_attestation(state, convert_to_indexed(state, attestation))
+    validate_indexed_attestation(state, get_indexed_attestation(state, attestation))
     # Verify attestation is eligible for challenging
     responder = state.validators[challenge.responder_index]
     assert epoch + responder.max_reveal_lateness <= get_reveal_period(state, challenge.responder_index)
@@ -595,13 +595,13 @@ def process_chunk_challenge_response(state: BeaconState,
     # Verify chunk index
     assert response.chunk_index == challenge.chunk_index
     # Verify bit challenge data is null
-    assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == ZERO_HASH
+    assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == Hash()
     # Verify minimum delay
     assert get_current_epoch(state) >= challenge.inclusion_epoch + ACTIVATION_EXIT_DELAY
     # Verify the chunk matches the crosslink data root
-    assert verify_merkle_branch(
+    assert is_valid_merkle_branch(
         leaf=hash_tree_root(response.chunk),
-        proof=response.data_branch,
+        branch=response.data_branch,
         depth=challenge.depth,
         index=response.chunk_index,
         root=challenge.data_root,
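Alongside the renames, every comparison against the `ZERO_HASH` constant becomes a comparison against `Hash()`, the default (all-zero) value of the spec's 32-byte hash type. A hedged sketch of why the two spellings are interchangeable, assuming `Hash` behaves like the spec's `Bytes32` alias:

```python
# Assumption: Hash is a 32-byte SSZ type whose default value is all zero bytes.
ZERO_HASH = b'\x00' * 32

class Hash(bytes):
    def __new__(cls, value: bytes = b'\x00' * 32):
        assert len(value) == 32
        return super().__new__(cls, value)

assert Hash() == ZERO_HASH            # the rename does not change any comparison
assert Hash(b'\x01' * 32) != Hash()   # non-zero roots still compare unequal
```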
@@ -624,17 +624,17 @@ def process_bit_challenge_response(state: BeaconState,
     responder = state.validators[challenge.responder_index]
     assert not responder.slashed
     # Verify the chunk matches the crosslink data root
-    assert verify_merkle_branch(
+    assert is_valid_merkle_branch(
         leaf=hash_tree_root(response.chunk),
-        proof=response.data_branch,
+        branch=response.data_branch,
         depth=ceillog2(challenge.chunk_count),
         index=response.chunk_index,
         root=challenge.data_root,
     )
     # Verify the chunk bit leaf matches the challenge data
-    assert verify_merkle_branch(
+    assert is_valid_merkle_branch(
         leaf=response.chunk_bits_leaf,
-        proof=response.chunk_bits_branch,
+        branch=response.chunk_bits_branch,
         depth=ceillog2(challenge.chunk_count) >> 8,
         index=response.chunk_index // 256,
         root=challenge.chunk_bits_merkle_root
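These call sites switch from `verify_merkle_branch(..., proof=...)` to `is_valid_merkle_branch(..., branch=...)`; only the function name and keyword change, not the check itself. For reference, a sketch of a branch check consistent with that signature (the `hash` stand-in and the plain-Python types are assumptions for illustration, not the spec file):

```python
from hashlib import sha256
from typing import Sequence


def hash(data: bytes) -> bytes:
    # Stand-in for the spec's SHA-256 hash helper.
    return sha256(data).digest()


def is_valid_merkle_branch(leaf: bytes, branch: Sequence[bytes], depth: int, index: int, root: bytes) -> bool:
    # Fold the leaf up the tree, picking the left/right sibling by the index bits.
    value = leaf
    for i in range(depth):
        if (index >> i) % 2:
            value = hash(branch[i] + value)
        else:
            value = hash(value + branch[i])
    return value == root
```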
@@ -132,14 +132,14 @@ class ShardBlockHeader(Container):
 def get_period_committee(state: BeaconState,
                          epoch: Epoch,
                          shard: Shard,
-                         index: int,
-                         count: int) -> Sequence[ValidatorIndex]:
+                         index: uint64,
+                         count: uint64) -> Sequence[ValidatorIndex]:
     """
     Return committee for a period. Used to construct persistent committees.
     """
     return compute_committee(
         indices=get_active_validator_indices(state, epoch),
-        seed=generate_seed(state, epoch),
+        seed=get_seed(state, epoch),
         index=shard * count + index,
         count=SHARD_COUNT * count,
     )
@@ -150,7 +150,7 @@ def get_period_committee(state: BeaconState,
 ```python
 def get_switchover_epoch(state: BeaconState, epoch: Epoch, index: ValidatorIndex) -> int:
     earlier_start_epoch = Epoch(epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2)
-    return (bytes_to_int(hash(generate_seed(state, earlier_start_epoch) + int_to_bytes(index, length=3)[0:8]))
+    return (bytes_to_int(hash(get_seed(state, earlier_start_epoch) + int_to_bytes(index, length=3)[0:8]))
             % PERSISTENT_COMMITTEE_PERIOD)
 ```

@@ -248,7 +248,7 @@ def verify_shard_attestation_signature(state: BeaconState,

 ```python
 def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Bytes32:
-    def is_power_of_two(value: int) -> bool:
+    def is_power_of_two(value: uint64) -> bool:
         return (value > 0) and (value & (value - 1) == 0)

     def pad_to_power_of_2(values: MutableSequence[bytes]) -> Sequence[bytes]:
@@ -259,7 +259,7 @@ def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Bytes32:
     def hash_tree_root_of_bytes(data: bytes) -> bytes:
         return hash_tree_root([data[i:i + 32] for i in range(0, len(data), 32)])

-    def zpad(data: bytes, length: int) -> bytes:
+    def zpad(data: bytes, length: uint64) -> bytes:
         return data + b'\x00' * (length - len(data))

     return hash(
@@ -309,11 +309,11 @@ def is_valid_shard_block(beacon_blocks: Sequence[BeaconBlock],
     assert beacon_block.slot <= candidate.slot

     # Check state root
-    assert candidate.state_root == ZERO_HASH  # [to be removed in phase 2]
+    assert candidate.state_root == Hash()  # [to be removed in phase 2]

     # Check parent block
     if candidate.slot == PHASE_1_FORK_SLOT:
-        assert candidate.parent_root == ZERO_HASH
+        assert candidate.parent_root == Hash()
     else:
         parent_block = next(
             (block for block in valid_shard_blocks if signing_root(block) == candidate.parent_root),
@@ -395,7 +395,7 @@ def is_valid_beacon_attestation(shard: Shard,

     # Check previous attestation
     if candidate.data.previous_crosslink.epoch <= PHASE_1_FORK_EPOCH:
-        assert candidate.data.previous_crosslink.data_root == ZERO_HASH
+        assert candidate.data.previous_crosslink.data_root == Hash()
     else:
         previous_attestation = next(
             (attestation for attestation in valid_attestations if
@@ -84,7 +84,7 @@ def get_period_data(block: ExtendedBeaconBlock, shard_id: Shard, later: bool) ->
     indices = get_period_committee(block.state, shard_id, period_start, 0, committee_count)
     return PeriodData(
         validator_count,
-        generate_seed(block.state, period_start),
+        get_seed(block.state, period_start),
         [block.state.validators[i] for i in indices],
     )
 ```

@@ -146,11 +146,11 @@ def get_committee_assignment(
     next_epoch = get_current_epoch(state) + 1
     assert epoch <= next_epoch

-    committees_per_slot = get_epoch_committee_count(state, epoch) // SLOTS_PER_EPOCH
-    epoch_start_slot = get_epoch_start_slot(epoch)
-    for slot in range(epoch_start_slot, epoch_start_slot + SLOTS_PER_EPOCH):
+    committees_per_slot = get_committee_count(state, epoch) // SLOTS_PER_EPOCH
+    start_slot = epoch_start_slot(epoch)
+    for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH):
         offset = committees_per_slot * (slot % SLOTS_PER_EPOCH)
-        slot_start_shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
+        slot_start_shard = (get_start_shard(state, epoch) + offset) % SHARD_COUNT
         for i in range(committees_per_slot):
             shard = Shard((slot_start_shard + i) % SHARD_COUNT)
             committee = get_crosslink_committee(state, epoch, shard)
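The accessor renames here (`get_committee_count`, `get_start_shard`, `epoch_start_slot`) leave the shard arithmetic in the loop untouched. A small calculation with made-up numbers illustrates how the start shard, per-slot offset, and committee index combine:

```python
# Hypothetical values, chosen only to illustrate the modular arithmetic above.
SHARD_COUNT = 64
committees_per_slot = 8
start_shard = 60      # pretend get_start_shard(state, epoch) returned 60
slot_in_epoch = 1

offset = committees_per_slot * slot_in_epoch
slot_start_shard = (start_shard + offset) % SHARD_COUNT
shards = [(slot_start_shard + i) % SHARD_COUNT for i in range(committees_per_slot)]
assert shards == [4, 5, 6, 7, 8, 9, 10, 11]  # wraps around the 64-shard ring
```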
@@ -229,7 +229,7 @@ def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) ->

 The `block.eth1_data` field is for block proposers to vote on recent Eth 1.0 data. This recent data contains an Eth 1.0 block hash as well as the associated deposit root (as calculated by the `get_hash_tree_root()` method of the deposit contract) and deposit count after execution of the corresponding Eth 1.0 block. If over half of the block proposers in the current Eth 1.0 voting period vote for the same `eth1_data` then `state.eth1_data` updates at the end of the voting period. Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`.

-Let `get_eth1_data(distance: int) -> Eth1Data` be the (subjective) function that returns the Eth 1.0 data at distance `distance` relative to the Eth 1.0 head at the start of the current Eth 1.0 voting period. Let `previous_eth1_distance` be the distance relative to the Eth 1.0 block corresponding to `state.eth1_data.block_hash` at the start of the current Eth 1.0 voting period. An honest block proposer sets `block.eth1_data = get_eth1_vote(state, previous_eth1_distance)` where:
+Let `get_eth1_data(distance: uint64) -> Eth1Data` be the (subjective) function that returns the Eth 1.0 data at distance `distance` relative to the Eth 1.0 head at the start of the current Eth 1.0 voting period. Let `previous_eth1_distance` be the distance relative to the Eth 1.0 block corresponding to `state.eth1_data.block_hash` at the start of the current Eth 1.0 voting period. An honest block proposer sets `block.eth1_data = get_eth1_vote(state, previous_eth1_distance)` where:

 ```python
 def get_eth1_vote(state: BeaconState, previous_eth1_distance: uint64) -> Eth1Data:
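The prose above is unchanged context: `state.eth1_data` only updates when more than half of the proposers in the voting period vote for the same `Eth1Data`. A minimal illustration of that majority rule (this is not the spec's `get_eth1_vote`, which also constrains which blocks may be voted for; the vote list and period length below are stand-ins):

```python
# Illustration of the "over half of the block proposers" rule described above.
from collections import Counter

def winning_eth1_data(eth1_data_votes, slots_per_eth1_voting_period):
    data, votes = Counter(eth1_data_votes).most_common(1)[0]
    # Strictly more than half of the proposers in the period must agree.
    return data if votes * 2 > slots_per_eth1_voting_period else None

assert winning_eth1_data(["A", "A", "A", "B"], 4) == "A"
assert winning_eth1_data(["A", "A", "B", "B"], 4) is None
```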
@@ -315,8 +315,8 @@ Set `attestation_data.beacon_block_root = signing_root(head_block)`.
 * Set `attestation_data.target_root = epoch_boundary_block_root` where `epoch_boundary_block_root` is the root of block at the most recent epoch boundary.

 *Note*: `epoch_boundary_block_root` can be looked up in the state using:
-* Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`.
-* Let `epoch_boundary_block_root = signing_root(head_block) if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`.
+* Let `start_slot = epoch_start_slot(get_current_epoch(head_state))`.
+* Let `epoch_boundary_block_root = signing_root(head_block) if start_slot == head_state.slot else get_block_root(state, start_slot)`.

 ##### Crosslink vote

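Spelled out with the renamed helper, the two-bullet lookup above amounts to the following sketch (`spec` stands in for the loaded spec module, and `head_block`, `head_state`, and `state` are assumed to be in scope as in the validator guide):

```python
def epoch_boundary_root(spec, head_state, head_block, state):
    # Sketch of the lookup from the bullets above, using the renamed epoch_start_slot.
    start_slot = spec.epoch_start_slot(spec.get_current_epoch(head_state))
    if start_slot == head_state.slot:
        return spec.signing_root(head_block)  # the head block sits on the boundary
    return spec.get_block_root(state, start_slot)
```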
@@ -10,7 +10,7 @@ from preset_loader import loader
 def shuffling_case(seed, count):
     yield 'seed', '0x' + seed.hex()
     yield 'count', count
-    yield 'shuffled', [spec.get_shuffled_index(i, count, seed) for i in range(count)]
+    yield 'shuffled', [spec.shuffle_index(i, count, seed) for i in range(count)]


 @to_tuple
@@ -14,7 +14,7 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
     else:
         assert False

-    indexed_attestation = spec.convert_to_indexed(state, attestation)
+    indexed_attestation = spec.get_indexed_attestation(state, attestation)
     spec.on_attestation(store, attestation)
     assert (
         store.latest_messages[indexed_attestation.custody_bit_0_indices[0]] ==
@@ -15,7 +15,7 @@ def build_attestation_data(spec, state, slot, shard):
     else:
         block_root = spec.get_block_root_at_slot(state, slot)

-    current_epoch_start_slot = spec.get_epoch_start_slot(spec.get_current_epoch(state))
+    current_epoch_start_slot = spec.epoch_start_slot(spec.get_current_epoch(state))
     if slot < current_epoch_start_slot:
         epoch_boundary_root = spec.get_block_root(state, spec.get_previous_epoch(state))
     elif slot == current_epoch_start_slot:
@@ -43,7 +43,7 @@ def build_attestation_data(spec, state, slot, shard):
             shard=shard,
             start_epoch=parent_crosslink.end_epoch,
             end_epoch=min(spec.slot_to_epoch(slot), parent_crosslink.end_epoch + spec.MAX_EPOCHS_PER_CROSSLINK),
-            data_root=spec.ZERO_HASH,
+            data_root=spec.Hash(),
             parent_root=hash_tree_root(parent_crosslink),
         ),
     )
@@ -54,8 +54,8 @@ def get_valid_attestation(spec, state, slot=None, signed=False):
         slot = state.slot

     epoch = spec.slot_to_epoch(slot)
-    epoch_start_shard = spec.get_epoch_start_shard(state, epoch)
-    committees_per_slot = spec.get_epoch_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH
+    epoch_start_shard = spec.get_start_shard(state, epoch)
+    committees_per_slot = spec.get_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH
     shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT

     attestation_data = build_attestation_data(spec, state, slot, shard)
@@ -13,6 +13,6 @@ def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
         sign_attestation(spec, state, attestation_2)

     return spec.AttesterSlashing(
-        attestation_1=spec.convert_to_indexed(state, attestation_1),
-        attestation_2=spec.convert_to_indexed(state, attestation_2),
+        attestation_1=spec.get_indexed_attestation(state, attestation_1),
+        attestation_2=spec.get_indexed_attestation(state, attestation_2),
     )
@@ -59,7 +59,7 @@ def build_empty_block(spec, state, slot=None, signed=False):
     empty_block.slot = slot
     empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
     previous_block_header = deepcopy(state.latest_block_header)
-    if previous_block_header.state_root == spec.ZERO_HASH:
+    if previous_block_header.state_root == spec.Hash():
         previous_block_header.state_root = state.hash_tree_root()
     empty_block.parent_root = signing_root(previous_block_header)

@@ -49,7 +49,7 @@ def build_deposit(spec,
     tree = calc_merkle_tree_from_leaves(tuple([d.hash_tree_root() for d in deposit_data_list]))
     proof = list(get_merkle_proof(tree, item_index=index)) + [(index + 1).to_bytes(32, 'little')]
     leaf = deposit_data.hash_tree_root()
-    assert spec.verify_merkle_branch(leaf, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH + 1, index, root)
+    assert spec.is_valid_merkle_branch(leaf, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH + 1, index, root)
     deposit = spec.Deposit(proof=proof, data=deposit_data)

     return deposit, root, deposit_data_list
@@ -27,7 +27,7 @@ def create_genesis_state(spec, num_validators):
         eth1_data=spec.Eth1Data(
             deposit_root=deposit_root,
             deposit_count=num_validators,
-            block_hash=spec.ZERO_HASH,
+            block_hash=spec.Hash(),
         ),
         latest_block_header=spec.BeaconBlockHeader(body_root=spec.hash_tree_root(spec.BeaconBlockBody())),
     )
@@ -195,7 +195,7 @@ def test_bad_merkle_proof(spec, state):
     deposit = prepare_state_and_deposit(spec, state, validator_index, amount)

     # mess up merkle branch
-    deposit.proof[5] = spec.ZERO_HASH
+    deposit.proof[5] = spec.Hash()

     sign_deposit_data(spec, deposit.data, privkeys[validator_index], state=state)

@@ -360,7 +360,7 @@ def test_non_existent_recipient(spec, state):
 @spec_state_test
 def test_invalid_pubkey(spec, state):
     transfer = get_valid_transfer(spec, state, signed=True)
-    state.validators[transfer.sender].withdrawal_credentials = spec.ZERO_HASH
+    state.validators[transfer.sender].withdrawal_credentials = spec.Hash()

     # un-activate so validator can transfer
     state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
@@ -71,7 +71,7 @@ def test_success_exit_queue(spec, state):
     current_epoch = spec.get_current_epoch(state)

     # exit `MAX_EXITS_PER_EPOCH`
-    initial_indices = spec.get_active_validator_indices(state, current_epoch)[:spec.get_churn_limit(state)]
+    initial_indices = spec.get_active_validator_indices(state, current_epoch)[:spec.get_validator_churn_limit(state)]

     # Prepare a bunch of exits, based on the current state
     exit_queue = []
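`get_churn_limit` becomes `get_validator_churn_limit` in the same accessor rename; the number it returns, and hence the slice taken by this test, is unchanged. A hedged sketch of the formula behind it, using the usual phase-0 constant names with illustrative values:

```python
# Sketch of the churn-limit formula; constants shown with mainnet-style values.
MIN_PER_EPOCH_CHURN_LIMIT = 4
CHURN_LIMIT_QUOTIENT = 65536

def validator_churn_limit(active_validator_count: int) -> int:
    return max(MIN_PER_EPOCH_CHURN_LIMIT, active_validator_count // CHURN_LIMIT_QUOTIENT)

assert validator_churn_limit(1_000) == 4         # small validator sets fall back to the minimum
assert validator_churn_limit(1_000_000) == 15    # 1_000_000 // 65536 == 15
```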
@@ -83,7 +83,7 @@ def test_single_crosslink_update_from_previous_epoch(spec, state):
 @with_all_phases
 @spec_state_test
 def test_double_late_crosslink(spec, state):
-    if spec.get_epoch_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT:
+    if spec.get_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT:
         print("warning: ignoring test, test-assumptions are incompatible with configuration")
         return

@@ -10,8 +10,8 @@ def run_process_just_and_fin(spec, state):

 def get_shards_for_slot(spec, state, slot):
     epoch = spec.slot_to_epoch(slot)
-    epoch_start_shard = spec.get_epoch_start_shard(state, epoch)
-    committees_per_slot = spec.get_epoch_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH
+    epoch_start_shard = spec.get_start_shard(state, epoch)
+    committees_per_slot = spec.get_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH
     shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT
     return [shard + i for i in range(committees_per_slot)]

@@ -33,8 +33,8 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
     total_balance = spec.get_total_active_balance(state)
     remaining_balance = total_balance * 2 // 3

-    epoch_start_slot = spec.get_epoch_start_slot(epoch)
-    for slot in range(epoch_start_slot, epoch_start_slot + spec.SLOTS_PER_EPOCH):
+    start_slot = spec.epoch_start_slot(epoch)
+    for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH):
         for shard in get_shards_for_slot(spec, state, slot):
             # Check if we already have had sufficient balance. (and undone if we don't want it).
             # If so, do not create more attestations. (we do not have empty pending attestations normally anyway)
@@ -80,7 +80,7 @@ def get_checkpoints(spec, epoch):

 def put_checkpoints_in_block_roots(spec, state, checkpoints):
     for c in checkpoints:
-        state.block_roots[spec.get_epoch_start_slot(c.epoch) % spec.SLOTS_PER_HISTORICAL_ROOT] = c.root
+        state.block_roots[spec.epoch_start_slot(c.epoch) % spec.SLOTS_PER_HISTORICAL_ROOT] = c.root


 def finalize_on_234(spec, state, epoch, sufficient_support):
@@ -45,7 +45,7 @@ def test_activation_queue_sorting(spec, state):
     state.validators[mock_activations - 1].activation_eligibility_epoch = epoch

     # make sure we are hitting the churn
-    churn_limit = spec.get_churn_limit(state)
+    churn_limit = spec.get_validator_churn_limit(state)
     assert mock_activations > churn_limit

     yield from run_process_registry_updates(spec, state)
@@ -63,7 +63,7 @@ def test_empty_block_transition(spec, state):

     assert len(state.eth1_data_votes) == pre_eth1_votes + 1
     assert spec.get_block_root_at_slot(state, pre_slot) == block.parent_root
-    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.ZERO_HASH
+    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Hash()


 @with_all_phases
@@ -98,7 +98,7 @@ def test_skipped_slots(spec, state):
     yield 'post', state

     assert state.slot == block.slot
-    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.ZERO_HASH
+    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Hash()
     for slot in range(pre_slot, state.slot):
         assert spec.get_block_root_at_slot(state, slot) == block.parent_root

@@ -43,7 +43,7 @@ def next_epoch_with_attestations(spec,
         block = build_empty_block_for_next_slot(spec, post_state)
         if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
             slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
-            if slot_to_attest >= spec.get_epoch_start_slot(spec.get_current_epoch(post_state)):
+            if slot_to_attest >= spec.epoch_start_slot(spec.get_current_epoch(post_state)):
                 cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest)
                 block.body.attestations.append(cur_attestation)
