Merge branch 'dev' into hwwhww/shard_fork_choice
commit 58e75c27ed
@@ -76,8 +76,8 @@ BLS_WITHDRAWAL_PREFIX: 0x00
# Time parameters
# ---------------------------------------------------------------
# 86400 seconds (1 day)
MIN_GENESIS_DELAY: 86400
# 172800 seconds (2 days)
GENESIS_DELAY: 172800
# 12 seconds
SECONDS_PER_SLOT: 12
# 2**0 (= 1) slots 12 seconds

@@ -77,7 +77,7 @@ BLS_WITHDRAWAL_PREFIX: 0x00
# Time parameters
# ---------------------------------------------------------------
# [customized] Faster to spin up testnets, but does not give validator reasonable warning time for genesis
MIN_GENESIS_DELAY: 300
GENESIS_DELAY: 300
# [customized] Faster for testing purposes
SECONDS_PER_SLOT: 6
# 2**0 (= 1) slots 6 seconds

@@ -165,7 +165,7 @@ PHASE_1_FORK_VERSION: 0x01000001
# [customized] for testing
PHASE_1_GENESIS_SLOT: 8
# [customized] reduced for testing
INITIAL_ACTIVE_SHARDS: 4
INITIAL_ACTIVE_SHARDS: 2

# Phase 1: General
setup.py
@@ -140,7 +140,7 @@ SUNDRY_CONSTANTS_FUNCTIONS = '''
def ceillog2(x: uint64) -> int:
    return (x - 1).bit_length()
'''
SUNDRY_FUNCTIONS = '''
PHASE0_SUNDRY_FUNCTIONS = '''
# Monkey patch hash cache
_hash = hash
hash_cache: Dict[bytes, Bytes32] = {}

@@ -220,6 +220,13 @@ get_attesting_indices = cache_this(
    _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''


PHASE1_SUNDRY_FUNCTIONS = '''
_get_start_shard = get_start_shard
get_start_shard = cache_this(
    lambda state, slot: (state.validators.hash_tree_root(), slot),
    _get_start_shard, lru_size=SLOTS_PER_EPOCH * 3)'''


def objects_to_spec(spec_object: SpecObject, imports: str, fork: str) -> str:
    """
    Given all the objects that constitute a spec, combine them into a single pyfile.

@@ -250,9 +257,11 @@ def objects_to_spec(spec_object: SpecObject, imports: str, fork: str) -> str:
        + '\n\n' + CONFIG_LOADER
        + '\n\n' + ssz_objects_instantiation_spec
        + '\n\n' + functions_spec
        + '\n' + SUNDRY_FUNCTIONS
        + '\n'
        + '\n' + PHASE0_SUNDRY_FUNCTIONS
    )
    if fork == 'phase1':
        spec += '\n' + PHASE1_SUNDRY_FUNCTIONS
    spec += '\n'
    return spec
@@ -218,7 +218,7 @@ The following values are (non-configurable) constants used throughout the specification.

| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `MIN_GENESIS_DELAY` | `86400` | seconds | 1 day |
| `GENESIS_DELAY` | `172800` | seconds | 2 days |
| `SECONDS_PER_SLOT` | `12` | seconds | 12 seconds |
| `SECONDS_PER_ETH1_BLOCK` | `14` | seconds | 14 seconds |
| `MIN_ATTESTATION_INCLUSION_DELAY` | `2**0` (= 1) | slots | 12 seconds |
@@ -1137,7 +1137,7 @@ Before the Ethereum 2.0 genesis has been triggered, and for every Ethereum 1.0 block
- `eth1_timestamp` is the Unix timestamp corresponding to `eth1_block_hash`
- `deposits` is the sequence of all deposits, ordered chronologically, up to (and including) the block with hash `eth1_block_hash`

Eth1 blocks must only be considered once they are at least `SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE` seconds old (i.e. `eth1_timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time`). Due to this constraint, if `MIN_GENESIS_DELAY < SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE`, then the `genesis_time` can happen before the time/state is first known. Values should be configured to avoid this case.
Eth1 blocks must only be considered once they are at least `SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE` seconds old (i.e. `eth1_timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time`). Due to this constraint, if `GENESIS_DELAY < SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE`, then the `genesis_time` can happen before the time/state is first known. Values should be configured to avoid this case.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,

@@ -1149,7 +1149,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
        epoch=GENESIS_EPOCH,
    )
    state = BeaconState(
        genesis_time=eth1_timestamp - eth1_timestamp % MIN_GENESIS_DELAY + 2 * MIN_GENESIS_DELAY,
        genesis_time=eth1_timestamp + GENESIS_DELAY,
        fork=fork,
        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=len(deposits)),
        latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
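The timing constraint above can be sanity-checked numerically. A minimal sketch, assuming the mainnet values from this commit (`GENESIS_DELAY = 172800`, `SECONDS_PER_ETH1_BLOCK = 14`) and an assumed `ETH1_FOLLOW_DISTANCE = 2**10`; not part of the diff:

```python
GENESIS_DELAY = 172800            # seconds (mainnet, per this commit)
SECONDS_PER_ETH1_BLOCK = 14       # seconds
ETH1_FOLLOW_DISTANCE = 2**10      # assumed mainnet value at this spec version

def genesis_is_announced_in_advance() -> bool:
    # eth1 data is only usable once it is ETH1_FOLLOW_DISTANCE blocks' worth of seconds old;
    # genesis happens GENESIS_DELAY seconds after the triggering eth1 timestamp.
    return GENESIS_DELAY >= SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE

assert genesis_is_announced_in_advance()  # 172800 >= 14336
```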
@@ -150,7 +150,7 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
    elif block.slot == slot:
        return root
    else:
        # root is older than queried slot, thus a skip slot. Return earliest root prior to slot
        # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
        return root
```

@@ -285,7 +285,7 @@ def validate_on_attestation(store: Store, attestation: Attestation) -> None:
    # Attestations must not be for blocks in the future. If not, the attestation should not be considered
    assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot

    # FFG and LMD vote must be consistent with each other
    # LMD vote must be consistent with FFG vote target
    target_slot = compute_start_slot_at_epoch(target.epoch)
    assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot)
@@ -150,6 +150,7 @@ This section outlines constants that are used in this spec.

| Name | Value | Description |
|---|---|---|
| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. |
| `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request |
| `MAX_CHUNK_SIZE` | `2**20` (1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |

@@ -391,11 +392,11 @@ The `ErrorMessage` schema is:

```
(
  error_message: String
  error_message: List[byte, 256]
)
```

*Note*: The String type is encoded as UTF-8 bytes without NULL terminator when SSZ-encoded. As the `ErrorMessage` is not an SSZ-container, only the UTF-8 bytes will be sent when SSZ-encoded.
*Note*: By convention, the `error_message` is a sequence of bytes that MAY be interpreted as a UTF-8 string (for debugging purposes). Clients MUST treat as valid any byte sequences.
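The new wording means clients should treat `error_message` as opaque bytes and only best-effort decode it. A minimal client-side sketch of that behaviour (the helper name is hypothetical, not part of the spec):

```python
def render_error_message(error_message: bytes) -> str:
    # A client MAY try to show the bytes as UTF-8 for debugging, but MUST accept any
    # byte sequence, so decoding errors are replaced rather than rejected.
    return error_message.decode('utf-8', errors='replace')

print(render_error_message(b'rate limited'))        # -> 'rate limited'
print(render_error_message(b'\xff\xfe not utf-8'))  # still accepted, rendered with replacement chars
```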
### Encoding strategies

@@ -443,9 +444,9 @@ In case of an invalid input (header or payload), a reader MUST:

All messages that contain only a single field MUST be encoded directly as the type of that field and MUST NOT be encoded as an SSZ container.

Responses that are SSZ-lists (for example `[]SignedBeaconBlock`) send their
Responses that are SSZ-lists (for example `List[SignedBeaconBlock, ...]`) send their
constituents individually as `response_chunk`s. For example, the
`[]SignedBeaconBlock` response type sends zero or more `response_chunk`s. Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload.
`List[SignedBeaconBlock, ...]` response type sends zero or more `response_chunk`s. Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload.

### Messages

@@ -468,9 +469,9 @@ The fields are, as seen by the client at the time of sending the message:

- `fork_digest`: The node's `ForkDigest` (`compute_fork_digest(current_fork_version, genesis_validators_root)`) where
  - `current_fork_version` is the fork version at the node's current epoch defined by the wall-clock time (not necessarily the epoch to which the node is sync)
  - `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root`
- `finalized_root`: `state.finalized_checkpoint.root` for the state corresponding to the head block.
- `finalized_root`: `state.finalized_checkpoint.root` for the state corresponding to the head block (Note this defaults to `Root(b'\x00' * 32)` for the genesis finalized checkpoint).
- `finalized_epoch`: `state.finalized_checkpoint.epoch` for the state corresponding to the head block.
- `head_root`: The hash_tree_root root of the current head block.
- `head_root`: The `hash_tree_root` root of the current head block (`BeaconBlock`).
- `head_slot`: The slot of the block corresponding to the `head_root`.

The dialing client MUST send a `Status` request upon connection.

@@ -528,7 +529,7 @@ Request Content:

Response Content:

```
(
  []SignedBeaconBlock
  List[SignedBeaconBlock, MAX_REQUEST_BLOCKS]
)
```

@@ -545,7 +546,7 @@ The response MUST consist of zero or more `response_chunk`. Each _successful_ `r

Clients MUST keep a record of signed blocks seen since the since the start of the weak subjectivity period and MUST support serving requests of blocks up to their own `head_block_root`.

Clients MUST respond with at least the first block that exists in the range, if they have it.
Clients MUST respond with at least the first block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOCKS` blocks.

The following blocks, where they exist, MUST be send in consecutive order.

@@ -568,7 +569,7 @@ Request Content:

```
(
  []Root
  List[Root, MAX_REQUEST_BLOCKS]
)
```

@@ -576,12 +577,14 @@ Response Content:

```
(
  []SignedBeaconBlock
  List[SignedBeaconBlock, MAX_REQUEST_BLOCKS]
)
```

Requests blocks by block root (= `hash_tree_root(SignedBeaconBlock.message)`). The response is a list of `SignedBeaconBlock` whose length is less than or equal to the number of requested blocks. It may be less in the case that the responding peer is missing blocks.

No more than `MAX_REQUEST_BLOCKS` may be requested at a time.

`BeaconBlocksByRoot` is primarily used to recover recent blocks (e.g. when receiving a block or attestation whose parent is unknown).

The request MUST be encoded as an SSZ-field.
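Putting the two rules above together (request size capped at `MAX_REQUEST_BLOCKS`, response may be shorter when blocks are missing), a responder can be sketched as follows. Illustrative only; `block_store` is a hypothetical root-to-block mapping held by the serving client, not a spec structure:

```python
MAX_REQUEST_BLOCKS = 2**10  # = 1024, from the constants table above

def serve_blocks_by_root(block_store, requested_roots):
    # No more than MAX_REQUEST_BLOCKS roots may be requested at a time.
    assert len(requested_roots) <= MAX_REQUEST_BLOCKS
    # Return only the blocks we actually have; the response may be shorter than the request.
    return [block_store[root] for root in requested_roots if root in block_store]
```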
@@ -1052,7 +1055,7 @@ discv5 uses ENRs and we will presumably need to:

Although client software might very well be running locally prior to the solidification of the eth2 genesis state and block, clients cannot form valid ENRs prior to this point. ENRs contain `fork_digest` which utilizes the `genesis_validators_root` for a cleaner separation between chains so prior to knowing genesis, we cannot use `fork_digest` to cleanly find peers on our intended chain. Once genesis data is known, we can then form ENRs and safely find peers.

When using an eth1 deposit contract for deposits, `fork_digest` will be known at least `MIN_GENESIS_DELAY` (24 hours in mainnet configuration) before `genesis_time`, providing ample time to find peers and form initial connections and gossip subnets prior to genesis.
When using an eth1 deposit contract for deposits, `fork_digest` will be known `GENESIS_DELAY` (48 hours in mainnet configuration) before `genesis_time`, providing ample time to find peers and form initial connections and gossip subnets prior to genesis.

## Compression/Encoding
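A quick arithmetic check of the "48 hours" figure, using the mainnet `GENESIS_DELAY` value introduced by this commit:

```python
GENESIS_DELAY = 172800  # seconds
assert GENESIS_DELAY // 3600 == 48  # hours of advance notice before genesis_time
```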
@@ -40,6 +40,7 @@
- [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
- [`compute_offset_slots`](#compute_offset_slots)
- [`compute_updated_gasprice`](#compute_updated_gasprice)
- [`compute_committee_source_epoch`](#compute_committee_source_epoch)
- [Beacon state accessors](#beacon-state-accessors)
- [`get_active_shard_count`](#get_active_shard_count)
- [`get_online_validator_indices`](#get_online_validator_indices)

@@ -47,13 +48,15 @@
- [`get_light_client_committee`](#get_light_client_committee)
- [`get_shard_proposer_index`](#get_shard_proposer_index)
- [`get_indexed_attestation`](#get_indexed_attestation)
- [`get_committee_count_delta`](#get_committee_count_delta)
- [`get_start_shard`](#get_start_shard)
- [`get_shard`](#get_shard)
- [`get_latest_slot_for_shard`](#get_latest_slot_for_shard)
- [`get_offset_slots`](#get_offset_slots)
- [Predicates](#predicates)
- [`verify_attestation_custody`](#verify_attestation_custody)
- [Updated `is_valid_indexed_attestation`](#updated-is_valid_indexed_attestation)
- [`is_shard_attestation`](#is_shard_attestation)
- [`is_on_time_attestation`](#is_on_time_attestation)
- [`is_winning_attestation`](#is_winning_attestation)
- [`optional_aggregate_verify`](#optional_aggregate_verify)
- [`optional_fast_aggregate_verify`](#optional_fast_aggregate_verify)

@@ -61,14 +64,17 @@
- [Operations](#operations)
- [New Attestation processing](#new-attestation-processing)
- [`validate_attestation`](#validate_attestation)
- [Updated `process_attestation`](#updated-process_attestation)
- [Shard transition processing](#shard-transition-processing)
- [`apply_shard_transition`](#apply_shard_transition)
- [`process_crosslink_for_shard`](#process_crosslink_for_shard)
- [`process_crosslinks`](#process_crosslinks)
- [`process_attestation`](#process_attestation)
- [`verify_empty_shard_transition`](#verify_empty_shard_transition)
- [`process_shard_transitions`](#process_shard_transitions)
- [New Attester slashing processing](#new-attester-slashing-processing)
- [Shard transition false positives](#shard-transition-false-positives)
- [Light client processing](#light-client-processing)
- [Epoch transition](#epoch-transition)
- [Phase 1 final updates](#phase-1-final-updates)
- [Custody game updates](#custody-game-updates)
- [Online-tracking](#online-tracking)
- [Light client committee updates](#light-client-committee-updates)

@@ -130,7 +136,7 @@ class AttestationData(Container):
    source: Checkpoint
    target: Checkpoint
    # Current-slot shard block root
    head_shard_root: Root
    shard_head_root: Root
    # Shard transition root
    shard_transition_root: Root
```

@@ -153,6 +159,7 @@ class PendingAttestation(Container):
    data: AttestationData
    inclusion_delay: Slot
    proposer_index: ValidatorIndex
    # Phase 1
    crosslink_success: boolean
```

@@ -280,6 +287,7 @@ class BeaconState(Container):
    current_justified_checkpoint: Checkpoint
    finalized_checkpoint: Checkpoint
    # Phase 1
    current_epoch_start_shard: Shard
    shard_states: List[ShardState, MAX_SHARDS]
    online_countdown: List[OnlineEpochs, VALIDATOR_REGISTRY_LIMIT]  # not a raw byte array, considered its large size.
    current_light_committee: CompactCommittee

@@ -415,7 +423,7 @@ def unpack_compact_validator(compact_validator: uint64) -> Tuple[ValidatorIndex,

```python
def committee_to_compact_committee(state: BeaconState, committee: Sequence[ValidatorIndex]) -> CompactCommittee:
    """
    Given a state and a list of validator indices, outputs the CompactCommittee representing them.
    Given a state and a list of validator indices, outputs the ``CompactCommittee`` representing them.
    """
    validators = [state.validators[i] for i in committee]
    compact_validators = [

@@ -447,17 +455,30 @@ def compute_offset_slots(start_slot: Slot, end_slot: Slot) -> Sequence[Slot]:

#### `compute_updated_gasprice`

```python
def compute_updated_gasprice(prev_gasprice: Gwei, length: uint8) -> Gwei:
    if length > TARGET_SHARD_BLOCK_SIZE:
        delta = (prev_gasprice * (length - TARGET_SHARD_BLOCK_SIZE)
def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint8) -> Gwei:
    if shard_block_length > TARGET_SHARD_BLOCK_SIZE:
        delta = (prev_gasprice * (shard_block_length - TARGET_SHARD_BLOCK_SIZE)
                 // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
        return min(prev_gasprice + delta, MAX_GASPRICE)
    else:
        delta = (prev_gasprice * (TARGET_SHARD_BLOCK_SIZE - length)
        delta = (prev_gasprice * (TARGET_SHARD_BLOCK_SIZE - shard_block_length)
                 // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
        return max(prev_gasprice, MIN_GASPRICE + delta) - delta
```

#### `compute_committee_source_epoch`

```python
def compute_committee_source_epoch(epoch: Epoch, period: uint64) -> Epoch:
    """
    Return the source epoch for computing the committee.
    """
    source_epoch = epoch - epoch % period
    if source_epoch >= period:
        source_epoch -= period  # `period` epochs lookahead
    return source_epoch
```

### Beacon state accessors

#### `get_active_shard_count`
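The renamed `compute_updated_gasprice` keeps the same EIP-1559-style adjustment. A toy walkthrough with made-up constants (not the spec values) shows the behaviour above target, below target, and at the price floor:

```python
# Illustrative numbers only: target size 8, adjustment coefficient 8, bounds 2**5..2**14.
TARGET, COEFF, MAX_G, MIN_G = 8, 8, 2**14, 2**5

def updated_gasprice(prev, length):
    if length > TARGET:
        delta = prev * (length - TARGET) // TARGET // COEFF
        return min(prev + delta, MAX_G)
    else:
        delta = prev * (TARGET - length) // TARGET // COEFF
        return max(prev, MIN_G + delta) - delta

assert updated_gasprice(64, 16) == 72   # block at 2x target: price rises by 64*8//8//8 = 8
assert updated_gasprice(64, 0) == 56    # empty block: price falls by 8
assert updated_gasprice(32, 0) == 32    # already at the floor: clamped, does not go below
```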
@@ -479,9 +500,10 @@ def get_online_validator_indices(state: BeaconState) -> Set[ValidatorIndex]:

```python
def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]:
    source_epoch = epoch - epoch % SHARD_COMMITTEE_PERIOD
    if source_epoch >= SHARD_COMMITTEE_PERIOD:
        source_epoch -= SHARD_COMMITTEE_PERIOD
    """
    Return the shard committee of the given ``epoch`` of the given ``shard``.
    """
    source_epoch = compute_committee_source_epoch(epoch, SHARD_COMMITTEE_PERIOD)
    active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
    seed = get_seed(beacon_state, source_epoch, DOMAIN_SHARD_COMMITTEE)
    active_shard_count = get_active_shard_count(beacon_state)

@@ -497,9 +519,10 @@ def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -

```python
def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
    source_epoch = epoch - epoch % LIGHT_CLIENT_COMMITTEE_PERIOD
    if source_epoch >= LIGHT_CLIENT_COMMITTEE_PERIOD:
        source_epoch -= LIGHT_CLIENT_COMMITTEE_PERIOD
    """
    Return the light client committee of no more than ``TARGET_COMMITTEE_SIZE`` validators.
    """
    source_epoch = compute_committee_source_epoch(epoch, LIGHT_CLIENT_COMMITTEE_PERIOD)
    active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
    seed = get_seed(beacon_state, source_epoch, DOMAIN_LIGHT_CLIENT)
    return compute_committee(

@@ -530,18 +553,49 @@ def get_indexed_attestation(beacon_state: BeaconState, attestation: Attestation)
    )
```

#### `get_committee_count_delta`

```python
def get_committee_count_delta(state: BeaconState, start_slot: Slot, stop_slot: Slot) -> uint64:
    """
    Return the sum of committee counts in range ``[start_slot, stop_slot)``.
    """
    return sum(get_committee_count_at_slot(state, Slot(slot)) for slot in range(start_slot, stop_slot))
```

#### `get_start_shard`

```python
def get_start_shard(state: BeaconState, slot: Slot) -> Shard:
    # TODO: implement start shard logic
    return Shard(0)
    """
    Return the start shard at ``slot``.
    """
    current_epoch_start_slot = compute_start_slot_at_epoch(get_current_epoch(state))
    active_shard_count = get_active_shard_count(state)
    if current_epoch_start_slot == slot:
        return state.current_epoch_start_shard
    elif slot > current_epoch_start_slot:
        # Current epoch or the next epoch lookahead
        shard_delta = get_committee_count_delta(state, start_slot=current_epoch_start_slot, stop_slot=slot)
        return Shard((state.current_epoch_start_shard + shard_delta) % active_shard_count)
    else:
        # Previous epoch
        shard_delta = get_committee_count_delta(state, start_slot=slot, stop_slot=current_epoch_start_slot)
        max_committees_per_epoch = MAX_COMMITTEES_PER_SLOT * SLOTS_PER_EPOCH
        return Shard(
            # Ensure positive
            (state.current_epoch_start_shard + max_committees_per_epoch * active_shard_count - shard_delta)
            % active_shard_count
        )
```

#### `get_shard`

```python
def get_shard(state: BeaconState, attestation: Attestation) -> Shard:
    """
    Return the shard that the given ``attestation`` is attesting.
    """
    return compute_shard_from_committee_index(state, attestation.data.index, attestation.data.slot)
```
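The previous-epoch branch of the new `get_start_shard` keeps the subtraction positive by adding a large multiple of the shard count before taking the modulus. A toy worked example (illustrative values; only `MAX_COMMITTEES_PER_SLOT = 64` and `SLOTS_PER_EPOCH = 32` are mainnet constants):

```python
active_shard_count = 4
current_epoch_start_shard = 1
shard_delta = 3  # committees between `slot` and the current epoch start
max_committees_per_epoch = 64 * 32  # MAX_COMMITTEES_PER_SLOT * SLOTS_PER_EPOCH

start_shard = (current_epoch_start_shard
               + max_committees_per_epoch * active_shard_count
               - shard_delta) % active_shard_count
assert start_shard == 2  # (1 - 3) mod 4, kept positive by the large multiple of 4
```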
@@ -549,6 +603,9 @@ def get_shard(state: BeaconState, attestation: Attestation) -> Shard:

```python
def get_latest_slot_for_shard(state: BeaconState, shard: Shard) -> Slot:
    """
    Return the latest slot number of the given ``shard``.
    """
    return state.shard_states[shard].slot
```

@@ -556,11 +613,46 @@ def get_latest_slot_for_shard(state: BeaconState, shard: Shard) -> Slot:

```python
def get_offset_slots(state: BeaconState, shard: Shard) -> Sequence[Slot]:
    return compute_offset_slots(state.shard_states[shard].slot, state.slot)
    """
    Return the offset slots of the given ``shard``.
    The offset slot are after the latest slot and before current slot.
    """
    return compute_offset_slots(get_latest_slot_for_shard(state, shard), state.slot)
```

### Predicates

#### `verify_attestation_custody`

```python
def verify_attestation_custody(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
    """
    Check if ``indexed_attestation`` has valid signature against non-empty custody bits.
    """
    attestation = indexed_attestation.attestation
    aggregation_bits = attestation.aggregation_bits
    domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
    all_pubkeys = []
    all_signing_roots = []
    for block_index, custody_bits in enumerate(attestation.custody_bits_blocks):
        assert len(custody_bits) == len(indexed_attestation.committee)
        for participant, aggregation_bit, custody_bit in zip(
            indexed_attestation.committee, aggregation_bits, custody_bits
        ):
            if aggregation_bit:
                all_pubkeys.append(state.validators[participant].pubkey)
                # Note: only 2N distinct message hashes
                attestation_wrapper = AttestationCustodyBitWrapper(
                    attestation_data_root=hash_tree_root(attestation.data),
                    block_index=block_index,
                    bit=custody_bit,
                )
                all_signing_roots.append(compute_signing_root(attestation_wrapper, domain))
            else:
                assert not custody_bit
    return bls.AggregateVerify(all_pubkeys, all_signing_roots, signature=attestation.signature)
```

#### Updated `is_valid_indexed_attestation`

Note that this replaces the Phase 0 `is_valid_indexed_attestation`.

@@ -571,53 +663,33 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
    Check if ``indexed_attestation`` has valid indices and signature.
    """
    # Verify aggregate signature
    all_pubkeys = []
    all_signing_roots = []
    attestation = indexed_attestation.attestation
    domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
    aggregation_bits = attestation.aggregation_bits
    if not any(aggregation_bits) or len(aggregation_bits) != len(indexed_attestation.committee):
        return False

    if len(attestation.custody_bits_blocks) == 0:
        # fall back on phase0 behavior if there is no shard data.
        for participant, abit in zip(indexed_attestation.committee, aggregation_bits):
            if abit:
        domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
        all_pubkeys = []
        for participant, aggregation_bit in zip(indexed_attestation.committee, aggregation_bits):
            if aggregation_bit:
                all_pubkeys.append(state.validators[participant].pubkey)
        signing_root = compute_signing_root(indexed_attestation.attestation.data, domain)
        return bls.FastAggregateVerify(all_pubkeys, signing_root, signature=attestation.signature)
    else:
        for i, custody_bits in enumerate(attestation.custody_bits_blocks):
            assert len(custody_bits) == len(indexed_attestation.committee)
            for participant, abit, cbit in zip(indexed_attestation.committee, aggregation_bits, custody_bits):
                if abit:
                    all_pubkeys.append(state.validators[participant].pubkey)
                    # Note: only 2N distinct message hashes
                    attestation_wrapper = AttestationCustodyBitWrapper(
                        attestation_data_root=hash_tree_root(attestation.data),
                        block_index=i,
                        bit=cbit
                    )
                    all_signing_roots.append(compute_signing_root(attestation_wrapper, domain))
                else:
                    assert not cbit
        return bls.AggregateVerify(all_pubkeys, all_signing_roots, signature=attestation.signature)
        return verify_attestation_custody(state, indexed_attestation)
```

#### `is_shard_attestation`
#### `is_on_time_attestation`

```python
def is_shard_attestation(state: BeaconState,
                         attestation: Attestation,
                         committee_index: CommitteeIndex) -> bool:
    if not (
        attestation.data.index == committee_index
        and attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot  # Must be on-time attestation
        # TODO: MIN_ATTESTATION_INCLUSION_DELAY should always be 1
    ):
        return False

    return True
def is_on_time_attestation(state: BeaconState,
                           attestation: Attestation) -> bool:
    """
    Check if the given attestation is on-time.
    """
    return attestation.data.slot == compute_previous_slot(state.slot)
```

#### `is_winning_attestation`
@@ -677,7 +749,6 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_eth1_data(state, block.body)
    process_light_client_signatures(state, block.body)
    process_operations(state, block.body)
    verify_shard_transition_false_positives(state, block.body)
```

#### Operations

@@ -701,7 +772,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
    # See custody game spec.
    process_custody_game_operations(state, body)

    process_crosslinks(state, body.shard_transitions, body.attestations)
    process_shard_transitions(state, body.shard_transitions, body.attestations)

    # TODO process_operations(body.shard_receipt_proofs, process_shard_receipt_proofs)
```

@@ -727,20 +798,19 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
    else:
        assert attestation.data.source == state.previous_justified_checkpoint

    shard = get_shard(state, attestation)

    # Type 1: on-time attestations, the custody bits should be non-empty.
    if attestation.custody_bits_blocks != []:
        # Ensure on-time attestation
        assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot
        assert is_on_time_attestation(state, attestation)
        # Correct data root count
        shard = get_shard(state, attestation)
        assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, shard))
        # Correct parent block root
        assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot))
    # Type 2: no shard transition, no custody bits
    else:
        # Ensure delayed attestation
        assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY < state.slot
        assert data.slot < compute_previous_slot(state.slot)
        # Late attestations cannot have a shard transition root
        assert data.shard_transition_root == Root()

@@ -748,6 +818,27 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
```

###### Updated `process_attestation`

```python
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    validate_attestation(state, attestation)
    # Store pending attestation for epoch processing
    pending_attestation = PendingAttestation(
        aggregation_bits=attestation.aggregation_bits,
        data=attestation.data,
        inclusion_delay=state.slot - attestation.data.slot,
        proposer_index=get_beacon_proposer_index(state),
        crosslink_success=False,  # To be filled in during process_shard_transitions
    )
    if attestation.data.target.epoch == get_current_epoch(state):
        state.current_epoch_attestations.append(pending_attestation)
    else:
        state.previous_epoch_attestations.append(pending_attestation)
```

##### Shard transition processing

###### `apply_shard_transition`

```python

@@ -769,21 +860,21 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
    proposers = []
    prev_gasprice = state.shard_states[shard].gasprice
    shard_parent_root = state.shard_states[shard].latest_block_root
    for i in range(len(offset_slots)):
    for i, offset_slot in enumerate(offset_slots):
        shard_block_length = transition.shard_block_lengths[i]
        shard_state = transition.shard_states[i]
        # Verify correct calculation of gas prices and slots
        assert shard_state.gasprice == compute_updated_gasprice(prev_gasprice, shard_block_length)
        assert shard_state.slot == offset_slots[i]
        assert shard_state.slot == offset_slot
        # Collect the non-empty proposals result
        is_empty_proposal = shard_block_length == 0
        if not is_empty_proposal:
            proposal_index = get_shard_proposer_index(state, offset_slots[i], shard)
            proposal_index = get_shard_proposer_index(state, offset_slot, shard)
            # Reconstruct shard headers
            header = ShardBlockHeader(
                shard_parent_root=shard_parent_root,
                beacon_parent_root=get_block_root_at_slot(state, offset_slots[i]),
                slot=offset_slots[i],
                beacon_parent_root=get_block_root_at_slot(state, offset_slot),
                slot=offset_slot,
                shard=shard,
                proposer_index=proposal_index,
                body_root=transition.shard_data_roots[i]

@@ -814,9 +905,10 @@ def process_crosslink_for_shard(state: BeaconState,
                                committee_index: CommitteeIndex,
                                shard_transition: ShardTransition,
                                attestations: Sequence[Attestation]) -> Root:
    committee = get_beacon_committee(state, state.slot, committee_index)
    on_time_attestation_slot = compute_previous_slot(state.slot)
    committee = get_beacon_committee(state, on_time_attestation_slot, committee_index)
    online_indices = get_online_validator_indices(state)
    shard = compute_shard_from_committee_index(state, committee_index, state.slot)
    shard = compute_shard_from_committee_index(state, committee_index, on_time_attestation_slot)

    # Loop over all shard transition roots
    shard_transition_roots = set([a.data.shard_transition_root for a in attestations])

@@ -826,7 +918,7 @@ def process_crosslink_for_shard(state: BeaconState,
    for attestation in transition_attestations:
        participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
        transition_participants = transition_participants.union(participants)
        assert attestation.data.head_shard_root == shard_transition.shard_data_roots[
        assert attestation.data.shard_head_root == shard_transition.shard_data_roots[
            len(shard_transition.shard_data_roots) - 1
        ]

@@ -871,17 +963,16 @@ def process_crosslink_for_shard(state: BeaconState,
def process_crosslinks(state: BeaconState,
                       shard_transitions: Sequence[ShardTransition],
                       attestations: Sequence[Attestation]) -> None:
    committee_count = get_committee_count_at_slot(state, state.slot)
    on_time_attestation_slot = compute_previous_slot(state.slot)
    committee_count = get_committee_count_at_slot(state, on_time_attestation_slot)
    for committee_index in map(CommitteeIndex, range(committee_count)):
        shard = compute_shard_from_committee_index(state, committee_index, state.slot)
        # All attestations in the block for this committee/shard and current slot
        shard_transition = shard_transitions[shard]
        shard_attestations = [
            attestation for attestation in attestations
            if is_shard_attestation(state, attestation, committee_index)
            if is_on_time_attestation(state, attestation) and attestation.data.index == committee_index
        ]

        winning_root = process_crosslink_for_shard(state, committee_index, shard_transition, shard_attestations)
        shard = compute_shard_from_committee_index(state, committee_index, on_time_attestation_slot)
        winning_root = process_crosslink_for_shard(state, committee_index, shard_transitions[shard], shard_attestations)
        if winning_root != Root():
            # Mark relevant pending attestations as creating a successful crosslink
            for pending_attestation in state.current_epoch_attestations:

@@ -889,23 +980,30 @@ def process_crosslinks(state: BeaconState,
                pending_attestation.crosslink_success = True
```

###### `process_attestation`
###### `verify_empty_shard_transition`

```python
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    validate_attestation(state, attestation)
    # Store pending attestation for epoch processing
    pending_attestation = PendingAttestation(
        aggregation_bits=attestation.aggregation_bits,
        data=attestation.data,
        inclusion_delay=state.slot - attestation.data.slot,
        proposer_index=get_beacon_proposer_index(state),
        crosslink_success=False,  # To be filled in during process_crosslinks
    )
    if attestation.data.target.epoch == get_current_epoch(state):
        state.current_epoch_attestations.append(pending_attestation)
    else:
        state.previous_epoch_attestations.append(pending_attestation)
def verify_empty_shard_transition(state: BeaconState, shard_transitions: Sequence[ShardTransition]) -> bool:
    """
    Verify that a `shard_transition` in a block is empty if an attestation was not processed for it.
    """
    for shard in range(get_active_shard_count(state)):
        if state.shard_states[shard].slot != compute_previous_slot(state.slot):
            if shard_transitions[shard] != ShardTransition():
                return False
    return True
```

###### `process_shard_transitions`

```python
def process_shard_transitions(state: BeaconState,
                              shard_transitions: Sequence[ShardTransition],
                              attestations: Sequence[Attestation]) -> None:
    # Process crosslinks
    process_crosslinks(state, shard_transitions, attestations)
    # Verify the empty proposal shard states
    assert verify_empty_shard_transition(state, shard_transitions)
```

##### New Attester slashing processing
@@ -913,11 +1011,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:

```python
def get_indices_from_committee(
        committee: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE],
        bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]) -> List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE]:
        bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]) -> Sequence[ValidatorIndex]:
    assert len(bits) == len(committee)
    return List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE](
        [validator_index for i, validator_index in enumerate(committee) if bits[i]]
    )
    return [validator_index for i, validator_index in enumerate(committee) if bits[i]]
```

```python

@@ -950,16 +1046,6 @@ def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSla
    assert slashed_any
```

#### Shard transition false positives

```python
def verify_shard_transition_false_positives(state: BeaconState, block_body: BeaconBlockBody) -> None:
    # Verify that a `shard_transition` in a block is empty if an attestation was not processed for it
    for shard in range(get_active_shard_count(state)):
        if state.shard_states[shard].slot != compute_previous_slot(state.slot):
            assert block_body.shard_transitions[shard] == ShardTransition()
```

#### Light client processing

```python

@@ -992,10 +1078,20 @@ def process_epoch(state: BeaconState) -> None:
    process_registry_updates(state)
    process_reveal_deadlines(state)
    process_slashings(state)
    process_final_updates(state)
    process_final_updates(state)  # phase 0 final updates
    process_phase_1_final_updates(state)
```

#### Phase 1 final updates

```python
def process_phase_1_final_updates(state: BeaconState) -> None:
    process_custody_final_updates(state)
    process_online_tracking(state)
    process_light_client_committee_updates(state)

    # Update current_epoch_start_shard
    state.current_epoch_start_shard = get_start_shard(state, Slot(state.slot + 1))
```

#### Custody game updates

@@ -1021,7 +1117,9 @@ def process_online_tracking(state: BeaconState) -> None:

```python
def process_light_client_committee_updates(state: BeaconState) -> None:
    # Update light client committees
    """
    Update light client committees.
    """
    if get_current_epoch(state) % LIGHT_CLIENT_COMMITTEE_PERIOD == 0:
        state.current_light_committee = state.next_light_committee
        new_committee = get_light_client_committee(state, get_current_epoch(state) + LIGHT_CLIENT_COMMITTEE_PERIOD)

@@ -99,6 +99,7 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Phase 1
        current_epoch_start_shard=Shard(0),
        shard_states=List[ShardState, MAX_SHARDS](
            ShardState(
                slot=pre.slot,

@@ -71,22 +71,23 @@ def verify_shard_block_signature(beacon_state: BeaconState,
def shard_state_transition(beacon_state: BeaconState,
                           shard_state: ShardState,
                           block: ShardBlock) -> None:
    # Update shard state
    """
    Update ``shard_state`` with shard ``block`` and ``beacon_state`.
    """
    shard_state.slot = block.slot
    prev_gasprice = shard_state.gasprice
    shard_state.gasprice = compute_updated_gasprice(prev_gasprice, len(block.body))
    if len(block.body) == 0:
        latest_block_root = shard_state.latest_block_root
    else:
        latest_block_root = hash_tree_root(block)

    shard_state.latest_block_root = latest_block_root
    shard_state.transition_digest = compute_shard_transition_digest(
        beacon_state,
        shard_state,
        block.beacon_parent_root,
        hash_tree_root(block.body),
    )
    shard_state.gasprice = compute_updated_gasprice(prev_gasprice, len(block.body))
    shard_state.slot = block.slot
    shard_state.latest_block_root = latest_block_root
```

We have a pure function `get_post_shard_state` for describing the fraud proof verification and honest validator behavior.
@@ -269,12 +270,8 @@ def get_shard_transition(beacon_state: BeaconState,
                         shard: Shard,
                         shard_blocks: Sequence[SignedShardBlock]) -> ShardTransition:
    offset_slots = get_offset_slots(beacon_state, shard)
    start_slot = offset_slots[0]
    proposals, shard_states, shard_data_roots = get_shard_state_transition_result(beacon_state, shard, shard_blocks)

    assert len(proposals) > 0
    assert len(shard_data_roots) > 0

    shard_block_lengths = []
    proposer_signatures = []
    for proposal in proposals:

@@ -288,7 +285,7 @@ def get_shard_transition(beacon_state: BeaconState,
        proposer_signature_aggregate = NO_SIGNATURE

    return ShardTransition(
        start_slot=start_slot,
        start_slot=offset_slots[0],
        shard_block_lengths=shard_block_lengths,
        shard_data_roots=shard_data_roots,
        shard_states=shard_states,

@@ -1 +1 @@
0.12.0
0.12.1

@@ -21,7 +21,7 @@ def test_initialize_beacon_state_from_eth1(spec):
    # initialize beacon_state
    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)

    assert state.genesis_time == eth1_timestamp - eth1_timestamp % spec.MIN_GENESIS_DELAY + 2 * spec.MIN_GENESIS_DELAY
    assert state.genesis_time == eth1_timestamp + spec.GENESIS_DELAY
    assert len(state.validators) == deposit_count
    assert state.eth1_data.deposit_root == deposit_root
    assert state.eth1_data.deposit_count == deposit_count
@@ -57,7 +57,7 @@ def test_initialize_beacon_state_some_small_balances(spec):
    # initialize beacon_state
    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)

    assert state.genesis_time == eth1_timestamp - eth1_timestamp % spec.MIN_GENESIS_DELAY + 2 * spec.MIN_GENESIS_DELAY
    assert state.genesis_time == eth1_timestamp + spec.GENESIS_DELAY
    assert len(state.validators) == small_deposit_count
    assert state.eth1_data.deposit_root == deposit_root
    assert state.eth1_data.deposit_count == len(deposits)
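The assertion change in these tests swaps the day-aligned genesis formula for a fixed delay. A worked comparison with a hypothetical `eth1_timestamp` and the mainnet values from this commit:

```python
MIN_GENESIS_DELAY = 86400
GENESIS_DELAY = 172800
eth1_timestamp = 1590000000  # hypothetical

# Old rule: round down to a day boundary, then add two days; new rule: a fixed two-day delay.
old_genesis_time = eth1_timestamp - eth1_timestamp % MIN_GENESIS_DELAY + 2 * MIN_GENESIS_DELAY
new_genesis_time = eth1_timestamp + GENESIS_DELAY

assert old_genesis_time == 1590105600
assert new_genesis_time == 1590172800
```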
@@ -1,8 +1,9 @@
from typing import List

from eth2spec.test.context import expect_assertion_error, PHASE0, PHASE1
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot, transition_to
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.ssz.ssz_typing import Bitlist

@@ -78,20 +79,20 @@ def build_attestation_data(spec, state, slot, index, shard_transition=None, on_t
    if spec.fork == PHASE1:
        if shard_transition is not None:
            lastest_shard_data_root_index = len(shard_transition.shard_data_roots) - 1
            attestation_data.head_shard_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
            attestation_data.shard_head_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
            attestation_data.shard_transition_root = shard_transition.hash_tree_root()
        else:
            # No shard transition
            # No shard transition -> no shard block
            shard = spec.get_shard(state, spec.Attestation(data=attestation_data))
            if on_time:
                temp_state = state.copy()
                next_slot(spec, temp_state)
                shard_transition = spec.get_shard_transition(temp_state, shard, [])
                shard_transition = spec.get_shard_transition(temp_state, shard, shard_blocks=[])
                lastest_shard_data_root_index = len(shard_transition.shard_data_roots) - 1
                attestation_data.head_shard_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
                attestation_data.shard_head_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
                attestation_data.shard_transition_root = shard_transition.hash_tree_root()
            else:
                attestation_data.head_shard_root = state.shard_states[shard].transition_digest
                attestation_data.shard_head_root = state.shard_states[shard].transition_digest
                attestation_data.shard_transition_root = spec.Root()
    return attestation_data

@@ -180,7 +181,7 @@ def get_valid_attestation(spec,
    fill_aggregate_attestation(spec, state, attestation, signed=signed, filter_participant_set=filter_participant_set)

    if spec.fork == PHASE1 and on_time:
        attestation = convert_to_valid_on_time_attestation(spec, state, attestation, signed)
        attestation = convert_to_valid_on_time_attestation(spec, state, attestation, signed=signed)

    return attestation

@@ -317,7 +318,19 @@ def next_epoch_with_attestations(spec,
            committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest)
            if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)):
                for index in range(committees_per_slot):
                    cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index, signed=True)
                    if spec.fork == PHASE1:
                        shard = spec.compute_shard_from_committee_index(post_state, index, slot_to_attest)
                        shard_transition = get_shard_transition_of_committee(
                            spec, post_state, index, slot=slot_to_attest
                        )
                        block.body.shard_transitions[shard] = shard_transition
                    else:
                        shard_transition = None

                    cur_attestation = get_valid_attestation(
                        spec, post_state, slot_to_attest,
                        shard_transition=shard_transition, index=index, signed=True, on_time=True
                    )
                    block.body.attestations.append(cur_attestation)

        if fill_prev_epoch:

@@ -328,9 +341,6 @@ def next_epoch_with_attestations(spec,
                    spec, post_state, slot_to_attest, index=index, signed=True, on_time=False)
                block.body.attestations.append(prev_attestation)

        if spec.fork == PHASE1:
            fill_block_shard_transitions_by_attestations(spec, post_state, block)

        signed_block = state_transition_and_sign_block(spec, post_state, block)
        signed_blocks.append(signed_block)

@@ -396,14 +406,3 @@ def cached_prepare_state_with_attestations(spec, state):

    # Put the LRU cache result into the state view, as if we transitioned the original view
    state.set_backing(_prep_state_cache_dict[key])


def fill_block_shard_transitions_by_attestations(spec, state, block):
    block.body.shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
    for attestation in block.body.attestations:
        shard = spec.get_shard(state, attestation)
        if attestation.data.slot == state.slot:
            temp_state = state.copy()
            transition_to(spec, temp_state, slot=block.slot)
            shard_transition = spec.get_shard_transition(temp_state, shard, [])
            block.body.shard_transitions[shard] = shard_transition

@@ -1,28 +0,0 @@
from eth2spec.test.context import expect_assertion_error


def run_crosslinks_processing(spec, state, shard_transitions, attestations, valid=True):
    """
    Run ``process_attestation``, yielding:
      - pre-state ('pre')
      - shard_transitions ('shard_transitions')
      - attestations ('attestations')
      - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    # yield pre-state
    yield 'pre', state
    yield 'shard_transitions', shard_transitions
    yield 'attestations', attestations

    # If the attestation is invalid, processing is aborted, and there is no post-state.
    if not valid:
        expect_assertion_error(lambda: spec.process_crosslinks(state, shard_transitions, attestations))
        yield 'post', None
        return

    # process crosslinks
    spec.process_crosslinks(state, shard_transitions, attestations)

    # yield post-state
    yield 'post', state

@@ -70,7 +70,7 @@ def build_shard_transitions_till_slot(spec, state, shard_blocks, on_time_slot):
    return shard_transitions


def build_attestation_with_shard_transition(spec, state, index, on_time_slot, shard_transition=None):
def build_attestation_with_shard_transition(spec, state, index, on_time_slot, shard_transition):
    temp_state = state.copy()
    transition_to(spec, temp_state, on_time_slot - 1)
    attestation = get_valid_on_time_attestation(

@@ -0,0 +1,43 @@
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.state import transition_to


def run_shard_transitions_processing(spec, state, shard_transitions, attestations, valid=True):
    """
    Run ``process_shard_transitions``, yielding:
      - pre-state ('pre')
      - shard_transitions ('shard_transitions')
      - attestations ('attestations')
      - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    # yield pre-state
    yield 'pre', state
    yield 'shard_transitions', shard_transitions
    yield 'attestations', attestations

    # If the attestation is invalid, processing is aborted, and there is no post-state.
    if not valid:
        expect_assertion_error(lambda: spec.process_shard_transitions(state, shard_transitions, attestations))
        yield 'post', None
        return

    # process crosslinks
    spec.process_shard_transitions(state, shard_transitions, attestations)

    # yield post-state
    yield 'post', state


def get_shard_transition_of_committee(spec, state, committee_index, slot=None, shard_blocks=None):
    if shard_blocks is None:
        shard_blocks = []

    if slot is None:
        slot = state.slot

    shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
    temp_state = state.copy()
    transition_to(spec, temp_state, slot + 1)
    shard_transition = spec.get_shard_transition(temp_state, shard, shard_blocks=shard_blocks)
    return shard_transition

@@ -16,8 +16,9 @@ from eth2spec.test.helpers.attester_slashings import (
    get_indexed_attestation_participants,
)
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing, check_proposer_slashing_effect
from eth2spec.test.helpers.attestations import get_valid_attestation, fill_block_shard_transitions_by_attestations
from eth2spec.test.helpers.attestations import get_valid_attestation
from eth2spec.test.helpers.deposits import prepare_state_and_deposit
from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee

from eth2spec.test.context import (
    spec_state_test, with_all_phases, expect_assertion_error, always_bls, with_phases,

@@ -687,14 +688,23 @@ def test_attestation(spec, state):

    yield 'pre', state

    attestation = get_valid_attestation(spec, state, signed=True, on_time=True)
    attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)

    index = 0
    if spec.fork == PHASE1:
        shard = spec.compute_shard_from_committee_index(state, index, state.slot)
        shard_transition = get_shard_transition_of_committee(spec, state, index)
        attestation_block.body.shard_transitions[shard] = shard_transition
    else:
        shard_transition = None

    attestation = get_valid_attestation(
        spec, state, shard_transition=shard_transition, index=index, signed=True, on_time=True
    )

    # Add to state via block transition
    pre_current_attestations_len = len(state.current_epoch_attestations)
    attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
    attestation_block.body.attestations.append(attestation)
    if spec.fork == PHASE1:
        fill_block_shard_transitions_by_attestations(spec, state, attestation_block)
    signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block)

    assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1

@@ -4,7 +4,7 @@ from eth2spec.test.context import (
    spec_state_test,
    always_bls,
)
from eth2spec.test.helpers.crosslinks import run_crosslinks_processing
from eth2spec.test.helpers.shard_transitions import run_shard_transitions_processing
from eth2spec.test.helpers.shard_block import (
    build_attestation_with_shard_transition,
    build_shard_block,

@@ -15,11 +15,10 @@ from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard

def run_basic_crosslink_tests(spec, state, target_len_offset_slot, valid=True):
    state = transition_to_valid_shard_slot(spec, state)
    # At the beginning, let `x = state.slot`, `state.shard_states[shard].slot == x - 1`
    slot_x = state.slot
    init_slot = state.slot
    committee_index = spec.CommitteeIndex(0)
    shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
    assert state.shard_states[shard].slot == slot_x - 1
    shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot + target_len_offset_slot - 1)
    assert state.shard_states[shard].slot == state.slot - 1

    # Create SignedShardBlock
    body = b'\x56' * spec.MAX_SHARD_BLOCK_SIZE

@@ -46,11 +45,11 @@ def run_basic_crosslink_tests(spec, state, target_len_offset_slot, valid=True):
    transition_to(spec, state, state.slot + target_len_offset_slot)
    pre_shard_state = state.shard_states[shard]

    yield from run_crosslinks_processing(spec, state, shard_transitions, [attestation], valid=valid)
    yield from run_shard_transitions_processing(spec, state, shard_transitions, [attestation], valid=valid)

    if valid:
        # After state transition,
        assert state.slot == slot_x + target_len_offset_slot
        assert state.slot == init_slot + target_len_offset_slot
        shard_state = state.shard_states[shard]
        assert shard_state != pre_shard_state
        assert shard_state == shard_transition.shard_states[len(shard_transition.shard_states) - 1]

@@ -67,7 +67,7 @@ def test_process_beacon_block_with_normal_shard_transition(spec, state):

    target_len_offset_slot = 1
    committee_index = spec.CommitteeIndex(0)
    shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
    shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot + target_len_offset_slot - 1)
    assert state.shard_states[shard].slot == state.slot - 1

    pre_gasprice = state.shard_states[shard].gasprice

@@ -93,7 +93,7 @@ def test_process_beacon_block_with_empty_proposal_transition(spec, state):

    target_len_offset_slot = 1
    committee_index = spec.CommitteeIndex(0)
    shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
    shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot + target_len_offset_slot - 1)
    assert state.shard_states[shard].slot == state.slot - 1

    # No new shard block

@@ -0,0 +1,71 @@
from eth2spec.test.context import (
    PHASE0,
    with_all_phases_except,
    spec_state_test,
)
from eth2spec.test.helpers.state import next_epoch


@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_committee_count_delta(spec, state):
    assert spec.get_committee_count_delta(state, 0, 0) == 0
    assert spec.get_committee_count_at_slot(state, 0) != 0
    assert spec.get_committee_count_delta(state, 0, 1) == spec.get_committee_count_at_slot(state, 0)
    assert spec.get_committee_count_delta(state, 1, 2) == spec.get_committee_count_at_slot(state, 1)
    assert spec.get_committee_count_delta(state, 0, 2) == (
        spec.get_committee_count_at_slot(state, 0) + spec.get_committee_count_at_slot(state, 1)
    )


@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_start_shard_current_epoch_start(spec, state):
    assert state.current_epoch_start_shard == 0
    next_epoch(spec, state)
    active_shard_count = spec.get_active_shard_count(state)
    assert state.current_epoch_start_shard == (
        spec.get_committee_count_delta(state, 0, spec.SLOTS_PER_EPOCH) % active_shard_count
    )
    current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))

    slot = current_epoch_start_slot
    start_shard = spec.get_start_shard(state, slot)
    assert start_shard == state.current_epoch_start_shard


@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_start_shard_next_slot(spec, state):
    next_epoch(spec, state)
    active_shard_count = spec.get_active_shard_count(state)
    current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))

    slot = current_epoch_start_slot + 1
    start_shard = spec.get_start_shard(state, slot)

    current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
    expected_start_shard = (
        state.current_epoch_start_shard
        + spec.get_committee_count_delta(state, start_slot=current_epoch_start_slot, stop_slot=slot)
    ) % active_shard_count
    assert start_shard == expected_start_shard


@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_start_shard_previous_slot(spec, state):
    next_epoch(spec, state)
    active_shard_count = spec.get_active_shard_count(state)
    current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))

    slot = current_epoch_start_slot - 1
    start_shard = spec.get_start_shard(state, slot)

    current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
    expected_start_shard = (
        state.current_epoch_start_shard
        + spec.MAX_COMMITTEES_PER_SLOT * spec.SLOTS_PER_EPOCH * active_shard_count
        - spec.get_committee_count_delta(state, start_slot=slot, stop_slot=current_epoch_start_slot)
    ) % active_shard_count
    assert start_shard == expected_start_shard

@@ -184,7 +184,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin

if __name__ == "__main__":
    gen_runner.run_generator("epoch_processing", [
        create_provider('crosslinks', test_process_crosslinks, 'minimal'),
        create_provider('final_updates', test_process_final_updates, 'minimal'),
        ...
    ])