Merge branch 'dev' into exec_v_spec
* dev: (112 commits) Minor copyedit Fix typo minor typo fix genesis tests; sign deposit-datas, and lower min validator count for testing add comments to make variations in genesis initialization clear, and about the mix-in in verification Refactor Update 0_beacon-chain.md quick comment on avoiding underflow fix basic test Make timestamp a uint64 Fixes typo Fix genesis balance bug (git add -u) Cleanups and fixes fix finalize on double justification in 123 rule Merge is_genesis_trigger into get_genesis_state Cleanups; think about merging is_genesis_trigger into get_genesis_state Renames: fix typo in justification wording rename/fix roots in justification tests for consistency ...
commit fa7f80157d
@@ -10,13 +10,15 @@ SHARD_COUNT: 1024
 # 2**7 (= 128)
 TARGET_COMMITTEE_SIZE: 128
 # 2**12 (= 4,096)
-MAX_INDICES_PER_ATTESTATION: 4096
+MAX_VALIDATORS_PER_COMMITTEE: 4096
 # 2**2 (= 4)
 MIN_PER_EPOCH_CHURN_LIMIT: 4
 # 2**16 (= 65,536)
 CHURN_LIMIT_QUOTIENT: 65536
 # See issue 563
 SHUFFLE_ROUND_COUNT: 90
+# `2**16` (= 65,536)
+MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 65536


 # Deposit contract
@@ -9,13 +9,15 @@ SHARD_COUNT: 8
 # [customized] unsecure, but fast
 TARGET_COMMITTEE_SIZE: 4
 # 2**12 (= 4,096)
-MAX_INDICES_PER_ATTESTATION: 4096
+MAX_VALIDATORS_PER_COMMITTEE: 4096
 # 2**2 (= 4)
 MIN_PER_EPOCH_CHURN_LIMIT: 4
 # 2**16 (= 65,536)
 CHURN_LIMIT_QUOTIENT: 65536
 # [customized] Faster, but unsecure.
 SHUFFLE_ROUND_COUNT: 10
+# [customized]
+MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 64


 # Deposit contract
File diff suppressed because one or more lines are too long
@@ -6,7 +6,7 @@ WITHDRAWAL_CREDENTIALS_LENGTH: constant(uint256) = 32  # bytes
 AMOUNT_LENGTH: constant(uint256) = 8  # bytes
 SIGNATURE_LENGTH: constant(uint256) = 96  # bytes

-Deposit: event({
+DepositEvent: event({
     pubkey: bytes[48],
     withdrawal_credentials: bytes[32],
     amount: bytes[8],
@@ -42,8 +42,9 @@ def to_little_endian_64(value: uint256) -> bytes[8]:

 @public
 @constant
-def get_deposit_root() -> bytes32:
-    node: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
+def get_hash_tree_root() -> bytes32:
+    zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
+    node: bytes32 = zero_bytes32
     size: uint256 = self.deposit_count
     for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):
         if bitwise_and(size, 1) == 1:  # More gas efficient than `size % 2 == 1`
@@ -51,7 +52,7 @@ def get_deposit_root() -> bytes32:
         else:
             node = sha256(concat(node, self.zero_hashes[height]))
         size /= 2
-    return node
+    return sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24)))


 @public
@@ -75,11 +76,11 @@ def deposit(pubkey: bytes[PUBKEY_LENGTH],
     assert len(withdrawal_credentials) == WITHDRAWAL_CREDENTIALS_LENGTH
     assert len(signature) == SIGNATURE_LENGTH

-    # Emit `Deposit` log
+    # Emit `DepositEvent` log
     amount: bytes[8] = self.to_little_endian_64(deposit_amount)
-    log.Deposit(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count))
+    log.DepositEvent(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count))

-    # Compute `DepositData` root
+    # Compute `DepositData` hash tree root
     zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
     pubkey_root: bytes32 = sha256(concat(pubkey, slice(zero_bytes32, start=0, len=64 - PUBKEY_LENGTH)))
     signature_root: bytes32 = sha256(concat(
@@ -91,7 +92,7 @@ def deposit(pubkey: bytes[PUBKEY_LENGTH],
         sha256(concat(amount, slice(zero_bytes32, start=0, len=32 - AMOUNT_LENGTH), signature_root)),
     ))

-    # Add `DepositData` root to Merkle tree (update a single `branch` node)
+    # Add `DepositData` hash tree root to Merkle tree (update a single `branch` node)
     self.deposit_count += 1
     size: uint256 = self.deposit_count
     for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):
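The renamed `get_hash_tree_root` above no longer returns the bare Merkle root: it mixes the little-endian `deposit_count` into the final hash so that the contract's root equals the SSZ hash tree root of the deposit list. A minimal sketch of that length mix-in, assuming SHA-256 over the concatenated 64 bytes (the helper name is illustrative, not part of the contract):

```python
from hashlib import sha256

def mix_in_length(data_tree_root: bytes, deposit_count: int) -> bytes:
    # Mirrors the contract's final step: sha256(node || little-endian count || 24 zero bytes),
    # i.e. the SSZ list root = hash(chunk tree root, length encoded as a 32-byte little-endian value).
    return sha256(data_tree_root + deposit_count.to_bytes(8, 'little') + b'\x00' * 24).digest()
```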
@@ -15,26 +15,12 @@ from eth2spec.phase0.spec import (
     DepositData,
 )
 from eth2spec.utils.hash_function import hash
+from eth2spec.utils.ssz.ssz_typing import List
 from eth2spec.utils.ssz.ssz_impl import (
     hash_tree_root,
 )


-def compute_merkle_root(leaf_nodes):
-    assert len(leaf_nodes) >= 1
-    empty_node = b'\x00' * 32
-    child_nodes = leaf_nodes[:]
-    for _ in range(DEPOSIT_CONTRACT_TREE_DEPTH):
-        parent_nodes = []
-        if len(child_nodes) % 2 == 1:
-            child_nodes.append(empty_node)
-        for j in range(0, len(child_nodes), 2):
-            parent_nodes.append(hash(child_nodes[j] + child_nodes[j + 1]))
-        child_nodes = parent_nodes
-        empty_node = hash(empty_node + empty_node)
-    return child_nodes[0]
-
-
 @pytest.fixture
 def deposit_input():
     """
@@ -110,8 +96,8 @@ def test_deposit_inputs(registration_contract,
     )


-def test_deposit_log(registration_contract, a0, w3, deposit_input):
-    log_filter = registration_contract.events.Deposit.createFilter(
+def test_deposit_event_log(registration_contract, a0, w3, deposit_input):
+    log_filter = registration_contract.events.DepositEvent.createFilter(
         fromBlock='latest',
     )

@@ -131,13 +117,14 @@ def test_deposit_log(registration_contract, a0, w3, deposit_input):
         assert log['signature'] == deposit_input[2]
         assert log['index'] == i.to_bytes(8, 'little')


 def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input):
-    log_filter = registration_contract.events.Deposit.createFilter(
+    log_filter = registration_contract.events.DepositEvent.createFilter(
         fromBlock='latest',
     )

     deposit_amount_list = [randint(MIN_DEPOSIT_AMOUNT, FULL_DEPOSIT_AMOUNT * 2) for _ in range(10)]
-    leaf_nodes = []
+    deposit_data_list = []
     for i in range(0, 10):
         tx_hash = registration_contract.functions.deposit(
             *deposit_input,
@@ -151,13 +138,12 @@ def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input

         assert log["index"] == i.to_bytes(8, 'little')

-        deposit_data = DepositData(
+        deposit_data_list.append(DepositData(
             pubkey=deposit_input[0],
             withdrawal_credentials=deposit_input[1],
             amount=deposit_amount_list[i],
             signature=deposit_input[2],
-        )
-        hash_tree_root_result = hash_tree_root(deposit_data)
-        leaf_nodes.append(hash_tree_root_result)
-        root = compute_merkle_root(leaf_nodes)
-        assert root == registration_contract.functions.get_deposit_root().call()
+        ))
+        root = hash_tree_root(List[DepositData, 2**32](*deposit_data_list))
+        assert root == registration_contract.functions.get_hash_tree_root().call()

@@ -34,6 +34,7 @@
 - [`Eth1Data`](#eth1data)
 - [`HistoricalBatch`](#historicalbatch)
 - [`DepositData`](#depositdata)
+- [`CompactCommittee`](#compactcommittee)
 - [`BeaconBlockHeader`](#beaconblockheader)
 - [Beacon operations](#beacon-operations)
 - [`ProposerSlashing`](#proposerslashing)
@@ -69,7 +70,7 @@
 - [`get_block_root_at_slot`](#get_block_root_at_slot)
 - [`get_block_root`](#get_block_root)
 - [`get_randao_mix`](#get_randao_mix)
-- [`get_active_index_root`](#get_active_index_root)
+- [`get_compact_committees_root`](#get_compact_committees_root)
 - [`generate_seed`](#generate_seed)
 - [`get_beacon_proposer_index`](#get_beacon_proposer_index)
 - [`verify_merkle_branch`](#verify_merkle_branch)
@@ -94,7 +95,6 @@
 - [`initiate_validator_exit`](#initiate_validator_exit)
 - [`slash_validator`](#slash_validator)
 - [Genesis](#genesis)
-- [Genesis trigger](#genesis-trigger)
 - [Genesis state](#genesis-state)
 - [Genesis block](#genesis-block)
 - [Beacon chain state transition function](#beacon-chain-state-transition-function)
@@ -125,7 +125,6 @@
 This document represents the specification for Phase 0 of Ethereum 2.0 -- The Beacon Chain.

 At the core of Ethereum 2.0 is a system chain called the "beacon chain". The beacon chain stores and manages the registry of [validators](#dfn-validator). In the initial deployment phases of Ethereum 2.0, the only mechanism to become a [validator](#dfn-validator) is to make a one-way ETH transaction to a deposit contract on Ethereum 1.0. Activation as a [validator](#dfn-validator) happens when Ethereum 1.0 deposit receipts are processed by the beacon chain, the activation balance is reached, and a queuing process is completed. Exit is either voluntary or done forcibly as a penalty for misbehavior.

 The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block and proof-of-stake votes for a beacon block. A sufficient number of attestations for the same shard block create a "crosslink", confirming the shard segment up to that shard block into the beacon chain. Crosslinks also serve as infrastructure for asynchronous cross-shard communication.

 ## Notation
@@ -175,6 +174,7 @@ The following values are (non-configurable) constants used throughout the specif
 | `ZERO_HASH` | `Hash(b'\x00' * 32)` |
 | `BASE_REWARDS_PER_EPOCH` | `5` |
 | `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) |
+| `SECONDS_PER_DAY` | `86400` |

 ## Configuration

@@ -186,10 +186,12 @@ The following values are (non-configurable) constants used throughout the specif
 | - | - |
 | `SHARD_COUNT` | `2**10` (= 1,024) |
 | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) |
-| `MAX_INDICES_PER_ATTESTATION` | `2**12` (= 4,096) |
+| `MAX_VALIDATORS_PER_COMMITTEE` | `2**12` (= 4,096) |
 | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) |
 | `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) |
 | `SHUFFLE_ROUND_COUNT` | `90` |
+| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `2**16` (= 65,536) |
+| `MIN_GENESIS_TIME` | `1578009600` (Jan 3, 2020) |
 | `JUSTIFICATION_BITS_LENGTH` | `4` |

 * For the safety of crosslinks, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)
@@ -350,8 +352,8 @@ class AttestationDataAndCustodyBit(Container):

 ```python
 class IndexedAttestation(Container):
-    custody_bit_0_indices: List[ValidatorIndex, MAX_INDICES_PER_ATTESTATION]  # Indices with custody bit equal to 0
-    custody_bit_1_indices: List[ValidatorIndex, MAX_INDICES_PER_ATTESTATION]  # Indices with custody bit equal to 1
+    custody_bit_0_indices: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE]  # Indices with custody bit equal to 0
+    custody_bit_1_indices: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE]  # Indices with custody bit equal to 1
     data: AttestationData
     signature: BLSSignature
 ```
@@ -360,7 +362,7 @@ class IndexedAttestation(Container):

 ```python
 class PendingAttestation(Container):
-    aggregation_bits: Bitlist[MAX_INDICES_PER_ATTESTATION]
+    aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
     data: AttestationData
     inclusion_delay: Slot
     proposer_index: ValidatorIndex
@@ -393,6 +395,14 @@ class DepositData(Container):
     signature: BLSSignature
 ```

+#### `CompactCommittee`
+
+```python
+class CompactCommittee(Container):
+    pubkeys: List[Bytes48, MAX_VALIDATORS_PER_COMMITTEE]
+    compact_validators: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
+```
+
 #### `BeaconBlockHeader`

 ```python
@@ -427,9 +437,9 @@ class AttesterSlashing(Container):

 ```python
 class Attestation(Container):
-    aggregation_bits: Bitlist[MAX_INDICES_PER_ATTESTATION]
+    aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
     data: AttestationData
-    custody_bits: Bitlist[MAX_INDICES_PER_ATTESTATION]
+    custody_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
     signature: BLSSignature
 ```

@@ -437,7 +447,7 @@ class Attestation(Container):

 ```python
 class Deposit(Container):
-    proof: Vector[Hash, DEPOSIT_CONTRACT_TREE_DEPTH]  # Merkle path to deposit root
+    proof: Vector[Hash, DEPOSIT_CONTRACT_TREE_DEPTH + 1]  # Merkle path to deposit data list root
     data: DepositData
 ```

@@ -517,7 +527,7 @@ class BeaconState(Container):
     # Shuffling
     start_shard: Shard
     randao_mixes: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR]
-    active_index_roots: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR]  # Active registry digests for light clients
+    compact_committees_roots: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR]  # Committee digests for light clients
     # Slashings
     slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR]  # Per-epoch sums of slashed effective balances
     # Attestations
@@ -691,6 +701,9 @@ def get_shard_delta(state: BeaconState, epoch: Epoch) -> int:

 ```python
 def get_epoch_start_shard(state: BeaconState, epoch: Epoch) -> Shard:
+    """
+    Return the start shard of the 0th committee in an epoch.
+    """
     assert epoch <= get_current_epoch(state) + 1
     check_epoch = Epoch(get_current_epoch(state) + 1)
     shard = Shard((state.start_shard + get_shard_delta(state, get_current_epoch(state))) % SHARD_COUNT)
@@ -744,17 +757,25 @@ def get_randao_mix(state: BeaconState,
     return state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR]
 ```

-### `get_active_index_root`
+### `get_compact_committees_root`

 ```python
-def get_active_index_root(state: BeaconState,
-                          epoch: Epoch) -> Hash:
+def get_compact_committees_root(state: BeaconState, epoch: Epoch) -> Hash:
     """
-    Return the index root at a recent ``epoch``.
-    ``epoch`` expected to be between
-    (current_epoch - EPOCHS_PER_HISTORICAL_VECTOR + ACTIVATION_EXIT_DELAY, current_epoch + ACTIVATION_EXIT_DELAY].
+    Return the compact committee root for the current epoch.
     """
-    return state.active_index_roots[epoch % EPOCHS_PER_HISTORICAL_VECTOR]
+    committees = [CompactCommittee() for _ in range(SHARD_COUNT)]
+    start_shard = get_epoch_start_shard(state, epoch)
+    for committee_number in range(get_epoch_committee_count(state, epoch)):
+        shard = Shard((start_shard + committee_number) % SHARD_COUNT)
+        for index in get_crosslink_committee(state, epoch, shard):
+            validator = state.validators[index]
+            committees[shard].pubkeys.append(validator.pubkey)
+            compact_balance = validator.effective_balance // EFFECTIVE_BALANCE_INCREMENT
+            # `index` (top 6 bytes) + `slashed` (16th bit) + `compact_balance` (bottom 15 bits)
+            compact_validator = uint64((index << 16) + (validator.slashed << 15) + compact_balance)
+            committees[shard].compact_validators.append(compact_validator)
+    return hash_tree_root(Vector[CompactCommittee, SHARD_COUNT](committees))
 ```

 ### `generate_seed`
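The bit layout used for `compact_validator` above (validator index in the high bits, `slashed` as bit 15, the balance in effective-balance increments in the low 15 bits) round-trips as in the sketch below; the pack/unpack helpers are illustrative, not spec functions:

```python
def pack_compact_validator(index: int, slashed: bool, compact_balance: int) -> int:
    assert compact_balance < 2**15  # balance must fit in the low 15 bits
    return (index << 16) + (int(slashed) << 15) + compact_balance

def unpack_compact_validator(compact: int):
    return compact >> 16, bool((compact >> 15) & 1), compact & (2**15 - 1)

# Round trip: validator 5, slashed, 31 effective-balance increments
assert unpack_compact_validator(pack_compact_validator(5, True, 31)) == (5, True, 31)
```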
@@ -766,8 +787,8 @@ def generate_seed(state: BeaconState,
     Generate a seed for the given ``epoch``.
     """
     return hash(
-        get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD)) +
-        get_active_index_root(state, epoch) +
+        get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD)) +  # Avoid underflow
+        hash_tree_root(List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT](get_active_validator_indices(state, epoch))) +
         int_to_bytes(epoch, length=32)
     )
 ```
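The `# Avoid underflow` comment refers to the first argument: `Epoch` is unsigned, so `epoch - MIN_SEED_LOOKAHEAD` could wrap at low epochs. Adding `EPOCHS_PER_HISTORICAL_VECTOR` first is harmless because `get_randao_mix` indexes modulo that same constant. A small numeric check (constant values shown here for illustration):

```python
EPOCHS_PER_HISTORICAL_VECTOR = 2**16
MIN_SEED_LOOKAHEAD = 1

epoch = 0  # earliest epoch: a plain `epoch - MIN_SEED_LOOKAHEAD` would underflow on a uint64
shifted = (epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD) % EPOCHS_PER_HISTORICAL_VECTOR
assert shifted == (epoch - MIN_SEED_LOOKAHEAD) % EPOCHS_PER_HISTORICAL_VECTOR  # same mix index, no underflow
```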
@@ -867,7 +888,7 @@ def get_crosslink_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> S
 ```python
 def get_attesting_indices(state: BeaconState,
                           data: AttestationData,
-                          bits: Bitlist[MAX_INDICES_PER_ATTESTATION]) -> Set[ValidatorIndex]:
+                          bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]) -> Set[ValidatorIndex]:
     """
     Return the set of attesting indices corresponding to ``data`` and ``bitfield``.
     """
@@ -946,7 +967,7 @@ def validate_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
     # Verify no index has custody bit equal to 1 [to be removed in phase 1]
     assert len(bit_1_indices) == 0
     # Verify max number of indices
-    assert len(bit_0_indices) + len(bit_1_indices) <= MAX_INDICES_PER_ATTESTATION
+    assert len(bit_0_indices) + len(bit_1_indices) <= MAX_VALIDATORS_PER_COMMITTEE
     # Verify index sets are disjoint
     assert len(set(bit_0_indices).intersection(bit_1_indices)) == 0
     # Verify indices are sorted
@@ -1089,74 +1110,63 @@ def slash_validator(state: BeaconState,

 ## Genesis

-### Genesis trigger
-
-Before genesis has been triggered and whenever the deposit contract emits a `Deposit` log, call the function `is_genesis_trigger(deposits: Sequence[Deposit], timestamp: uint64) -> bool` where:
-
-* `deposits` is the list of all deposits, ordered chronologically, up to and including the deposit triggering the latest `Deposit` log
-* `timestamp` is the Unix timestamp in the Ethereum 1.0 block that emitted the latest `Deposit` log
-
-When `is_genesis_trigger(deposits, timestamp) is True` for the first time, let:
-
-* `genesis_deposits = deposits`
-* `genesis_time = timestamp - timestamp % SECONDS_PER_DAY + 2 * SECONDS_PER_DAY` where `SECONDS_PER_DAY = 86400`
-* `genesis_eth1_data` be the object of type `Eth1Data` where:
-    * `genesis_eth1_data.block_hash` is the Ethereum 1.0 block hash that emitted the log for the last deposit in `deposits`
-    * `genesis_eth1_data.deposit_root` is the deposit root for the last deposit in `deposits`
-    * `genesis_eth1_data.deposit_count = len(genesis_deposits)`
-
-*Note*: The function `is_genesis_trigger` has yet to be agreed upon by the community, and can be updated as necessary. We define the following testing placeholder:
-
-```python
-def is_genesis_trigger(deposits: Sequence[Deposit], timestamp: uint64) -> bool:
-    # Process deposits
-    state = BeaconState()
-    for deposit in deposits:
-        process_deposit(state, deposit)
-
-    # Count active validators at genesis
-    active_validator_count = 0
-    for validator in state.validators:
-        if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
-            active_validator_count += 1
-
-    # Check effective balance to trigger genesis
-    GENESIS_ACTIVE_VALIDATOR_COUNT = 2**16
-    return active_validator_count == GENESIS_ACTIVE_VALIDATOR_COUNT
-```
-
 ### Genesis state

-Let `genesis_state = get_genesis_beacon_state(genesis_deposits, genesis_time, genesis_eth1_data)`.
+Before the Ethereum 2.0 genesis has been triggered, and for every Ethereum 1.0 block, call `initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)` where:
+
+* `eth1_block_hash` is the hash of the Ethereum 1.0 block
+* `eth1_timestamp` is the Unix timestamp corresponding to `eth1_block_hash`
+* `deposits` is the sequence of all deposits, ordered chronologically, up to the block with hash `eth1_block_hash`
+
+The genesis state `genesis_state` is the return value of calling `initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)` only if `is_valid_genesis_state(genesis_state) is True`.
+
+Implementations can choose to support different (more optimized) variations of the below initialization approach:
+- Build the `genesis_state` from a stream of deposits by incrementally updating the `state.eth1_data.deposit_root`.
+- Compute deposit proofs for the final `state.eth1_data.deposit_root`, and process as a pre-determined collection.
+
+*Note*: The two constants `MIN_GENESIS_TIME` and `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` have yet to be agreed upon by the community, and can be updated as necessary.

 ```python
-def get_genesis_beacon_state(deposits: Sequence[Deposit], genesis_time: int, eth1_data: Eth1Data) -> BeaconState:
+def initialize_beacon_state_from_eth1(eth1_block_hash: Hash,
+                                      eth1_timestamp: uint64,
+                                      deposits: Sequence[Deposit]) -> BeaconState:
     state = BeaconState(
-        genesis_time=genesis_time,
-        eth1_data=eth1_data,
+        genesis_time=eth1_timestamp - eth1_timestamp % SECONDS_PER_DAY + 2 * SECONDS_PER_DAY,
+        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=len(deposits)),
         latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
     )

-    # Process genesis deposits
-    for deposit in deposits:
+    # Process deposits
+    leaves = list(map(lambda deposit: deposit.data, deposits))
+    for index, deposit in enumerate(deposits):
+        state.eth1_data.deposit_root = hash_tree_root(
+            List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
+        )
         process_deposit(state, deposit)

-    # Process genesis activations
-    for validator in state.validators:
-        if validator.effective_balance >= MAX_EFFECTIVE_BALANCE:
+    # Process activations
+    for index, validator in enumerate(state.validators):
+        if state.balances[index] >= MAX_EFFECTIVE_BALANCE:
             validator.activation_eligibility_epoch = GENESIS_EPOCH
             validator.activation_epoch = GENESIS_EPOCH

-    # Populate active_index_roots
-    genesis_active_index_root = hash_tree_root(
-        List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT](get_active_validator_indices(state, GENESIS_EPOCH))
-    )
+    # Populate compact_committees_roots
+    genesis_committee_root = get_compact_committees_root(state, GENESIS_EPOCH)
     for index in range(EPOCHS_PER_HISTORICAL_VECTOR):
-        state.active_index_roots[index] = genesis_active_index_root
+        state.compact_committees_roots[index] = genesis_committee_root

     return state
 ```

+```python
+def is_valid_genesis_state(state: BeaconState) -> bool:
+    if state.genesis_time < MIN_GENESIS_TIME:
+        return False
+    elif len(get_active_validator_indices(state, GENESIS_EPOCH)) < MIN_GENESIS_ACTIVE_VALIDATOR_COUNT:
+        return False
+    else:
+        return True
+```
+
 ### Genesis block

 Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`.
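A minimal sketch of how a client might drive the procedure described above: evaluate each new Ethereum 1.0 block until a candidate state passes `is_valid_genesis_state`. The `eth1_blocks` iterable and `get_deposits_up_to` accessor are assumed client-side helpers, not part of the specification:

```python
def wait_for_genesis(spec, eth1_blocks):
    for block in eth1_blocks:
        deposits = get_deposits_up_to(block.hash)  # all deposits up to and including this block (assumed helper)
        candidate = spec.initialize_beacon_state_from_eth1(block.hash, block.timestamp, deposits)
        # candidate.genesis_time == block.timestamp - block.timestamp % SECONDS_PER_DAY + 2 * SECONDS_PER_DAY
        if spec.is_valid_genesis_state(candidate):
            return candidate  # enough active validators and a late enough timestamp
    return None
```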
@@ -1438,7 +1448,7 @@ def process_registry_updates(state: BeaconState) -> None:
     for index, validator in enumerate(state.validators):
         if (
             validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
-            validator.effective_balance >= MAX_EFFECTIVE_BALANCE
+            validator.effective_balance == MAX_EFFECTIVE_BALANCE
         ):
             validator.activation_eligibility_epoch = get_current_epoch(state)

@@ -1475,7 +1485,7 @@ def process_slashings(state: BeaconState) -> None:
 ```python
 def process_final_updates(state: BeaconState) -> None:
     current_epoch = get_current_epoch(state)
-    next_epoch = current_epoch + 1
+    next_epoch = Epoch(current_epoch + 1)
     # Reset eth1 data votes
     if (state.slot + 1) % SLOTS_PER_ETH1_VOTING_PERIOD == 0:
         state.eth1_data_votes = []
@@ -1488,12 +1498,8 @@ def process_final_updates(state: BeaconState) -> None:
     # Update start shard
     state.start_shard = Shard((state.start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT)
     # Set active index root
-    index_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % EPOCHS_PER_HISTORICAL_VECTOR
-    state.active_index_roots[index_root_position] = hash_tree_root(
-        List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT](
-            get_active_validator_indices(state, Epoch(next_epoch + ACTIVATION_EXIT_DELAY))
-        )
-    )
+    committee_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % EPOCHS_PER_HISTORICAL_VECTOR
+    state.compact_committees_roots[committee_root_position] = get_compact_committees_root(state, next_epoch)
     # Reset slashings
     state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
     # Set randao mix
@@ -1682,7 +1688,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
     assert verify_merkle_branch(
         leaf=hash_tree_root(deposit.data),
         proof=deposit.proof,
-        depth=DEPOSIT_CONTRACT_TREE_DEPTH,
+        depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,  # Add 1 for the `List` length mix-in
         index=state.eth1_deposit_index,
         root=state.eth1_data.deposit_root,
     )
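The `depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1` here matches the `Deposit.proof` change above: `state.eth1_data.deposit_root` is now the hash tree root of a `List[DepositData, ...]`, so the branch needs one extra node carrying the length mix-in. A sketch of assembling such a proof with the Merkle helpers already used by the test suite (the function name is illustrative):

```python
from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_proof

def build_deposit_proof(deposit_data_list, index):
    tree = calc_merkle_tree_from_leaves(tuple(d.hash_tree_root() for d in deposit_data_list))
    branch = list(get_merkle_proof(tree, item_index=index))        # branch through the deposit data tree
    length_mix_in = len(deposit_data_list).to_bytes(32, 'little')  # the extra (depth + 1)-th proof element
    return branch + [length_mix_in]
```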
@@ -14,7 +14,7 @@
 - [`deposit` function](#deposit-function)
 - [Deposit amount](#deposit-amount)
 - [Withdrawal credentials](#withdrawal-credentials)
-- [`Deposit` log](#deposit-log)
+- [`DepositEvent` log](#depositevent-log)
 - [Vyper code](#vyper-code)

 <!-- /TOC -->
@@ -53,9 +53,9 @@ One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment

 The private key corresponding to `withdrawal_pubkey` will be required to initiate a withdrawal. It can be stored separately until a withdrawal is required, e.g. in cold storage.

-#### `Deposit` log
+#### `DepositEvent` log

-Every Ethereum 1.0 deposit emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12-381 signature) is not verified by the deposit contract.
+Every Ethereum 1.0 deposit emits a `DepositEvent` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12-381 signature) is not verified by the deposit contract.

 ## Vyper code

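For reference, the test suite consumes these logs through a web3.py event filter; a similar sketch outside the tests might look like the following, where `registration_contract` is assumed to be a web3.py `Contract` bound to the deployed deposit contract:

```python
log_filter = registration_contract.events.DepositEvent.createFilter(fromBlock='latest')

for entry in log_filter.get_new_entries():
    args = entry['args']
    pubkey = args['pubkey']                                  # bytes[48]
    amount_gwei = int.from_bytes(args['amount'], 'little')   # bytes[8], little-endian Gwei
    deposit_index = int.from_bytes(args['index'], 'little')  # little-endian deposit counter
```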
@@ -31,7 +31,7 @@ We define an "expansion" of an object as an object where a field in an object th

 We define two expansions:

-* `ExtendedBeaconState`, which is identical to a `BeaconState` except `active_index_roots: List[Bytes32]` is replaced by `active_indices: List[List[ValidatorIndex]]`, where `BeaconState.active_index_roots[i] = hash_tree_root(ExtendedBeaconState.active_indices[i])`.
+* `ExtendedBeaconState`, which is identical to a `BeaconState` except `compact_committees_roots: List[Bytes32]` is replaced by `active_indices: List[List[ValidatorIndex]]`, where `BeaconState.compact_committees_roots[i] = hash_tree_root(ExtendedBeaconState.active_indices[i])`.
 * `ExtendedBeaconBlock`, which is identical to a `BeaconBlock` except `state_root` is replaced with the corresponding `state: ExtendedBeaconState`.

 ### `get_active_validator_indices`
@@ -40,7 +40,7 @@ Note that there is now a new way to compute `get_active_validator_indices`:

 ```python
 def get_active_validator_indices(state: ExtendedBeaconState, epoch: Epoch) -> List[ValidatorIndex]:
-    return state.active_indices[epoch % ACTIVE_INDEX_ROOTS_LENGTH]
+    return state.active_indices[epoch % EPOCHS_PER_HISTORICAL_VECTOR]
 ```

 Note that it takes `state` instead of `state.validators` as an argument. This does not affect its use in `get_shuffled_committee`, because `get_shuffled_committee` has access to the full `state` as one of its arguments.
@@ -227,7 +227,7 @@ def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) ->

 ##### Eth1 Data

-The `block.eth1_data` field is for block proposers to vote on recent Eth 1.0 data. This recent data contains an Eth 1.0 block hash as well as the associated deposit root (as calculated by the `get_deposit_root()` method of the deposit contract) and deposit count after execution of the corresponding Eth 1.0 block. If over half of the block proposers in the current Eth 1.0 voting period vote for the same `eth1_data` then `state.eth1_data` updates at the end of the voting period. Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`.
+The `block.eth1_data` field is for block proposers to vote on recent Eth 1.0 data. This recent data contains an Eth 1.0 block hash as well as the associated deposit root (as calculated by the `get_hash_tree_root()` method of the deposit contract) and deposit count after execution of the corresponding Eth 1.0 block. If over half of the block proposers in the current Eth 1.0 voting period vote for the same `eth1_data` then `state.eth1_data` updates at the end of the voting period. Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`.

 Let `get_eth1_data(distance: int) -> Eth1Data` be the (subjective) function that returns the Eth 1.0 data at distance `distance` relative to the Eth 1.0 head at the start of the current Eth 1.0 voting period. Let `previous_eth1_distance` be the distance relative to the Eth 1.0 block corresponding to `state.eth1_data.block_hash` at the start of the current Eth 1.0 voting period. An honest block proposer sets `block.eth1_data = get_eth1_vote(state, previous_eth1_distance)` where:

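A minimal sketch of the "over half of the block proposers" rule described above: tally the period's `eth1_data_votes` and adopt a value only on a strict majority. This is illustrative bookkeeping, not the spec's `get_eth1_vote` helper:

```python
from collections import Counter

def majority_eth1_data(eth1_data_votes, slots_per_eth1_voting_period):
    counts = Counter(bytes(vote.hash_tree_root()) for vote in eth1_data_votes)
    if not counts:
        return None
    root, votes = counts.most_common(1)[0]
    # A strict majority of the period's proposers is required for state.eth1_data to update
    return root if 2 * votes > slots_per_eth1_voting_period else None
```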
@@ -27,9 +27,13 @@ def with_state(fn):
 DEFAULT_BLS_ACTIVE = False


+def spectest_with_bls_switch(fn):
+    return bls_switch(spectest()(fn))
+
+
 # shorthand for decorating @with_state @spectest()
 def spec_state_test(fn):
-    return with_state(bls_switch(spectest()(fn)))
+    return with_state(spectest_with_bls_switch(fn))


 def expect_assertion_error(fn):
@@ -0,0 +1,30 @@
+from eth2spec.test.context import spectest_with_bls_switch, with_phases
+from eth2spec.test.helpers.deposits import (
+    prepare_genesis_deposits,
+)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_initialize_beacon_state_from_eth1(spec):
+    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
+    deposits, deposit_root = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
+
+    eth1_block_hash = b'\x12' * 32
+    eth1_timestamp = spec.MIN_GENESIS_TIME
+
+    yield 'eth1_block_hash', eth1_block_hash
+    yield 'eth1_timestamp', eth1_timestamp
+    yield 'deposits', deposits
+
+    # initialize beacon_state
+    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
+
+    assert state.genesis_time == eth1_timestamp - eth1_timestamp % spec.SECONDS_PER_DAY + 2 * spec.SECONDS_PER_DAY
+    assert len(state.validators) == deposit_count
+    assert state.eth1_data.deposit_root == deposit_root
+    assert state.eth1_data.deposit_count == deposit_count
+    assert state.eth1_data.block_hash == eth1_block_hash
+
+    # yield state
+    yield 'state', state
@@ -0,0 +1,86 @@
+from eth2spec.test.context import spectest_with_bls_switch, with_phases
+from eth2spec.test.helpers.deposits import (
+    prepare_genesis_deposits,
+)
+
+
+def create_valid_beacon_state(spec):
+    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
+    deposits, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
+
+    eth1_block_hash = b'\x12' * 32
+    eth1_timestamp = spec.MIN_GENESIS_TIME
+    return spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
+
+
+def run_is_valid_genesis_state(spec, state, valid=True):
+    """
+    Run ``is_valid_genesis_state``, yielding:
+      - state ('state')
+      - is_valid ('is_valid')
+    If ``valid == False``, run expecting ``AssertionError``
+    """
+    yield state
+    is_valid = spec.is_valid_genesis_state(state)
+    yield 'is_valid', is_valid
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_true(spec):
+    state = create_valid_beacon_state(spec)
+
+    yield from run_is_valid_genesis_state(spec, state, valid=True)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_false_invalid_timestamp(spec):
+    state = create_valid_beacon_state(spec)
+    state.genesis_time = spec.MIN_GENESIS_TIME - 1
+
+    yield from run_is_valid_genesis_state(spec, state, valid=True)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_true_more_balance(spec):
+    state = create_valid_beacon_state(spec)
+    state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE + 1
+
+    yield from run_is_valid_genesis_state(spec, state, valid=True)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_false_not_enough_balance(spec):
+    state = create_valid_beacon_state(spec)
+    state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE - 1
+
+    yield from run_is_valid_genesis_state(spec, state, valid=False)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_true_one_more_validator(spec):
+    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 1
+    deposits, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
+
+    eth1_block_hash = b'\x12' * 32
+    eth1_timestamp = spec.MIN_GENESIS_TIME
+    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
+
+    yield from run_is_valid_genesis_state(spec, state, valid=True)
+
+
+@with_phases(['phase0'])
+@spectest_with_bls_switch
+def test_is_valid_genesis_state_false_not_enough_validator(spec):
+    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1
+    deposits, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
+
+    eth1_block_hash = b'\x12' * 32
+    eth1_timestamp = spec.MIN_GENESIS_TIME
+    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
+
+    yield from run_is_valid_genesis_state(spec, state, valid=False)
@@ -67,8 +67,8 @@ def get_valid_attestation(spec, state, slot=None, signed=False):
     )

     committee_size = len(crosslink_committee)
-    aggregation_bits = Bitlist[spec.MAX_INDICES_PER_ATTESTATION](*([0] * committee_size))
-    custody_bits = Bitlist[spec.MAX_INDICES_PER_ATTESTATION](*([0] * committee_size))
+    aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size))
+    custody_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size))
     attestation = spec.Attestation(
         aggregation_bits=aggregation_bits,
         data=attestation_data,
@@ -1,66 +1,88 @@
 from eth2spec.test.helpers.keys import pubkeys, privkeys
 from eth2spec.utils.bls import bls_sign
-from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_root, get_merkle_proof
-from eth2spec.utils.ssz.ssz_impl import signing_root
+from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_proof
+from eth2spec.utils.ssz.ssz_impl import signing_root, hash_tree_root
+from eth2spec.utils.ssz.ssz_typing import List


-def build_deposit_data(spec, state, pubkey, privkey, amount, withdrawal_credentials, signed=False):
+def build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, state=None, signed=False):
     deposit_data = spec.DepositData(
         pubkey=pubkey,
         withdrawal_credentials=withdrawal_credentials,
         amount=amount,
     )
     if signed:
-        sign_deposit_data(spec, state, deposit_data, privkey)
+        sign_deposit_data(spec, deposit_data, privkey, state)
     return deposit_data


-def sign_deposit_data(spec, state, deposit_data, privkey):
-    signature = bls_sign(
-        message_hash=signing_root(deposit_data),
-        privkey=privkey,
-        domain=spec.get_domain(
+def sign_deposit_data(spec, deposit_data, privkey, state=None):
+    if state is None:
+        # Genesis
+        domain = spec.bls_domain(spec.DOMAIN_DEPOSIT)
+    else:
+        domain = spec.get_domain(
             state,
             spec.DOMAIN_DEPOSIT,
         )
+
+    signature = bls_sign(
+        message_hash=signing_root(deposit_data),
+        privkey=privkey,
+        domain=domain,
     )
     deposit_data.signature = signature


 def build_deposit(spec,
                   state,
-                  deposit_data_leaves,
+                  deposit_data_list,
                   pubkey,
                   privkey,
                   amount,
                   withdrawal_credentials,
                   signed):
-    deposit_data = build_deposit_data(spec, state, pubkey, privkey, amount, withdrawal_credentials, signed)
+    deposit_data = build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, state=state, signed=signed)
+    index = len(deposit_data_list)
+    deposit_data_list.append(deposit_data)
+    root = hash_tree_root(List[spec.DepositData, 2**spec.DEPOSIT_CONTRACT_TREE_DEPTH](*deposit_data_list))
+    tree = calc_merkle_tree_from_leaves(tuple([d.hash_tree_root() for d in deposit_data_list]))
+    proof = list(get_merkle_proof(tree, item_index=index)) + [(index + 1).to_bytes(32, 'little')]
+    leaf = deposit_data.hash_tree_root()
+    assert spec.verify_merkle_branch(leaf, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH + 1, index, root)
+    deposit = spec.Deposit(proof=proof, data=deposit_data)

-    item = deposit_data.hash_tree_root()
-    index = len(deposit_data_leaves)
-    deposit_data_leaves.append(item)
-    tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
-    root = get_merkle_root((tuple(deposit_data_leaves)))
-    proof = list(get_merkle_proof(tree, item_index=index))
-    assert spec.verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root)
-
-    deposit = spec.Deposit(
-        proof=list(proof),
-        index=index,
-        data=deposit_data,
-    )
-
-    return deposit, root, deposit_data_leaves
+    return deposit, root, deposit_data_list
+
+
+def prepare_genesis_deposits(spec, genesis_validator_count, amount, signed=False):
+    deposit_data_list = []
+    genesis_deposits = []
+    for validator_index in range(genesis_validator_count):
+        pubkey = pubkeys[validator_index]
+        privkey = privkeys[validator_index]
+        # insecurely use pubkey as withdrawal key if no credentials provided
+        withdrawal_credentials = spec.int_to_bytes(spec.BLS_WITHDRAWAL_PREFIX, length=1) + spec.hash(pubkey)[1:]
+        deposit, root, deposit_data_list = build_deposit(
+            spec,
+            None,
+            deposit_data_list,
+            pubkey,
+            privkey,
+            amount,
+            withdrawal_credentials,
+            signed,
+        )
+        genesis_deposits.append(deposit)
+
+    return genesis_deposits, root


 def prepare_state_and_deposit(spec, state, validator_index, amount, withdrawal_credentials=None, signed=False):
     """
     Prepare the state for the deposit, and create a deposit for the given validator, depositing the given amount.
     """
-    pre_validator_count = len(state.validators)
-    # fill previous deposits with zero-hash
-    deposit_data_leaves = [spec.ZERO_HASH] * pre_validator_count
+    deposit_data_list = []

     pubkey = pubkeys[validator_index]
     privkey = privkeys[validator_index]
@@ -69,10 +91,10 @@ def prepare_state_and_deposit(spec, state, validator_index, amount, withdrawal_c
     if withdrawal_credentials is None:
         withdrawal_credentials = spec.int_to_bytes(spec.BLS_WITHDRAWAL_PREFIX, length=1) + spec.hash(pubkey)[1:]

-    deposit, root, deposit_data_leaves = build_deposit(
+    deposit, root, deposit_data_list = build_deposit(
         spec,
         state,
-        deposit_data_leaves,
+        deposit_data_list,
         pubkey,
         privkey,
         amount,
@@ -80,6 +102,7 @@ def prepare_state_and_deposit(spec, state, validator_index, amount, withdrawal_c
         signed,
     )

+    state.eth1_deposit_index = 0
     state.eth1_data.deposit_root = root
-    state.eth1_data.deposit_count = len(deposit_data_leaves)
+    state.eth1_data.deposit_count = len(deposit_data_list)
     return deposit

@@ -43,9 +43,9 @@ def create_genesis_state(spec, num_validators):
         validator.activation_eligibility_epoch = spec.GENESIS_EPOCH
         validator.activation_epoch = spec.GENESIS_EPOCH

-    genesis_active_index_root = hash_tree_root(List[spec.ValidatorIndex, spec.VALIDATOR_REGISTRY_LIMIT](
+    genesis_compact_committees_root = hash_tree_root(List[spec.ValidatorIndex, spec.VALIDATOR_REGISTRY_LIMIT](
         spec.get_active_validator_indices(state, spec.GENESIS_EPOCH)))
     for index in range(spec.EPOCHS_PER_HISTORICAL_VECTOR):
-        state.active_index_roots[index] = genesis_active_index_root
+        state.compact_committees_roots[index] = genesis_compact_committees_root

     return state
@@ -4,13 +4,15 @@ from eth2spec.utils.bls import bls_sign
 from eth2spec.utils.ssz.ssz_impl import signing_root


-def get_valid_transfer(spec, state, slot=None, sender_index=None, amount=None, fee=None, signed=False):
+def get_valid_transfer(spec, state, slot=None, sender_index=None,
+                       recipient_index=None, amount=None, fee=None, signed=False):
     if slot is None:
         slot = state.slot
     current_epoch = spec.get_current_epoch(state)
     if sender_index is None:
         sender_index = spec.get_active_validator_indices(state, current_epoch)[-1]
-    recipient_index = spec.get_active_validator_indices(state, current_epoch)[0]
+    if recipient_index is None:
+        recipient_index = spec.get_active_validator_indices(state, current_epoch)[0]
     transfer_pubkey = pubkeys[-1]
     transfer_privkey = privkeys[-1]

@@ -84,6 +84,29 @@ def test_success_since_max_epochs_per_crosslink(spec, state):
     yield from run_attestation_processing(spec, state, attestation)


+@with_all_phases
+@spec_state_test
+def test_wrong_end_epoch_with_max_epochs_per_crosslink(spec, state):
+    for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2):
+        next_epoch(spec, state)
+    apply_empty_block(spec, state)
+
+    attestation = get_valid_attestation(spec, state)
+    data = attestation.data
+    # test logic sanity check: make sure the attestation only includes MAX_EPOCHS_PER_CROSSLINK epochs
+    assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK
+    # Now change it to be different
+    data.crosslink.end_epoch += 1
+
+    sign_attestation(spec, state, attestation)
+
+    for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
+        next_slot(spec, state)
+    apply_empty_block(spec, state)
+
+    yield from run_attestation_processing(spec, state, attestation, False)
+
+
 @with_all_phases
 @always_bls
 @spec_state_test
@@ -147,6 +170,47 @@ def test_wrong_shard(spec, state):
     yield from run_attestation_processing(spec, state, attestation, False)


+@with_all_phases
+@spec_state_test
+def test_invalid_shard(spec, state):
+    attestation = get_valid_attestation(spec, state)
+    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+    # off by one (with respect to valid range) on purpose
+    attestation.data.crosslink.shard = spec.SHARD_COUNT
+
+    sign_attestation(spec, state, attestation)
+
+    yield from run_attestation_processing(spec, state, attestation, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_old_target_epoch(spec, state):
+    assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2
+
+    attestation = get_valid_attestation(spec, state, signed=True)
+
+    state.slot = spec.SLOTS_PER_EPOCH * 2  # target epoch will be too old to handle
+
+    yield from run_attestation_processing(spec, state, attestation, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_future_target_epoch(spec, state):
+    assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2
+
+    attestation = get_valid_attestation(spec, state)
+
+    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+    attestation.data.target.epoch = spec.get_current_epoch(state) + 1  # target epoch will be too new to handle
+    sign_attestation(spec, state, attestation)
+
+    yield from run_attestation_processing(spec, state, attestation, False)
+
+
 @with_all_phases
 @spec_state_test
 def test_new_source_epoch(spec, state):
@@ -306,7 +370,7 @@ def test_empty_aggregation_bits(spec, state):
     attestation = get_valid_attestation(spec, state)
     state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

-    attestation.aggregation_bits = Bitlist[spec.MAX_INDICES_PER_ATTESTATION](
+    attestation.aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](
         *([0b0] * len(attestation.aggregation_bits)))

     sign_attestation(spec, state, attestation)
@@ -200,12 +200,106 @@ def test_participants_already_slashed(spec, state):

 @with_all_phases
 @spec_state_test
-def test_custody_bit_0_and_1(spec, state):
+def test_custody_bit_0_and_1_intersect(spec, state):
     attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)

-    attester_slashing.attestation_1.custody_bit_1_indices = (
-        attester_slashing.attestation_1.custody_bit_0_indices
+    attester_slashing.attestation_1.custody_bit_1_indices.append(
+        attester_slashing.attestation_1.custody_bit_0_indices[0]
     )

     sign_indexed_attestation(spec, state, attester_slashing.attestation_1)

     yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


+@always_bls
+@with_all_phases
+@spec_state_test
+def test_att1_bad_extra_index(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
+
+    indices = attester_slashing.attestation_1.custody_bit_0_indices
+    options = list(set(range(len(state.validators))) - set(indices))
+    indices.append(options[len(options) // 2])  # add random index, not previously in attestation.
+    attester_slashing.attestation_1.custody_bit_0_indices = sorted(indices)
+    # Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
+    # see if the bad extra index is spotted, and slashing is aborted.
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@always_bls
+@with_all_phases
+@spec_state_test
+def test_att1_bad_replaced_index(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
+
+    indices = attester_slashing.attestation_1.custody_bit_0_indices
+    options = list(set(range(len(state.validators))) - set(indices))
+    indices[3] = options[len(options) // 2]  # replace with random index, not previously in attestation.
+    attester_slashing.attestation_1.custody_bit_0_indices = sorted(indices)
+    # Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
+    # see if the bad replaced index is spotted, and slashing is aborted.
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@always_bls
+@with_all_phases
+@spec_state_test
+def test_att2_bad_extra_index(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
+
+    indices = attester_slashing.attestation_2.custody_bit_0_indices
+    options = list(set(range(len(state.validators))) - set(indices))
+    indices.append(options[len(options) // 2])  # add random index, not previously in attestation.
+    attester_slashing.attestation_2.custody_bit_0_indices = sorted(indices)
+    # Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
+    # see if the bad extra index is spotted, and slashing is aborted.
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@always_bls
+@with_all_phases
+@spec_state_test
+def test_att2_bad_replaced_index(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
+
+    indices = attester_slashing.attestation_2.custody_bit_0_indices
+    options = list(set(range(len(state.validators))) - set(indices))
+    indices[3] = options[len(options) // 2]  # replace with random index, not previously in attestation.
+    attester_slashing.attestation_2.custody_bit_0_indices = sorted(indices)
+    # Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
+    # see if the bad replaced index is spotted, and slashing is aborted.
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_unsorted_att_1_bit0(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
+
+    indices = attester_slashing.attestation_1.custody_bit_0_indices
+    assert len(indices) >= 3
+    indices[1], indices[2] = indices[2], indices[1]  # unsort second and third index
+    sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_unsorted_att_2_bit0(spec, state):
+    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
+
+    indices = attester_slashing.attestation_2.custody_bit_0_indices
+    assert len(indices) >= 3
+    indices[1], indices[2] = indices[2], indices[1]  # unsort second and third index
+    sign_indexed_attestation(spec, state, attester_slashing.attestation_2)
+
+    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
+
+
+# note: unsorted indices for custody bit 0 are to be introduced in phase 1 testing.
@@ -49,20 +49,50 @@ def run_deposit_processing(spec, state, deposit, validator_index, valid=True, ef
     assert len(state.balances) == pre_validator_count + 1
     assert get_balance(state, validator_index) == pre_balance + deposit.data.amount
+
+    effective = min(spec.MAX_EFFECTIVE_BALANCE,
+                    pre_balance + deposit.data.amount)
+    effective -= effective % spec.EFFECTIVE_BALANCE_INCREMENT
+    assert state.validators[validator_index].effective_balance == effective
+
     assert state.eth1_deposit_index == state.eth1_data.deposit_count


 @with_all_phases
 @spec_state_test
-def test_new_deposit(spec, state):
+def test_new_deposit_under_max(spec, state):
     # fresh deposit = next validator index = validator appended to registry
     validator_index = len(state.validators)
+    # effective balance will be 1 EFFECTIVE_BALANCE_INCREMENT smaller because of this small decrement.
+    amount = spec.MAX_EFFECTIVE_BALANCE - 1
+    deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
+
+    yield from run_deposit_processing(spec, state, deposit, validator_index)
+
+
+@with_all_phases
+@spec_state_test
+def test_new_deposit_max(spec, state):
+    # fresh deposit = next validator index = validator appended to registry
+    validator_index = len(state.validators)
+    # effective balance will be exactly the same as balance.
     amount = spec.MAX_EFFECTIVE_BALANCE
     deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)

     yield from run_deposit_processing(spec, state, deposit, validator_index)


+@with_all_phases
+@spec_state_test
+def test_new_deposit_over_max(spec, state):
+    # fresh deposit = next validator index = validator appended to registry
+    validator_index = len(state.validators)
+    # just 1 over the limit, effective balance should be set MAX_EFFECTIVE_BALANCE during processing
+    amount = spec.MAX_EFFECTIVE_BALANCE + 1
+    deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
+
+    yield from run_deposit_processing(spec, state, deposit, validator_index)
+
+
 @with_all_phases
 @always_bls
 @spec_state_test
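For reference, a minimal sketch of the rounding that the new `effective_balance` assertion performs on a fresh deposit. The Gwei constants below are assumptions matching the usual mainnet defaults and are not part of this diff:

    MAX_EFFECTIVE_BALANCE = 32 * 10**9       # Gwei, assumed mainnet value
    EFFECTIVE_BALANCE_INCREMENT = 10**9      # Gwei, assumed mainnet value

    def expected_effective(pre_balance, amount):
        # cap at the maximum, then round down to a whole increment
        effective = min(MAX_EFFECTIVE_BALANCE, pre_balance + amount)
        return effective - effective % EFFECTIVE_BALANCE_INCREMENT

    # e.g. a fresh deposit of MAX_EFFECTIVE_BALANCE - 1 rounds down a full increment,
    # which is exactly what test_new_deposit_under_max relies on:
    assert expected_effective(0, 32 * 10**9 - 1) == 31 * 10**9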
@@ -117,7 +147,7 @@ def test_invalid_withdrawal_credentials_top_up(spec, state):
 @with_all_phases
 @spec_state_test
 def test_wrong_deposit_for_deposit_count(spec, state):
-    deposit_data_leaves = [spec.ZERO_HASH] * len(state.validators)
+    deposit_data_leaves = [spec.DepositData() for _ in range(len(state.validators))]

     # build root for deposit_1
     index_1 = len(deposit_data_leaves)
@@ -167,6 +197,6 @@ def test_bad_merkle_proof(spec, state):
     # mess up merkle branch
     deposit.proof[5] = spec.ZERO_HASH

-    sign_deposit_data(spec, state, deposit.data, privkeys[validator_index])
+    sign_deposit_data(spec, deposit.data, privkeys[validator_index], state=state)

     yield from run_deposit_processing(spec, state, deposit, validator_index, valid=False)
@@ -1,7 +1,7 @@
 from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases
 from eth2spec.test.helpers.state import next_epoch
 from eth2spec.test.helpers.block import apply_empty_block
-from eth2spec.test.helpers.transfers import get_valid_transfer
+from eth2spec.test.helpers.transfers import get_valid_transfer, sign_transfer


 def run_transfer_processing(spec, state, transfer, valid=True):
@@ -13,11 +13,6 @@ def run_transfer_processing(spec, state, transfer, valid=True):
     If ``valid == False``, run expecting ``AssertionError``
     """

-    proposer_index = spec.get_beacon_proposer_index(state)
-    pre_transfer_sender_balance = state.balances[transfer.sender]
-    pre_transfer_recipient_balance = state.balances[transfer.recipient]
-    pre_transfer_proposer_balance = state.balances[proposer_index]
-
     yield 'pre', state
     yield 'transfer', transfer
@@ -26,6 +21,11 @@ def run_transfer_processing(spec, state, transfer, valid=True):
         yield 'post', None
         return

+    proposer_index = spec.get_beacon_proposer_index(state)
+    pre_transfer_sender_balance = state.balances[transfer.sender]
+    pre_transfer_recipient_balance = state.balances[transfer.recipient]
+    pre_transfer_proposer_balance = state.balances[proposer_index]
+
     spec.process_transfer(state, transfer)
     yield 'post', state
@@ -107,20 +107,48 @@ def test_active_but_transfer_past_effective_balance(spec, state):
 def test_incorrect_slot(spec, state):
     transfer = get_valid_transfer(spec, state, slot=state.slot + 1, signed=True)
     # un-activate so validator can transfer
-    state.validators[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH

     yield from run_transfer_processing(spec, state, transfer, False)


 @with_all_phases
 @spec_state_test
-def test_insufficient_balance_for_fee_result_dust(spec, state):
+def test_transfer_clean(spec, state):
     sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
-    state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=spec.MIN_DEPOSIT_AMOUNT, fee=0, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer)
+
+
+@with_all_phases
+@spec_state_test
+def test_transfer_clean_split_to_fee(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  amount=spec.MIN_DEPOSIT_AMOUNT // 2, fee=spec.MIN_DEPOSIT_AMOUNT // 2, signed=True)
+
+    # un-activate so validator can transfer
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer)
+
+
+@with_all_phases
+@spec_state_test
+def test_insufficient_balance_for_fee(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT
     transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=0, fee=1, signed=True)

     # un-activate so validator can transfer
-    state.validators[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH

     yield from run_transfer_processing(spec, state, transfer, False)
@@ -142,11 +170,11 @@ def test_insufficient_balance_for_fee_result_full(spec, state):
 @spec_state_test
 def test_insufficient_balance_for_amount_result_dust(spec, state):
     sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
-    state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
+    state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT
     transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0, signed=True)

     # un-activate so validator can transfer
-    state.validators[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH

     yield from run_transfer_processing(spec, state, transfer, False)
@@ -287,7 +315,7 @@ def test_no_dust_sender(spec, state):
     )

     # un-activate so validator can transfer
-    state.validators[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH

     yield from run_transfer_processing(spec, state, transfer, False)
@@ -301,7 +329,29 @@ def test_no_dust_recipient(spec, state):
     state.balances[transfer.recipient] = 0

     # un-activate so validator can transfer
-    state.validators[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_non_existent_sender(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0)
+    transfer.sender = len(state.validators)
+    sign_transfer(spec, state, transfer, 42)  # mostly valid signature, but sender won't exist, use bogus key.
+
+    yield from run_transfer_processing(spec, state, transfer, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_non_existent_recipient(spec, state):
+    sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
+    state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1
+    transfer = get_valid_transfer(spec, state, sender_index=sender_index,
+                                  recipient_index=len(state.validators), amount=1, fee=0, signed=True)

     yield from run_transfer_processing(spec, state, transfer, False)
@@ -313,6 +363,6 @@ def test_invalid_pubkey(spec, state):
     state.validators[transfer.sender].withdrawal_credentials = spec.ZERO_HASH

     # un-activate so validator can transfer
-    state.validators[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH

     yield from run_transfer_processing(spec, state, transfer, False)
@@ -0,0 +1,45 @@
+process_calls = [
+    'process_justification_and_finalization',
+    'process_crosslinks',
+    'process_rewards_and_penalties',
+    'process_registry_updates',
+    'process_reveal_deadlines',
+    'process_challenge_deadlines',
+    'process_slashings',
+    'process_final_updates',
+    'after_process_final_updates',
+]
+
+
+def run_epoch_processing_to(spec, state, process_name: str):
+    """
+    Processes to the next epoch transition, up to, but not including, the sub-transition named ``process_name``
+    """
+    slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
+
+    # transition state to slot before epoch state transition
+    spec.process_slots(state, slot - 1)
+
+    # start transitioning, do one slot update before the epoch itself.
+    spec.process_slot(state)
+
+    # process components of epoch transition before final-updates
+    for name in process_calls:
+        if name == process_name:
+            break
+        # only run when present. Later phases introduce more to the epoch-processing.
+        if hasattr(spec, name):
+            getattr(spec, name)(state)
+
+
+def run_epoch_processing_with(spec, state, process_name: str):
+    """
+    Processes to the next epoch transition, up to and including the sub-transition named ``process_name``
+      - pre-state ('pre'), state before calling ``process_name``
+      - post-state ('post'), state after calling ``process_name``
+    """
+    run_epoch_processing_to(spec, state, process_name)
+    yield 'pre', state
+    getattr(spec, process_name)(state)
+    yield 'post', state
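The new helper file above is what the reworked epoch-processing tests build on. A minimal sketch of the intended usage (the test name and its body are illustrative, mirroring how `run_process_crosslinks` is rewritten in the next hunk):

    from eth2spec.test.context import spec_state_test, with_all_phases
    from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with

    @with_all_phases
    @spec_state_test
    def test_example_crosslinks(spec, state):
        # run the epoch transition up to and including process_crosslinks,
        # yielding the 'pre' and 'post' states for the test vector output
        yield from run_epoch_processing_with(spec, state, 'process_crosslinks')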
@@ -3,42 +3,20 @@ from copy import deepcopy
 from eth2spec.test.context import spec_state_test, with_all_phases
 from eth2spec.test.helpers.state import (
     next_epoch,
-    next_slot,
-    state_transition_and_sign_block,
+    next_slot
 )
-from eth2spec.test.helpers.block import apply_empty_block, sign_block
+from eth2spec.test.helpers.block import apply_empty_block
 from eth2spec.test.helpers.attestations import (
     add_attestation_to_state,
-    build_empty_block_for_next_slot,
     fill_aggregate_attestation,
     get_valid_attestation,
     sign_attestation,
 )
+from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with


-def run_process_crosslinks(spec, state, valid=True):
-    """
-    Run ``process_crosslinks``, yielding:
-      - pre-state ('pre')
-      - post-state ('post').
-    If ``valid == False``, run expecting ``AssertionError``
-    """
-    # transition state to slot before state transition
-    slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1
-    block = build_empty_block_for_next_slot(spec, state)
-    block.slot = slot
-    sign_block(spec, state, block)
-    state_transition_and_sign_block(spec, state, block)
-
-    # cache state before epoch transition
-    spec.process_slot(state)
-
-    # process components of epoch transition before processing crosslinks
-    spec.process_justification_and_finalization(state)
-
-    yield 'pre', state
-    spec.process_crosslinks(state)
-    yield 'post', state
+def run_process_crosslinks(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_crosslinks')


 @with_all_phases
@@ -0,0 +1,91 @@
+from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
+    run_epoch_processing_with, run_epoch_processing_to
+)
+
+
+def run_process_final_updates(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_final_updates')
+
+
+@with_all_phases
+@spec_state_test
+def test_eth1_vote_no_reset(spec, state):
+    assert spec.SLOTS_PER_ETH1_VOTING_PERIOD > spec.SLOTS_PER_EPOCH
+    # skip ahead to the end of the epoch
+    state.slot = spec.SLOTS_PER_EPOCH - 1
+    for i in range(state.slot + 1):  # add a vote for each skipped slot.
+        state.eth1_data_votes.append(
+            spec.Eth1Data(deposit_root=b'\xaa' * 32,
+                          deposit_count=state.eth1_deposit_index,
+                          block_hash=b'\xbb' * 32))
+
+    yield from run_process_final_updates(spec, state)
+
+    assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH
+
+
+@with_all_phases
+@spec_state_test
+def test_eth1_vote_reset(spec, state):
+    # skip ahead to the end of the voting period
+    state.slot = spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1
+    for i in range(state.slot + 1):  # add a vote for each skipped slot.
+        state.eth1_data_votes.append(
+            spec.Eth1Data(deposit_root=b'\xaa' * 32,
+                          deposit_count=state.eth1_deposit_index,
+                          block_hash=b'\xbb' * 32))
+
+    yield from run_process_final_updates(spec, state)
+
+    assert len(state.eth1_data_votes) == 0
+
+
+@with_all_phases
+@spec_state_test
+def test_effective_balance_hysteresis(spec, state):
+    # Prepare state up to the final-updates.
+    # Then overwrite the balances, we only want the focus to be on the hysteresis based changes.
+    run_epoch_processing_to(spec, state, 'process_final_updates')
+    # Set some edge cases for balances
+    max = spec.MAX_EFFECTIVE_BALANCE
+    min = spec.EJECTION_BALANCE
+    inc = spec.EFFECTIVE_BALANCE_INCREMENT
+    half_inc = inc // 2
+    cases = [
+        (max, max, max, "as-is"),
+        (max, max - 1, max - inc, "round down, step lower"),
+        (max, max + 1, max, "round down"),
+        (max, max - inc, max - inc, "exactly 1 step lower"),
+        (max, max - inc - 1, max - (2 * inc), "just 1 over 1 step lower"),
+        (max, max - inc + 1, max - inc, "close to 1 step lower"),
+        (min, min + (half_inc * 3), min, "bigger balance, but not high enough"),
+        (min, min + (half_inc * 3) + 1, min + inc, "bigger balance, high enough, but small step"),
+        (min, min + (half_inc * 4) - 1, min + inc, "bigger balance, high enough, close to double step"),
+        (min, min + (half_inc * 4), min + (2 * inc), "exact two step balance increment"),
+        (min, min + (half_inc * 4) + 1, min + (2 * inc), "over two steps, round down"),
+    ]
+    current_epoch = spec.get_current_epoch(state)
+    for i, (pre_eff, bal, _, _) in enumerate(cases):
+        assert spec.is_active_validator(state.validators[i], current_epoch)
+        state.validators[i].effective_balance = pre_eff
+        state.balances[i] = bal
+
+    yield 'pre', state
+    spec.process_final_updates(state)
+    yield 'post', state
+
+    for i, (_, _, post_eff, name) in enumerate(cases):
+        assert state.validators[i].effective_balance == post_eff, name
+
+
+@with_all_phases
+@spec_state_test
+def test_historical_root_accumulator(spec, state):
+    # skip ahead to near the end of the historical roots period (excl block before epoch processing)
+    state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
+    history_len = len(state.historical_roots)
+
+    yield from run_process_final_updates(spec, state)
+
+    assert len(state.historical_roots) == history_len + 1
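The hysteresis cases in the file above exercise the effective-balance update rule from the v0.8-era spec's `process_final_updates`. Reproduced here as a sketch for context; the exact form is an assumption drawn from the spec, and the case table above remains the authoritative expectation:

    HALF_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 2
    if balance < validator.effective_balance or validator.effective_balance + 3 * HALF_INCREMENT < balance:
        # only move the effective balance once the true balance has drifted far enough,
        # then round down to a whole increment (capped at the maximum)
        validator.effective_balance = min(
            balance - balance % EFFECTIVE_BALANCE_INCREMENT,
            MAX_EFFECTIVE_BALANCE,
        )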
@@ -0,0 +1,280 @@
+from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
+    run_epoch_processing_with
+)
+
+
+def run_process_just_and_fin(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_justification_and_finalization')
+
+
+def get_shards_for_slot(spec, state, slot):
+    epoch = spec.slot_to_epoch(slot)
+    epoch_start_shard = spec.get_epoch_start_shard(state, epoch)
+    committees_per_slot = spec.get_epoch_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH
+    shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT
+    return [shard + i for i in range(committees_per_slot)]
+
+
+def add_mock_attestations(spec, state, epoch, source, target, sufficient_support=False):
+    # we must be at the end of the epoch
+    assert (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0
+
+    previous_epoch = spec.get_previous_epoch(state)
+    current_epoch = spec.get_current_epoch(state)
+
+    if current_epoch == epoch:
+        attestations = state.current_epoch_attestations
+    elif previous_epoch == epoch:
+        attestations = state.previous_epoch_attestations
+    else:
+        raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")
+
+    total_balance = spec.get_total_active_balance(state)
+    remaining_balance = total_balance * 2 // 3
+
+    epoch_start_slot = spec.get_epoch_start_slot(epoch)
+    for slot in range(epoch_start_slot, epoch_start_slot + spec.SLOTS_PER_EPOCH):
+        for shard in get_shards_for_slot(spec, state, slot):
+            # Check if we already have had sufficient balance. (and undone if we don't want it).
+            # If so, do not create more attestations. (we do not have empty pending attestations normally anyway)
+            if remaining_balance < 0:
+                return
+
+            committee = spec.get_crosslink_committee(state, spec.slot_to_epoch(slot), shard)
+            # Create a bitfield filled with the given count per attestation,
+            #  exactly on the right-most part of the committee field.
+
+            aggregation_bits = [0] * len(committee)
+            for v in range(len(committee) * 2 // 3 + 1):
+                if remaining_balance > 0:
+                    remaining_balance -= state.validators[v].effective_balance
+                    aggregation_bits[v] = 1
+                else:
+                    break
+
+            # remove just one attester to make the marginal support insufficient
+            if not sufficient_support:
+                aggregation_bits[aggregation_bits.index(1)] = 0
+
+            attestations.append(spec.PendingAttestation(
+                aggregation_bits=aggregation_bits,
+                data=spec.AttestationData(
+                    beacon_block_root=b'\xff' * 32,  # irrelevant to testing
+                    source=source,
+                    target=target,
+                    crosslink=spec.Crosslink(shard=shard)
+                ),
+                inclusion_delay=1,
+            ))
+
+
+def get_checkpoints(spec, epoch):
+    c1 = None if epoch < 1 else spec.Checkpoint(epoch=epoch - 1, root=b'\xaa' * 32)
+    c2 = None if epoch < 2 else spec.Checkpoint(epoch=epoch - 2, root=b'\xbb' * 32)
+    c3 = None if epoch < 3 else spec.Checkpoint(epoch=epoch - 3, root=b'\xcc' * 32)
+    c4 = None if epoch < 4 else spec.Checkpoint(epoch=epoch - 4, root=b'\xdd' * 32)
+    c5 = None if epoch < 5 else spec.Checkpoint(epoch=epoch - 5, root=b'\xee' * 32)
+    return c1, c2, c3, c4, c5
+
+
+def put_checkpoints_in_block_roots(spec, state, checkpoints):
+    for c in checkpoints:
+        state.block_roots[spec.get_epoch_start_slot(c.epoch) % spec.SLOTS_PER_HISTORICAL_ROOT] = c.root
+
+
+def finalize_on_234(spec, state, epoch, sufficient_support):
+    assert epoch > 4
+    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
+
+    # 43210 -- epochs ago
+    # 3210x -- justification bitfield indices
+    # 11*0. -- justification bitfield contents, . = this epoch, * is being justified now
+    # checkpoints for the epochs ago:
+    c1, c2, c3, c4, _ = get_checkpoints(spec, epoch)
+    put_checkpoints_in_block_roots(spec, state, [c1, c2, c3, c4])
+
+    old_finalized = state.finalized_checkpoint
+    state.previous_justified_checkpoint = c4
+    state.current_justified_checkpoint = c3
+    state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
+    state.justification_bits[1:3] = [1, 1]  # mock 3rd and 4th latest epochs as justified (indices are pre-shift)
+    # mock the 2nd latest epoch as justifiable, with 4th as source
+    add_mock_attestations(spec, state,
+                          epoch=epoch - 2,
+                          source=c4,
+                          target=c2,
+                          sufficient_support=sufficient_support)
+
+    # process!
+    yield from run_process_just_and_fin(spec, state)
+
+    assert state.previous_justified_checkpoint == c3  # changed to old current
+    if sufficient_support:
+        assert state.current_justified_checkpoint == c2  # changed to 2nd latest
+        assert state.finalized_checkpoint == c4  # finalized old previous justified epoch
+    else:
+        assert state.current_justified_checkpoint == c3  # still old current
+        assert state.finalized_checkpoint == old_finalized  # no new finalized
+
+
+def finalize_on_23(spec, state, epoch, sufficient_support):
+    assert epoch > 3
+    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
+
+    # 43210 -- epochs ago
+    # 210xx -- justification bitfield indices (pre shift)
+    # 3210x -- justification bitfield indices (post shift)
+    # 01*0. -- justification bitfield contents, . = this epoch, * is being justified now
+    # checkpoints for the epochs ago:
+    c1, c2, c3, _, _ = get_checkpoints(spec, epoch)
+    put_checkpoints_in_block_roots(spec, state, [c1, c2, c3])
+
+    old_finalized = state.finalized_checkpoint
+    state.previous_justified_checkpoint = c3
+    state.current_justified_checkpoint = c3
+    state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
+    state.justification_bits[1] = 1  # mock 3rd latest epoch as justified (index is pre-shift)
+    # mock the 2nd latest epoch as justifiable, with 3rd as source
+    add_mock_attestations(spec, state,
+                          epoch=epoch - 2,
+                          source=c3,
+                          target=c2,
+                          sufficient_support=sufficient_support)
+
+    # process!
+    yield from run_process_just_and_fin(spec, state)
+
+    assert state.previous_justified_checkpoint == c3  # changed to old current
+    if sufficient_support:
+        assert state.current_justified_checkpoint == c2  # changed to 2nd latest
+        assert state.finalized_checkpoint == c3  # finalized old previous justified epoch
+    else:
+        assert state.current_justified_checkpoint == c3  # still old current
+        assert state.finalized_checkpoint == old_finalized  # no new finalized
+
+
+def finalize_on_123(spec, state, epoch, sufficient_support):
+    assert epoch > 5
+    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
+
+    # 43210 -- epochs ago
+    # 210xx -- justification bitfield indices (pre shift)
+    # 3210x -- justification bitfield indices (post shift)
+    # 011*. -- justification bitfield contents, . = this epoch, * is being justified now
+    # checkpoints for the epochs ago:
+    c1, c2, c3, c4, c5 = get_checkpoints(spec, epoch)
+    put_checkpoints_in_block_roots(spec, state, [c1, c2, c3, c4, c5])
+
+    old_finalized = state.finalized_checkpoint
+    state.previous_justified_checkpoint = c5
+    state.current_justified_checkpoint = c3
+    state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
+    state.justification_bits[1] = 1  # mock 3rd latest epochs as justified (index is pre-shift)
+    # mock the 2nd latest epoch as justifiable, with 5th as source
+    add_mock_attestations(spec, state,
+                          epoch=epoch - 2,
+                          source=c5,
+                          target=c2,
+                          sufficient_support=sufficient_support)
+    # mock the 1st latest epoch as justifiable, with 3rd as source
+    add_mock_attestations(spec, state,
+                          epoch=epoch - 1,
+                          source=c3,
+                          target=c1,
+                          sufficient_support=sufficient_support)
+
+    # process!
+    yield from run_process_just_and_fin(spec, state)
+
+    assert state.previous_justified_checkpoint == c3  # changed to old current
+    if sufficient_support:
+        assert state.current_justified_checkpoint == c1  # changed to 1st latest
+        assert state.finalized_checkpoint == c3  # finalized old current
+    else:
+        assert state.current_justified_checkpoint == c3  # still old current
+        assert state.finalized_checkpoint == old_finalized  # no new finalized
+
+
+def finalize_on_12(spec, state, epoch, sufficient_support):
+    assert epoch > 2
+    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
+
+    # 43210 -- epochs ago
+    # 210xx -- justification bitfield indices (pre shift)
+    # 3210x -- justification bitfield indices (post shift)
+    # 001*. -- justification bitfield contents, . = this epoch, * is being justified now
+    # checkpoints for the epochs ago:
+    c1, c2, _, _, _ = get_checkpoints(spec, epoch)
+    put_checkpoints_in_block_roots(spec, state, [c1, c2])
+
+    old_finalized = state.finalized_checkpoint
+    state.previous_justified_checkpoint = c2
+    state.current_justified_checkpoint = c2
+    state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
+    state.justification_bits[0] = 1  # mock 2nd latest epoch as justified (this is pre-shift)
+    # mock the 1st latest epoch as justifiable, with 2nd as source
+    add_mock_attestations(spec, state,
+                          epoch=epoch - 1,
+                          source=c2,
+                          target=c1,
+                          sufficient_support=sufficient_support)
+
+    # process!
+    yield from run_process_just_and_fin(spec, state)
+
+    assert state.previous_justified_checkpoint == c2  # changed to old current
+    if sufficient_support:
+        assert state.current_justified_checkpoint == c1  # changed to 1st latest
+        assert state.finalized_checkpoint == c2  # finalized previous justified epoch
+    else:
+        assert state.current_justified_checkpoint == c2  # still old current
+        assert state.finalized_checkpoint == old_finalized  # no new finalized
+
+
+@with_all_phases
+@spec_state_test
+def test_234_ok_support(spec, state):
+    yield from finalize_on_234(spec, state, 5, True)
+
+
+@with_all_phases
+@spec_state_test
+def test_234_poor_support(spec, state):
+    yield from finalize_on_234(spec, state, 5, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_23_ok_support(spec, state):
+    yield from finalize_on_23(spec, state, 4, True)
+
+
+@with_all_phases
+@spec_state_test
+def test_23_poor_support(spec, state):
+    yield from finalize_on_23(spec, state, 4, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_123_ok_support(spec, state):
+    yield from finalize_on_123(spec, state, 6, True)
+
+
+@with_all_phases
+@spec_state_test
+def test_123_poor_support(spec, state):
+    yield from finalize_on_123(spec, state, 6, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_12_ok_support(spec, state):
+    yield from finalize_on_12(spec, state, 3, True)
+
+
+@with_all_phases
+@spec_state_test
+def test_12_poor_support(spec, state):
+    yield from finalize_on_12(spec, state, 3, False)
@@ -1,46 +1,25 @@
-from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block
-from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block
+from eth2spec.test.helpers.state import next_epoch
 from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with


-def run_process_registry_updates(spec, state, valid=True):
-    """
-    Run ``process_crosslinks``, yielding:
-      - pre-state ('pre')
-      - post-state ('post').
-    If ``valid == False``, run expecting ``AssertionError``
-    """
-    # transition state to slot before state transition
-    slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1
-    block = build_empty_block_for_next_slot(spec, state)
-    block.slot = slot
-    sign_block(spec, state, block)
-    state_transition_and_sign_block(spec, state, block)
-
-    # cache state before epoch transition
-    spec.process_slot(state)
-
-    # process components of epoch transition before registry update
-    spec.process_justification_and_finalization(state)
-    spec.process_crosslinks(state)
-    spec.process_rewards_and_penalties(state)
-
-    yield 'pre', state
-    spec.process_registry_updates(state)
-    yield 'post', state
+def run_process_registry_updates(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_registry_updates')
+
+
+def mock_deposit(spec, state, index):
+    assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
+    state.validators[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH
+    state.validators[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
+    assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))


 @with_all_phases
 @spec_state_test
 def test_activation(spec, state):
     index = 0
-    assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
-
-    # Mock a new deposit
-    state.validators[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
-    state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH
-    state.validators[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
-    assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
+    mock_deposit(spec, state, index)

     for _ in range(spec.ACTIVATION_EXIT_DELAY + 1):
         next_epoch(spec, state)
@@ -49,10 +28,39 @@ def test_activation(spec, state):

     assert state.validators[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH
     assert state.validators[index].activation_epoch != spec.FAR_FUTURE_EPOCH
-    assert spec.is_active_validator(
-        state.validators[index],
-        spec.get_current_epoch(state),
-    )
+    assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
+
+
+@with_all_phases
+@spec_state_test
+def test_activation_queue_sorting(spec, state):
+    mock_activations = 10
+
+    epoch = spec.get_current_epoch(state)
+    for i in range(mock_activations):
+        mock_deposit(spec, state, i)
+        state.validators[i].activation_eligibility_epoch = epoch + 1
+
+    # give the last priority over the others
+    state.validators[mock_activations - 1].activation_eligibility_epoch = epoch
+
+    # make sure we are hitting the churn
+    churn_limit = spec.get_churn_limit(state)
+    assert mock_activations > churn_limit
+
+    yield from run_process_registry_updates(spec, state)
+
+    # the first got in as second
+    assert state.validators[0].activation_epoch != spec.FAR_FUTURE_EPOCH
+    # the prioritized got in as first
+    assert state.validators[mock_activations - 1].activation_epoch != spec.FAR_FUTURE_EPOCH
+    # the second last is at the end of the queue, and did not make the churn,
+    #  hence is not assigned an activation_epoch yet.
+    assert state.validators[mock_activations - 2].activation_epoch == spec.FAR_FUTURE_EPOCH
+    # the one at churn_limit - 1 did not make it, it was out-prioritized
+    assert state.validators[churn_limit - 1].activation_epoch == spec.FAR_FUTURE_EPOCH
+    # but the one in front of the above did
+    assert state.validators[churn_limit - 2].activation_epoch != spec.FAR_FUTURE_EPOCH


 @with_all_phases
@ -0,0 +1,125 @@
|
||||||
|
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||||
|
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
|
||||||
|
run_epoch_processing_with, run_epoch_processing_to
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def run_process_slashings(spec, state):
|
||||||
|
yield from run_epoch_processing_with(spec, state, 'process_slashings')
|
||||||
|
|
||||||
|
|
||||||
|
def slash_validators(spec, state, indices, out_epochs):
|
||||||
|
total_slashed_balance = 0
|
||||||
|
for i, out_epoch in zip(indices, out_epochs):
|
||||||
|
v = state.validators[i]
|
||||||
|
v.slashed = True
|
||||||
|
spec.initiate_validator_exit(state, i)
|
||||||
|
v.withdrawable_epoch = out_epoch
|
||||||
|
total_slashed_balance += v.effective_balance
|
||||||
|
|
||||||
|
state.slashings[
|
||||||
|
spec.get_current_epoch(state) % spec.EPOCHS_PER_SLASHINGS_VECTOR
|
||||||
|
] = total_slashed_balance
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_max_penalties(spec, state):
|
||||||
|
slashed_count = (len(state.validators) // 3) + 1
|
||||||
|
out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)
|
||||||
|
|
||||||
|
slashed_indices = list(range(slashed_count))
|
||||||
|
slash_validators(spec, state, slashed_indices, [out_epoch] * slashed_count)
|
||||||
|
|
||||||
|
total_balance = spec.get_total_active_balance(state)
|
||||||
|
total_penalties = sum(state.slashings)
|
||||||
|
|
||||||
|
assert total_balance // 3 <= total_penalties
|
||||||
|
|
||||||
|
yield from run_process_slashings(spec, state)
|
||||||
|
|
||||||
|
for i in slashed_indices:
|
||||||
|
assert state.balances[i] == 0
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_small_penalty(spec, state):
|
||||||
|
# Just the bare minimum for this one validator
|
||||||
|
state.balances[0] = state.validators[0].effective_balance = spec.EJECTION_BALANCE
|
||||||
|
# All the other validators get the maximum.
|
||||||
|
for i in range(1, len(state.validators)):
|
||||||
|
state.validators[i].effective_balance = state.balances[i] = spec.MAX_EFFECTIVE_BALANCE
|
||||||
|
|
||||||
|
out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)
|
||||||
|
|
||||||
|
slash_validators(spec, state, [0], [out_epoch])
|
||||||
|
|
||||||
|
total_balance = spec.get_total_active_balance(state)
|
||||||
|
total_penalties = sum(state.slashings)
|
||||||
|
|
||||||
|
assert total_balance // 3 > total_penalties
|
||||||
|
|
||||||
|
run_epoch_processing_to(spec, state, 'process_slashings')
|
||||||
|
pre_slash_balances = list(state.balances)
|
||||||
|
yield 'pre', state
|
||||||
|
spec.process_slashings(state)
|
||||||
|
yield 'post', state
|
||||||
|
|
||||||
|
assert state.balances[0] == pre_slash_balances[0] - (state.validators[0].effective_balance
|
||||||
|
* 3 * total_penalties // total_balance)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
@spec_state_test
def test_scaled_penalties(spec, state):
    # skip to next epoch
    state.slot = spec.SLOTS_PER_EPOCH

    # Also mock some previous slashings, so that we test the delta in the penalties computation.
    base = spec.EJECTION_BALANCE
    incr = spec.EFFECTIVE_BALANCE_INCREMENT
    # Just add some random slashings. Non-zero slashings are at least the minimal effective balance.
    state.slashings[0] = base + (incr * 12)
    state.slashings[4] = base + (incr * 3)
    state.slashings[5] = base + (incr * 6)
    state.slashings[spec.EPOCHS_PER_SLASHINGS_VECTOR - 1] = base + (incr * 7)

    slashed_count = len(state.validators) // 4

    assert slashed_count > 10

    # Make the balances non-uniform.
    # Otherwise it would just be a simple 3/4 balance slashing. Test the per-validator scaled penalties.
    diff = spec.MAX_EFFECTIVE_BALANCE - base
    increments = diff // incr
    for i in range(10):
        state.validators[i].effective_balance = base + (incr * (i % increments))
        assert state.validators[i].effective_balance <= spec.MAX_EFFECTIVE_BALANCE
        # add/remove some, see if balances different than the effective balances are picked up
        state.balances[i] = state.validators[i].effective_balance + i - 5

    total_balance = spec.get_total_active_balance(state)

    out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)

    slashed_indices = list(range(slashed_count))

    # Process up to the sub-transition, then hijack and get the balances.
    # We just want to test the slashings; we are not interested in the other
    # balance changes during the same epoch transition.
    run_epoch_processing_to(spec, state, 'process_slashings')
    pre_slash_balances = list(state.balances)

    slash_validators(spec, state, slashed_indices, [out_epoch] * slashed_count)

    yield 'pre', state
    spec.process_slashings(state)
    yield 'post', state

    total_penalties = sum(state.slashings)

    for i in slashed_indices:
        v = state.validators[i]
        penalty = v.effective_balance * total_penalties * 3 // total_balance
        assert state.balances[i] == pre_slash_balances[i] - penalty
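
The final loop spells out the proportional penalty these tests rely on: each slashed validator loses effective_balance * 3 * total_penalties // total_balance. In the spec's process_slashings that fraction is capped at the full balance once total penalties reach a third of the total active balance, which test_small_penalty explicitly stays below. A quick numeric sketch with made-up Gwei amounts, just to illustrate the formula:

# Illustrative numbers only, not taken from the tests above.
effective_balance = 32 * 10**9      # one validator, 32 ETH in Gwei
total_balance = 64_000 * 10**9      # total active balance
total_penalties = 1_000 * 10**9     # sum(state.slashings)

penalty = effective_balance * 3 * total_penalties // total_balance
# The validator loses 3 * (1,000 / 64,000) of its effective balance, i.e. 1.5 ETH here.
assert penalty == 1_500_000_000
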
@ -4,15 +4,46 @@ from eth2spec.utils.ssz.ssz_impl import signing_root
from eth2spec.utils.bls import bls_sign

from eth2spec.test.helpers.state import get_balance, state_transition_and_sign_block
from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block
from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
from eth2spec.test.helpers.attestations import get_valid_attestation
from eth2spec.test.helpers.deposits import prepare_state_and_deposit

from eth2spec.test.context import spec_state_test, with_all_phases, expect_assertion_error


@with_all_phases
@spec_state_test
def test_prev_slot_block_transition(spec, state):
    # Go to clean slot
    spec.process_slots(state, state.slot + 1)
    # Make a block for it
    block = build_empty_block(spec, state, slot=state.slot, signed=True)
    # Transition to next slot; the block above will no longer be valid on top of the new state.
    spec.process_slots(state, state.slot + 1)

    yield 'pre', state
    expect_assertion_error(lambda: state_transition_and_sign_block(spec, state, block))
    yield 'blocks', [block]
    yield 'post', None


@with_all_phases
@spec_state_test
def test_same_slot_block_transition(spec, state):
    # Same slot on top of pre-state, but move out of slot 0 first.
    spec.process_slots(state, state.slot + 1)

    block = build_empty_block(spec, state, slot=state.slot, signed=True)

    yield 'pre', state

    state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [block]
    yield 'post', state
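
Both of these tests lean on expect_assertion_error, now imported from eth2spec.test.context, to require that applying an invalid block raises. The helper's body is not shown in this diff; a minimal sketch of the idea, mirroring the try/except pattern used later in test_expected_deposit_in_block, could look like:

def expect_assertion_error(fn):
    # Call fn and demand that it raises AssertionError; complain loudly if it does not.
    succeeded = False
    try:
        fn()
        succeeded = True
    except AssertionError:
        pass
    if succeeded:
        raise AssertionError("expected an AssertionError, but the call succeeded")
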
@with_all_phases

@ -35,6 +66,22 @@ def test_empty_block_transition(spec, state):
    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.ZERO_HASH


@with_all_phases
@spec_state_test
def test_invalid_state_root(spec, state):
    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)
    block.state_root = b"\xaa" * 32
    sign_block(spec, state, block)

    expect_assertion_error(
        lambda: spec.state_transition(state, block, validate_state_root=True))

    yield 'blocks', [block]
    yield 'post', None
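
test_invalid_state_root only fails at the very end of the transition: with validate_state_root=True, state_transition processes the slots and the block and then compares the block's claimed state root against the resulting state. A simplified sketch of that final check (not the spec's full function; hash_tree_root assumed to be exposed on the spec module as in the pyspec):

def check_state_root(spec, state, block):
    # The root the proposer committed to must match the state actually produced.
    assert block.state_root == spec.hash_tree_root(state)
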
@with_all_phases
@spec_state_test
def test_skipped_slots(spec, state):

@ -76,26 +123,29 @@ def test_empty_epoch_transition(spec, state):
        assert spec.get_block_root_at_slot(state, slot) == block.parent_root


@with_all_phases
@spec_state_test
def test_empty_epoch_transition_not_finalizing(spec, state):
    # Don't run for non-minimal configs: it takes very long, and the effect
    # of calling finalization/justification is just the same as with the minimal configuration.
    if spec.SLOTS_PER_EPOCH > 8:
        return

    # copy for later balance lookups.
    pre_balances = list(state.balances)
    yield 'pre', state

    spec.process_slots(state, state.slot + (spec.SLOTS_PER_EPOCH * 5))
    block = build_empty_block_for_next_slot(spec, state, signed=True)
    state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [block]
    yield 'post', state

    assert state.slot == block.slot
    assert state.finalized_checkpoint.epoch < spec.get_current_epoch(state) - 4
    for index in range(len(state.validators)):
        assert state.balances[index] < pre_balances[index]
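
The closing assertions pin down the "not finalizing" situation: after five empty epochs the finalized checkpoint trails the current epoch by more than four epochs, so inactivity penalties have started draining every balance. The condition the test checks can be read as a small predicate (a hypothetical helper, just restating the assertion):

def is_stalled(finalized_epoch, current_epoch, lag=4):
    # Finality is considered stalled once it lags more than `lag` epochs behind.
    return finalized_epoch < current_epoch - lag
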
@with_all_phases

@ -172,7 +222,27 @@ def test_attester_slashing(spec, state):
    )


@with_all_phases
@spec_state_test
def test_expected_deposit_in_block(spec, state):
    # Make the state expect a deposit, then don't provide it.
    state.eth1_data.deposit_count += 1
    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)
    sign_block(spec, state, block)
    bad = False
    try:
        state_transition_and_sign_block(spec, state, block)
        bad = True
    except AssertionError:
        pass
    if bad:
        raise AssertionError("expected deposit was not enforced")

    yield 'blocks', [block]
    yield 'post', None


@with_all_phases
@spec_state_test

@ -376,6 +446,7 @@ def test_historical_batch(spec, state):
    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state, signed=True)
    sign_block(spec, state, block)
    state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [block]

@ -386,29 +457,78 @@ def test_historical_batch(spec, state):
    assert len(state.historical_roots) == pre_historical_roots_len + 1


@with_all_phases
@spec_state_test
def test_eth1_data_votes_consensus(spec, state):
    # Don't run when it will take very, very long to simulate. Minimal configuration suffices.
    if spec.SLOTS_PER_ETH1_VOTING_PERIOD > 16:
        return

    offset_block = build_empty_block(spec, state, slot=spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1)
    sign_block(spec, state, offset_block)
    state_transition_and_sign_block(spec, state, offset_block)
    yield 'pre', state

    a = b'\xaa' * 32
    b = b'\xbb' * 32
    c = b'\xcc' * 32

    blocks = []

    for i in range(0, spec.SLOTS_PER_ETH1_VOTING_PERIOD):
        block = build_empty_block_for_next_slot(spec, state)
        # wait for over 50% for A, then start voting B
        block.body.eth1_data.block_hash = b if i * 2 > spec.SLOTS_PER_ETH1_VOTING_PERIOD else a
        sign_block(spec, state, block)
        state_transition_and_sign_block(spec, state, block)
        blocks.append(block)

    assert len(state.eth1_data_votes) == spec.SLOTS_PER_ETH1_VOTING_PERIOD
    assert state.eth1_data.block_hash == a

    # transition to next eth1 voting period
    block = build_empty_block_for_next_slot(spec, state)
    block.body.eth1_data.block_hash = c
    sign_block(spec, state, block)
    state_transition_and_sign_block(spec, state, block)
    blocks.append(block)

    yield 'blocks', blocks
    yield 'post', state

    assert state.eth1_data.block_hash == a
    assert state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0
    assert len(state.eth1_data_votes) == 1
    assert state.eth1_data_votes[0].block_hash == c


@with_all_phases
@spec_state_test
def test_eth1_data_votes_no_consensus(spec, state):
    # Don't run when it will take very, very long to simulate. Minimal configuration suffices.
    if spec.SLOTS_PER_ETH1_VOTING_PERIOD > 16:
        return

    offset_block = build_empty_block(spec, state, slot=spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1)
    sign_block(spec, state, offset_block)
    state_transition_and_sign_block(spec, state, offset_block)
    yield 'pre', state

    a = b'\xaa' * 32
    b = b'\xbb' * 32

    blocks = []

    for i in range(0, spec.SLOTS_PER_ETH1_VOTING_PERIOD):
        block = build_empty_block_for_next_slot(spec, state)
        # wait for precisely 50% for A, then start voting B for other 50%
        block.body.eth1_data.block_hash = b if i * 2 >= spec.SLOTS_PER_ETH1_VOTING_PERIOD else a
        sign_block(spec, state, block)
        state_transition_and_sign_block(spec, state, block)
        blocks.append(block)

    assert len(state.eth1_data_votes) == spec.SLOTS_PER_ETH1_VOTING_PERIOD
    assert state.eth1_data.block_hash == b'\x00' * 32

    yield 'blocks', blocks
    yield 'post', state
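
Both tests target the eth1-data voting rule: every block appends its eth1_data to state.eth1_data_votes, and state.eth1_data is replaced only once strictly more than half of the votes in the current SLOTS_PER_ETH1_VOTING_PERIOD agree, which is why the "precisely 50%" split in the second test never updates it. Roughly, the per-block behaviour is as in the sketch below (simplified, not the spec's exact function):

def process_eth1_data_sketch(state, body, slots_per_eth1_voting_period):
    # Record the vote, then adopt it only on a strict majority within the voting period.
    state.eth1_data_votes.append(body.eth1_data)
    if state.eth1_data_votes.count(body.eth1_data) * 2 > slots_per_eth1_voting_period:
        state.eth1_data = body.eth1_data
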
@ -1,4 +1,5 @@
from .hash_function import hash
from math import log2


ZERO_BYTES32 = b'\x00' * 32

@ -8,11 +9,10 @@ for layer in range(1, 100):
    zerohashes.append(hash(zerohashes[layer - 1] + zerohashes[layer - 1]))


def calc_merkle_tree_from_leaves(values, layer_count=32):
    values = list(values)
    tree = [values[::]]
    for h in range(layer_count):
        if len(values) % 2 == 1:
            values.append(zerohashes[h])
        values = [hash(values[i] + values[i + 1]) for i in range(0, len(values), 2)]

@ -20,8 +20,11 @@ def calc_merkle_tree_from_leaves(values):
    return tree


def get_merkle_root(values, pad_to=1):
    layer_count = int(log2(pad_to))
    if len(values) == 0:
        return zerohashes[layer_count]
    return calc_merkle_tree_from_leaves(values, layer_count)[-1][0]


def get_merkle_proof(tree, item_index):
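
The new pad_to parameter virtually right-pads the leaves with zero-leaves up to the given power of two, and an empty leaf list now short-circuits to the precomputed zero-hash at that depth. A small usage sketch tracing the code above (module paths assumed from the pyspec layout):

from eth2spec.utils.merkle_minimal import get_merkle_root, zerohashes
from eth2spec.utils.hash_function import hash

leaf = b'\x42' * 32
# One real leaf in a 4-leaf (depth-2) tree: its sibling and the whole right subtree are zero-hashes.
assert get_merkle_root([leaf], pad_to=4) == hash(hash(leaf + zerohashes[0]) + zerohashes[1])
# No leaves at all: the root is simply the zero-hash at depth log2(pad_to).
assert get_merkle_root([], pad_to=4) == zerohashes[2]
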
@ -32,19 +35,7 @@ def get_merkle_proof(tree, item_index):
    return proof


def merkleize_chunks(chunks, pad_to: int=1):
    count = len(chunks)
    depth = max(count - 1, 0).bit_length()
    max_depth = max(depth, (pad_to - 1).bit_length())
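
The bit_length() expressions are the integer way of writing ceil(log2(n)) for n >= 1, which is presumably why the separate next_power_of_two helper was dropped in this change: depth is the number of hashing layers needed for count chunks, and max_depth additionally honours the requested padding. A few spot checks:

# (n - 1).bit_length() equals ceil(log2(n)) for n >= 1:
assert (1 - 1).bit_length() == 0   # a single chunk is already the root
assert (5 - 1).bit_length() == 3   # 5 chunks pad out to 8 leaves, i.e. 3 layers
assert (8 - 1).bit_length() == 3   # 8 chunks also need exactly 3 layers
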
@ -1,5 +1,5 @@
import pytest
from .merkle_minimal import zerohashes, merkleize_chunks, get_merkle_root
from .hash_function import hash

@ -53,6 +53,7 @@ cases = [
    'depth,count,pow2,value',
    cases,
)
def test_merkleize_chunks_and_get_merkle_root(depth, count, pow2, value):
    chunks = [e(i) for i in range(count)]
    assert merkleize_chunks(chunks, pad_to=pow2) == value
    assert get_merkle_root(chunks, pad_to=pow2) == value