Merge pull request #1490 from ethereum/master-copy
backport v0.9.1 and v0.9.2 to dev
Commit 19fa53709a
@@ -17,8 +17,8 @@ MIN_PER_EPOCH_CHURN_LIMIT: 4
 CHURN_LIMIT_QUOTIENT: 65536
 # See issue 563
 SHUFFLE_ROUND_COUNT: 90
-# `2**16` (= 65,536)
-MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 65536
+# `2**14` (= 16,384)
+MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
 # Jan 3, 2020
 MIN_GENESIS_TIME: 1578009600
 
@@ -29,6 +29,18 @@ MIN_GENESIS_TIME: 1578009600
 SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
 
 
+# Validator
+# ---------------------------------------------------------------
+# 2**10 (= 1,024)
+ETH1_FOLLOW_DISTANCE: 1024
+# 2**4 (= 16)
+TARGET_AGGREGATORS_PER_COMMITTEE: 16
+# 2**0 (= 1)
+RANDOM_SUBNETS_PER_VALIDATOR: 1
+# 2**8 (= 256)
+EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
+
+
 # Deposit contract
 # ---------------------------------------------------------------
 # **TBD**
@@ -28,6 +28,18 @@ MIN_GENESIS_TIME: 1578009600
 # 2**1 (= 1)
 SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2
 
+
+#
+# Validator
+# ---------------------------------------------------------------
+# [customized] process deposits more quickly, but insecure
+ETH1_FOLLOW_DISTANCE: 16
+# 2**4 (= 16)
+TARGET_AGGREGATORS_PER_COMMITTEE: 16
+# 2**0 (= 1)
+RANDOM_SUBNETS_PER_VALIDATOR: 1
+# 2**8 (= 256)
+EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
 
 # Deposit contract
 # ---------------------------------------------------------------
@@ -79,26 +79,26 @@ def ceillog2(x: uint64) -> int:
 SUNDRY_FUNCTIONS = '''
 # Monkey patch hash cache
 _hash = hash
-hash_cache: Dict[bytes, Hash] = {}
+hash_cache: Dict[bytes, Bytes32] = {}
 
 
-def get_eth1_data(distance: uint64) -> Hash:
+def get_eth1_data(distance: uint64) -> Bytes32:
     return hash(distance)
 
 
-def hash(x: bytes) -> Hash:
+def hash(x: bytes) -> Bytes32:
     if x not in hash_cache:
-        hash_cache[x] = Hash(_hash(x))
+        hash_cache[x] = Bytes32(_hash(x))
     return hash_cache[x]
 
 
 # Monkey patch validator compute committee code
 _compute_committee = compute_committee
-committee_cache: Dict[Tuple[Hash, Hash, int, int], Sequence[ValidatorIndex]] = {}
+committee_cache: Dict[Tuple[Bytes32, Bytes32, int, int], Sequence[ValidatorIndex]] = {}
 
 
 def compute_committee(indices: Sequence[ValidatorIndex],  # type: ignore
-                      seed: Hash,
+                      seed: Bytes32,
                       index: int,
                       count: int) -> Sequence[ValidatorIndex]:
     param_hash = (hash(b''.join(index.to_bytes(length=4, byteorder='little') for index in indices)), seed, index, count)
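For context on the monkey-patched hash cache in the hunk above: a minimal, self-contained sketch of the same memoization idea, using hashlib's SHA-256 as a stand-in for the spec's `hash` and plain `bytes` for `Bytes32`. The snippet is illustrative only and not part of the PR.

```python
from hashlib import sha256
from typing import Dict

Bytes32 = bytes  # stand-in for the spec's SSZ Bytes32 type

_hash_cache: Dict[bytes, Bytes32] = {}


def _sha256(data: bytes) -> Bytes32:
    return sha256(data).digest()


def cached_hash(data: bytes) -> Bytes32:
    # Memoize hashes of previously seen inputs, as the test generator's
    # monkey patch does, to avoid recomputing SHA-256 for hot inputs.
    if data not in _hash_cache:
        _hash_cache[data] = _sha256(data)
    return _hash_cache[data]


assert cached_hash(b"abc") == sha256(b"abc").digest()
```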
@@ -137,7 +137,7 @@ We define the following Python custom types for type hinting and readability:
 | `CommitteeIndex` | `uint64` | a committee index at a slot |
 | `ValidatorIndex` | `uint64` | a validator registry index |
 | `Gwei` | `uint64` | an amount in Gwei |
-| `Hash` | `Bytes32` | a hash |
+| `Root` | `Bytes32` | a Merkle root |
 | `Version` | `Bytes4` | a fork version number |
 | `DomainType` | `Bytes4` | a domain type |
 | `Domain` | `Bytes8` | a signature domain |
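The table change above renames the 32-byte `Hash` alias to `Root` (a Merkle root). As a rough illustration of the aliasing pattern (mirroring the `class Root(Bytes32)` subclass exercised in the test changes at the bottom of this diff), assuming a simple length-checked `Bytes32` stand-in rather than the real SSZ type:

```python
class Bytes32(bytes):
    """Stand-in for the SSZ Bytes32 type: exactly 32 bytes."""
    def __new__(cls, value: bytes = b"\x00" * 32) -> "Bytes32":
        assert len(value) == 32
        return super().__new__(cls, value)


class Root(Bytes32):
    """A Merkle root; same representation as Bytes32, distinct name for type hints."""


zero_root = Root()  # 32 zero bytes
assert isinstance(zero_root, Bytes32) and len(zero_root) == 32
```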
@@ -171,7 +171,7 @@ The following values are (non-configurable) constants used throughout the specif
 | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) |
 | `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) |
 | `SHUFFLE_ROUND_COUNT` | `90` |
-| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `2**16` (= 65,536) |
+| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `2**14` (= 16,384) |
 | `MIN_GENESIS_TIME` | `1578009600` (Jan 3, 2020) |
 
 - For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)
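The lowered genesis threshold is easy to sanity-check: `2**14` validators at the 32 ETH maximum effective balance is about half a million ETH at genesis. A quick arithmetic check (not part of the diff):

```python
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT = 2 ** 14   # 16,384 (was 2**16 = 65,536)
MAX_EFFECTIVE_BALANCE_ETH = 32                 # per validator

assert MIN_GENESIS_ACTIVE_VALIDATOR_COUNT == 16384
print(MIN_GENESIS_ACTIVE_VALIDATOR_COUNT * MAX_EFFECTIVE_BALANCE_ETH)  # 524288 ETH
```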
@@ -275,7 +275,7 @@ class Fork(Container):
 ```python
 class Checkpoint(Container):
     epoch: Epoch
-    root: Hash
+    root: Root
 ```
 
 #### `Validator`
@@ -283,7 +283,7 @@ class Checkpoint(Container):
 ```python
 class Validator(Container):
     pubkey: BLSPubkey
-    withdrawal_credentials: Hash  # Commitment to pubkey for withdrawals
+    withdrawal_credentials: Bytes32  # Commitment to pubkey for withdrawals
     effective_balance: Gwei  # Balance at stake
     slashed: boolean
     # Status epochs
@@ -300,7 +300,7 @@ class AttestationData(Container):
     slot: Slot
     index: CommitteeIndex
     # LMD GHOST vote
-    beacon_block_root: Hash
+    beacon_block_root: Root
     # FFG vote
     source: Checkpoint
     target: Checkpoint
@@ -329,17 +329,17 @@ class PendingAttestation(Container):
 
 ```python
 class Eth1Data(Container):
-    deposit_root: Hash
+    deposit_root: Root
     deposit_count: uint64
-    block_hash: Hash
+    block_hash: Bytes32
 ```
 
 #### `HistoricalBatch`
 
 ```python
 class HistoricalBatch(Container):
-    block_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
-    state_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
+    block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+    state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
 ```
 
 #### `DepositData`
@@ -347,7 +347,7 @@ class HistoricalBatch(Container):
 ```python
 class DepositData(Container):
     pubkey: BLSPubkey
-    withdrawal_credentials: Hash
+    withdrawal_credentials: Bytes32
     amount: Gwei
     signature: BLSSignature
 ```
@@ -357,9 +357,9 @@ class DepositData(Container):
 ```python
 class BeaconBlockHeader(Container):
     slot: Slot
-    parent_root: Hash
-    state_root: Hash
-    body_root: Hash
+    parent_root: Root
+    state_root: Root
+    body_root: Root
     signature: BLSSignature
 ```
 
@@ -395,7 +395,7 @@ class Attestation(Container):
 
 ```python
 class Deposit(Container):
-    proof: Vector[Hash, DEPOSIT_CONTRACT_TREE_DEPTH + 1]  # Merkle path to deposit data list root
+    proof: Vector[Bytes32, DEPOSIT_CONTRACT_TREE_DEPTH + 1]  # Merkle path to deposit data list root
     data: DepositData
 ```
 
@@ -430,8 +430,8 @@ class BeaconBlockBody(Container):
 ```python
 class BeaconBlock(Container):
     slot: Slot
-    parent_root: Hash
-    state_root: Hash
+    parent_root: Root
+    state_root: Root
     body: BeaconBlockBody
     signature: BLSSignature
 ```
@@ -448,9 +448,9 @@ class BeaconState(Container):
     fork: Fork
     # History
     latest_block_header: BeaconBlockHeader
-    block_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
-    state_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
-    historical_roots: List[Hash, HISTORICAL_ROOTS_LIMIT]
+    block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+    state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+    historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
     # Eth1
     eth1_data: Eth1Data
     eth1_data_votes: List[Eth1Data, SLOTS_PER_ETH1_VOTING_PERIOD]
@@ -459,7 +459,7 @@ class BeaconState(Container):
     validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
     balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
     # Randomness
-    randao_mixes: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR]
+    randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
     # Slashings
     slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR]  # Per-epoch sums of slashed effective balances
     # Attestations
@@ -527,15 +527,15 @@ def bytes_to_int(data: bytes) -> uint64:
 
 #### `hash`
 
-`def hash(data: bytes) -> Hash` is SHA256.
+`def hash(data: bytes) -> Bytes32` is SHA256.
 
 #### `hash_tree_root`
 
-`def hash_tree_root(object: SSZSerializable) -> Hash` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../simple-serialize.md#merkleization).
+`def hash_tree_root(object: SSZSerializable) -> Root` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../simple-serialize.md#merkleization).
 
 #### `signing_root`
 
-`def signing_root(object: Container) -> Hash` is a function for computing signing messages, as defined in the [SSZ spec](../simple-serialize.md#self-signed-containers).
+`def signing_root(object: Container) -> Root` is a function for computing signing messages, as defined in the [SSZ spec](../simple-serialize.md#self-signed-containers).
 
 #### `bls_verify`
 
@@ -611,7 +611,7 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
 #### `is_valid_merkle_branch`
 
 ```python
-def is_valid_merkle_branch(leaf: Hash, branch: Sequence[Hash], depth: uint64, index: uint64, root: Hash) -> bool:
+def is_valid_merkle_branch(leaf: Bytes32, branch: Sequence[Bytes32], depth: uint64, index: uint64, root: Root) -> bool:
     """
     Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and ``branch``.
     """
@@ -629,7 +629,7 @@ def is_valid_merkle_branch(leaf: Hash, branch: Sequence[Hash], depth: uint64, in
 #### `compute_shuffled_index`
 
 ```python
-def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Hash) -> ValidatorIndex:
+def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Bytes32) -> ValidatorIndex:
     """
     Return the shuffled validator index corresponding to ``seed`` (and ``index_count``).
     """
@@ -652,7 +652,7 @@ def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Has
 #### `compute_proposer_index`
 
 ```python
-def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Hash) -> ValidatorIndex:
+def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
     """
     Return from ``indices`` a random index sampled by effective balance.
     """
@@ -672,7 +672,7 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex]
 
 ```python
 def compute_committee(indices: Sequence[ValidatorIndex],
-                      seed: Hash,
+                      seed: Bytes32,
                       index: uint64,
                       count: uint64) -> Sequence[ValidatorIndex]:
     """
@@ -749,7 +749,7 @@ def get_previous_epoch(state: BeaconState) -> Epoch:
 #### `get_block_root`
 
 ```python
-def get_block_root(state: BeaconState, epoch: Epoch) -> Hash:
+def get_block_root(state: BeaconState, epoch: Epoch) -> Root:
     """
     Return the block root at the start of a recent ``epoch``.
     """
@@ -759,7 +759,7 @@ def get_block_root(state: BeaconState, epoch: Epoch) -> Hash:
 #### `get_block_root_at_slot`
 
 ```python
-def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Hash:
+def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Root:
     """
     Return the block root at a recent ``slot``.
     """
@@ -770,7 +770,7 @@ def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Hash:
 #### `get_randao_mix`
 
 ```python
-def get_randao_mix(state: BeaconState, epoch: Epoch) -> Hash:
+def get_randao_mix(state: BeaconState, epoch: Epoch) -> Bytes32:
     """
     Return the randao mix at a recent ``epoch``.
     """
@@ -801,7 +801,7 @@ def get_validator_churn_limit(state: BeaconState) -> uint64:
 #### `get_seed`
 
 ```python
-def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Hash:
+def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes32:
     """
     Return the seed at ``epoch``.
     """
@@ -996,7 +996,7 @@ Before the Ethereum 2.0 genesis has been triggered, and for every Ethereum 1.0 b
 - `deposits` is the sequence of all deposits, ordered chronologically, up to (and including) the block with hash `eth1_block_hash`
 
 ```python
-def initialize_beacon_state_from_eth1(eth1_block_hash: Hash,
+def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
                                       eth1_timestamp: uint64,
                                       deposits: Sequence[Deposit]) -> BeaconState:
     state = BeaconState(
@@ -57,7 +57,7 @@ The head block root associated with a `store` is defined as `get_head(store)`. A
 @dataclass(eq=True, frozen=True)
 class LatestMessage(object):
     epoch: Epoch
-    root: Hash
+    root: Root
 ```
 
 #### `Store`
@@ -70,8 +70,8 @@ class Store(object):
     justified_checkpoint: Checkpoint
     finalized_checkpoint: Checkpoint
     best_justified_checkpoint: Checkpoint
-    blocks: Dict[Hash, BeaconBlock] = field(default_factory=dict)
-    block_states: Dict[Hash, BeaconState] = field(default_factory=dict)
+    blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
+    block_states: Dict[Root, BeaconState] = field(default_factory=dict)
     checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
     latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
 ```
@@ -113,7 +113,7 @@ def compute_slots_since_epoch_start(slot: Slot) -> int:
 #### `get_ancestor`
 
 ```python
-def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
+def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
     block = store.blocks[root]
     if block.slot > slot:
         return get_ancestor(store, block.parent_root, slot)
@@ -126,7 +126,7 @@ def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
 #### `get_latest_attesting_balance`
 
 ```python
-def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei:
+def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
     state = store.checkpoint_states[store.justified_checkpoint]
     active_indices = get_active_validator_indices(state, get_current_epoch(state))
     return Gwei(sum(
@@ -139,7 +139,7 @@ def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei:
 #### `get_head`
 
 ```python
-def get_head(store: Store) -> Hash:
+def get_head(store: Store) -> Root:
     # Execute the LMD-GHOST fork choice
     head = store.justified_checkpoint.root
     justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
@@ -238,6 +238,12 @@ def on_block(store: Store, block: BeaconBlock) -> None:
 
 ```python
 def on_attestation(store: Store, attestation: Attestation) -> None:
+    """
+    Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
+
+    An ``attestation`` that is asserted as invalid may be valid at a later time,
+    consider scheduling it for later processing in such case.
+    """
     target = attestation.data.target
 
     # Attestations must be from the current or previous epoch
@@ -248,10 +254,17 @@ def on_attestation(store: Store, attestation: Attestation) -> None:
     # Cannot calculate the current shuffling if have not seen the target
     assert target.root in store.blocks
 
+    # Attestations target be for a known block. If target block is unknown, delay consideration until the block is found
+    assert target.root in store.blocks
     # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
     base_state = store.block_states[target.root].copy()
     assert store.time >= base_state.genesis_time + compute_start_slot_at_epoch(target.epoch) * SECONDS_PER_SLOT
 
+    # Attestations must be for a known block. If block is unknown, delay consideration until the block is found
+    assert attestation.data.beacon_block_root in store.blocks
+    # Attestations must not be for blocks in the future. If not, the attestation should not be considered
+    assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
+
     # Store target checkpoint state if not yet seen
     if target not in store.checkpoint_states:
         process_slots(base_state, compute_start_slot_at_epoch(target.epoch))
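The docstring added above notes that an attestation rejected by these assertions may become valid later (for example, once the target block or the attested beacon block arrives). A hypothetical client-side wrapper that queues such attestations for reprocessing could look like this; `Store`, `Attestation`, and `on_attestation` are the spec objects from the hunks above, while the pending queue and helper names are illustrative implementation details, not part of the spec:

```python
from typing import List

pending_attestations: List["Attestation"] = []


def on_attestation_or_defer(store: "Store", attestation: "Attestation") -> None:
    # If validation fails (unknown target/block, future epoch or slot),
    # keep the attestation around and retry once more data has arrived.
    try:
        on_attestation(store, attestation)
    except AssertionError:
        pending_attestations.append(attestation)


def retry_pending(store: "Store") -> None:
    still_pending: List["Attestation"] = []
    for attestation in pending_attestations:
        try:
            on_attestation(store, attestation)
        except AssertionError:
            still_pending.append(attestation)
    pending_attestations[:] = still_pending
```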
@@ -62,7 +62,7 @@ class ShardReceiptDelta(Container):
 ```python
 class ShardReceiptProof(Container):
     shard: Shard
-    proof: List[Hash, PLACEHOLDER]
+    proof: List[Bytes32, PLACEHOLDER]
     receipt: List[ShardReceiptDelta, PLACEHOLDER]
 ```
 
@@ -109,7 +109,7 @@ def committee_to_compact_committee(state: BeaconState, committee: Sequence[Valid
 #### `verify_merkle_proof`
 
 ```python
-def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool:
+def verify_merkle_proof(leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex, root: Root) -> bool:
     assert len(proof) == get_generalized_index_length(index)
     for i, h in enumerate(proof):
         if get_generalized_index_bit(index, i):
@@ -199,7 +199,7 @@ Add the following fields to the end of the specified container objects.
 ```python
 class BeaconState(Container):
     # Period committees
-    period_committee_roots: Vector[Hash, PERIOD_COMMITTEE_ROOT_LENGTH]
+    period_committee_roots: Vector[Root, PERIOD_COMMITTEE_ROOT_LENGTH]
     next_shard_receipt_period: Vector[uint64, SHARD_COUNT]
 ```
 
@@ -170,7 +170,7 @@ class CustodyChunkChallengeRecord(Container):
     challenger_index: ValidatorIndex
     responder_index: ValidatorIndex
     inclusion_epoch: Epoch
-    data_root: Hash
+    data_root: Root
     depth: uint64
     chunk_index: uint64
 ```
@@ -183,9 +183,9 @@ class CustodyBitChallengeRecord(Container):
     challenger_index: ValidatorIndex
     responder_index: ValidatorIndex
     inclusion_epoch: Epoch
-    data_root: Hash
+    data_root: Root
     chunk_count: uint64
-    chunk_bits_merkle_root: Hash
+    chunk_bits_merkle_root: Root
     responder_key: BLSSignature
 ```
 
@@ -196,8 +196,8 @@ class CustodyResponse(Container):
     challenge_index: uint64
     chunk_index: uint64
     chunk: ByteVector[BYTES_PER_CUSTODY_CHUNK]
-    data_branch: List[Hash, CUSTODY_DATA_DEPTH]
-    chunk_bits_branch: List[Hash, CUSTODY_CHUNK_BIT_DEPTH]
+    data_branch: List[Bytes32, CUSTODY_DATA_DEPTH]
+    chunk_bits_branch: List[Bytes32, CUSTODY_CHUNK_BIT_DEPTH]
     chunk_bits_leaf: Bitvector[256]
 ```
 
@@ -228,7 +228,7 @@ class EarlyDerivedSecretReveal(Container):
     # Index of the validator who revealed (whistleblower)
     masker_index: ValidatorIndex
     # Mask used to hide the actual reveal signature (prevent reveal from being stolen)
-    mask: Hash
+    mask: Bytes32
 ```
 
 ### Phase 0 container updates
@@ -283,11 +283,11 @@ def ceillog2(x: uint64) -> int:
 ### `is_valid_merkle_branch_with_mixin`
 
 ```python
-def is_valid_merkle_branch_with_mixin(leaf: Hash,
-                                      branch: Sequence[Hash],
+def is_valid_merkle_branch_with_mixin(leaf: Bytes32,
+                                      branch: Sequence[Bytes32],
                                       depth: uint64,
                                       index: uint64,
-                                      root: Hash,
+                                      root: Root,
                                       mixin: uint64) -> bool:
     value = leaf
     for i in range(depth):
@@ -672,7 +672,7 @@ def process_chunk_challenge_response(state: BeaconState,
     # Verify chunk index
     assert response.chunk_index == challenge.chunk_index
     # Verify bit challenge data is null
-    assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == Hash()
+    assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == Bytes32()
     # Verify minimum delay
     assert get_current_epoch(state) >= challenge.inclusion_epoch + MAX_SEED_LOOKAHEAD
     # Verify the chunk matches the crosslink data root
@@ -118,9 +118,9 @@ class Crosslink(Container):
 class ShardBlock(Container):
     shard: Shard
     slot: ShardSlot
-    beacon_block_root: Hash
-    parent_root: Hash
-    state_root: Hash
+    beacon_block_root: Root
+    parent_root: Root
+    state_root: Root
     body: List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE]
     block_size_sum: uint64
     aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE]
@@ -134,10 +134,10 @@ class ShardBlock(Container):
 class ShardBlockHeader(Container):
     shard: Shard
     slot: ShardSlot
-    beacon_block_root: Hash
-    parent_root: Hash
-    state_root: Hash
-    body_root: Hash
+    beacon_block_root: Root
+    parent_root: Root
+    state_root: Root
+    body_root: Root
     block_size_sum: uint64
     aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE]
     attestations: BLSSignature
@@ -150,7 +150,7 @@ class ShardBlockHeader(Container):
 class ShardState(Container):
     shard: Shard
     slot: ShardSlot
-    history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_DEPTH]
+    history_accumulator: Vector[Bytes32, HISTORY_ACCUMULATOR_DEPTH]
     latest_block_header: ShardBlockHeader
     block_size_sum: uint64
     # Fees and rewards
@@ -166,7 +166,7 @@ class ShardState(Container):
 ```python
 class ShardAttestationData(Container):
     slot: ShardSlot
-    parent_root: Hash
+    parent_root: Root
 ```
 
 ## Helper functions
@@ -59,9 +59,9 @@ In a binary Merkle tree, we define a "generalized index" of a node as `2**depth
 Note that the generalized index has the convenient property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function:
 
 ```python
-def merkle_tree(leaves: Sequence[Hash]) -> Sequence[Hash]:
+def merkle_tree(leaves: Sequence[Bytes32]) -> Sequence[Bytes32]:
     padded_length = get_next_power_of_two(len(leaves))
-    o = [Hash()] * padded_length + list(leaves) + [Hash()] * (padded_length - len(leaves))
+    o = [Bytes32()] * padded_length + list(leaves) + [Bytes32()] * (padded_length - len(leaves))
     for i in range(padded_length - 1, 0, -1):
         o[i] = hash(o[i * 2] + o[i * 2 + 1])
     return o
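As a quick illustration of the generalized-index property mentioned above (node `k` has children `2k` and `2k+1`), here is the same tree construction with SHA-256 and plain `bytes` standing in for the spec's `hash`, `Bytes32`, and `get_next_power_of_two`; only the stand-ins are assumptions, the layout matches the function in the diff:

```python
from hashlib import sha256
from typing import Sequence


def hash(x: bytes) -> bytes:
    return sha256(x).digest()


def get_next_power_of_two(x: int) -> int:
    return 1 if x == 0 else 2 ** (x - 1).bit_length()


def merkle_tree(leaves: Sequence[bytes]) -> Sequence[bytes]:
    padded_length = get_next_power_of_two(len(leaves))
    o = [b"\x00" * 32] * padded_length + list(leaves) + [b"\x00" * 32] * (padded_length - len(leaves))
    for i in range(padded_length - 1, 0, -1):
        o[i] = hash(o[i * 2] + o[i * 2 + 1])
    return o


leaves = [bytes([i]) * 32 for i in range(4)]
tree = merkle_tree(leaves)
# Node k's children are 2k and 2k+1; the root lives at generalized index 1.
assert tree[1] == hash(tree[2] + tree[3])
assert tree[2] == hash(tree[4] + tree[5])
```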
@@ -289,7 +289,7 @@ def get_helper_indices(indices: Sequence[GeneralizedIndex]) -> Sequence[Generali
 Now we provide the Merkle proof verification functions. First, for single item proofs:
 
 ```python
-def calculate_merkle_root(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex) -> Hash:
+def calculate_merkle_root(leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex) -> Root:
     assert len(proof) == get_generalized_index_length(index)
     for i, h in enumerate(proof):
         if get_generalized_index_bit(index, i):
@@ -300,16 +300,16 @@ def calculate_merkle_root(leaf: Hash, proof: Sequence[Hash], index: GeneralizedI
 ```
 
 ```python
-def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool:
+def verify_merkle_proof(leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex, root: Root) -> bool:
     return calculate_merkle_root(leaf, proof, index) == root
 ```
 
 Now for multi-item proofs:
 
 ```python
-def calculate_multi_merkle_root(leaves: Sequence[Hash],
-                                proof: Sequence[Hash],
-                                indices: Sequence[GeneralizedIndex]) -> Hash:
+def calculate_multi_merkle_root(leaves: Sequence[Bytes32],
+                                proof: Sequence[Bytes32],
+                                indices: Sequence[GeneralizedIndex]) -> Root:
     assert len(leaves) == len(indices)
     helper_indices = get_helper_indices(indices)
     assert len(proof) == len(helper_indices)
@@ -332,10 +332,10 @@ def calculate_multi_merkle_root(leaves: Sequence[Hash],
 ```
 
 ```python
-def verify_merkle_multiproof(leaves: Sequence[Hash],
-                             proof: Sequence[Hash],
+def verify_merkle_multiproof(leaves: Sequence[Bytes32],
+                             proof: Sequence[Bytes32],
                              indices: Sequence[GeneralizedIndex],
-                             root: Hash) -> bool:
+                             root: Root) -> bool:
     return calculate_multi_merkle_root(leaves, proof, indices) == root
 ```
 
@@ -49,16 +49,16 @@ We define the following Python custom types for type hinting and readability:
 ```python
 class LightClientUpdate(container):
     # Shard block root (and authenticating signature data)
-    shard_block_root: Hash
+    shard_block_root: Root
     fork_version: Version
     aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
     signature: BLSSignature
     # Updated beacon header (and authenticating branch)
     header: BeaconBlockHeader
-    header_branch: Vector[Hash, BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH]
+    header_branch: Vector[Bytes32, BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH]
     # Updated period committee (and authenticating branch)
     committee: CompactCommittee
-    committee_branch: Vector[Hash, PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)]
+    committee_branch: Vector[Bytes32, PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)]
 ```
 
 ## Helpers
@@ -174,9 +174,9 @@ There are two primary global topics used to propagate beacon blocks and aggregat
 - The aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally).
 - The block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation.
 - `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (`aggregate_and_proof.aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate_and_proof.aggregate.data.slot`).
-- The validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`.
+- The validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.aggregator_index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`.
 - `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`.
-- The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.index`.
+- The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.aggregator_index`.
 - The signature of `aggregate_and_proof.aggregate` is valid.
 
 Additional global topics are used to propagate lower frequency validator messages. Their `TopicName`s are:
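A condensed sketch of the aggregate-gossip checks listed in the hunk above, as a client might express them; `hash_tree_root`, `get_attesting_indices`, `is_aggregator`, and `ATTESTATION_PROPAGATION_SLOT_RANGE` come from the spec, while `current_slot`, `seen_aggregates`, and `block_is_valid` are hypothetical helpers introduced only for this illustration (signature verification is omitted):

```python
def validate_aggregate_gossip(state, aggregate_and_proof, current_slot,
                              seen_aggregates, block_is_valid) -> bool:
    aggregate = aggregate_and_proof.aggregate
    data = aggregate.data

    # Not already seen (via gossip, within a block, or local aggregation)
    if hash_tree_root(aggregate) in seen_aggregates:
        return False
    # The block being voted for passes validation
    if not block_is_valid(data.beacon_block_root):
        return False
    # Within the propagation slot range
    if not (data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= data.slot):
        return False
    # The aggregator's index is within the aggregate's committee
    if aggregate_and_proof.aggregator_index not in get_attesting_indices(state, data, aggregate.aggregation_bits):
        return False
    # The selection proof selects this validator as aggregator for the slot
    if not is_aggregator(state, data.index, aggregate_and_proof.selection_proof):
        return False
    # Selection-proof and aggregate signature checks are left out of this sketch
    return True
```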
@@ -55,6 +55,7 @@
 - [Aggregate signature](#aggregate-signature-1)
 - [Broadcast aggregate](#broadcast-aggregate)
 - [`AggregateAndProof`](#aggregateandproof)
+- [Phase 0 attestation subnet stability](#phase-0-attestation-subnet-stability)
 - [How to avoid slashing](#how-to-avoid-slashing)
 - [Proposer slashing](#proposer-slashing)
 - [Attester slashing](#attester-slashing)
@@ -79,6 +80,8 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph
 | - | - | :-: | :-: |
 | `ETH1_FOLLOW_DISTANCE` | `2**10` (= 1,024) | blocks | ~4 hours |
 | `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | |
+| `RANDOM_SUBNETS_PER_VALIDATOR` | `2**0` (= 1) | subnets | |
+| `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours |
 
 ## Becoming a validator
 
@@ -183,9 +186,13 @@ def is_proposer(state: BeaconState,
 
 The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the epoch in question.
 
-`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting at which future slot they will have to attest.
+`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting at which future slot they will have to attest and joining the committee index attestation subnet related to their committee assignment.
 
-Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
+Specifically a validator should:
+* Call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
+* Join the pubsub topic -- `committee_index{committee_index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`.
+    * If any current peers are subscribed to the topic, the validator simply sends `subscribe` messages for the new topic.
+    * If no current peers are subscribed to the topic, the validator must discover new peers on this topic. If "topic discovery" is available, use topic discovery to find peers that advertise subscription to the topic. If not, "guess and check" by connecting with a number of random new peers, persisting connections with peers subscribed to the topic and (potentially) dropping the new peers otherwise.
 
 ## Beacon chain responsibilities
 
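A small sketch of deriving the attestation subnet topic name from a committee assignment, following the topic template quoted in the added bullets above; `ATTESTATION_SUBNET_COUNT` is the spec constant (its value here is assumed for illustration) and the helper name is hypothetical:

```python
ATTESTATION_SUBNET_COUNT = 64  # assumed value, for illustration only


def attestation_subnet_topic(committee_index: int) -> str:
    subnet_id = committee_index % ATTESTATION_SUBNET_COUNT
    return f"committee_index{subnet_id}_beacon_attestation"


assert attestation_subnet_topic(3) == "committee_index3_beacon_attestation"
assert attestation_subnet_topic(67) == "committee_index3_beacon_attestation"
```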
@@ -358,7 +365,7 @@ Some validators are selected to locally aggregate attestations with a similar `a
 A validator is selected to aggregate based upon the return value of `is_aggregator()`.
 
 ```python
-def slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature:
+def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature:
     domain = get_domain(state, DOMAIN_BEACON_ATTESTER, compute_epoch_at_slot(slot))
     return bls_sign(privkey, hash_tree_root(slot), domain)
 ```
@@ -404,15 +411,19 @@ Aggregate attestations are broadcast as `AggregateAndProof` objects to prove to
 
 ```python
 class AggregateAndProof(Container):
-    index: ValidatorIndex
-    selection_proof: BLSSignature
+    aggregator_index: ValidatorIndex
     aggregate: Attestation
+    selection_proof: BLSSignature
 ```
 
 Where
-* `index` is the validator's `validator_index`.
-* `selection_proof` is the signature of the slot (`slot_signature()`).
+* `aggregator_index` is the validator's `ValidatorIndex`.
 * `aggregate` is the `aggregate_attestation` constructed in the previous section.
+* `selection_proof` is the signature of the slot (`get_slot_signature()`).
+
+## Phase 0 attestation subnet stability
+
+Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`committee_index{subnet_id}_beacon_attestation`). To provide this stability, each validator must randomly select and remain subscribed to `RANDOM_SUBNETS_PER_VALIDATOR` attestation subnets. The lifetime of each random subscription should be a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION]`.
 
 ## How to avoid slashing
 
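A sketch of the random-subnet bookkeeping described in the new "Phase 0 attestation subnet stability" section above. The two `RANDOM_SUBNETS_PER_VALIDATOR` / `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` constants are the spec's; the value of `ATTESTATION_SUBNET_COUNT`, the use of `random`, and the record structure are illustrative assumptions:

```python
import random
from typing import List, Tuple

ATTESTATION_SUBNET_COUNT = 64          # assumed value for illustration
RANDOM_SUBNETS_PER_VALIDATOR = 2 ** 0  # = 1
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 2 ** 8  # = 256


def pick_random_subscriptions(current_epoch: int) -> List[Tuple[int, int]]:
    """Return (subnet_id, expiry_epoch) pairs for this validator."""
    subscriptions = []
    for _ in range(RANDOM_SUBNETS_PER_VALIDATOR):
        subnet_id = random.randrange(ATTESTATION_SUBNET_COUNT)
        # Lifetime is a random number of epochs in
        # [EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION, 2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION]
        lifetime = random.randint(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION,
                                  2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION)
        subscriptions.append((subnet_id, current_epoch + lifetime))
    return subscriptions
```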
@@ -1,8 +1,7 @@
-
 from eth2spec.test.context import with_all_phases, spec_state_test
 from eth2spec.test.helpers.block import build_empty_block_for_next_slot
-from eth2spec.test.helpers.attestations import get_valid_attestation
-from eth2spec.test.helpers.state import state_transition_and_sign_block
+from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
+from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block
 
 
 def run_on_attestation(spec, state, store, attestation, valid=True):
@@ -89,18 +88,48 @@ def test_on_attestation_past_epoch(spec, state):
 @spec_state_test
 def test_on_attestation_target_not_in_store(spec, state):
     store = spec.get_genesis_store(state)
-    time = 100
+    time = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
     spec.on_tick(store, time)
 
-    # move to next epoch to make block new target
-    state.slot += spec.SLOTS_PER_EPOCH
+    # move to immediately before next epoch to make block new target
+    transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1)
 
-    block = build_empty_block_for_next_slot(spec, state)
-    state_transition_and_sign_block(spec, state, block)
+    target_block = build_empty_block_for_next_slot(spec, state)
+    state_transition_and_sign_block(spec, state, target_block)
 
-    # do not add block to store
+    # do not add target block to store
 
-    attestation = get_valid_attestation(spec, state, slot=block.slot)
+    attestation = get_valid_attestation(spec, state, slot=target_block.slot)
+    assert attestation.data.target.root == target_block.signing_root()
+
+    run_on_attestation(spec, state, store, attestation, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_on_attestation_beacon_block_not_in_store(spec, state):
+    store = spec.get_genesis_store(state)
+    time = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
+    spec.on_tick(store, time)
+
+    # move to immediately before next epoch to make block new target
+    transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1)
+
+    target_block = build_empty_block_for_next_slot(spec, state)
+    state_transition_and_sign_block(spec, state, target_block)
+
+    # store target in store
+    spec.on_block(store, target_block)
+
+    head_block = build_empty_block_for_next_slot(spec, state)
+    state_transition_and_sign_block(spec, state, head_block)
+
+    # do not add head block to store
+
+    attestation = get_valid_attestation(spec, state, slot=head_block.slot)
+    assert attestation.data.target.root == target_block.signing_root()
+    assert attestation.data.beacon_block_root == head_block.signing_root()
+
     run_on_attestation(spec, state, store, attestation, False)
 
 
@@ -124,6 +153,26 @@ def test_on_attestation_future_epoch(spec, state):
     run_on_attestation(spec, state, store, attestation, False)
 
 
+@with_all_phases
+@spec_state_test
+def test_on_attestation_future_block(spec, state):
+    store = spec.get_genesis_store(state)
+    time = spec.SECONDS_PER_SLOT * 5
+    spec.on_tick(store, time)
+
+    block = build_empty_block_for_next_slot(spec, state)
+    state_transition_and_sign_block(spec, state, block)
+
+    spec.on_block(store, block)
+
+    # attestation for slot immediately prior to the block being attested to
+    attestation = get_valid_attestation(spec, state, slot=block.slot - 1, signed=False)
+    attestation.data.beacon_block_root = block.signing_root()
+    sign_attestation(spec, state, attestation)
+
+    run_on_attestation(spec, state, store, attestation, False)
+
+
 @with_all_phases
 @spec_state_test
 def test_on_attestation_same_slot(spec, state):
@@ -59,7 +59,7 @@ def build_empty_block(spec, state, slot=None, signed=False):
     empty_block.slot = slot
     empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
     previous_block_header = deepcopy(state.latest_block_header)
-    if previous_block_header.state_root == spec.Hash():
+    if previous_block_header.state_root == spec.Root():
        previous_block_header.state_root = state.hash_tree_root()
     empty_block.parent_root = signing_root(previous_block_header)
 
@@ -14,6 +14,16 @@ def next_slot(spec, state):
     spec.process_slots(state, state.slot + 1)
 
 
+def transition_to(spec, state, slot):
+    """
+    Transition to ``slot``.
+    """
+    assert state.slot <= slot
+    for _ in range(slot - state.slot):
+        next_slot(spec, state)
+    assert state.slot == slot
+
+
 def next_epoch(spec, state):
     """
     Transition to the start slot of the next epoch
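A brief usage sketch of the new `transition_to` helper inside a test body; the `spec`/`state` fixtures come from the test framework, and the slot arithmetic mirrors what the updated fork-choice tests above do before building their target block:

```python
def test_transition_to_example(spec, state):
    start = state.slot
    # advance to one slot before the next epoch boundary
    transition_to(spec, state, start + spec.SLOTS_PER_EPOCH - 1)
    assert state.slot == start + spec.SLOTS_PER_EPOCH - 1
```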
@@ -195,7 +195,7 @@ def test_bad_merkle_proof(spec, state):
     deposit = prepare_state_and_deposit(spec, state, validator_index, amount)
 
     # mess up merkle branch
-    deposit.proof[5] = spec.Hash()
+    deposit.proof[5] = spec.Bytes32()
 
     sign_deposit_data(spec, deposit.data, privkeys[validator_index], state=state)
 
@@ -63,7 +63,7 @@ def test_empty_block_transition(spec, state):
 
     assert len(state.eth1_data_votes) == pre_eth1_votes + 1
     assert spec.get_block_root_at_slot(state, pre_slot) == block.parent_root
-    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Hash()
+    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Bytes32()
 
 
 @with_all_phases
@@ -98,7 +98,7 @@ def test_skipped_slots(spec, state):
     yield 'post', state
 
     assert state.slot == block.slot
-    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Hash()
+    assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Bytes32()
     for slot in range(pre_slot, state.slot):
         assert spec.get_block_root_at_slot(state, slot) == block.parent_root
 
@@ -208,13 +208,13 @@ def test_bytesn_subclass():
     assert issubclass(ByteVector[32](b'\xab' * 32).type(), Bytes32)
     assert issubclass(ByteVector[32], Bytes32)
 
-    class Hash(Bytes32):
+    class Root(Bytes32):
         pass
 
-    assert isinstance(Hash(b'\xab' * 32), Bytes32)
-    assert not isinstance(Hash(b'\xab' * 32), Bytes48)
-    assert issubclass(Hash(b'\xab' * 32).type(), Bytes32)
-    assert issubclass(Hash, Bytes32)
+    assert isinstance(Root(b'\xab' * 32), Bytes32)
+    assert not isinstance(Root(b'\xab' * 32), Bytes48)
+    assert issubclass(Root(b'\xab' * 32).type(), Bytes32)
+    assert issubclass(Root, Bytes32)
 
     assert not issubclass(Bytes48, Bytes32)
 