Merge pull request #1490 from ethereum/master-copy

backport v0.9.1 and v0.9.2 to dev
Danny Ryan 2019-12-04 11:58:17 -07:00 committed by GitHub
commit 19fa53709a
18 changed files with 216 additions and 109 deletions

View File

@ -17,8 +17,8 @@ MIN_PER_EPOCH_CHURN_LIMIT: 4
CHURN_LIMIT_QUOTIENT: 65536
# See issue 563
SHUFFLE_ROUND_COUNT: 90
# `2**16` (= 65,536)
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 65536
# `2**14` (= 16,384)
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
# Jan 3, 2020
MIN_GENESIS_TIME: 1578009600
@ -29,6 +29,18 @@ MIN_GENESIS_TIME: 1578009600
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
# Validator
# ---------------------------------------------------------------
# 2**10 (= 1,024)
ETH1_FOLLOW_DISTANCE: 1024
# 2**4 (= 16)
TARGET_AGGREGATORS_PER_COMMITTEE: 16
# 2**0 (= 1)
RANDOM_SUBNETS_PER_VALIDATOR: 1
# 2**8 (= 256)
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
# Deposit contract
# ---------------------------------------------------------------
# **TBD**

View File

@ -28,6 +28,18 @@ MIN_GENESIS_TIME: 1578009600
# 2**1 (= 2)
SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2
#
# Validator
# ---------------------------------------------------------------
# [customized] process deposits more quickly, but insecure
ETH1_FOLLOW_DISTANCE: 16
# 2**4 (= 16)
TARGET_AGGREGATORS_PER_COMMITTEE: 16
# 2**0 (= 1)
RANDOM_SUBNETS_PER_VALIDATOR: 1
# 2**8 (= 256)
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256
# Deposit contract
# ---------------------------------------------------------------

View File

@ -79,26 +79,26 @@ def ceillog2(x: uint64) -> int:
SUNDRY_FUNCTIONS = '''
# Monkey patch hash cache
_hash = hash
hash_cache: Dict[bytes, Hash] = {}
hash_cache: Dict[bytes, Bytes32] = {}
def get_eth1_data(distance: uint64) -> Hash:
def get_eth1_data(distance: uint64) -> Bytes32:
return hash(distance)
def hash(x: bytes) -> Hash:
def hash(x: bytes) -> Bytes32:
if x not in hash_cache:
hash_cache[x] = Hash(_hash(x))
hash_cache[x] = Bytes32(_hash(x))
return hash_cache[x]
# Monkey patch validator compute committee code
_compute_committee = compute_committee
committee_cache: Dict[Tuple[Hash, Hash, int, int], Sequence[ValidatorIndex]] = {}
committee_cache: Dict[Tuple[Bytes32, Bytes32, int, int], Sequence[ValidatorIndex]] = {}
def compute_committee(indices: Sequence[ValidatorIndex], # type: ignore
seed: Hash,
seed: Bytes32,
index: int,
count: int) -> Sequence[ValidatorIndex]:
param_hash = (hash(b''.join(index.to_bytes(length=4, byteorder='little') for index in indices)), seed, index, count)

View File

@ -137,7 +137,7 @@ We define the following Python custom types for type hinting and readability:
| `CommitteeIndex` | `uint64` | a committee index at a slot |
| `ValidatorIndex` | `uint64` | a validator registry index |
| `Gwei` | `uint64` | an amount in Gwei |
| `Hash` | `Bytes32` | a hash |
| `Root` | `Bytes32` | a Merkle root |
| `Version` | `Bytes4` | a fork version number |
| `DomainType` | `Bytes4` | a domain type |
| `Domain` | `Bytes8` | a signature domain |
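For intuition, this change splits the old `Hash` type into `Root` (Merkle roots) and plain `Bytes32` (other 32-byte digests). A sketch with stand-in types, mirroring the SSZ subclass test near the end of this diff:
```python
class Bytes32(bytes):
    """Illustrative stand-in for the SSZ Bytes32 type (exactly 32 bytes)."""
    def __new__(cls, value: bytes = b"\x00" * 32):
        assert len(value) == 32
        return super().__new__(cls, value)


class Root(Bytes32):
    """A Merkle root: same 32-byte representation, narrower meaning."""


# A Root is still a Bytes32, but plain digests (e.g. eth1 block hashes) stay Bytes32.
assert isinstance(Root(b"\xab" * 32), Bytes32)
assert not isinstance(Bytes32(b"\xab" * 32), Root)
```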
@ -171,7 +171,7 @@ The following values are (non-configurable) constants used throughout the specif
| `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) |
| `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) |
| `SHUFFLE_ROUND_COUNT` | `90` |
| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `2**16` (= 65,536) |
| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `2**14` (= 16,384) |
| `MIN_GENESIS_TIME` | `1578009600` (Jan 3, 2020) |
- For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)
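As a rough check, assuming the Phase 0 values `SLOTS_PER_EPOCH = 32` and `TARGET_COMMITTEE_SIZE = 128` (neither appears in this hunk):
```python
SLOTS_PER_EPOCH = 32          # assumption: 2**5 in the Phase 0 spec
TARGET_COMMITTEE_SIZE = 128   # assumption: 2**7 in the Phase 0 spec

# Minimum active validators so that every slot can be assigned at least one
# committee of the target size.
min_active_validators = SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE
assert min_active_validators == 4096
assert TARGET_COMMITTEE_SIZE > 111  # exceeds the recommended minimum committee size
```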
@ -275,7 +275,7 @@ class Fork(Container):
```python
class Checkpoint(Container):
epoch: Epoch
root: Hash
root: Root
```
#### `Validator`
@ -283,7 +283,7 @@ class Checkpoint(Container):
```python
class Validator(Container):
pubkey: BLSPubkey
withdrawal_credentials: Hash # Commitment to pubkey for withdrawals
withdrawal_credentials: Bytes32 # Commitment to pubkey for withdrawals
effective_balance: Gwei # Balance at stake
slashed: boolean
# Status epochs
@ -300,7 +300,7 @@ class AttestationData(Container):
slot: Slot
index: CommitteeIndex
# LMD GHOST vote
beacon_block_root: Hash
beacon_block_root: Root
# FFG vote
source: Checkpoint
target: Checkpoint
@ -329,17 +329,17 @@ class PendingAttestation(Container):
```python
class Eth1Data(Container):
deposit_root: Hash
deposit_root: Root
deposit_count: uint64
block_hash: Hash
block_hash: Bytes32
```
#### `HistoricalBatch`
```python
class HistoricalBatch(Container):
block_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
state_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
```
#### `DepositData`
@ -347,7 +347,7 @@ class HistoricalBatch(Container):
```python
class DepositData(Container):
pubkey: BLSPubkey
withdrawal_credentials: Hash
withdrawal_credentials: Bytes32
amount: Gwei
signature: BLSSignature
```
@ -357,9 +357,9 @@ class DepositData(Container):
```python
class BeaconBlockHeader(Container):
slot: Slot
parent_root: Hash
state_root: Hash
body_root: Hash
parent_root: Root
state_root: Root
body_root: Root
signature: BLSSignature
```
@ -395,7 +395,7 @@ class Attestation(Container):
```python
class Deposit(Container):
proof: Vector[Hash, DEPOSIT_CONTRACT_TREE_DEPTH + 1] # Merkle path to deposit data list root
proof: Vector[Bytes32, DEPOSIT_CONTRACT_TREE_DEPTH + 1] # Merkle path to deposit data list root
data: DepositData
```
@ -430,8 +430,8 @@ class BeaconBlockBody(Container):
```python
class BeaconBlock(Container):
slot: Slot
parent_root: Hash
state_root: Hash
parent_root: Root
state_root: Root
body: BeaconBlockBody
signature: BLSSignature
```
@ -448,9 +448,9 @@ class BeaconState(Container):
fork: Fork
# History
latest_block_header: BeaconBlockHeader
block_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
state_roots: Vector[Hash, SLOTS_PER_HISTORICAL_ROOT]
historical_roots: List[Hash, HISTORICAL_ROOTS_LIMIT]
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
# Eth1
eth1_data: Eth1Data
eth1_data_votes: List[Eth1Data, SLOTS_PER_ETH1_VOTING_PERIOD]
@ -459,7 +459,7 @@ class BeaconState(Container):
validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
# Randomness
randao_mixes: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR]
randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
# Slashings
slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances
# Attestations
@ -527,15 +527,15 @@ def bytes_to_int(data: bytes) -> uint64:
#### `hash`
`def hash(data: bytes) -> Hash` is SHA256.
`def hash(data: bytes) -> Bytes32` is SHA256.
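A minimal sketch of this helper, assuming Python's `hashlib` and a stand-in `Bytes32` type:
```python
from hashlib import sha256


class Bytes32(bytes):
    """Stand-in for the spec's Bytes32 type."""


def hash(data: bytes) -> Bytes32:
    # SHA256 digest wrapped in the 32-byte type (shadows the builtin, as the spec does).
    return Bytes32(sha256(data).digest())


assert len(hash(b"")) == 32
```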
#### `hash_tree_root`
`def hash_tree_root(object: SSZSerializable) -> Hash` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../simple-serialize.md#merkleization).
`def hash_tree_root(object: SSZSerializable) -> Root` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../simple-serialize.md#merkleization).
#### `signing_root`
`def signing_root(object: Container) -> Hash` is a function for computing signing messages, as defined in the [SSZ spec](../simple-serialize.md#self-signed-containers).
`def signing_root(object: Container) -> Root` is a function for computing signing messages, as defined in the [SSZ spec](../simple-serialize.md#self-signed-containers).
#### `bls_verify`
@ -611,7 +611,7 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
#### `is_valid_merkle_branch`
```python
def is_valid_merkle_branch(leaf: Hash, branch: Sequence[Hash], depth: uint64, index: uint64, root: Hash) -> bool:
def is_valid_merkle_branch(leaf: Bytes32, branch: Sequence[Bytes32], depth: uint64, index: uint64, root: Root) -> bool:
"""
Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and ``branch``.
"""
@ -629,7 +629,7 @@ def is_valid_merkle_branch(leaf: Hash, branch: Sequence[Hash], depth: uint64, in
#### `compute_shuffled_index`
```python
def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Hash) -> ValidatorIndex:
def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Bytes32) -> ValidatorIndex:
"""
Return the shuffled validator index corresponding to ``seed`` (and ``index_count``).
"""
@ -652,7 +652,7 @@ def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Has
#### `compute_proposer_index`
```python
def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Hash) -> ValidatorIndex:
def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
"""
Return from ``indices`` a random index sampled by effective balance.
"""
@ -672,7 +672,7 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex]
```python
def compute_committee(indices: Sequence[ValidatorIndex],
seed: Hash,
seed: Bytes32,
index: uint64,
count: uint64) -> Sequence[ValidatorIndex]:
"""
@ -749,7 +749,7 @@ def get_previous_epoch(state: BeaconState) -> Epoch:
#### `get_block_root`
```python
def get_block_root(state: BeaconState, epoch: Epoch) -> Hash:
def get_block_root(state: BeaconState, epoch: Epoch) -> Root:
"""
Return the block root at the start of a recent ``epoch``.
"""
@ -759,7 +759,7 @@ def get_block_root(state: BeaconState, epoch: Epoch) -> Hash:
#### `get_block_root_at_slot`
```python
def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Hash:
def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Root:
"""
Return the block root at a recent ``slot``.
"""
@ -770,7 +770,7 @@ def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Hash:
#### `get_randao_mix`
```python
def get_randao_mix(state: BeaconState, epoch: Epoch) -> Hash:
def get_randao_mix(state: BeaconState, epoch: Epoch) -> Bytes32:
"""
Return the randao mix at a recent ``epoch``.
"""
@ -801,7 +801,7 @@ def get_validator_churn_limit(state: BeaconState) -> uint64:
#### `get_seed`
```python
def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Hash:
def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes32:
"""
Return the seed at ``epoch``.
"""
@ -996,7 +996,7 @@ Before the Ethereum 2.0 genesis has been triggered, and for every Ethereum 1.0 b
- `deposits` is the sequence of all deposits, ordered chronologically, up to (and including) the block with hash `eth1_block_hash`
```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash,
def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
eth1_timestamp: uint64,
deposits: Sequence[Deposit]) -> BeaconState:
state = BeaconState(

View File

@ -57,7 +57,7 @@ The head block root associated with a `store` is defined as `get_head(store)`. A
@dataclass(eq=True, frozen=True)
class LatestMessage(object):
epoch: Epoch
root: Hash
root: Root
```
#### `Store`
@ -70,8 +70,8 @@ class Store(object):
justified_checkpoint: Checkpoint
finalized_checkpoint: Checkpoint
best_justified_checkpoint: Checkpoint
blocks: Dict[Hash, BeaconBlock] = field(default_factory=dict)
block_states: Dict[Hash, BeaconState] = field(default_factory=dict)
blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
block_states: Dict[Root, BeaconState] = field(default_factory=dict)
checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
```
@ -113,7 +113,7 @@ def compute_slots_since_epoch_start(slot: Slot) -> int:
#### `get_ancestor`
```python
def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
block = store.blocks[root]
if block.slot > slot:
return get_ancestor(store, block.parent_root, slot)
@ -126,7 +126,7 @@ def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
#### `get_latest_attesting_balance`
```python
def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei:
def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
state = store.checkpoint_states[store.justified_checkpoint]
active_indices = get_active_validator_indices(state, get_current_epoch(state))
return Gwei(sum(
@ -139,7 +139,7 @@ def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei:
#### `get_head`
```python
def get_head(store: Store) -> Hash:
def get_head(store: Store) -> Root:
# Execute the LMD-GHOST fork choice
head = store.justified_checkpoint.root
justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
@ -238,6 +238,12 @@ def on_block(store: Store, block: BeaconBlock) -> None:
```python
def on_attestation(store: Store, attestation: Attestation) -> None:
"""
Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
An ``attestation`` that is asserted as invalid may be valid at a later time;
consider scheduling it for later processing in that case.
"""
target = attestation.data.target
# Attestations must be from the current or previous epoch
@ -248,10 +254,17 @@ def on_attestation(store: Store, attestation: Attestation) -> None:
# Cannot calculate the current shuffling if have not seen the target
assert target.root in store.blocks
# Attestation target must be a known block. If the target block is unknown, delay consideration until the block is found
assert target.root in store.blocks
# Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
base_state = store.block_states[target.root].copy()
assert store.time >= base_state.genesis_time + compute_start_slot_at_epoch(target.epoch) * SECONDS_PER_SLOT
# Attestations must be for a known block. If block is unknown, delay consideration until the block is found
assert attestation.data.beacon_block_root in store.blocks
# Attestations must not be for blocks in the future. If they are, the attestation should not be considered
assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
# Store target checkpoint state if not yet seen
if target not in store.checkpoint_states:
process_slots(base_state, compute_start_slot_at_epoch(target.epoch))

View File

@ -62,7 +62,7 @@ class ShardReceiptDelta(Container):
```python
class ShardReceiptProof(Container):
shard: Shard
proof: List[Hash, PLACEHOLDER]
proof: List[Bytes32, PLACEHOLDER]
receipt: List[ShardReceiptDelta, PLACEHOLDER]
```
@ -109,7 +109,7 @@ def committee_to_compact_committee(state: BeaconState, committee: Sequence[Valid
#### `verify_merkle_proof`
```python
def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool:
def verify_merkle_proof(leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex, root: Root) -> bool:
assert len(proof) == get_generalized_index_length(index)
for i, h in enumerate(proof):
if get_generalized_index_bit(index, i):
@ -199,7 +199,7 @@ Add the following fields to the end of the specified container objects.
```python
class BeaconState(Container):
# Period committees
period_committee_roots: Vector[Hash, PERIOD_COMMITTEE_ROOT_LENGTH]
period_committee_roots: Vector[Root, PERIOD_COMMITTEE_ROOT_LENGTH]
next_shard_receipt_period: Vector[uint64, SHARD_COUNT]
```

View File

@ -170,7 +170,7 @@ class CustodyChunkChallengeRecord(Container):
challenger_index: ValidatorIndex
responder_index: ValidatorIndex
inclusion_epoch: Epoch
data_root: Hash
data_root: Root
depth: uint64
chunk_index: uint64
```
@ -183,9 +183,9 @@ class CustodyBitChallengeRecord(Container):
challenger_index: ValidatorIndex
responder_index: ValidatorIndex
inclusion_epoch: Epoch
data_root: Hash
data_root: Root
chunk_count: uint64
chunk_bits_merkle_root: Hash
chunk_bits_merkle_root: Root
responder_key: BLSSignature
```
@ -196,8 +196,8 @@ class CustodyResponse(Container):
challenge_index: uint64
chunk_index: uint64
chunk: ByteVector[BYTES_PER_CUSTODY_CHUNK]
data_branch: List[Hash, CUSTODY_DATA_DEPTH]
chunk_bits_branch: List[Hash, CUSTODY_CHUNK_BIT_DEPTH]
data_branch: List[Bytes32, CUSTODY_DATA_DEPTH]
chunk_bits_branch: List[Bytes32, CUSTODY_CHUNK_BIT_DEPTH]
chunk_bits_leaf: Bitvector[256]
```
@ -228,7 +228,7 @@ class EarlyDerivedSecretReveal(Container):
# Index of the validator who revealed (whistleblower)
masker_index: ValidatorIndex
# Mask used to hide the actual reveal signature (prevent reveal from being stolen)
mask: Hash
mask: Bytes32
```
### Phase 0 container updates
@ -283,11 +283,11 @@ def ceillog2(x: uint64) -> int:
### `is_valid_merkle_branch_with_mixin`
```python
def is_valid_merkle_branch_with_mixin(leaf: Hash,
branch: Sequence[Hash],
def is_valid_merkle_branch_with_mixin(leaf: Bytes32,
branch: Sequence[Bytes32],
depth: uint64,
index: uint64,
root: Hash,
root: Root,
mixin: uint64) -> bool:
value = leaf
for i in range(depth):
@ -672,7 +672,7 @@ def process_chunk_challenge_response(state: BeaconState,
# Verify chunk index
assert response.chunk_index == challenge.chunk_index
# Verify bit challenge data is null
assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == Hash()
assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == Bytes32()
# Verify minimum delay
assert get_current_epoch(state) >= challenge.inclusion_epoch + MAX_SEED_LOOKAHEAD
# Verify the chunk matches the crosslink data root

View File

@ -118,9 +118,9 @@ class Crosslink(Container):
class ShardBlock(Container):
shard: Shard
slot: ShardSlot
beacon_block_root: Hash
parent_root: Hash
state_root: Hash
beacon_block_root: Root
parent_root: Root
state_root: Root
body: List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE]
block_size_sum: uint64
aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE]
@ -134,10 +134,10 @@ class ShardBlock(Container):
class ShardBlockHeader(Container):
shard: Shard
slot: ShardSlot
beacon_block_root: Hash
parent_root: Hash
state_root: Hash
body_root: Hash
beacon_block_root: Root
parent_root: Root
state_root: Root
body_root: Root
block_size_sum: uint64
aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE]
attestations: BLSSignature
@ -150,7 +150,7 @@ class ShardBlockHeader(Container):
class ShardState(Container):
shard: Shard
slot: ShardSlot
history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_DEPTH]
history_accumulator: Vector[Bytes32, HISTORY_ACCUMULATOR_DEPTH]
latest_block_header: ShardBlockHeader
block_size_sum: uint64
# Fees and rewards
@ -166,7 +166,7 @@ class ShardState(Container):
```python
class ShardAttestationData(Container):
slot: ShardSlot
parent_root: Hash
parent_root: Root
```
## Helper functions

View File

@ -59,9 +59,9 @@ In a binary Merkle tree, we define a "generalized index" of a node as `2**depth
Note that the generalized index has the convenient property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function:
```python
def merkle_tree(leaves: Sequence[Hash]) -> Sequence[Hash]:
def merkle_tree(leaves: Sequence[Bytes32]) -> Sequence[Bytes32]:
padded_length = get_next_power_of_two(len(leaves))
o = [Hash()] * padded_length + list(leaves) + [Hash()] * (padded_length - len(leaves))
o = [Bytes32()] * padded_length + list(leaves) + [Bytes32()] * (padded_length - len(leaves))
for i in range(padded_length - 1, 0, -1):
o[i] = hash(o[i * 2] + o[i * 2 + 1])
return o
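# Example of the 2k / 2k+1 property (illustrative): for leaves [a, b, c, d],
# the returned list o satisfies o[4:8] == [a, b, c, d], o[2] == hash(a + b),
# o[3] == hash(c + d), and o[1] == hash(o[2] + o[3]) is the root (o[0] is unused).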
@ -289,7 +289,7 @@ def get_helper_indices(indices: Sequence[GeneralizedIndex]) -> Sequence[Generali
Now we provide the Merkle proof verification functions. First, for single item proofs:
```python
def calculate_merkle_root(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex) -> Hash:
def calculate_merkle_root(leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex) -> Root:
assert len(proof) == get_generalized_index_length(index)
for i, h in enumerate(proof):
if get_generalized_index_bit(index, i):
@ -300,16 +300,16 @@ def calculate_merkle_root(leaf: Hash, proof: Sequence[Hash], index: GeneralizedI
```
```python
def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool:
def verify_merkle_proof(leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex, root: Root) -> bool:
return calculate_merkle_root(leaf, proof, index) == root
```
Now for multi-item proofs:
```python
def calculate_multi_merkle_root(leaves: Sequence[Hash],
proof: Sequence[Hash],
indices: Sequence[GeneralizedIndex]) -> Hash:
def calculate_multi_merkle_root(leaves: Sequence[Bytes32],
proof: Sequence[Bytes32],
indices: Sequence[GeneralizedIndex]) -> Root:
assert len(leaves) == len(indices)
helper_indices = get_helper_indices(indices)
assert len(proof) == len(helper_indices)
@ -332,10 +332,10 @@ def calculate_multi_merkle_root(leaves: Sequence[Hash],
```
```python
def verify_merkle_multiproof(leaves: Sequence[Hash],
proof: Sequence[Hash],
def verify_merkle_multiproof(leaves: Sequence[Bytes32],
proof: Sequence[Bytes32],
indices: Sequence[GeneralizedIndex],
root: Hash) -> bool:
root: Root) -> bool:
return calculate_multi_merkle_root(leaves, proof, indices) == root
```

View File

@ -49,16 +49,16 @@ We define the following Python custom types for type hinting and readability:
```python
class LightClientUpdate(Container):
# Shard block root (and authenticating signature data)
shard_block_root: Hash
shard_block_root: Root
fork_version: Version
aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
signature: BLSSignature
# Updated beacon header (and authenticating branch)
header: BeaconBlockHeader
header_branch: Vector[Hash, BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH]
header_branch: Vector[Bytes32, BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH]
# Updated period committee (and authenticating branch)
committee: CompactCommittee
committee_branch: Vector[Hash, PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)]
committee_branch: Vector[Bytes32, PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)]
```
## Helpers

View File

@ -174,9 +174,9 @@ There are two primary global topics used to propagate beacon blocks and aggregat
- The aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally).
- The block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation.
- `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (`aggregate_and_proof.aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate_and_proof.aggregate.data.slot`).
- The validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`.
- The validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.aggregator_index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`.
- `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`.
- The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.index`.
- The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.aggregator_index`.
- The signature of `aggregate_and_proof.aggregate` is valid.
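Pulled together, a hedged sketch of the non-signature checks above, written in the style of this repo's test helpers (a `spec` module exposing the Phase 0 helpers plus a fork choice `store`); the function name is illustrative, and the selection-proof and aggregate-signature checks are omitted since they require BLS verification:
```python
def validate_aggregate_gossip(spec, state, store, aggregate_and_proof, current_slot, seen_roots):
    aggregate = aggregate_and_proof.aggregate
    data = aggregate.data
    # Not already seen via aggregate gossip, within a block, or local aggregation.
    if spec.hash_tree_root(aggregate) in seen_roots:
        return False
    # The block being voted for must be known (approximated here via the fork choice store).
    if data.beacon_block_root not in store.blocks:
        return False
    # Slot within the propagation window.
    if not data.slot <= current_slot <= data.slot + spec.ATTESTATION_PROPAGATION_SLOT_RANGE:
        return False
    # The aggregator must be a member of the aggregate's committee.
    attesting = spec.get_attesting_indices(state, data, aggregate.aggregation_bits)
    return aggregate_and_proof.aggregator_index in attesting
```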
Additional global topics are used to propagate lower frequency validator messages. Their `TopicName`s are:

View File

@ -55,6 +55,7 @@
- [Aggregate signature](#aggregate-signature-1)
- [Broadcast aggregate](#broadcast-aggregate)
- [`AggregateAndProof`](#aggregateandproof)
- [Phase 0 attestation subnet stability](#phase-0-attestation-subnet-stability)
- [How to avoid slashing](#how-to-avoid-slashing)
- [Proposer slashing](#proposer-slashing)
- [Attester slashing](#attester-slashing)
@ -79,6 +80,8 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph
| - | - | :-: | :-: |
| `ETH1_FOLLOW_DISTANCE` | `2**10` (= 1,024) | blocks | ~4 hours |
| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | |
| `RANDOM_SUBNETS_PER_VALIDATOR` | `2**0` (= 1) | subnets | |
| `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours |
## Becoming a validator
@ -183,9 +186,13 @@ def is_proposer(state: BeaconState,
The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the epoch in question.
`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting at which future slot they will have to attest.
`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting at which future slot they will have to attest and joining the committee index attestation subnet related to their committee assignment.
Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
Specifically, a validator should:
* Call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
* Join the pubsub topic -- `committee_index{committee_index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`.
* If any current peers are subscribed to the topic, the validator simply sends `subscribe` messages for the new topic.
* If no current peers are subscribed to the topic, the validator must discover new peers on this topic. If "topic discovery" is available, use topic discovery to find peers that advertise subscription to the topic. If not, "guess and check" by connecting with a number of random new peers, persisting connections with peers subscribed to the topic and (potentially) dropping the new peers otherwise.
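As a small illustration of the topic naming above, assuming `ATTESTATION_SUBNET_COUNT = 64` (the value is not shown in this diff):
```python
ATTESTATION_SUBNET_COUNT = 64  # assumed value from the networking spec


def attestation_subnet_topic(committee_index: int) -> str:
    # Committee indices are folded onto a fixed number of attestation subnets.
    return f"committee_index{committee_index % ATTESTATION_SUBNET_COUNT}_beacon_attestation"


assert attestation_subnet_topic(70) == "committee_index6_beacon_attestation"
```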
## Beacon chain responsibilities
@ -358,7 +365,7 @@ Some validators are selected to locally aggregate attestations with a similar `a
A validator is selected to aggregate based upon the return value of `is_aggregator()`.
```python
def slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature:
def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature:
domain = get_domain(state, DOMAIN_BEACON_ATTESTER, compute_epoch_at_slot(slot))
return bls_sign(privkey, hash_tree_root(slot), domain)
```
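For context, `is_aggregator()` (whose body is not part of this diff) selects on average about `TARGET_AGGREGATORS_PER_COMMITTEE` members per committee by hashing the slot signature. A self-contained restatement with a hypothetical name and simplified arguments (the real helper takes the beacon state and committee information rather than a raw size):
```python
from hashlib import sha256

TARGET_AGGREGATORS_PER_COMMITTEE = 16  # 2**4, from the constants table above


def is_selected_as_aggregator(committee_size: int, slot_signature: bytes) -> bool:
    # Hash the slot signature and test it against a committee-size-dependent modulus,
    # so roughly TARGET_AGGREGATORS_PER_COMMITTEE members of each committee are selected.
    modulo = max(1, committee_size // TARGET_AGGREGATORS_PER_COMMITTEE)
    return int.from_bytes(sha256(slot_signature).digest()[:8], "little") % modulo == 0
```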
@ -404,15 +411,19 @@ Aggregate attestations are broadcast as `AggregateAndProof` objects to prove to
```python
class AggregateAndProof(Container):
index: ValidatorIndex
selection_proof: BLSSignature
aggregator_index: ValidatorIndex
aggregate: Attestation
selection_proof: BLSSignature
```
Where
* `index` is the validator's `validator_index`.
* `selection_proof` is the signature of the slot (`slot_signature()`).
* `aggregator_index` is the validator's `ValidatorIndex`.
* `aggregate` is the `aggregate_attestation` constructed in the previous section.
* `selection_proof` is the signature of the slot (`get_slot_signature()`).
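To make the field roles concrete, a hypothetical construction helper (not defined by the spec at this version) that reuses `get_slot_signature` from above:
```python
def build_aggregate_and_proof(state, aggregator_index, aggregate, privkey):
    # Hypothetical helper: bundle the aggregate with the aggregator's index
    # and the slot-selection proof described above.
    return AggregateAndProof(
        aggregator_index=aggregator_index,
        aggregate=aggregate,
        selection_proof=get_slot_signature(state, aggregate.data.slot, privkey),
    )
```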
## Phase 0 attestation subnet stability
Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`committee_index{subnet_id}_beacon_attestation`). To provide this stability, each validator must randomly select and remain subscribed to `RANDOM_SUBNETS_PER_VALIDATOR` attestation subnets. The lifetime of each random subscription should be a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION`.
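A minimal sketch of this subscription policy, assuming `ATTESTATION_SUBNET_COUNT = 64` (not shown in this diff) and the validator constants listed earlier:
```python
import random

ATTESTATION_SUBNET_COUNT = 64                 # assumed value from the networking spec
RANDOM_SUBNETS_PER_VALIDATOR = 1              # 2**0, from the constants above
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 256   # 2**8, from the constants above


def pick_random_subscriptions(rng: random.Random):
    # Return (subnet_id, epochs_until_rotation) pairs for one validator; each
    # subscription lasts between 1x and 2x EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION epochs.
    return [
        (
            rng.randrange(ATTESTATION_SUBNET_COUNT),
            EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION
            + rng.randrange(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION + 1),
        )
        for _ in range(RANDOM_SUBNETS_PER_VALIDATOR)
    ]
```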
## How to avoid slashing

View File

@ -1,8 +1,7 @@
from eth2spec.test.context import with_all_phases, spec_state_test
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.attestations import get_valid_attestation
from eth2spec.test.helpers.state import state_transition_and_sign_block
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block
def run_on_attestation(spec, state, store, attestation, valid=True):
@ -89,18 +88,48 @@ def test_on_attestation_past_epoch(spec, state):
@spec_state_test
def test_on_attestation_target_not_in_store(spec, state):
store = spec.get_genesis_store(state)
time = 100
time = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
spec.on_tick(store, time)
# move to next epoch to make block new target
state.slot += spec.SLOTS_PER_EPOCH
# move to immediately before next epoch to make block new target
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1)
block = build_empty_block_for_next_slot(spec, state)
state_transition_and_sign_block(spec, state, block)
target_block = build_empty_block_for_next_slot(spec, state)
state_transition_and_sign_block(spec, state, target_block)
# do not add block to store
# do not add target block to store
attestation = get_valid_attestation(spec, state, slot=target_block.slot)
assert attestation.data.target.root == target_block.signing_root()
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_beacon_block_not_in_store(spec, state):
store = spec.get_genesis_store(state)
time = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
spec.on_tick(store, time)
# move to immediately before next epoch to make block new target
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1)
target_block = build_empty_block_for_next_slot(spec, state)
state_transition_and_sign_block(spec, state, target_block)
# store target in store
spec.on_block(store, target_block)
head_block = build_empty_block_for_next_slot(spec, state)
state_transition_and_sign_block(spec, state, head_block)
# do not add head block to store
attestation = get_valid_attestation(spec, state, slot=head_block.slot)
assert attestation.data.target.root == target_block.signing_root()
assert attestation.data.beacon_block_root == head_block.signing_root()
attestation = get_valid_attestation(spec, state, slot=block.slot)
run_on_attestation(spec, state, store, attestation, False)
@ -124,6 +153,26 @@ def test_on_attestation_future_epoch(spec, state):
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_future_block(spec, state):
store = spec.get_genesis_store(state)
time = spec.SECONDS_PER_SLOT * 5
spec.on_tick(store, time)
block = build_empty_block_for_next_slot(spec, state)
state_transition_and_sign_block(spec, state, block)
spec.on_block(store, block)
# attestation for slot immediately prior to the block being attested to
attestation = get_valid_attestation(spec, state, slot=block.slot - 1, signed=False)
attestation.data.beacon_block_root = block.signing_root()
sign_attestation(spec, state, attestation)
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_same_slot(spec, state):

View File

@ -59,7 +59,7 @@ def build_empty_block(spec, state, slot=None, signed=False):
empty_block.slot = slot
empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
previous_block_header = deepcopy(state.latest_block_header)
if previous_block_header.state_root == spec.Hash():
if previous_block_header.state_root == spec.Root():
previous_block_header.state_root = state.hash_tree_root()
empty_block.parent_root = signing_root(previous_block_header)

View File

@ -14,6 +14,16 @@ def next_slot(spec, state):
spec.process_slots(state, state.slot + 1)
def transition_to(spec, state, slot):
"""
Transition to ``slot``.
"""
assert state.slot <= slot
for _ in range(slot - state.slot):
next_slot(spec, state)
assert state.slot == slot
def next_epoch(spec, state):
"""
Transition to the start slot of the next epoch

View File

@ -195,7 +195,7 @@ def test_bad_merkle_proof(spec, state):
deposit = prepare_state_and_deposit(spec, state, validator_index, amount)
# mess up merkle branch
deposit.proof[5] = spec.Hash()
deposit.proof[5] = spec.Bytes32()
sign_deposit_data(spec, deposit.data, privkeys[validator_index], state=state)

View File

@ -63,7 +63,7 @@ def test_empty_block_transition(spec, state):
assert len(state.eth1_data_votes) == pre_eth1_votes + 1
assert spec.get_block_root_at_slot(state, pre_slot) == block.parent_root
assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Hash()
assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Bytes32()
@with_all_phases
@ -98,7 +98,7 @@ def test_skipped_slots(spec, state):
yield 'post', state
assert state.slot == block.slot
assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Hash()
assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Bytes32()
for slot in range(pre_slot, state.slot):
assert spec.get_block_root_at_slot(state, slot) == block.parent_root

View File

@ -208,13 +208,13 @@ def test_bytesn_subclass():
assert issubclass(ByteVector[32](b'\xab' * 32).type(), Bytes32)
assert issubclass(ByteVector[32], Bytes32)
class Hash(Bytes32):
class Root(Bytes32):
pass
assert isinstance(Hash(b'\xab' * 32), Bytes32)
assert not isinstance(Hash(b'\xab' * 32), Bytes48)
assert issubclass(Hash(b'\xab' * 32).type(), Bytes32)
assert issubclass(Hash, Bytes32)
assert isinstance(Root(b'\xab' * 32), Bytes32)
assert not isinstance(Root(b'\xab' * 32), Bytes48)
assert issubclass(Root(b'\xab' * 32).type(), Bytes32)
assert issubclass(Root, Bytes32)
assert not issubclass(Bytes48, Bytes32)