Merge branch 'dev' into dankrad-patch-7

# Conflicts:
#	specs/core/1_custody-game.md
#	specs/core/1_shard-data-chains.md
Dankrad Feist 2019-05-09 01:00:25 +01:00
commit ec230f443a
GPG Key ID: 6815E6A20BEBBABA
16 changed files with 188 additions and 201 deletions

@ -72,7 +72,7 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**11 (= 2,048) epochs 9 days
PERSISTENT_COMMITTEE_PERIOD: 2048
# 2**6 (= 64) epochs ~7 hours
MAX_CROSSLINK_EPOCHS: 64
MAX_EPOCHS_PER_CROSSLINK: 64
# 2**2 (= 4) epochs 25.6 minutes
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
@ -124,4 +124,4 @@ DOMAIN_RANDAO: 1
DOMAIN_ATTESTATION: 2
DOMAIN_DEPOSIT: 3
DOMAIN_VOLUNTARY_EXIT: 4
DOMAIN_TRANSFER: 5
DOMAIN_TRANSFER: 5

@ -71,7 +71,7 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**11 (= 2,048) epochs 9 days
PERSISTENT_COMMITTEE_PERIOD: 2048
# 2**6 (= 64) epochs ~7 hours
MAX_CROSSLINK_EPOCHS: 64
MAX_EPOCHS_PER_CROSSLINK: 64
# 2**2 (= 4) epochs 25.6 minutes
MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
@ -123,4 +123,4 @@ DOMAIN_RANDAO: 1
DOMAIN_ATTESTATION: 2
DOMAIN_DEPOSIT: 3
DOMAIN_VOLUNTARY_EXIT: 4
DOMAIN_TRANSFER: 5
DOMAIN_TRANSFER: 5

@ -15,16 +15,9 @@ from typing import (
from eth2spec.utils.minimal_ssz import *
from eth2spec.utils.bls_stub import *
""")
for i in (1, 2, 3, 4, 8, 32, 48, 96):
code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i))
code_lines.append("""
# stub, will get overwritten by real var
SLOTS_PER_EPOCH = 64
Slot = NewType('Slot', int) # uint64
Epoch = NewType('Epoch', int) # uint64
Shard = NewType('Shard', int) # uint64

@ -75,14 +75,14 @@
- [`compute_committee`](#compute_committee)
- [`get_crosslink_committee`](#get_crosslink_committee)
- [`get_attesting_indices`](#get_attesting_indices)
- [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-)
- [`int_to_bytes`](#int_to_bytes)
- [`bytes_to_int`](#bytes_to_int)
- [`get_total_balance`](#get_total_balance)
- [`get_domain`](#get_domain)
- [`get_bitfield_bit`](#get_bitfield_bit)
- [`verify_bitfield`](#verify_bitfield)
- [`convert_to_indexed`](#convert_to_indexed)
- [`verify_indexed_attestation`](#verify_indexed_attestation)
- [`validate_indexed_attestation`](#validate_indexed_attestation)
- [`is_slashable_attestation_data`](#is_slashable_attestation_data)
- [`integer_squareroot`](#integer_squareroot)
- [`get_delayed_activation_exit_epoch`](#get_delayed_activation_exit_epoch)
@ -194,8 +194,8 @@ These configurations are updated for releases, but may be out of sync during `de
| `GENESIS_SLOT` | `0` |
| `GENESIS_EPOCH` | `0` |
| `FAR_FUTURE_EPOCH` | `2**64 - 1` |
| `ZERO_HASH` | `int_to_bytes32(0)` |
| `BLS_WITHDRAWAL_PREFIX_BYTE` | `int_to_bytes1(0)` |
| `ZERO_HASH` | `b'\x00' * 32` |
| `BLS_WITHDRAWAL_PREFIX` | `0` |
### Time parameters
@ -209,10 +209,10 @@ These configurations are updated for releases, but may be out of sync during `de
| `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours |
| `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours |
| `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days |
| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | epochs | ~7 hours |
| `MAX_EPOCHS_PER_CROSSLINK` | `2**6` (= 64) | epochs | ~7 hours |
| `MIN_EPOCHS_TO_INACTIVITY_PENALTY` | `2**2` (= 4) | epochs | 25.6 minutes |
* `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH`
* `MAX_EPOCHS_PER_CROSSLINK` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH`
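As a quick, non-normative sanity check of that note, assuming the mainnet values `SHARD_COUNT = 1024` and `SLOTS_PER_EPOCH = 64` configured elsewhere in this spec:

```python
# Illustrative arithmetic only; the two constants below are assumed mainnet values.
SHARD_COUNT = 1024
SLOTS_PER_EPOCH = 64
MAX_EPOCHS_PER_CROSSLINK = 64

assert MAX_EPOCHS_PER_CROSSLINK == 4 * (SHARD_COUNT // SLOTS_PER_EPOCH)  # 64 == 4 * 16
```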
### State list lengths
@ -282,12 +282,14 @@ The types are defined topologically to aid in facilitating an executable version
```python
{
# Shard number
'shard': 'uint64',
# Epoch number
'epoch': 'uint64',
# Root of the previous crosslink
'previous_crosslink_root': 'bytes32',
'parent_root': 'bytes32',
# Root of the crosslinked shard data since the previous crosslink
'crosslink_data_root': 'bytes32',
'data_root': 'bytes32',
}
```
@ -318,9 +320,7 @@ The types are defined topologically to aid in facilitating an executable version
'target_root': 'bytes32',
# Crosslink vote
'shard': 'uint64',
'previous_crosslink_root': 'bytes32',
'crosslink_data_root': 'bytes32',
'crosslink': Crosslink,
}
```
@ -369,9 +369,9 @@ The types are defined topologically to aid in facilitating an executable version
```python
{
'slot': 'uint64',
'previous_block_root': 'bytes32',
'parent_root': 'bytes32',
'state_root': 'bytes32',
'block_body_root': 'bytes32',
'body_root': 'bytes32',
'signature': 'bytes96',
}
```
@ -536,7 +536,7 @@ The types are defined topologically to aid in facilitating an executable version
{
# Header
'slot': 'uint64',
'previous_block_root': 'bytes32',
'parent_root': 'bytes32',
'state_root': 'bytes32',
'body': BeaconBlockBody,
'signature': 'bytes96',
@ -767,7 +767,7 @@ def get_epoch_start_shard(state: BeaconState, epoch: Epoch) -> Shard:
```python
def get_attestation_data_slot(state: BeaconState, data: AttestationData) -> Slot:
committee_count = get_epoch_committee_count(state, data.target_epoch)
offset = (data.shard + SHARD_COUNT - get_epoch_start_shard(state, data.target_epoch)) % SHARD_COUNT
offset = (data.crosslink.shard + SHARD_COUNT - get_epoch_start_shard(state, data.target_epoch)) % SHARD_COUNT
return get_epoch_start_slot(data.target_epoch) + offset // (committee_count // SLOTS_PER_EPOCH)
```
@ -830,7 +830,7 @@ def generate_seed(state: BeaconState,
return hash(
get_randao_mix(state, epoch + LATEST_RANDAO_MIXES_LENGTH - MIN_SEED_LOOKAHEAD) +
get_active_index_root(state, epoch) +
int_to_bytes32(epoch)
int_to_bytes(epoch, length=32)
)
```
@ -851,7 +851,7 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
i = 0
while True:
candidate_index = first_committee[(epoch + i) % len(first_committee)]
random_byte = hash(seed + int_to_bytes8(i // 32))[i % 32]
random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
effective_balance = state.validator_registry[candidate_index].effective_balance
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
return candidate_index
@ -888,10 +888,10 @@ def get_shuffled_index(index: ValidatorIndex, index_count: int, seed: Bytes32) -
# Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf)
# See the 'generalized domain' algorithm on page 3
for round in range(SHUFFLE_ROUND_COUNT):
pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % index_count
pivot = bytes_to_int(hash(seed + int_to_bytes(round, length=1))[0:8]) % index_count
flip = (pivot - index) % index_count
position = max(index, flip)
source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position // 256))
source = hash(seed + int_to_bytes(round, length=1) + int_to_bytes(position // 256, length=4))
byte = source[(position % 256) // 8]
bit = (byte >> (position % 8)) % 2
index = flip if bit else index
@ -929,14 +929,17 @@ def get_attesting_indices(state: BeaconState,
"""
Return the sorted attesting indices corresponding to ``attestation_data`` and ``bitfield``.
"""
committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.shard)
committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.crosslink.shard)
assert verify_bitfield(bitfield, len(committee))
return sorted([index for i, index in enumerate(committee) if get_bitfield_bit(bitfield, i) == 0b1])
```
### `int_to_bytes1`, `int_to_bytes2`, ...
### `int_to_bytes`
`int_to_bytes1(x): return x.to_bytes(1, 'little')`, `int_to_bytes2(x): return x.to_bytes(2, 'little')`, and so on for all integers, particularly 1, 2, 3, 4, 8, 32, 48, 96.
```python
def int_to_bytes(integer: int, length: int) -> bytes:
return integer.to_bytes(length, 'little')
```
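As a quick illustration (not part of the spec), the former fixed-width helpers map directly onto the new signature:

```python
# Examples assuming the int_to_bytes definition above.
assert int_to_bytes(0, length=1) == b'\x00'                                   # replaces int_to_bytes1(0)
assert int_to_bytes(5, length=4) == b'\x05\x00\x00\x00'                       # replaces int_to_bytes4(5)
assert int_to_bytes(2**16, length=8) == b'\x00\x00\x01\x00\x00\x00\x00\x00'   # replaces int_to_bytes8(2**16)
```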
### `bytes_to_int`
@ -966,7 +969,7 @@ def get_domain(state: BeaconState,
"""
epoch = get_current_epoch(state) if message_epoch is None else message_epoch
fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version
return bytes_to_int(fork_version + int_to_bytes4(domain_type))
return bytes_to_int(fork_version + int_to_bytes(domain_type, length=4))
```
### `get_bitfield_bit`
@ -1016,35 +1019,29 @@ def convert_to_indexed(state: BeaconState, attestation: Attestation) -> IndexedA
)
```
### `verify_indexed_attestation`
### `validate_indexed_attestation`
```python
def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
def validate_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> None:
"""
Verify validity of ``indexed_attestation`` fields.
Verify validity of ``indexed_attestation``.
"""
custody_bit_0_indices = indexed_attestation.custody_bit_0_indices
custody_bit_1_indices = indexed_attestation.custody_bit_1_indices
bit_0_indices = indexed_attestation.custody_bit_0_indices
bit_1_indices = indexed_attestation.custody_bit_1_indices
# Ensure no duplicate indices across custody bits
assert len(set(custody_bit_0_indices).intersection(set(custody_bit_1_indices))) == 0
if len(custody_bit_1_indices) > 0: # [TO BE REMOVED IN PHASE 1]
return False
if not (1 <= len(custody_bit_0_indices) + len(custody_bit_1_indices) <= MAX_INDICES_PER_ATTESTATION):
return False
if custody_bit_0_indices != sorted(custody_bit_0_indices):
return False
if custody_bit_1_indices != sorted(custody_bit_1_indices):
return False
return bls_verify_multiple(
# Verify no index has custody bit equal to 1 [to be removed in phase 1]
assert len(bit_1_indices) == 0
# Verify max number of indices
assert len(bit_0_indices) + len(bit_1_indices) <= MAX_INDICES_PER_ATTESTATION
# Verify index sets are disjoint
assert len(set(bit_0_indices).intersection(bit_1_indices)) == 0
# Verify indices are sorted
assert bit_0_indices == sorted(bit_0_indices) and bit_1_indices == sorted(bit_1_indices)
# Verify aggregate signature
assert bls_verify_multiple(
pubkeys=[
bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_0_indices]),
bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_indices]),
bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in bit_0_indices]),
bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in bit_1_indices]),
],
message_hashes=[
hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b0)),
@ -1302,34 +1299,19 @@ def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestat
return get_total_balance(state, get_unslashed_attesting_indices(state, attestations))
```
```python
def get_crosslink_from_attestation_data(state: BeaconState, data: AttestationData) -> Crosslink:
return Crosslink(
epoch=min(data.target_epoch, state.current_crosslinks[data.shard].epoch + MAX_CROSSLINK_EPOCHS),
previous_crosslink_root=data.previous_crosslink_root,
crosslink_data_root=data.crosslink_data_root,
)
```
```python
def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]:
shard_attestations = [a for a in get_matching_source_attestations(state, epoch) if a.data.shard == shard]
shard_crosslinks = [get_crosslink_from_attestation_data(state, a.data) for a in shard_attestations]
candidate_crosslinks = [
c for c in shard_crosslinks
if hash_tree_root(state.current_crosslinks[shard]) in (c.previous_crosslink_root, hash_tree_root(c))
]
if len(candidate_crosslinks) == 0:
return Crosslink(), []
def get_attestations_for(crosslink: Crosslink) -> List[PendingAttestation]:
return [a for a in shard_attestations if get_crosslink_from_attestation_data(state, a.data) == crosslink]
# Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically)
winning_crosslink = max(candidate_crosslinks, key=lambda crosslink: (
get_attesting_balance(state, get_attestations_for(crosslink)), crosslink.crosslink_data_root
attestations = [a for a in get_matching_source_attestations(state, epoch) if a.data.crosslink.shard == shard]
crosslinks = list(filter(
lambda c: hash_tree_root(state.current_crosslinks[shard]) in (c.parent_root, hash_tree_root(c)),
[a.data.crosslink for a in attestations]
))
return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(winning_crosslink))
# Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically)
winning_crosslink = max(crosslinks, key=lambda c: (
get_attesting_balance(state, [a for a in attestations if a.data.crosslink == c]), c.data_root
), default=Crosslink())
winning_attestations = [a for a in attestations if a.data.crosslink == winning_crosslink]
return winning_crosslink, get_unslashed_attesting_indices(state, winning_attestations)
```
#### Justification and finalization
@ -1601,12 +1583,12 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
# Verify that the slots match
assert block.slot == state.slot
# Verify that the parent matches
assert block.previous_block_root == signing_root(state.latest_block_header)
assert block.parent_root == signing_root(state.latest_block_header)
# Save current block as the new latest block
state.latest_block_header = BeaconBlockHeader(
slot=block.slot,
previous_block_root=block.previous_block_root,
block_body_root=hash_tree_root(block.body),
parent_root=block.parent_root,
body_root=hash_tree_root(block.body),
)
# Verify proposer is not slashed
proposer = state.validator_registry[get_beacon_proposer_index(state)]
@ -1684,8 +1666,8 @@ def process_attester_slashing(state: BeaconState,
attestation_1 = attester_slashing.attestation_1
attestation_2 = attester_slashing.attestation_2
assert is_slashable_attestation_data(attestation_1.data, attestation_2.data)
assert verify_indexed_attestation(state, attestation_1)
assert verify_indexed_attestation(state, attestation_2)
validate_indexed_attestation(state, attestation_1)
validate_indexed_attestation(state, attestation_2)
slashed_any = False
attesting_indices_1 = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices
@ -1712,29 +1694,29 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
attestation_slot = get_attestation_data_slot(state, data)
assert attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= attestation_slot + SLOTS_PER_EPOCH
# Check target epoch, source epoch, source root, and source crosslink
assert (data.target_epoch, data.source_epoch, data.source_root, data.previous_crosslink_root) in {
(get_current_epoch(state), state.current_justified_epoch, state.current_justified_root, hash_tree_root(state.current_crosslinks[data.shard])),
(get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root, hash_tree_root(state.previous_crosslinks[data.shard])),
}
# Check crosslink data root
assert data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1]
# Check signature and bitfields
assert verify_indexed_attestation(state, convert_to_indexed(state, attestation))
# Cache pending attestation
pending_attestation = PendingAttestation(
data=data,
aggregation_bitfield=attestation.aggregation_bitfield,
inclusion_delay=state.slot - attestation_slot,
proposer_index=get_beacon_proposer_index(state),
)
assert data.target_epoch in (get_previous_epoch(state), get_current_epoch(state))
if data.target_epoch == get_current_epoch(state):
ffg_data = (state.current_justified_epoch, state.current_justified_root, get_current_epoch(state))
parent_crosslink = state.current_crosslinks[data.crosslink.shard]
state.current_epoch_attestations.append(pending_attestation)
else:
ffg_data = (state.previous_justified_epoch, state.previous_justified_root, get_previous_epoch(state))
parent_crosslink = state.previous_crosslinks[data.crosslink.shard]
state.previous_epoch_attestations.append(pending_attestation)
# Check FFG data, crosslink data, and signature
assert ffg_data == (data.source_epoch, data.source_root, data.target_epoch)
assert data.crosslink.epoch == min(data.target_epoch, parent_crosslink.epoch + MAX_EPOCHS_PER_CROSSLINK)
assert data.crosslink.parent_root == hash_tree_root(parent_crosslink)
assert data.crosslink.data_root == ZERO_HASH # [to be removed in phase 1]
validate_indexed_attestation(state, convert_to_indexed(state, attestation))
```
##### Deposits
@ -1837,7 +1819,7 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None:
# Verify that the pubkey is valid
assert (
state.validator_registry[transfer.sender].withdrawal_credentials ==
BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:]
int_to_bytes(BLS_WITHDRAWAL_PREFIX, length=1) + hash(transfer.pubkey)[1:]
)
# Verify that the signature is valid
assert bls_verify(transfer.pubkey, signing_root(transfer), transfer.signature, get_domain(state, DOMAIN_TRANSFER))

@ -38,7 +38,7 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph
Processing the beacon chain is similar to processing the Ethereum 1.0 chain. Clients download and process blocks and maintain a view of what is the current "canonical chain", terminating at the current "head". For a beacon block, `block`, to be processed by a node, the following conditions must be met:
* The parent block with root `block.previous_block_root` has been processed and accepted.
* The parent block with root `block.parent_root` has been processed and accepted.
* An Ethereum 1.0 block pointed to by the `state.latest_eth1_data.block_hash` has been processed and accepted.
* The node's Unix time is greater than or equal to `state.genesis_time + block.slot * SECONDS_PER_SLOT`.
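A minimal, non-normative sketch of these three conditions, assuming hypothetical client-side helpers `is_beacon_block_processed` and `is_eth1_block_processed` (not defined by the spec) and the spec constant `SECONDS_PER_SLOT`:

```python
import time

def may_process_block(store, state, block) -> bool:
    # 1. The parent beacon block has been processed and accepted (hypothetical helper)
    if not is_beacon_block_processed(store, block.parent_root):
        return False
    # 2. The Ethereum 1.0 block referenced by the state has been processed and accepted (hypothetical helper)
    if not is_eth1_block_processed(store, state.latest_eth1_data.block_hash):
        return False
    # 3. The node's Unix time has reached the slot's scheduled time
    return time.time() >= state.genesis_time + block.slot * SECONDS_PER_SLOT
```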

@ -147,7 +147,7 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
'challenger_index': ValidatorIndex,
'responder_index': ValidatorIndex,
'deadline': Epoch,
'crosslink_data_root': 'bytes32',
'data_root': 'bytes32',
'depth': 'uint64',
'chunk_index': 'uint64',
}
@ -161,7 +161,7 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
'challenger_index': ValidatorIndex,
'responder_index': ValidatorIndex,
'deadline': Epoch,
'crosslink_data_root': 'bytes32',
'data_root': 'bytes32',
'chunk_count': 'uint64',
'chunk_bits_merkle_root': 'bytes32',
'responder_key': BLSSignature,
@ -271,7 +271,7 @@ The `empty` function accepts an SSZ type as input and returns an object of that
def get_custody_chunk_count(attestation: Attestation) -> int:
crosslink_start_epoch = attestation.data.latest_crosslink.epoch
crosslink_end_epoch = slot_to_epoch(attestation.data.slot)
crosslink_crosslink_length = min(MAX_CROSSLINK_EPOCHS, end_epoch - start_epoch)
crosslink_crosslink_length = min(MAX_EPOCHS_PER_CROSSLINK, end_epoch - start_epoch)
chunks_per_epoch = 2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK
return crosslink_crosslink_length * chunks_per_epoch
```
@ -432,10 +432,10 @@ def process_early_derived_secret_reveal(state: BeaconState,
# round key
slash_validator(state, reveal.revealed_index, reveal.masker_index)
else:
# Only a small penalty proportional to proposer slot reward for RANDAO reveal
# Only a small penalty proportional to proposer slot reward for RANDAO reveal
# that does not interfere with the custody period
# The penalty is proportional to the max proposer reward
# The penalty is proportional to the max proposer reward
# Calculate penalty
max_proposer_slot_reward = (
get_base_reward(state, reveal.revealed_index) *
@ -454,7 +454,7 @@ def process_early_derived_secret_reveal(state: BeaconState,
increase_balance(state, whistleblower_index, whistleblowing_reward - proposer_reward)
decrease_balance(state, reveal.revealed_index, penalty)
# Mark this derived secret as exposed so validator cannot be punished repeatedly
# Mark this derived secret as exposed so validator cannot be punished repeatedly
state.exposed_derived_secrets[reveal.epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS].append(reveal.revealed_index)
```
@ -480,7 +480,7 @@ def process_chunk_challenge(state: BeaconState,
# Verify the challenge is not a duplicate
for record in state.custody_chunk_challenge_records:
assert (
record.crosslink_data_root != challenge.attestation.data.crosslink_data_root or
record.data_root != challenge.attestation.data.crosslink.data_root or
record.chunk_index != challenge.chunk_index
)
# Verify depth
@ -492,7 +492,7 @@ def process_chunk_challenge(state: BeaconState,
challenger_index=get_beacon_proposer_index(state),
responder_index=challenge.responder_index,
deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE,
crosslink_data_root=challenge.attestation.data.crosslink_data_root,
data_root=challenge.attestation.data.crosslink.data_root,
depth=depth,
chunk_index=challenge.chunk_index,
)
@ -570,7 +570,7 @@ def process_bit_challenge(state: BeaconState,
challenger_index=challenge.challenger_index,
responder_index=challenge.responder_index,
deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE,
crosslink_data_root=challenge.attestation.data.crosslink_data_root,
data_root=challenge.attestation.data.crosslink.data_root,
chunk_count=chunk_count,
chunk_bits_merkle_root=merkle_root(pad_to_power_of_2((challenge.chunk_bits))),
responder_key=challenge.responder_key,
@ -616,7 +616,7 @@ def process_chunk_challenge_response(state: BeaconState,
branch=response.data_branch,
depth=challenge.depth,
index=response.chunk_index,
root=challenge.crosslink_data_root,
root=challenge.data_root,
)
# Clear the challenge
records = state.custody_chunk_challenge_records
@ -638,7 +638,7 @@ def process_bit_challenge_response(state: BeaconState,
branch=response.data_branch,
depth=math.log2(next_power_of_two(challenge.chunk_count)),
index=response.chunk_index,
root=challenge.crosslink_data_root,
root=challenge.data_root,
)
# Verify the chunk bit leaf matches the challenge data
assert verify_merkle_branch(

@ -94,7 +94,7 @@ This document describes the shard data layer and the shard fork choice rule in P
'slot': Slot,
'shard': Shard,
'beacon_chain_root': 'bytes32',
'previous_block_root': 'bytes32',
'parent_root': 'bytes32',
'data': ShardBlockBody,
'state_root': 'bytes32',
'attestations': [ShardAttestation],
@ -109,7 +109,7 @@ This document describes the shard data layer and the shard fork choice rule in P
'slot': Slot,
'shard': Shard,
'beacon_chain_root': 'bytes32',
'previous_block_root': 'bytes32',
'parent_root': 'bytes32',
'body_root': 'bytes32',
'state_root': 'bytes32',
'attestations': [ShardAttestation],
@ -182,7 +182,7 @@ def get_shard_proposer_index(state: BeaconState,
slot: Slot) -> ValidatorIndex:
# Randomly shift persistent committee
persistent_committee = get_persistent_committee(state, shard, slot)
seed = hash(state.current_shuffling_seed + int_to_bytes8(shard) + int_to_bytes8(slot))
seed = hash(state.current_shuffling_seed + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8))
random_index = bytes_to_int(seed[0:8]) % len(persistent_committee)
persistent_committee = persistent_committee[random_index:] + persistent_committee[:random_index]
@ -203,7 +203,7 @@ def get_shard_header(block: ShardBlock) -> ShardBlockHeader:
slot=block.slot,
shard=block.shard,
beacon_chain_root=block.beacon_chain_root,
previous_block_root=block.previous_block_root,
parent_root=block.parent_root,
body_root=hash_tree_root(block.body),
state_root=block.state_root,
attestations=block.attestations,
@ -217,7 +217,7 @@ def get_shard_header(block: ShardBlock) -> ShardBlockHeader:
def verify_shard_attestation_signature(state: BeaconState,
attestation: ShardAttestation) -> None:
data = attestation.data
persistent_committee = get_persistent_committee(state, data.shard, data.slot)
persistent_committee = get_persistent_committee(state, data.crosslink.shard, data.slot)
assert verify_bitfield(attestation.aggregation_bitfield, len(persistent_committee))
pubkeys = []
for i, index in enumerate(persistent_committee):
@ -227,7 +227,7 @@ def verify_shard_attestation_signature(state: BeaconState,
pubkeys.append(validator.pubkey)
assert bls_verify(
pubkey=bls_aggregate_pubkeys(pubkeys),
message_hash=data.shard_block_root,
message_hash=data.crosslink.shard_block_root,
signature=attestation.aggregate_signature,
domain=get_domain(state, slot_to_epoch(data.slot), DOMAIN_SHARD_ATTESTER)
)
@ -298,12 +298,12 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock],
# Check parent block
if block.slot == PHASE_1_GENESIS_SLOT:
assert candidate.previous_block_root == ZERO_HASH
assert candidate.parent_root == ZERO_HASH
else:
parent_block = next(
(block for block in valid_shard_blocks if
signing_root(block) == candidate.previous_block_root),
None)
signing_root(block) == candidate.parent_root)
, None)
assert parent_block != None
assert parent_block.shard == block.shard
assert parent_block.slot < block.slot
@ -314,7 +314,7 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock],
for _, attestation in enumerate(block.attestations):
assert max(GENESIS_SHARD_SLOT, block.slot - SLOTS_PER_EPOCH) <= attestation.data.slot
assert attestation.data.slot <= block.slot - MIN_ATTESTATION_INCLUSION_DELAY
assert attestation.data.shard == block.shard
assert attestation.data.crosslink.shard == block.shard
verify_shard_attestation_signature(beacon_state, attestation)
# Check signature
@ -345,11 +345,11 @@ def is_valid_shard_attestation(valid_shard_blocks: List[ShardBlock],
# Check shard block
shard_block = next(
(block for block in valid_shard_blocks if
signing_root(block) == candidate.attestation.data.shard_block_root),
None)
signing_root(block) == candidate.attestation.data.crosslink.shard_block_root)
, None)
assert shard_block != None
assert shard_block.slot == attestation.data.slot
assert shard_block.shard == attestation.data.shard
assert shard_block.shard == attestation.data.crosslink.shard
# Check signature
verify_shard_attestation_signature(beacon_state, attestation)
@ -380,22 +380,22 @@ def is_valid_beacon_attestation(shard: Shard,
# Check previous attestation
if candidate.data.previous_crosslink.epoch <= PHASE_1_GENESIS_EPOCH:
assert candidate.data.previous_crosslink.crosslink_data_root == ZERO_HASH
assert candidate.data.previous_crosslink.data_root == ZERO_HASH
else:
previous_attestation = next(
(attestation for attestation in valid_attestations if
attestation.data.crosslink_data_root == candidate.data.previous_crosslink.crosslink_data_root),
None)
attestation.data.crosslink.data_root == candidate.data.previous_crosslink.data_root)
, None)
assert previous_attestation != None
assert candidate.data.previous_attestation.epoch < slot_to_epoch(candidate.data.slot)
# Check crosslink data root
start_epoch = state.latest_crosslinks[shard].epoch
end_epoch = min(slot_to_epoch(candidate.data.slot) - CROSSLINK_LOOKBACK, start_epoch + MAX_CROSSLINK_EPOCHS)
end_epoch = min(slot_to_epoch(candidate.data.slot) - CROSSLINK_LOOKBACK, start_epoch + MAX_EPOCHS_PER_CROSSLINK)
blocks = []
for slot in range(start_epoch * SLOTS_PER_EPOCH, end_epoch * SLOTS_PER_EPOCH):
blocks.append(shard_blocks[slot])
assert candidate.data.crosslink_data_root == compute_crosslink_data_root(blocks)
assert candidate.data.crosslink.data_root == compute_crosslink_data_root(blocks)
return True
```

@ -146,7 +146,7 @@ def compute_committee(header: BeaconBlockHeader,
]
def get_switchover_epoch(index):
return (
bytes_to_int(hash(validator_memory.earlier_period_data.seed + int_to_bytes3(index))[0:8]) %
bytes_to_int(hash(validator_memory.earlier_period_data.seed + int_to_bytes(index, length=3))[0:8]) %
PERSISTENT_COMMITTEE_PERIOD
)

@ -16,7 +16,7 @@
- [Serialization](#serialization)
- [`"uintN"`](#uintn)
- [`"bool"`](#bool)
- [Containers, vectors, lists](#containers-vectors-lists)
- [Vectors, containers, lists, unions](#vectors-containers-lists-unions)
- [Deserialization](#deserialization)
- [Merkleization](#merkleization)
- [Self-signed containers](#self-signed-containers)
@ -46,8 +46,12 @@
* angle bracket notation `[type, N]`, e.g. `["uint64", N]`
* **list**: ordered variable-length homogeneous collection of values
* angle bracket notation `[type]`, e.g. `["uint64"]`
* **union**: union type containing one of the given subtypes
* round bracket notation `(type_1, type_2, ...)`, e.g. `("uint64", "null")`
We recursively define "variable-size" types to be lists and all types that contains a variable-size type. All other types are said to be "fixed-size".
### Variable-size and fixed-size
We recursively define "variable-size" types to be lists and unions and all types that contain a variable-size type. All other types are said to be "fixed-size".
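For example, `"uint64"` and `["uint64", 4]` are fixed-size, while `["uint64"]`, any union, and any container or vector that contains a variable-size field are themselves variable-size.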
### Aliases
@ -56,10 +60,15 @@ For convenience we alias:
* `"byte"` to `"uint8"` (this is a basic type)
* `"bytes"` to `["byte"]` (this is *not* a basic type)
* `"bytesN"` to `["byte", N]` (this is *not* a basic type)
* `"null"`: `{}`, i.e. the empty container
### Default values
The default value of a type upon initialization is recursively defined using `0` for `"uintN"`, `False` for `"bool"`, and `[]` for lists.
The default value of a type upon initialization is recursively defined using `0` for `"uintN"`, `False` for `"bool"`, and `[]` for lists. Unions default to the first type in the union (with type index zero), which is `"null"` if present in the union.
### Illegal types
Empty vector types (i.e. `[subtype, 0]` for some `subtype`) are not legal. The `"null"` type is only legal as the first type in a union subtype (i.e., with type index zero).
## Serialization
@ -67,7 +76,6 @@ We recursively define the `serialize` function which consumes an object `value`
> *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signing_root`, `is_variable_size`, etc.) objects implicitly carry their type.
### `"uintN"`
```python
@ -82,7 +90,13 @@ assert value in (True, False)
return b"\x01" if value is True else b"\x00"
```
### Containers, vectors, lists
### `"null"`
```python
return b""
```
### Vectors, containers, lists, unions
```python
# Recursively serialize
@ -102,6 +116,16 @@ fixed_parts = [part if part != None else variable_offsets[i] for i, part in enum
return b"".join(fixed_parts + variable_parts)
```
If `value` is a union type:
Define `value` as an object with properties `value.value`, holding the contained value, and `value.type_index`, holding the index of its type within the union.
```python
serialized_bytes = serialize(value.value)
serialized_type_index = value.type_index.to_bytes(BYTES_PER_LENGTH_OFFSET, "little")
return serialized_type_index + serialized_bytes
```
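For example, a standalone, non-normative sketch of this rule for a hypothetical `("null", "uint64")` union, assuming `BYTES_PER_LENGTH_OFFSET = 4` as used for offsets above:

```python
BYTES_PER_LENGTH_OFFSET = 4  # assumed offset size

class UnionValue:
    # Minimal stand-in for a union-typed object: type_index selects the subtype
    def __init__(self, type_index: int, value):
        self.type_index = type_index
        self.value = value

def serialize_null_or_uint64_union(value: UnionValue) -> bytes:
    # Subtype 0 is "null" (serializes to b""), subtype 1 is "uint64"
    serialized_bytes = b"" if value.type_index == 0 else value.value.to_bytes(8, "little")
    serialized_type_index = value.type_index.to_bytes(BYTES_PER_LENGTH_OFFSET, "little")
    return serialized_type_index + serialized_bytes

assert serialize_null_or_uint64_union(UnionValue(0, None)) == b"\x00\x00\x00\x00"
assert serialize_null_or_uint64_union(UnionValue(1, 7)) == b"\x01\x00\x00\x00" + (7).to_bytes(8, "little")
```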
## Deserialization
Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. Efficient algorithms for computing this object can be found in [the implementations](#implementations).
@ -111,8 +135,9 @@ Because serialization is an injective function (i.e. two distinct objects of the
We first define helper functions:
* `pack`: Given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks.
* `merkleize`: Given ordered `BYTES_PER_CHUNK`-byte chunks, if necessary append zero chunks so that the number of chunks is a power of two, Merkleize the chunks, and return the root.
* `merkleize`: Given ordered `BYTES_PER_CHUNK`-byte chunks, if necessary append zero chunks so that the number of chunks is a power of two, Merkleize the chunks, and return the root. Note that `merkleize` on a single chunk is simply that chunk, i.e. the identity when the number of chunks is one.
* `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` little-endian serialization) return `hash(root + length)`.
* `mix_in_type`: Given a Merkle root `root` and a type_index `type_index` (`"uint256"` little-endian serialization) return `hash(root + type_index)`.
We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:
@ -120,6 +145,7 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi
* `mix_in_length(merkleize(pack(value)), len(value))` if `value` is a list of basic objects
* `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container
* `mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value))` if `value` is a list of composite objects
* `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type
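A condensed, non-normative sketch of these helpers, assuming `BYTES_PER_CHUNK = 32` and SHA-256 as the hash function (both assumptions here), with `pack` simplified to operate on already-serialized bytes:

```python
from hashlib import sha256

BYTES_PER_CHUNK = 32  # assumed chunk size

def hash_fn(data: bytes) -> bytes:
    return sha256(data).digest()

def pack_bytes(serialized: bytes) -> list:
    # Right-pad the last chunk with zero bytes and split into BYTES_PER_CHUNK-byte chunks
    if len(serialized) == 0:
        return [b"\x00" * BYTES_PER_CHUNK]
    padded = serialized + b"\x00" * (-len(serialized) % BYTES_PER_CHUNK)
    return [padded[i:i + BYTES_PER_CHUNK] for i in range(0, len(padded), BYTES_PER_CHUNK)]

def merkleize(chunks: list) -> bytes:
    # Pad the chunk count to a power of two with zero chunks, then hash pairwise;
    # a single chunk is simply its own root
    count = 1
    while count < len(chunks):
        count *= 2
    chunks = chunks + [b"\x00" * BYTES_PER_CHUNK] * (count - len(chunks))
    while len(chunks) > 1:
        chunks = [hash_fn(chunks[i] + chunks[i + 1]) for i in range(0, len(chunks), 2)]
    return chunks[0]

def mix_in_length(root: bytes, length: int) -> bytes:
    return hash_fn(root + length.to_bytes(32, "little"))

def mix_in_type(root: bytes, type_index: int) -> bytes:
    return hash_fn(root + type_index.to_bytes(32, "little"))
```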
## Self-signed containers

@ -37,14 +37,9 @@
- [Voluntary exits](#voluntary-exits)
- [Attestations](#attestations-1)
- [Attestation data](#attestation-data)
- [Slot](#slot-1)
- [Beacon block root](#beacon-block-root)
- [Source epoch](#source-epoch)
- [Source root](#source-root)
- [Target root](#target-root)
- [Shard](#shard)
- [Previous crosslink root](#previous-crosslink-root)
- [Crosslink data root](#crosslink-data-root)
- [LMD GHOST vote](#lmd-ghost-vote)
- [FFG vote](#ffg-vote)
- [Crosslink vote](#crosslink-vote)
- [Construct attestation](#construct-attestation)
- [Data](#data)
- [Aggregation bitfield](#aggregation-bitfield)
@ -152,7 +147,7 @@ Set `block.slot = slot` where `slot` is the current slot at which the validator
##### Parent root
Set `block.previous_block_root = signing_root(parent)`.
Set `block.parent_root = signing_root(parent)`.
##### State root
@ -245,43 +240,29 @@ First the validator should construct `attestation_data`, an [`AttestationData`](
* Let `head_block` be the result of running the fork choice during the assigned slot.
* Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot.
##### Slot
Set `attestation_data.slot = head_state.slot`.
##### Beacon block root
##### LMD GHOST vote
Set `attestation_data.beacon_block_root = signing_root(head_block)`.
##### Source epoch
##### FFG vote
Set `attestation_data.source_epoch = head_state.justified_epoch`.
* Set `attestation_data.source_epoch = head_state.justified_epoch`.
* Set `attestation_data.source_root = head_state.current_justified_root`.
* Set `attestation_data.target_epoch = get_current_epoch(head_state)`
* Set `attestation_data.target_root = signing_root(epoch_boundary_block)` where `epoch_boundary_block` is the block at the most recent epoch boundary.
##### Source root
Set `attestation_data.source_root = head_state.current_justified_root`.
##### Target root
Set `attestation_data.target_root = signing_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary.
*Note*: This can be looked up in the state using:
*Note*: `epoch_boundary_block` can be looked up in the state using:
* Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`.
* Set `epoch_boundary = head if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`.
* Let `epoch_boundary_block = head if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`.
##### Shard
##### Crosslink vote
Set `attestation_data.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`.
Construct `attestation_data.crosslink` via the following
##### Previous crosslink root
Set `attestation_data.previous_crosslink_root = hash_tree_root(head_state.current_crosslinks[shard])`.
##### Crosslink data root
Set `attestation_data.crosslink_data_root = ZERO_HASH`.
*Note*: This is a stub for Phase 0.
* Set `attestation_data.crosslink.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`.
* Set `attestation_data.crosslink.epoch = min(attestation_data.target_epoch, head_state.current_crosslinks[shard].epoch + MAX_EPOCHS_PER_CROSSLINK)`.
* Set `attestation_data.crosslink.parent_root = hash_tree_root(head_state.current_crosslinks[shard])`.
* Set `attestation_data.crosslink.data_root = ZERO_HASH`. *Note*: This is a stub for Phase 0.
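Putting the steps above together, a hedged end-to-end sketch (the helper name `construct_attestation_data` is hypothetical; the containers, helpers, and state fields are assumed to be those of the Phase 0 spec):

```python
def construct_attestation_data(head_state: BeaconState, head_block: BeaconBlock, shard: Shard) -> AttestationData:
    # FFG target root: the block at the most recent epoch boundary
    epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))
    target_root = (
        signing_root(head_block) if epoch_start_slot == head_state.slot
        else get_block_root(head_state, epoch_start_slot)
    )
    # Crosslink vote builds on the shard's current crosslink
    parent_crosslink = head_state.current_crosslinks[shard]
    return AttestationData(
        # LMD GHOST vote
        beacon_block_root=signing_root(head_block),
        # FFG vote
        source_epoch=head_state.current_justified_epoch,
        source_root=head_state.current_justified_root,
        target_epoch=get_current_epoch(head_state),
        target_root=target_root,
        # Crosslink vote
        crosslink=Crosslink(
            shard=shard,
            epoch=min(get_current_epoch(head_state), parent_crosslink.epoch + MAX_EPOCHS_PER_CROSSLINK),
            parent_root=hash_tree_root(parent_crosslink),
            data_root=ZERO_HASH,  # stub for Phase 0
        ),
    )
```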
#### Construct attestation

@ -15,7 +15,7 @@ def shuffling_case(seed: spec.Bytes32, count: int):
@to_tuple
def shuffling_test_cases():
for seed in [spec.hash(spec.int_to_bytes4(seed_init_value)) for seed_init_value in range(30)]:
for seed in [spec.hash(spec.int_to_bytes(seed_init_value, length=4)) for seed_init_value in range(30)]:
for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000]:
yield shuffling_case(seed, count)

View File

@ -113,7 +113,7 @@ def test_non_zero_crosslink_data_root(state):
attestation = get_valid_attestation(state)
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
attestation.data.crosslink_data_root = b'\x42' * 32
attestation.data.crosslink.data_root = b'\x42' * 32
pre_state, post_state = run_attestation_processing(state, attestation, False)
@ -126,7 +126,7 @@ def test_bad_previous_crosslink(state):
for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
next_slot(state)
state.current_crosslinks[attestation.data.shard].epoch += 10
state.current_crosslinks[attestation.data.crosslink.shard].epoch += 10
pre_state, post_state = run_attestation_processing(state, attestation, False)
@ -150,6 +150,6 @@ def test_empty_aggregation_bitfield(state):
attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield)
pre_state, post_state = run_attestation_processing(state, attestation, False)
pre_state, post_state = run_attestation_processing(state, attestation)
return pre_state, attestation, post_state

@ -53,9 +53,9 @@ def test_invalid_slot(state):
return pre_state, block, None
def test_invalid_previous_block_root(state):
def test_invalid_parent_block_root(state):
block = build_empty_block_for_next_slot(state)
block.previous_block_root = b'\12' * 32 # invalid prev root
block.parent_root = b'\12' * 32 # invalid prev root
pre_state, post_state = run_block_header_processing(state, block, valid=False)
return pre_state, block, None

@ -64,7 +64,7 @@ def test_single_crosslink_update_from_current_epoch(state):
pre_state, post_state = run_process_crosslinks(state)
shard = attestation.data.shard
shard = attestation.data.crosslink.shard
assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard]
assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard]
@ -84,11 +84,11 @@ def test_single_crosslink_update_from_previous_epoch(state):
pre_state, post_state = run_process_crosslinks(state)
crosslink_deltas = get_crosslink_deltas(state)
shard = attestation.data.shard
shard = attestation.data.crosslink.shard
assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard]
assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard]
# ensure rewarded
for index in get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.shard):
for index in get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.crosslink.shard):
assert crosslink_deltas[0][index] > 0
assert crosslink_deltas[1][index] == 0
@ -108,7 +108,7 @@ def test_double_late_crosslink(state):
for slot in range(spec.SLOTS_PER_EPOCH):
attestation_2 = get_valid_attestation(state)
if attestation_2.data.shard == attestation_1.data.shard:
if attestation_2.data.crosslink.shard == attestation_1.data.crosslink.shard:
break
next_slot(state)
fill_aggregate_attestation(state, attestation_2)
@ -124,12 +124,12 @@ def test_double_late_crosslink(state):
pre_state, post_state = run_process_crosslinks(state)
crosslink_deltas = get_crosslink_deltas(state)
shard = attestation_2.data.shard
shard = attestation_2.data.crosslink.shard
# ensure that the current crosslinks were not updated by the second attestation
assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard]
# ensure no reward, only penalties for the failed crosslink
for index in get_crosslink_committee(state, attestation_2.data.target_epoch, attestation_2.data.shard):
for index in get_crosslink_committee(state, attestation_2.data.target_epoch, attestation_2.data.crosslink.shard):
assert crosslink_deltas[0][index] == 0
assert crosslink_deltas[1][index] > 0

@ -10,6 +10,7 @@ from eth2spec.utils.minimal_ssz import signing_root
from eth2spec.phase1.spec import (
# constants
ZERO_HASH,
MAX_EPOCHS_PER_CROSSLINK,
# SSZ
Attestation,
AttestationData,
@ -17,6 +18,7 @@ from eth2spec.phase1.spec import (
AttesterSlashing,
BeaconBlock,
BeaconBlockHeader,
Crosslink,
Deposit,
DepositData,
Eth1Data,
@ -126,7 +128,7 @@ def build_empty_block_for_next_slot(state):
previous_block_header = deepcopy(state.latest_block_header)
if previous_block_header.state_root == spec.ZERO_HASH:
previous_block_header.state_root = state.hash_tree_root()
empty_block.previous_block_root = signing_root(previous_block_header)
empty_block.parent_root = signing_root(previous_block_header)
return empty_block
@ -153,7 +155,7 @@ def build_attestation_data(state, slot, shard):
assert state.slot >= slot
if slot == state.slot:
block_root = build_empty_block_for_next_slot(state).previous_block_root
block_root = build_empty_block_for_next_slot(state).parent_root
else:
block_root = get_block_root_at_slot(state, slot)
@ -174,14 +176,17 @@ def build_attestation_data(state, slot, shard):
crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks
return AttestationData(
shard=shard,
beacon_block_root=block_root,
source_epoch=justified_epoch,
source_root=justified_block_root,
target_epoch=slot_to_epoch(slot),
target_root=epoch_boundary_root,
crosslink_data_root=spec.ZERO_HASH,
previous_crosslink_root=hash_tree_root(crosslinks[shard]),
crosslink=Crosslink(
shard=shard,
epoch=min(slot_to_epoch(slot), crosslinks[shard].epoch + MAX_EPOCHS_PER_CROSSLINK),
data_root=spec.ZERO_HASH,
parent_root=hash_tree_root(crosslinks[shard]),
),
)
@ -235,12 +240,12 @@ def get_valid_proposer_slashing(state):
header_1 = BeaconBlockHeader(
slot=slot,
previous_block_root=ZERO_HASH,
parent_root=ZERO_HASH,
state_root=ZERO_HASH,
block_body_root=ZERO_HASH,
body_root=ZERO_HASH,
)
header_2 = deepcopy(header_1)
header_2.previous_block_root = b'\x02' * 32
header_2.parent_root = b'\x02' * 32
header_2.slot = slot + 1
domain = get_domain(
@ -288,7 +293,7 @@ def get_valid_attestation(state, slot=None):
attestation_data = build_attestation_data(state, slot, shard)
crosslink_committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.shard)
crosslink_committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.crosslink.shard)
committee_size = len(crosslink_committee)
bitfield_length = (committee_size + 7) // 8
@ -381,7 +386,7 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0)
def fill_aggregate_attestation(state, attestation):
crosslink_committee = get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.shard)
crosslink_committee = get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.crosslink.shard)
for i in range(len(crosslink_committee)):
attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i)

@ -68,7 +68,7 @@ def test_empty_block_transition(state):
state_transition(test_state, block)
assert len(test_state.eth1_data_votes) == len(state.eth1_data_votes) + 1
assert get_block_root_at_slot(test_state, state.slot) == block.previous_block_root
assert get_block_root_at_slot(test_state, state.slot) == block.parent_root
return state, [block], test_state
@ -82,7 +82,7 @@ def test_skipped_slots(state):
assert test_state.slot == block.slot
for slot in range(state.slot, test_state.slot):
assert get_block_root_at_slot(test_state, slot) == block.previous_block_root
assert get_block_root_at_slot(test_state, slot) == block.parent_root
return state, [block], test_state
@ -96,7 +96,7 @@ def test_empty_epoch_transition(state):
assert test_state.slot == block.slot
for slot in range(state.slot, test_state.slot):
assert get_block_root_at_slot(test_state, slot) == block.previous_block_root
assert get_block_root_at_slot(test_state, slot) == block.parent_root
return state, [block], test_state