Merge branch 'dev' into patch-1

Commit: 9d175c9dc9
@@ -55,6 +55,7 @@
 - [Helper functions](#helper-functions)
     - [`hash`](#hash)
     - [`hash_tree_root`](#hash_tree_root)
+    - [`signed_root`](#signed_root)
    - [`slot_to_epoch`](#slot_to_epoch)
    - [`get_previous_epoch`](#get_previous_epoch)
    - [`get_current_epoch`](#get_current_epoch)
@@ -75,6 +76,7 @@
    - [`generate_seed`](#generate_seed)
    - [`get_beacon_proposer_index`](#get_beacon_proposer_index)
    - [`merkle_root`](#merkle_root)
+    - [`verify_merkle_branch`](#verify_merkle_branch)
    - [`get_attestation_participants`](#get_attestation_participants)
    - [`is_power_of_two`](#is_power_of_two)
    - [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-)
@@ -385,7 +387,7 @@ The following data structures are defined as [SimpleSerialize (SSZ)](https://git
 ```python
 {
     # Branch in the deposit tree
-    'branch': ['bytes32'],
+    'proof': ['bytes32'],
     # Index in the deposit tree
     'index': 'uint64',
     # Data
@@ -876,19 +878,22 @@ def get_crosslink_committees_at_slot(state: BeaconState,
         shuffling_epoch = state.previous_shuffling_epoch
         shuffling_start_shard = state.previous_shuffling_start_shard
     elif epoch == next_epoch:
-        current_committees_per_epoch = get_current_epoch_committee_count(state)
-        committees_per_epoch = get_next_epoch_committee_count(state)
-        shuffling_epoch = next_epoch
 
         epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch
         if registry_change:
+            committees_per_epoch = get_next_epoch_committee_count(state)
             seed = generate_seed(state, next_epoch)
+            shuffling_epoch = next_epoch
+            current_committees_per_epoch = get_current_epoch_committee_count(state)
             shuffling_start_shard = (state.current_shuffling_start_shard + current_committees_per_epoch) % SHARD_COUNT
         elif epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update):
+            committees_per_epoch = get_next_epoch_committee_count(state)
             seed = generate_seed(state, next_epoch)
+            shuffling_epoch = next_epoch
             shuffling_start_shard = state.current_shuffling_start_shard
         else:
+            committees_per_epoch = get_current_epoch_committee_count(state)
             seed = state.current_shuffling_seed
+            shuffling_epoch = state.current_shuffling_epoch
             shuffling_start_shard = state.current_shuffling_start_shard
 
     shuffling = get_shuffling(
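A note on the `elif epochs_since_last_registry_update > 1 and is_power_of_two(...)` branch in the hunk above: absent a registry change, committees are reshuffled only on an exponential-backoff schedule. A minimal sketch, assuming the usual bit-trick form of the phase 0 `is_power_of_two` helper:

```python
def is_power_of_two(value: int) -> bool:
    # True for 1, 2, 4, 8, ...; False for 0 and non-powers of two
    return value > 0 and (value & (value - 1)) == 0

# With no registry change, a reshuffle happens only when the number of epochs
# since the last registry update is 2, 4, 8, 16, ... (the `> 1` test excludes 1).
assert [n for n in range(40) if n > 1 and is_power_of_two(n)] == [2, 4, 8, 16, 32]
```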
@@ -967,11 +972,19 @@ def generate_seed(state: BeaconState,
 
 ```python
 def get_beacon_proposer_index(state: BeaconState,
-                              slot: Slot) -> ValidatorIndex:
+                              slot: Slot,
+                              registry_change: bool=False) -> ValidatorIndex:
     """
     Return the beacon proposer index for the ``slot``.
     """
-    first_committee, _ = get_crosslink_committees_at_slot(state, slot)[0]
+    epoch = slot_to_epoch(slot)
+    current_epoch = get_current_epoch(state)
+    previous_epoch = get_previous_epoch(state)
+    next_epoch = current_epoch + 1
+
+    assert previous_epoch <= epoch <= next_epoch
+
+    first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0]
     return first_committee[slot % len(first_committee)]
 ```
 
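A usage note on the new `registry_change` parameter (illustrative, not part of the diff): when looking up the proposer for a next-epoch slot, the answer can depend on whether a validator registry update is assumed to occur, since a registry change reshuffles committees.

```python
# `state` and `slot` are assumed in scope, with `slot` in the epoch after
# get_current_epoch(state); the two results may legitimately differ.
proposer_no_update = get_beacon_proposer_index(state, slot)
proposer_with_update = get_beacon_proposer_index(state, slot, registry_change=True)
```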
@@ -989,6 +1002,23 @@ def merkle_root(values: List[Bytes32]) -> Bytes32:
     return o[1]
 ```
 
+### `verify_merkle_branch`
+
+```python
+def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: int, root: Bytes32) -> bool:
+    """
+    Verify that the given ``leaf`` is on the merkle branch ``proof``
+    starting with the given ``root``.
+    """
+    value = leaf
+    for i in range(depth):
+        if index // (2**i) % 2:
+            value = hash(proof[i] + value)
+        else:
+            value = hash(value + proof[i])
+    return value == root
+```
+
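A self-contained round trip may help when implementing the helper added above; this sketch builds a depth-2 tree, with SHA-256 standing in for the spec's `hash` (an assumption for illustration only):

```python
import hashlib

def hash(data: bytes) -> bytes:  # stand-in for the spec's hash function
    return hashlib.sha256(data).digest()

def verify_merkle_branch(leaf, proof, depth, index, root):
    value = leaf
    for i in range(depth):
        if index // (2**i) % 2:
            value = hash(proof[i] + value)
        else:
            value = hash(value + proof[i])
    return value == root

leaves = [hash(bytes([i])) for i in range(4)]
l01, l23 = hash(leaves[0] + leaves[1]), hash(leaves[2] + leaves[3])
root = hash(l01 + l23)
# The branch for leaf 2 is its sibling (leaf 3), then the neighbouring subtree root.
assert verify_merkle_branch(leaves[2], [leaves[3], l01], depth=2, index=2, root=root)
```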
 ### `get_attestation_participants`
 
 ```python
@@ -1051,7 +1081,7 @@ def get_effective_balance(state: State, index: ValidatorIndex) -> Gwei:
 ```python
 def get_total_balance(state: BeaconState, validators: List[ValidatorIndex]) -> Gwei:
     """
-    Return the combined effective balance of an array of validators.
+    Return the combined effective balance of an array of ``validators``.
     """
     return sum([get_effective_balance(state, i) for i in validators])
 ```
@@ -1235,6 +1265,31 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
     """
     deposit_input = deposit.deposit_data.deposit_input
 
+    # Should equal 8 bytes for deposit_data.amount +
+    # 8 bytes for deposit_data.timestamp +
+    # 176 bytes for deposit_data.deposit_input
+    # It should match the deposit_data in the eth1.0 deposit contract
+    serialized_deposit_data = serialize(deposit.deposit_data)
+    # Deposits must be processed in order
+    assert deposit.index == state.deposit_index
+
+    # Verify the Merkle branch
+    merkle_branch_is_valid = verify_merkle_branch(
+        leaf=hash(serialized_deposit_data),
+        proof=deposit.proof,
+        depth=DEPOSIT_CONTRACT_TREE_DEPTH,
+        index=deposit.index,
+        root=state.latest_eth1_data.deposit_root,
+    )
+    assert merkle_branch_is_valid
+
+    # Increment the next deposit index we are expecting. Note that this
+    # needs to be done here because while the deposit contract will never
+    # create an invalid Merkle branch, it may admit an invalid deposit
+    # object, and we need to be able to skip over it
+    state.deposit_index += 1
+
+    # Verify the proof of possession
     proof_is_valid = bls_verify(
         pubkey=deposit_input.pubkey,
         message_hash=signed_root(deposit_input, "proof_of_possession"),
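A note on the ordering rule added in the hunk above: because `process_deposit` both asserts `deposit.index == state.deposit_index` and then increments the counter, the deposits in a block must carry consecutive indices. A hypothetical client-side pre-check (not spec code):

```python
def deposits_are_consecutive(state_deposit_index: int, deposit_indices: list) -> bool:
    expected = state_deposit_index
    for index in deposit_indices:
        if index != expected:
            return False
        expected += 1
    return True

assert deposits_are_consecutive(7, [7, 8, 9])
assert not deposits_are_consecutive(7, [7, 9])
```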
@@ -1315,12 +1370,13 @@ def exit_validator(state: BeaconState, index: ValidatorIndex) -> None:
     Note that this function mutates ``state``.
     """
     validator = state.validator_registry[index]
+    delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
 
     # The following updates only occur if not previously exited
-    if validator.exit_epoch <= get_delayed_activation_exit_epoch(get_current_epoch(state)):
+    if validator.exit_epoch <= delayed_activation_exit_epoch:
         return
-
-    validator.exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
+    else:
+        validator.exit_epoch = delayed_activation_exit_epoch
 ```
 
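For reference while reading this refactor, a sketch of the helper being cached; this matches the phase 0 definition as far as this editor knows (an exit or activation initiated in `epoch` takes effect only after a delay):

```python
def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch:
    # Changes initiated in `epoch` take effect no earlier than this epoch
    return epoch + 1 + ACTIVATION_EXIT_DELAY
```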
 #### `slash_validator`
@@ -1449,7 +1505,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit],
         validator_registry_update_epoch=GENESIS_EPOCH,
 
         # Randomness and committees
-        latest_randao_mixes=[EMPTY_SIGNATURE for _ in range(LATEST_RANDAO_MIXES_LENGTH)],
+        latest_randao_mixes=[ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)],
         previous_shuffling_start_shard=GENESIS_START_SHARD,
         current_shuffling_start_shard=GENESIS_START_SHARD,
         previous_shuffling_epoch=GENESIS_EPOCH,
@@ -1474,7 +1530,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit],
         # Ethereum 1.0 chain data
         latest_eth1_data=latest_eth1_data,
         eth1_data_votes=[],
-        deposit_index=len(genesis_validator_deposits)
+        deposit_index=0,
     )
 
     # Process genesis deposits
@@ -1627,59 +1683,123 @@ Below are the processing steps that happen at every `block`.
 
 Verify that `len(block.body.proposer_slashings) <= MAX_PROPOSER_SLASHINGS`.
 
-For each `proposer_slashing` in `block.body.proposer_slashings`:
+For each `proposer_slashing` in `block.body.proposer_slashings`, run the following function:
 
-* Let `proposer = state.validator_registry[proposer_slashing.proposer_index]`.
-* Verify that `proposer_slashing.proposal_1.slot == proposer_slashing.proposal_2.slot`.
-* Verify that `proposer_slashing.proposal_1.shard == proposer_slashing.proposal_2.shard`.
-* Verify that `proposer_slashing.proposal_1.block_root != proposer_slashing.proposal_2.block_root`.
-* Verify that `proposer.slashed == False`.
-* Verify that `bls_verify(pubkey=proposer.pubkey, message_hash=signed_root(proposer_slashing.proposal_1, "signature"), signature=proposer_slashing.proposal_1.signature, domain=get_domain(state.fork, slot_to_epoch(proposer_slashing.proposal_1.slot), DOMAIN_PROPOSAL))`.
-* Verify that `bls_verify(pubkey=proposer.pubkey, message_hash=signed_root(proposer_slashing.proposal_2, "signature"), signature=proposer_slashing.proposal_2.signature, domain=get_domain(state.fork, slot_to_epoch(proposer_slashing.proposal_2.slot), DOMAIN_PROPOSAL))`.
-* Run `slash_validator(state, proposer_slashing.proposer_index)`.
+```python
+def process_proposer_slashing(state: BeaconState,
+                              proposer_slashing: ProposerSlashing) -> None:
+    """
+    Process ``ProposerSlashing`` transaction.
+    Note that this function mutates ``state``.
+    """
+    proposer = state.validator_registry[proposer_slashing.proposer_index]
+    # Verify that the slot is the same
+    assert proposer_slashing.proposal_1.slot == proposer_slashing.proposal_2.slot
+    # Verify that the shard is the same (or that both proposals are beacon chain proposals)
+    assert proposer_slashing.proposal_1.shard == proposer_slashing.proposal_2.shard
+    # But the roots are different!
+    assert proposer_slashing.proposal_1.block_root != proposer_slashing.proposal_2.block_root
+    # Proposer is not yet slashed
+    assert proposer.slashed is False
+    # Signatures are valid
+    for proposal in (proposer_slashing.proposal_1, proposer_slashing.proposal_2):
+        assert bls_verify(
+            pubkey=proposer.pubkey,
+            message_hash=signed_root(proposal, "signature"),
+            signature=proposal.signature,
+            domain=get_domain(state.fork, slot_to_epoch(proposal.slot), DOMAIN_PROPOSAL)
+        )
+    slash_validator(state, proposer_slashing.proposer_index)
+```
 
 ##### Attester slashings
 
 Verify that `len(block.body.attester_slashings) <= MAX_ATTESTER_SLASHINGS`.
 
-For each `attester_slashing` in `block.body.attester_slashings`:
+For each `attester_slashing` in `block.body.attester_slashings`, run the following function:
 
-* Let `slashable_attestation_1 = attester_slashing.slashable_attestation_1`.
-* Let `slashable_attestation_2 = attester_slashing.slashable_attestation_2`.
-* Verify that `slashable_attestation_1.data != slashable_attestation_2.data`.
-* Verify that `is_double_vote(slashable_attestation_1.data, slashable_attestation_2.data)` or `is_surround_vote(slashable_attestation_1.data, slashable_attestation_2.data)`.
-* Verify that `verify_slashable_attestation(state, slashable_attestation_1)`.
-* Verify that `verify_slashable_attestation(state, slashable_attestation_2)`.
-* Let `slashable_indices = [index for index in slashable_attestation_1.validator_indices if index in slashable_attestation_2.validator_indices and state.validator_registry[index].slashed == False]`.
-* Verify that `len(slashable_indices) >= 1`.
-* Run `slash_validator(state, index)` for each `index` in `slashable_indices`.
+```python
+def process_attester_slashing(state: BeaconState,
+                              attester_slashing: AttesterSlashing) -> None:
+    """
+    Process ``AttesterSlashing`` transaction.
+    Note that this function mutates ``state``.
+    """
+    attestation1 = attester_slashing.slashable_attestation_1
+    attestation2 = attester_slashing.slashable_attestation_2
+    # Check that the attestations are conflicting
+    assert attestation1.data != attestation2.data
+    assert (
+        is_double_vote(attestation1.data, attestation2.data) or
+        is_surround_vote(attestation1.data, attestation2.data)
+    )
+    assert verify_slashable_attestation(state, attestation1)
+    assert verify_slashable_attestation(state, attestation2)
+    slashable_indices = [
+        index for index in attestation1.validator_indices
+        if (
+            index in attestation2.validator_indices and
            state.validator_registry[index].slashed is False
+        )
+    ]
+    assert len(slashable_indices) >= 1
+    for index in slashable_indices:
+        slash_validator(state, index)
+```
 
 ##### Attestations
 
 Verify that `len(block.body.attestations) <= MAX_ATTESTATIONS`.
 
-For each `attestation` in `block.body.attestations`:
+For each `attestation` in `block.body.attestations`, run the following function:
 
-* Verify that `attestation.data.slot >= GENESIS_SLOT`.
-* Verify that `attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot`.
-* Verify that `state.slot < attestation.data.slot + SLOTS_PER_EPOCH`.
-* Verify that `attestation.data.justified_epoch` is equal to `state.justified_epoch if slot_to_epoch(attestation.data.slot + 1) >= get_current_epoch(state) else state.previous_justified_epoch`.
-* Verify that `attestation.data.justified_block_root` is equal to `get_block_root(state, get_epoch_start_slot(attestation.data.justified_epoch))`.
-* Verify that either (i) `state.latest_crosslinks[attestation.data.shard] == attestation.data.latest_crosslink` or (ii) `state.latest_crosslinks[attestation.data.shard] == Crosslink(crosslink_data_root=attestation.data.crosslink_data_root, epoch=slot_to_epoch(attestation.data.slot))`.
-* Verify bitfields and aggregate signature:
-
 ```python
-assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield)  # [TO BE REMOVED IN PHASE 1]
+def process_attestation(state: BeaconState, attestation: Attestation) -> None:
+    """
+    Process ``Attestation`` transaction.
+    Note that this function mutates ``state``.
+    """
+    # Can't submit attestations that are too far in history (or in prehistory)
+    assert attestation.data.slot >= GENESIS_SLOT
+    assert state.slot < attestation.data.slot + SLOTS_PER_EPOCH
+    # Can't submit attestations too quickly
+    assert attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot
+    # Verify that the justified epoch is correct, case 1: current epoch attestations
+    if slot_to_epoch(attestation.data.slot + 1) >= get_current_epoch(state):
+        assert attestation.data.justified_epoch == state.justified_epoch
+    # Case 2: previous epoch attestations
+    else:
+        assert attestation.data.justified_epoch == state.previous_justified_epoch
+    # Check that the justified block root is correct
+    assert attestation.data.justified_block_root == get_block_root(
+        state, get_epoch_start_slot(attestation.data.justified_epoch)
+    )
+    # Check that the crosslink data is valid
+    acceptable_crosslink_data = {
+        # Case 1: Latest crosslink matches the one in the state
+        attestation.data.latest_crosslink,
+        # Case 2: State has already been updated, state's latest crosslink matches the crosslink
+        # the attestation is trying to create
+        Crosslink(
+            crosslink_data_root=attestation.data.crosslink_data_root,
+            epoch=slot_to_epoch(attestation.data.slot)
+        )
+    }
+    assert state.latest_crosslinks[attestation.data.shard] in acceptable_crosslink_data
+    # Attestation must be nonempty!
     assert attestation.aggregation_bitfield != b'\x00' * len(attestation.aggregation_bitfield)
+    # Custody must be empty (to be removed in phase 1)
+    assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield)
+    # Get the committee for the specific shard that this attestation is for
     crosslink_committee = [
         committee for committee, shard in get_crosslink_committees_at_slot(state, attestation.data.slot)
         if shard == attestation.data.shard
     ][0]
+    # Custody bitfield must be a subset of the attestation bitfield
     for i in range(len(crosslink_committee)):
         if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b0:
             assert get_bitfield_bit(attestation.custody_bitfield, i) == 0b0
+    # Verify aggregate signature
     participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield)
     custody_bit_1_participants = get_attestation_participants(state, attestation.data, attestation.custody_bitfield)
     custody_bit_0_participants = [i for i in participants if i not in custody_bit_1_participants]
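A note on the bitfield subset check in the hunk above: it relies on the phase 0 `get_bitfield_bit` helper, reproduced here as a sketch for convenience (one bit per committee member, little-endian within each byte):

```python
def get_bitfield_bit(bitfield: bytes, i: int) -> int:
    # Extract the i-th bit of the bitfield
    return (bitfield[i // 8] >> (i % 8)) % 2

bitfield = bytes([0b00001000])      # only committee member 3 participated
assert get_bitfield_bit(bitfield, 3) == 1
assert get_bitfield_bit(bitfield, 2) == 0
```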
@@ -1696,56 +1816,50 @@ For each `attestation` in `block.body.attestations`:
         signature=attestation.aggregate_signature,
         domain=get_domain(state.fork, slot_to_epoch(attestation.data.slot), DOMAIN_ATTESTATION),
     )
+    # Crosslink data root is zero (to be removed in phase 1)
+    assert attestation.data.crosslink_data_root == ZERO_HASH
+    # Apply the attestation
+    state.latest_attestations.append(PendingAttestation(
+        data=attestation.data,
+        aggregation_bitfield=attestation.aggregation_bitfield,
+        custody_bitfield=attestation.custody_bitfield,
+        inclusion_slot=state.slot)
+    )
 ```
 
-* [TO BE REMOVED IN PHASE 1] Verify that `attestation.data.crosslink_data_root == ZERO_HASH`.
-* Append `PendingAttestation(data=attestation.data, aggregation_bitfield=attestation.aggregation_bitfield, custody_bitfield=attestation.custody_bitfield, inclusion_slot=state.slot)` to `state.latest_attestations`.
-
 ##### Deposits
 
 Verify that `len(block.body.deposits) <= MAX_DEPOSITS`.
 
-[TODO: update the call to `verify_merkle_branch` below if it needs to change after we process deposits in order]
-
-For each `deposit` in `block.body.deposits`:
-
-* Let `serialized_deposit_data` be the serialized form of `deposit.deposit_data`. It should be 8 bytes for `deposit_data.amount` followed by 8 bytes for `deposit_data.timestamp` and then the `DepositInput` bytes. That is, it should match `deposit_data` in the [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract) of which the hash was placed into the Merkle tree.
-* Verify that `deposit.index == state.deposit_index`.
-* Verify that `verify_merkle_branch(hash(serialized_deposit_data), deposit.branch, DEPOSIT_CONTRACT_TREE_DEPTH, deposit.index, state.latest_eth1_data.deposit_root)` is `True`.
-
-```python
-def verify_merkle_branch(leaf: Bytes32, branch: List[Bytes32], depth: int, index: int, root: Bytes32) -> bool:
-    """
-    Verify that the given ``leaf`` is on the merkle branch ``branch``.
-    """
-    value = leaf
-    for i in range(depth):
-        if index // (2**i) % 2:
-            value = hash(branch[i] + value)
-        else:
-            value = hash(value + branch[i])
-    return value == root
-```
-
-* Run the following:
-
-```python
-process_deposit(state, deposit)
-```
-
-* Set `state.deposit_index += 1`.
+For each `deposit` in `block.body.deposits`, run `process_deposit(state, deposit)`.
 
 ##### Voluntary exits
 
 Verify that `len(block.body.voluntary_exits) <= MAX_VOLUNTARY_EXITS`.
 
-For each `exit` in `block.body.voluntary_exits`:
+For each `exit` in `block.body.voluntary_exits`, run the following function:
 
-* Let `validator = state.validator_registry[exit.validator_index]`.
-* Verify that `validator.exit_epoch > get_delayed_activation_exit_epoch(get_current_epoch(state))`.
-* Verify that `get_current_epoch(state) >= exit.epoch`.
-* Verify that `bls_verify(pubkey=validator.pubkey, message_hash=signed_root(exit, "signature"), signature=exit.signature, domain=get_domain(state.fork, exit.epoch, DOMAIN_EXIT))`.
-* Run `initiate_validator_exit(state, exit.validator_index)`.
+```python
+def process_exit(state: BeaconState, exit: VoluntaryExit) -> None:
+    """
+    Process ``VoluntaryExit`` transaction.
+    Note that this function mutates ``state``.
+    """
+    validator = state.validator_registry[exit.validator_index]
+    # Verify the validator has not yet exited
+    assert validator.exit_epoch > get_delayed_activation_exit_epoch(get_current_epoch(state))
+    # Exits must specify an epoch when they become valid; they are not valid before then
+    assert get_current_epoch(state) >= exit.epoch
+    # Verify signature
+    assert bls_verify(
+        pubkey=validator.pubkey,
+        message_hash=signed_root(exit, "signature"),
+        signature=exit.signature,
+        domain=get_domain(state.fork, exit.epoch, DOMAIN_EXIT)
+    )
+    # Run the exit
+    initiate_validator_exit(state, exit.validator_index)
+```
 
 ##### Transfers
 
@@ -1753,18 +1867,46 @@ Note: Transfers are a temporary functionality for phases 0 and 1, to be removed
 
 Verify that `len(block.body.transfers) <= MAX_TRANSFERS` and that all transfers are distinct.
 
-For each `transfer` in `block.body.transfers`:
+For each `transfer` in `block.body.transfers`, run the following function:
 
-* Verify that `state.validator_balances[transfer.from] >= transfer.amount`.
-* Verify that `state.validator_balances[transfer.from] >= transfer.fee`.
-* Verify that `state.validator_balances[transfer.from] == transfer.amount + transfer.fee` or `state.validator_balances[transfer.from] >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT`.
-* Verify that `state.slot == transfer.slot`.
-* Verify that `get_current_epoch(state) >= state.validator_registry[transfer.from].withdrawable_epoch` or `state.validator_registry[transfer.from].activation_epoch == FAR_FUTURE_EPOCH`.
-* Verify that `state.validator_registry[transfer.from].withdrawal_credentials == BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:]`.
-* Verify that `bls_verify(pubkey=transfer.pubkey, message_hash=signed_root(transfer, "signature"), signature=transfer.signature, domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER))`.
-* Set `state.validator_balances[transfer.from] -= transfer.amount + transfer.fee`.
-* Set `state.validator_balances[transfer.to] += transfer.amount`.
-* Set `state.validator_balances[get_beacon_proposer_index(state, state.slot)] += transfer.fee`.
+```python
+def process_transfer(state: BeaconState, transfer: Transfer) -> None:
+    """
+    Process ``Transfer`` transaction.
+    Note that this function mutates ``state``.
+    """
+    # Verify the amount and fee aren't individually too big (for anti-overflow purposes)
+    assert state.validator_balances[transfer.from] >= max(transfer.amount, transfer.fee)
+    # Verify that we have enough ETH to send, and that after the transfer the balance will be either
+    # exactly zero or at least MIN_DEPOSIT_AMOUNT
+    assert (
+        state.validator_balances[transfer.from] == transfer.amount + transfer.fee or
+        state.validator_balances[transfer.from] >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT
+    )
+    # A transfer is valid in only one slot
+    assert state.slot == transfer.slot
+    # Only withdrawn or not-yet-deposited accounts can transfer
+    assert (
+        get_current_epoch(state) >= state.validator_registry[transfer.from].withdrawable_epoch or
+        state.validator_registry[transfer.from].activation_epoch == FAR_FUTURE_EPOCH
+    )
+    # Verify that the pubkey is valid
+    assert (
+        state.validator_registry[transfer.from].withdrawal_credentials ==
+        BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:]
+    )
+    # Verify that the signature is valid
+    assert bls_verify(
+        pubkey=transfer.pubkey,
+        message_hash=signed_root(transfer, "signature"),
+        signature=transfer.signature,
+        domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER)
+    )
+    # Process the transfer
+    state.validator_balances[transfer.from] -= transfer.amount + transfer.fee
+    state.validator_balances[transfer.to] += transfer.amount
+    state.validator_balances[get_beacon_proposer_index(state, state.slot)] += transfer.fee
+```
 
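A quick numeric check of the "exactly zero or at least `MIN_DEPOSIT_AMOUNT`" rule above (Gwei amounts; `MIN_DEPOSIT_AMOUNT` is assumed to be the phase 0 value of 1 ETH):

```python
MIN_DEPOSIT_AMOUNT = 10**9  # 1 ETH in Gwei (assumed phase 0 value)

def transfer_balance_ok(balance: int, amount: int, fee: int) -> bool:
    return (
        balance == amount + fee or
        balance >= amount + fee + MIN_DEPOSIT_AMOUNT
    )

assert transfer_balance_ok(5 * 10**9, 5 * 10**9 - 10**6, 10**6)  # leaves exactly zero
assert transfer_balance_ok(5 * 10**9, 3 * 10**9, 10**6)          # leaves over 1 ETH
assert not transfer_balance_ok(5 * 10**9, 5 * 10**9 - 10**6, 0)  # would leave dust
```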
 ### Per-epoch processing
 
@@ -1862,30 +2004,29 @@ Note: When applying penalties in the following balance recalculations implemente
 
 ##### Justification and finalization
 
-Note: Rewards and penalties are for participation in the previous epoch, so the "active validator" set is drawn from `get_active_validator_indices(state.validator_registry, previous_epoch)`.
+* Let `previous_active_validator_indices = get_active_validator_indices(state.validator_registry, previous_epoch)`.
 
 * Let `epochs_since_finality = next_epoch - state.finalized_epoch`.
 
 Case 1: `epochs_since_finality <= 4`:
 
 * Expected FFG source:
     * Any [validator](#dfn-validator) `index` in `previous_epoch_attester_indices` gains `base_reward(state, index) * previous_epoch_attesting_balance // previous_total_balance`.
-    * Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_attester_indices` loses `base_reward(state, index)`.
+    * Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_attester_indices` loses `base_reward(state, index)`.
 * Expected FFG target:
     * Any [validator](#dfn-validator) `index` in `previous_epoch_boundary_attester_indices` gains `base_reward(state, index) * previous_epoch_boundary_attesting_balance // previous_total_balance`.
-    * Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_boundary_attester_indices` loses `base_reward(state, index)`.
+    * Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_boundary_attester_indices` loses `base_reward(state, index)`.
 * Expected beacon chain head:
     * Any [validator](#dfn-validator) `index` in `previous_epoch_head_attester_indices` gains `base_reward(state, index) * previous_epoch_head_attesting_balance // previous_total_balance`.
-    * Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_head_attester_indices` loses `base_reward(state, index)`.
+    * Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_head_attester_indices` loses `base_reward(state, index)`.
 * Inclusion distance:
     * Any [validator](#dfn-validator) `index` in `previous_epoch_attester_indices` gains `base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_distance(state, index)`.
 
 Case 2: `epochs_since_finality > 4`:
 
-* Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_attester_indices`, loses `inactivity_penalty(state, index, epochs_since_finality)`.
-* Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_boundary_attester_indices`, loses `inactivity_penalty(state, index, epochs_since_finality)`.
-* Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_head_attester_indices`, loses `base_reward(state, index)`.
-* Any [active validator](#dfn-active-validator) `index` with `validator.slashed == True`, loses `2 * inactivity_penalty(state, index, epochs_since_finality) + base_reward(state, index)`.
+* Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_attester_indices`, loses `inactivity_penalty(state, index, epochs_since_finality)`.
+* Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_boundary_attester_indices`, loses `inactivity_penalty(state, index, epochs_since_finality)`.
+* Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_head_attester_indices`, loses `base_reward(state, index)`.
+* Any [active validator](#dfn-active-validator) `index` with `validator.slashed is True`, loses `2 * inactivity_penalty(state, index, epochs_since_finality) + base_reward(state, index)`.
 * Any [validator](#dfn-validator) `index` in `previous_epoch_attester_indices` loses `base_reward(state, index) - base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_distance(state, index)`.
 
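A worked instance of the Case 1 scaling (illustrative numbers only): if two thirds of the previous total balance attested to the FFG source, an attester earns two thirds of its `base_reward`, while a non-attesting active validator loses a full `base_reward`.

```python
base_reward = 900                        # Gwei, illustrative
previous_total_balance = 3_000_000
previous_epoch_attesting_balance = 2_000_000

reward = base_reward * previous_epoch_attesting_balance // previous_total_balance
assert reward == 600                     # attester gains 2/3 of base_reward
penalty = base_reward                    # a non-attester loses the full base_reward
```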
 ##### Attestation inclusion
@@ -1964,7 +2105,7 @@ def update_validator_registry(state: BeaconState) -> None:
     # Exit validators within the allowable balance churn
     balance_churn = 0
     for index, validator in enumerate(state.validator_registry):
-        if validator.activation_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit:
+        if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit:
             # Check the balance churn would be within the allowance
             balance_churn += get_effective_balance(state, index)
             if balance_churn > max_balance_churn:
@@ -1978,8 +2119,8 @@ def update_validator_registry(state: BeaconState) -> None:
 
 and perform the following updates:
 
-* Set `state.current_shuffling_epoch = next_epoch`
 * Set `state.current_shuffling_start_shard = (state.current_shuffling_start_shard + get_current_epoch_committee_count(state)) % SHARD_COUNT`
+* Set `state.current_shuffling_epoch = next_epoch`
 * Set `state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch)`
 
 If a validator registry update does _not_ happen do the following:
@@ -2004,12 +2145,14 @@ def process_slashings(state: BeaconState) -> None:
     active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch)
     total_balance = sum(get_effective_balance(state, i) for i in active_validator_indices)
 
-    for index, validator in enumerate(state.validator_registry):
-        if validator.slashed and current_epoch == validator.withdrawable_epoch - LATEST_SLASHED_EXIT_LENGTH // 2:
-            epoch_index = current_epoch % LATEST_SLASHED_EXIT_LENGTH
-            total_at_start = state.latest_slashed_balances[(epoch_index + 1) % LATEST_SLASHED_EXIT_LENGTH]
-            total_at_end = state.latest_slashed_balances[epoch_index]
-            total_penalties = total_at_end - total_at_start
+    # Compute `total_penalties`
+    epoch_index = current_epoch % LATEST_SLASHED_EXIT_LENGTH
+    total_at_start = state.latest_slashed_balances[(epoch_index + 1) % LATEST_SLASHED_EXIT_LENGTH]
+    total_at_end = state.latest_slashed_balances[epoch_index]
+    total_penalties = total_at_end - total_at_start
+
+    for index, validator in enumerate(state.validator_registry):
+        if validator.slashed and current_epoch == validator.withdrawable_epoch - LATEST_SLASHED_EXIT_LENGTH // 2:
             penalty = max(
                 get_effective_balance(state, index) * min(total_penalties * 3, total_balance) // total_balance,
                 get_effective_balance(state, index) // MIN_PENALTY_QUOTIENT
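A worked instance of the `penalty` formula in the hunk above (illustrative numbers; `MIN_PENALTY_QUOTIENT` is assumed to be the phase 0 value of 32):

```python
MIN_PENALTY_QUOTIENT = 32                # assumed phase 0 value
effective_balance = 32 * 10**9           # 32 ETH in Gwei
total_balance = 10_000 * 32 * 10**9
total_penalties = 100 * 32 * 10**9       # 1% of total stake slashed in the window

scaled = effective_balance * min(total_penalties * 3, total_balance) // total_balance
floor = effective_balance // MIN_PENALTY_QUOTIENT
# 1% correlated slashing scales to a 3% penalty, below the 1/32 (~3.1%) floor
assert scaled == effective_balance * 3 // 100
assert max(scaled, floor) == floor
```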
@@ -2044,7 +2187,7 @@ def process_exit_queue(state: BeaconState) -> None:
 #### Final updates
 
 * Set `state.latest_active_index_roots[(next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH] = hash_tree_root(get_active_validator_indices(state.validator_registry, next_epoch + ACTIVATION_EXIT_DELAY))`.
-* Set `state.latest_slashed_balances[(next_epoch) % LATEST_SLASHED_EXIT_LENGTH] = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH]`.
+* Set `state.latest_slashed_balances[next_epoch % LATEST_SLASHED_EXIT_LENGTH] = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH]`.
 * Set `state.latest_randao_mixes[next_epoch % LATEST_RANDAO_MIXES_LENGTH] = get_randao_mix(state, current_epoch)`.
 * Remove any `attestation` in `state.latest_attestations` such that `slot_to_epoch(attestation.data.slot) < current_epoch`.
@@ -2,6 +2,8 @@
 
 **NOTICE**: This document is a work-in-progress for researchers and implementers. It reflects recent spec changes and takes precedence over the [Python proof-of-concept implementation](https://github.com/ethereum/beacon_chain).
 
+At the current stage, Phase 1, while fundamentally feature-complete, is still subject to change. Development teams with spare resources may consider starting on the "Shard chains and crosslink data" section; at least basic properties, such as the fact that a shard block can get created every slot and is dependent on both a parent block in the same shard and a beacon chain block at or before that same slot, are unlikely to change, though details are likely to undergo similar kinds of changes to what Phase 0 has undergone since the start of the year.
+
 ## Table of contents
 
 <!-- TOC -->
@@ -15,6 +17,7 @@
     - [Time parameters](#time-parameters)
     - [Max operations per block](#max-operations-per-block)
     - [Signature domains](#signature-domains)
+- [Shard chains and crosslink data](#shard-chains-and-crosslink-data)
 - [Helper functions](#helper-functions)
     - [`get_split_offset`](#get_split_offset)
     - [`get_shuffled_committee`](#get_shuffled_committee)
@@ -35,7 +38,7 @@
     - [`BranchChallengeRecord`](#branchchallengerecord)
     - [`SubkeyReveal`](#subkeyreveal)
 - [Helpers](#helpers)
-    - [`get_attestation_merkle_depth`](#get_attestation_merkle_depth)
+    - [`get_attestation_data_merkle_depth`](#get_attestation_data_merkle_depth)
     - [`epoch_to_custody_period`](#epoch_to_custody_period)
     - [`slot_to_custody_period`](#slot_to_custody_period)
     - [`get_current_custody_period`](#get_current_custody_period)
@@ -71,6 +74,9 @@ Phase 1 depends upon all of the constants defined in [Phase 0](0_beacon-chain.md
 | `SHARD_CHUNK_SIZE`       | 2**5 (= 32)       | bytes |
 | `SHARD_BLOCK_SIZE`       | 2**14 (= 16,384)  | bytes |
 | `MINOR_REWARD_QUOTIENT`  | 2**8 (= 256)      | |
+| `MAX_POC_RESPONSE_DEPTH` | 5                 | |
+| `ZERO_PUBKEY`            | int_to_bytes48(0) | |
+| `VALIDATOR_NULL`         | 2**64 - 1         | |
 
 #### Time parameters
 
@@ -85,18 +91,24 @@ Phase 1 depends upon all of the constants defined in [Phase 0](0_beacon-chain.md
 #### Max operations per block
 
 | Name | Value |
-|-------------------------------|---------------|
+|----------------------------------------------------|---------------|
 | `MAX_BRANCH_CHALLENGES` | 2**2 (= 4) |
 | `MAX_BRANCH_RESPONSES` | 2**4 (= 16) |
 | `MAX_EARLY_SUBKEY_REVEALS` | 2**4 (= 16) |
+| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_INITIATIONS` | 2 |
+| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_RESPONSES` | 16 |
+| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_CONTINUATIONS` | 16 |
 
 #### Signature domains
 
 | Name | Value |
-|------------------------|-----------------|
+|------------------------------|-----------------|
 | `DOMAIN_SHARD_PROPOSER` | 129 |
 | `DOMAIN_SHARD_ATTESTER` | 130 |
 | `DOMAIN_CUSTODY_SUBKEY` | 131 |
+| `DOMAIN_CUSTODY_INTERACTIVE` | 132 |
 
+# Shard chains and crosslink data
+
 ## Helper functions
 
@@ -158,7 +170,6 @@ def get_persistent_committee(state: BeaconState,
         [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)]
     )))
 ```
 
-
 #### `get_shard_proposer_index`
 
 ```python
@@ -290,7 +301,7 @@ The `shard_chain_commitment` is only valid if it equals `compute_commitment(head
 
 ### Shard block fork choice rule
 
-The fork choice rule for any shard is LMD GHOST using the shard chain attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the latest block referenced in the most recent accepted crosslink (ie. `state.crosslinks[shard].crosslink_data_root`). Only blocks whose `beacon_chain_ref` is the block in the main beacon chain at the specified `slot` should be considered (if the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than a slot).
+The fork choice rule for any shard is LMD GHOST using the shard chain attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_chain_ref` is the block in the main beacon chain at the specified `slot` should be considered (if the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot).
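To make the rewritten rule concrete, here is a minimal, self-contained LMD GHOST sketch rooted at the crosslinked block; the toy block tree and helpers are illustrative stand-ins, not spec functions:

```python
# Toy block tree: child -> parent.
parent = {"B": "A", "C": "A", "D": "B"}

def children_of(block):
    return [b for b, p in parent.items() if p == block]

def is_ancestor_or_self(ancestor, block):
    while block is not None:
        if block == ancestor:
            return True
        block = parent.get(block)
    return False

def lmd_ghost_head(root, latest_attestation_targets):
    head = root
    while True:
        children = children_of(head)
        if not children:
            return head
        # Follow the child whose subtree has the most latest-attestation support
        head = max(children, key=lambda child: sum(
            1 for target in latest_attestation_targets
            if is_ancestor_or_self(child, target)
        ))

# Two validators last attested to D (on B's branch), one to C: the head is D.
assert lmd_ghost_head("A", ["D", "D", "C"]) == "D"
```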
 
 # Updates to the beacon chain
 
@@ -301,7 +312,6 @@ The fork choice rule for any shard is LMD GHOST using the shard chain attestatio
 Add member values to the end of the `Validator` object:
 
 ```python
-    'open_branch_challenges': [BranchChallengeRecord],
     'next_subkey_to_reveal': 'uint64',
     'reveal_max_periods_late': 'uint64',
 ```
@@ -309,7 +319,6 @@ Add member values to the end of the `Validator` object:
 And the initializers:
 
 ```python
-    'open_branch_challenges': [],
     'next_subkey_to_reveal': get_current_custody_period(state),
     'reveal_max_periods_late': 0,
 ```
@@ -322,6 +331,10 @@ Add member values to the `BeaconBlockBody` structure:
     'branch_challenges': [BranchChallenge],
     'branch_responses': [BranchResponse],
     'subkey_reveals': [SubkeyReveal],
+    'interactive_custody_challenge_initiations': [InteractiveCustodyChallengeInitiation],
+    'interactive_custody_challenge_responses': [InteractiveCustodyChallengeResponse],
+    'interactive_custody_challenge_continuations': [InteractiveCustodyChallengeContinuation],
+
 ```
 
 And initialize to the following:
@@ -332,6 +345,17 @@ And initialize to the following:
     'subkey_reveals': [],
 ```
 
+### `BeaconState`
+
+Add member values to the `BeaconState` structure:
+
+```python
+    'branch_challenge_records': [BranchChallengeRecord],
+    'next_branch_challenge_id': 'uint64',
+    'custody_challenge_records': [InteractiveCustodyChallengeRecord],
+    'next_custody_challenge_id': 'uint64',
+```
+
 ### `BranchChallenge`
 
 Define a `BranchChallenge` as follows:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
{
|
{
|
||||||
'responder_index': 'uint64',
|
'challenge_id': 'uint64',
|
||||||
|
'responding_to_custody_challenge': 'bool',
|
||||||
'data': 'bytes32',
|
'data': 'bytes32',
|
||||||
'branch': ['bytes32'],
|
'branch': ['bytes32'],
|
||||||
'data_index': 'uint64',
|
|
||||||
'root': 'bytes32',
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
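A note on the reshaped `BranchResponse` above: a response now names its challenge by `challenge_id` and declares which game it answers via `responding_to_custody_challenge`, rather than being matched on (`root`, `data_index`). A minimal dispatch sketch (assuming the `process_branch_*` functions defined further down in this diff):

```python
def process_branch_response(state: BeaconState, response: BranchResponse) -> None:
    # Route the response to the appropriate handler based on the new flag
    if response.responding_to_custody_challenge:
        process_branch_custody_response(response, state)
    else:
        process_branch_exploration_response(response, state)
```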
@@ -364,14 +387,75 @@ Define a `BranchChallengeRecord` as follows:
 
 ```python
 {
+    'challenge_id': 'uint64',
     'challenger_index': 'uint64',
+    'responder_index': 'uint64',
     'root': 'bytes32',
     'depth': 'uint64',
-    'inclusion_epoch': 'uint64',
+    'deadline': 'uint64',
     'data_index': 'uint64',
 }
 ```
 
+### `InteractiveCustodyChallengeRecord`
+
+```python
+{
+    'challenge_id': 'uint64',
+    'challenger_index': 'uint64',
+    'responder_index': 'uint64',
+    # Initial data root
+    'data_root': 'bytes32',
+    # Initial custody bit
+    'custody_bit': 'bool',
+    # Responder subkey
+    'responder_subkey': 'bytes96',
+    # The hash in the PoC tree in the position that we are currently at
+    'current_custody_tree_node': 'bytes32',
+    # The position in the tree, in terms of depth and position offset
+    'depth': 'uint64',
+    'offset': 'uint64',
+    # Max depth of the branch
+    'max_depth': 'uint64',
+    # Deadline to respond (as an epoch)
+    'deadline': 'uint64',
+}
+```
+
+### `InteractiveCustodyChallengeInitiation`
+
+```python
+{
+    'attestation': SlashableAttestation,
+    'responder_index': 'uint64',
+    'challenger_index': 'uint64',
+    'responder_subkey': 'bytes96',
+    'signature': 'bytes96',
+}
+```
+
+### `InteractiveCustodyChallengeResponse`
+
+```python
+{
+    'challenge_id': 'uint64',
+    'hashes': ['bytes32'],
+    'signature': 'bytes96',
+}
+```
+
+### `InteractiveCustodyChallengeContinuation`
+
+```python
+{
+    'challenge_id': 'uint64',
+    'sub_index': 'uint64',
+    'new_custody_tree_node': 'bytes32',
+    'proof': ['bytes32'],
+    'signature': 'bytes96',
+}
+```
+
 ### `SubkeyReveal`
 
 Define a `SubkeyReveal` as follows:
|
||||||
|
|
||||||
## Helpers
|
## Helpers
|
||||||
|
|
||||||
|
### `get_branch_challenge_record_by_id`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_branch_challenge_record_by_id(state: BeaconState, id: int) -> BranchChallengeRecord:
|
||||||
|
return [c for c in state.branch_challenges if c.challenge_id == id][0]
|
||||||
|
```
|
||||||
|
|
||||||
|
### `get_custody_challenge_record_by_id`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_custody_challenge_record_by_id(state: BeaconState, id: int) -> BranchChallengeRecord:
|
||||||
|
return [c for c in state.branch_challenges if c.challenge_id == id][0]
|
||||||
|
```
|
||||||
|
|
||||||
### `get_attestation_merkle_depth`
|
### `get_attestation_merkle_depth`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@@ -453,6 +551,19 @@ def verify_custody_subkey_reveal(pubkey: bytes48,
     )
 ```
 
+### `verify_signed_challenge_message`
+
+```python
+def verify_signed_challenge_message(state: BeaconState, message: Any, pubkey: bytes48) -> bool:
+    return bls_verify(
+        message_hash=signed_root(message, 'signature'),
+        pubkey=pubkey,
+        signature=message.signature,
+        domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_CUSTODY_INTERACTIVE)
+    )
+```
+
 ### `penalize_validator`
 
 Change the definition of `penalize_validator` as follows:
@ -493,29 +604,88 @@ Add the following operations to the per-slot processing, in order the given belo
|
||||||
|
|
||||||
Verify that `len(block.body.branch_challenges) <= MAX_BRANCH_CHALLENGES`.
|
Verify that `len(block.body.branch_challenges) <= MAX_BRANCH_CHALLENGES`.
|
||||||
|
|
||||||
For each `challenge` in `block.body.branch_challenges`, run:

```python
def process_branch_challenge(challenge: BranchChallenge,
                             state: BeaconState):
    # Check that it's not too late to challenge
    assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY
    assert state.validator_registry[challenge.responder_index].exit_epoch >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY
    # Check the attestation is valid
    assert verify_slashable_attestation(state, challenge.attestation)
    # Check that the responder participated
    assert challenge.responder_index in challenge.attestation.validator_indices
    # Check the challenge is not a duplicate
    assert [
        c for c in state.branch_challenge_records
        if c.root == challenge.attestation.data.shard_chain_commitment and c.data_index == challenge.data_index
    ] == []
    # Check validity of depth
    depth = get_attestation_merkle_depth(challenge.attestation)
    assert challenge.data_index < 2**depth
    # Add new challenge
    state.branch_challenge_records.append(BranchChallengeRecord(
        challenge_id=state.next_branch_challenge_id,
        challenger_index=get_beacon_proposer_index(state, state.slot),
        root=challenge.attestation.data.shard_chain_commitment,
        depth=depth,
        deadline=get_current_epoch(state) + CHALLENGE_RESPONSE_DEADLINE,
        data_index=challenge.data_index
    ))
    state.next_branch_challenge_id += 1
```

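For illustration, the two rules above compose into per-block processing roughly as follows (a minimal sketch, not part of the spec; `block` and `state` are the objects being processed):

```python
# Illustrative driver for the branch challenge rules above (not normative)
assert len(block.body.branch_challenges) <= MAX_BRANCH_CHALLENGES
for challenge in block.body.branch_challenges:
    process_branch_challenge(challenge, state)
```
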
#### Branch responses

Verify that `len(block.body.branch_responses) <= MAX_BRANCH_RESPONSES`.

For each `response` in `block.body.branch_responses`, if `response.responding_to_custody_challenge == False`, run:

```python
def process_branch_exploration_response(response: BranchResponse,
                                        state: BeaconState):
    challenge = get_branch_challenge_record_by_id(state, response.challenge_id)
    assert verify_merkle_branch(
        leaf=response.data,
        branch=response.branch,
        depth=challenge.depth,
        index=challenge.data_index,
        root=challenge.root
    )
    # Must wait at least ENTRY_EXIT_DELAY before responding to a branch challenge
    assert get_current_epoch(state) >= challenge.inclusion_epoch + ENTRY_EXIT_DELAY
    state.branch_challenge_records.remove(challenge)
    # Reward the proposer
    proposer_index = get_beacon_proposer_index(state, state.slot)
    state.validator_balances[proposer_index] += base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT
```

If `response.responding_to_custody_challenge == True`, run:

```python
def process_branch_custody_response(response: BranchResponse,
                                    state: BeaconState):
    challenge = get_custody_challenge_record_by_id(state, response.challenge_id)
    responder = state.validator_registry[challenge.responder_index]
    # Verify we're not too late
    assert get_current_epoch(state) < responder.withdrawable_epoch
    # Verify the Merkle branch *of the data tree*
    assert verify_merkle_branch(
        leaf=response.data,
        branch=response.branch,
        depth=challenge.max_depth,
        index=challenge.offset,
        root=challenge.data_root
    )
    # Responder wins
    if hash(challenge.responder_subkey + response.data) == challenge.current_custody_tree_node:
        penalize_validator(state, challenge.challenger_index, challenge.responder_index)
    # Challenger wins
    else:
        penalize_validator(state, challenge.responder_index, challenge.challenger_index)
    state.custody_challenge_records.remove(challenge)
```

#### Subkey reveals

@@ -541,6 +711,126 @@ In case (ii):

* Set `state.validator_registry[reveal.validator_index].next_subkey_to_reveal += 1`
* Set `state.validator_registry[reveal.validator_index].reveal_max_periods_late = max(state.validator_registry[reveal.validator_index].reveal_max_periods_late, get_current_period(state) - reveal.period)`.

#### Interactive custody challenge initiations

Verify that `len(block.body.interactive_custody_challenge_initiations) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_INITIATIONS`.

For each `initiation` in `block.body.interactive_custody_challenge_initiations`, use the following function to process it:

```python
def process_initiation(initiation: InteractiveCustodyChallengeInitiation,
                       state: BeaconState):
    challenger = state.validator_registry[initiation.challenger_index]
    responder = state.validator_registry[initiation.responder_index]
    # Verify the signature
    assert verify_signed_challenge_message(initiation, challenger.pubkey, state)
    # Verify the attestation
    assert verify_slashable_attestation(state, initiation.attestation)
    # Check that the responder actually participated in the attestation
    assert initiation.responder_index in initiation.attestation.validator_indices
    # Any validator can be a challenger or responder of max 1 challenge at a time
    for c in state.custody_challenge_records:
        assert c.challenger_index != initiation.challenger_index
        assert c.responder_index != initiation.responder_index
    # Can't challenge if you've been penalized
    assert challenger.penalized_epoch == FAR_FUTURE_EPOCH
    # Make sure the revealed subkey is valid
    assert verify_custody_subkey_reveal(
        pubkey=responder.pubkey,
        subkey=initiation.responder_subkey,
        period=slot_to_custody_period(initiation.attestation.data.slot)
    )
    # Verify that the attestation is still eligible for challenging
    min_challengeable_epoch = responder.exit_epoch - CUSTODY_PERIOD_LENGTH * (1 + responder.reveal_max_periods_late)
    assert min_challengeable_epoch <= slot_to_epoch(initiation.attestation.data.slot)
    # Create a new challenge object
    state.custody_challenge_records.append(InteractiveCustodyChallengeRecord(
        challenge_id=state.next_custody_challenge_id,
        challenger_index=initiation.challenger_index,
        responder_index=initiation.responder_index,
        data_root=initiation.attestation.custody_commitment,
        custody_bit=get_bitfield_bit(initiation.attestation.custody_bitfield, initiation.attestation.validator_indices.index(initiation.responder_index)),
        responder_subkey=initiation.responder_subkey,
        current_custody_tree_node=ZERO_HASH,
        depth=0,
        offset=0,
        max_depth=get_attestation_data_merkle_depth(initiation.attestation.data),
        deadline=get_current_epoch(state) + CHALLENGE_RESPONSE_DEADLINE
    ))
    state.next_custody_challenge_id += 1
    # Responder can't withdraw yet!
    responder.withdrawable_epoch = FAR_FUTURE_EPOCH
```

#### Interactive custody challenge responses

A response provides 32 hashes that are under the current known proof-of-custody tree node. Note that at the beginning the tree node is just one bit of the custody root, so we ask the responder to sign to commit to the top 5 levels of the tree and therefore the root hash; at all other stages in the game responses are self-verifying.

Verify that `len(block.body.interactive_custody_challenge_responses) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_RESPONSES`.

For each `response` in `block.body.interactive_custody_challenge_responses`, use the following function to process it:

```python
def process_response(response: InteractiveCustodyChallengeResponse,
                     state: BeaconState):
    challenge = get_custody_challenge_record_by_id(state, response.challenge_id)
    responder = state.validator_registry[challenge.responder_index]
    # Check that the right number of hashes was provided
    expected_depth = min(challenge.max_depth - challenge.depth, MAX_POC_RESPONSE_DEPTH)
    assert 2**expected_depth == len(response.hashes)
    # Must make some progress!
    assert expected_depth > 0
    # Check the hashes match the previously provided root
    root = merkle_root(response.hashes)
    # If this is the first response, check the bit and the signature and set the root
    if challenge.depth == 0:
        assert get_bitfield_bit(root, 0) == challenge.custody_bit
        assert verify_signed_challenge_message(response, responder.pubkey, state)
        challenge.current_custody_tree_node = root
    # Otherwise just check the response against the root
    else:
        assert root == challenge.current_custody_tree_node
    # Update challenge data
    challenge.deadline = FAR_FUTURE_EPOCH
    responder.withdrawable_epoch = get_current_epoch(state) + MAX_POC_RESPONSE_DEPTH
```

#### Interactive custody challenge continuations

Once a response provides 32 hashes, the challenger has the right to choose any one of them that they feel is constructed incorrectly to continue the game. Note that eventually the game will get to the point where the `new_custody_tree_node` is a leaf node.

Verify that `len(block.body.interactive_custody_challenge_continuations) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_CONTINUATIONS`.

For each `continuation` in `block.body.interactive_custody_challenge_continuations`, use the following function to process it:

```python
def process_continuation(continuation: InteractiveCustodyChallengeContinuation,
                         state: BeaconState):
    challenge = get_custody_challenge_record_by_id(state, continuation.challenge_id)
    challenger = state.validator_registry[challenge.challenger_index]
    responder = state.validator_registry[challenge.responder_index]
    expected_depth = min(challenge.max_depth - challenge.depth, MAX_POC_RESPONSE_DEPTH)
    # Verify we're not too late
    assert get_current_epoch(state) < responder.withdrawable_epoch
    # Verify the Merkle branch (the previous custody response provided the next level of hashes, so the
    # challenger has the info to make any Merkle branch); `sub_index` -- the index of the chosen
    # child node -- is read from the continuation object
    assert verify_merkle_branch(
        leaf=continuation.new_custody_tree_node,
        branch=continuation.proof,
        depth=expected_depth,
        index=continuation.sub_index,
        root=challenge.current_custody_tree_node
    )
    # Verify signature
    assert verify_signed_challenge_message(continuation, challenger.pubkey, state)
    # Update the challenge data
    challenge.current_custody_tree_node = continuation.new_custody_tree_node
    challenge.depth += expected_depth
    challenge.deadline = get_current_epoch(state) + MAX_POC_RESPONSE_DEPTH
    responder.withdrawable_epoch = FAR_FUTURE_EPOCH
    challenge.offset = challenge.offset * 2**expected_depth + continuation.sub_index
```

## Per-epoch processing

Add the following loop immediately below the `process_ejections` loop:

```python
def process_challenge_absences(state: BeaconState) -> None:
    """
    Iterate through the challenge list
    and penalize validators with balance that did not answer challenges.
    """
    for c in state.branch_challenge_records:
        if get_current_epoch(state) > c.deadline:
            penalize_validator(state, c.responder_index, c.challenger_index)

    for c in state.custody_challenge_records:
        if get_current_epoch(state) > c.deadline:
            penalize_validator(state, c.responder_index, c.challenger_index)
        if get_current_epoch(state) > state.validator_registry[c.responder_index].withdrawable_epoch:
            penalize_validator(state, c.challenger_index, c.responder_index)
```

In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope):

```python
def eligible(index):
    validator = state.validator_registry[index]
    # Cannot exit if there are still open branch challenges
    if [c for c in state.branch_challenge_records if c.responder_index == index] != []:
        return False
    # Cannot exit if you have not revealed all of your subkeys
    elif validator.next_subkey_to_reveal <= epoch_to_custody_period(validator.exit_epoch):
        return False
```

@@ -582,7 +878,15 @@ Run the following on the fork block after per-slot processing and before per-blo

For all `validator` in `ValidatorRegistry`, update it to the new format and fill the new member values with:

```python
'next_subkey_to_reveal': get_current_custody_period(state),
'reveal_max_periods_late': 0,
```

Update the `BeaconState` to the new format and fill the new member values with:

```python
'branch_challenge_records': [],
'next_branch_challenge_id': 0,
'custody_challenge_records': [],
'next_custody_challenge_id': 0,
```

@@ -1,424 +1,124 @@

# SimpleSerialiZe (SSZ)

This is a **work in progress** describing typing, serialization and Merkleization of Ethereum 2.0 objects.

## Table of contents

- [Constants](#constants)
- [Typing](#typing)
    - [Basic types](#basic-types)
    - [Composite types](#composite-types)
    - [Aliases](#aliases)
- [Serialization](#serialization)
    - [`uintN`](#uintn)
    - [`bool`](#bool)
    - [Tuples, containers, lists](#tuples-containers-lists)
- [Deserialization](#deserialization)
- [Merkleization](#merkleization)
- [Self-signed containers](#self-signed-containers)
- [Implementations](#implementations)

## Constants

| Name | Value | Description |
|-|-|-|
| `BYTES_PER_CHUNK` | `32` | Number of bytes per chunk. |
| `BYTES_PER_LENGTH_PREFIX` | `4` | Number of bytes per serialized length prefix. |

## Typing

### Basic types

* `uintN`: `N`-bit unsigned integer (where `N in [8, 16, 32, 64, 128, 256]`)
* `bool`: `True` or `False`

### Composite types

* **container**: ordered heterogeneous collection of values
    * key-pair curly bracket notation `{}`, e.g. `{'foo': "uint64", 'bar': "bool"}`
* **tuple**: ordered fixed-length homogeneous collection of values
    * square bracket notation `[N]`, e.g. `uint64[N]`
* **list**: ordered variable-length homogeneous collection of values
    * square bracket notation `[]`, e.g. `uint64[]`

### Aliases

For convenience we alias:

* `byte` to `uint8`
* `bytes` to `byte[]`
* `bytesN` to `byte[N]`

## Serialization

We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `bytes`.

*Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, etc.) objects implicitly carry their type.

### `uintN`

```python
assert N in [8, 16, 32, 64, 128, 256]
return value.to_bytes(N // 8, 'little')
```

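For example (illustrative only), the `uint16` value `1025 = 0x0401` serializes to the two little-endian bytes `01 04`:

```python
assert (1025).to_bytes(2, 'little') == b'\x01\x04'
```
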
### `bool`

```python
assert value in (True, False)
return b'\x01' if value is True else b'\x00'
```

### Tuples, containers, lists

If `value` is fixed-length (i.e. does not embed a list):

```python
return b''.join([serialize(element) for element in value])
```

If `value` is variable-length (i.e. embeds a list):

```python
serialized_bytes = b''.join([serialize(element) for element in value])
assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX)
serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little')
return serialized_length + serialized_bytes
```

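For concreteness, a minimal sketch (illustrative, not part of the spec) of how these two rules compose for a `uint8[]` list; `serialize_uint8_list` is a hypothetical helper specialized to that one type:

```python
BYTES_PER_LENGTH_PREFIX = 4  # from the Constants table

def serialize_uint8_list(value):
    # Serialize each uint8 element and concatenate
    serialized_bytes = b''.join(v.to_bytes(1, 'little') for v in value)
    # Prefix with the 4-byte little-endian length of the serialized payload
    assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX)
    serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little')
    return serialized_length + serialized_bytes

assert serialize_uint8_list([1, 2, 3]) == b'\x03\x00\x00\x00\x01\x02\x03'
```
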
## Deserialization

Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. Efficient algorithms for computing this object can be found in [the implementations](#implementations).

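For intuition, a minimal sketch (illustrative, not normative) of inverting the fixed-width `uintN` rule; real decoders additionally walk the length prefixes recursively:

```python
def deserialize_uintN(rawbytes: bytes, N: int) -> int:
    # Inverse of the uintN serialization rule above
    assert N in [8, 16, 32, 64, 128, 256] and len(rawbytes) == N // 8
    return int.from_bytes(rawbytes, 'little')
```
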
## Merkleization

We first define helper functions:

* `pack`: Given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks.
* `merkleize`: Given ordered `BYTES_PER_CHUNK`-byte chunks, if necessary append zero chunks so that the number of chunks is a power of two, Merkleize the chunks, and return the root.
* `mix_in_length`: Given a Merkle root `root` and a length `length` (`uint256` little-endian serialization) return `hash(root + length)`.

We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:

* `merkleize(pack(value))` if `value` is a basic object or a tuple of basic objects
* `mix_in_length(merkleize(pack(value)), len(value))` if `value` is a list of basic objects
* `merkleize([hash_tree_root(element) for element in value])` if `value` is a tuple of composite objects or a container
* `mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value))` if `value` is a list of composite objects
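A minimal Python sketch of these helpers follows (illustrative, not normative; it assumes `hash` is the 32-byte hash function defined elsewhere in the specs, stood in for here by SHA-256, and that `serialize` is the function defined above):

```python
from hashlib import sha256

BYTES_PER_CHUNK = 32

def hash(x: bytes) -> bytes:
    # Stand-in for the spec's 32-byte hash function
    return sha256(x).digest()

def pack(values, serialize):
    # Serialize the values, concatenate, and split into right-zero-padded 32-byte chunks
    data = b''.join(serialize(v) for v in values)
    if len(data) % BYTES_PER_CHUNK != 0:
        data += b'\x00' * (BYTES_PER_CHUNK - len(data) % BYTES_PER_CHUNK)
    return [data[i:i + BYTES_PER_CHUNK] for i in range(0, len(data), BYTES_PER_CHUNK)]

def merkleize(chunks):
    # Pad the chunk count to a power of two with zero chunks, then hash pairwise up to the root
    count = 1
    while count < len(chunks):
        count *= 2
    chunks = chunks + [b'\x00' * BYTES_PER_CHUNK] * (count - len(chunks))
    while len(chunks) > 1:
        chunks = [hash(chunks[i] + chunks[i + 1]) for i in range(0, len(chunks), 2)]
    return chunks[0]

def mix_in_length(root: bytes, length: int) -> bytes:
    # Mix the length (uint256 little-endian) into the root
    return hash(root + length.to_bytes(32, 'little'))
```
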
## Self-signed containers

Let `value` be a self-signed container object. The convention is that the signature (e.g. a `bytes96` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signed_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`.
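A minimal sketch of `signed_root` under this convention (illustrative; it assumes the container exposes an ordered `fields` mapping from field names to types, and that `hash_tree_root` accepts the truncated field tuple):

```python
def truncate_last(value):
    # Drop the last field (by convention, the signature) from the container
    field_names = list(value.fields.keys())[:-1]
    return [getattr(value, name) for name in field_names]

def signed_root(value):
    # hash_tree_root over everything except the trailing signature field
    return hash_tree_root(truncate_last(value))
```
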
## Implementations

| Language | Project | Maintainer | Implementation |
|-|-|-|-|
| Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) |
| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz](https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz) |
| Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) |
| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/util/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) |
| Javascript | Lodestar | Chain Safe Systems | [https://github.com/ChainSafeSystems/ssz-js/blob/master/src/index.js](https://github.com/ChainSafeSystems/ssz-js/blob/master/src/index.js) |
| Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) |
| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz](https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz) |
| Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) |
| C# | | Jordan Andrews | [https://github.com/codingupastorm/csharp-ssz](https://github.com/codingupastorm/csharp-ssz) |
| C++ | | | [https://github.com/NAKsir-melody/cpp_ssz](https://github.com/NAKsir-melody/cpp_ssz) |

## Copyright

Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).

@@ -50,7 +50,7 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers

- [Aggregation bitfield](#aggregation-bitfield)
- [Custody bitfield](#custody-bitfield)
- [Aggregate signature](#aggregate-signature)
- [Validator assignments](#validator-assignments)
- [Lookahead](#lookahead)
- [How to avoid slashing](#how-to-avoid-slashing)
- [Proposer slashing](#proposer-slashing)

@@ -353,7 +353,7 @@ def get_committee_assignment(

    a beacon block at the assigned slot.
    """
    previous_epoch = get_previous_epoch(state)
    next_epoch = get_current_epoch(state) + 1
    assert previous_epoch <= epoch <= next_epoch

    epoch_start_slot = get_epoch_start_slot(epoch)

@@ -371,8 +371,7 @@ def get_committee_assignment(

    if len(selected_committees) > 0:
        validators = selected_committees[0][0]
        shard = selected_committees[0][1]
        is_proposer = validator_index == get_beacon_proposer_index(state, slot, registry_change=registry_change)

        assignment = (validators, shard, slot, is_proposer)
        return assignment