Merge branch 'dev' into patch-1

Dankrad Feist 2019-03-06 17:28:09 +01:00
commit 9d175c9dc9
4 changed files with 690 additions and 544 deletions


@ -55,6 +55,7 @@
- [Helper functions](#helper-functions)
- [`hash`](#hash)
- [`hash_tree_root`](#hash_tree_root)
- [`signed_root`](#signed_root)
- [`slot_to_epoch`](#slot_to_epoch)
- [`get_previous_epoch`](#get_previous_epoch)
- [`get_current_epoch`](#get_current_epoch)
@ -75,6 +76,7 @@
- [`generate_seed`](#generate_seed)
- [`get_beacon_proposer_index`](#get_beacon_proposer_index)
- [`merkle_root`](#merkle_root)
- [`verify_merkle_branch`](#verify_merkle_branch)
- [`get_attestation_participants`](#get_attestation_participants)
- [`is_power_of_two`](#is_power_of_two)
- [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-)
@ -385,7 +387,7 @@ The following data structures are defined as [SimpleSerialize (SSZ)](https://git
```python
{
    # Branch in the deposit tree
    'branch': ['bytes32'],
    'proof': ['bytes32'],
    # Index in the deposit tree
    'index': 'uint64',
    # Data
@ -876,19 +878,22 @@ def get_crosslink_committees_at_slot(state: BeaconState,
        shuffling_epoch = state.previous_shuffling_epoch
        shuffling_start_shard = state.previous_shuffling_start_shard
    elif epoch == next_epoch:
        current_committees_per_epoch = get_current_epoch_committee_count(state)
        committees_per_epoch = get_next_epoch_committee_count(state)
        shuffling_epoch = next_epoch
        epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch
        if registry_change:
            committees_per_epoch = get_next_epoch_committee_count(state)
            seed = generate_seed(state, next_epoch)
            shuffling_epoch = next_epoch
            current_committees_per_epoch = get_current_epoch_committee_count(state)
            shuffling_start_shard = (state.current_shuffling_start_shard + current_committees_per_epoch) % SHARD_COUNT
        elif epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update):
            committees_per_epoch = get_next_epoch_committee_count(state)
            seed = generate_seed(state, next_epoch)
            shuffling_epoch = next_epoch
            shuffling_start_shard = state.current_shuffling_start_shard
        else:
            committees_per_epoch = get_current_epoch_committee_count(state)
            seed = state.current_shuffling_seed
            shuffling_epoch = state.current_shuffling_epoch
            shuffling_start_shard = state.current_shuffling_start_shard
    shuffling = get_shuffling(
@ -967,11 +972,19 @@ def generate_seed(state: BeaconState,
```python
def get_beacon_proposer_index(state: BeaconState,
                              slot: Slot) -> ValidatorIndex:
                              slot: Slot,
                              registry_change: bool=False) -> ValidatorIndex:
    """
    Return the beacon proposer index for the ``slot``.
    """
    first_committee, _ = get_crosslink_committees_at_slot(state, slot)[0]
    epoch = slot_to_epoch(slot)
    current_epoch = get_current_epoch(state)
    previous_epoch = get_previous_epoch(state)
    next_epoch = current_epoch + 1
    assert previous_epoch <= epoch <= next_epoch
    first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0]
    return first_committee[slot % len(first_committee)]
```
@ -989,6 +1002,23 @@ def merkle_root(values: List[Bytes32]) -> Bytes32:
return o[1]
```
### `verify_merkle_branch`
```python
def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: int, root: Bytes32) -> bool:
    """
    Verify that the given ``leaf`` is on the merkle branch ``proof``
    starting with the given ``root``.
    """
    value = leaf
    for i in range(depth):
        if index // (2**i) % 2:
            value = hash(proof[i] + value)
        else:
            value = hash(value + proof[i])
    return value == root
```
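
To make the proof layout concrete, here is a minimal, non-normative usage sketch; it assumes `hash` is the spec's 32-byte hash function (modeled with SHA-256 below) and builds a depth-2 tree over four leaves:

```python
from hashlib import sha256

def hash(data: bytes) -> bytes:
    # Stand-in for the spec's hash function (assumed SHA-256 here)
    return sha256(data).digest()

# Four 32-byte leaves of a depth-2 Merkle tree
leaves = [bytes([i]) * 32 for i in range(4)]
# Level-1 nodes and the root
level1 = [hash(leaves[0] + leaves[1]), hash(leaves[2] + leaves[3])]
root = hash(level1[0] + level1[1])
# Proof for leaf index 2: its sibling (leaf 3), then the sibling level-1 node
proof = [leaves[3], level1[0]]
assert verify_merkle_branch(leaf=leaves[2], proof=proof, depth=2, index=2, root=root)
```

At each level `i`, bit `i` of `index` selects whether the proof node is hashed on the left or on the right.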
### `get_attestation_participants`
```python
@ -1051,7 +1081,7 @@ def get_effective_balance(state: State, index: ValidatorIndex) -> Gwei:
```python
def get_total_balance(state: BeaconState, validators: List[ValidatorIndex]) -> Gwei:
    """
    Return the combined effective balance of an array of validators.
    Return the combined effective balance of an array of ``validators``.
    """
    return sum([get_effective_balance(state, i) for i in validators])
```
@ -1235,6 +1265,31 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
"""
deposit_input = deposit.deposit_data.deposit_input
# Should equal 8 bytes for deposit_data.amount +
# 8 bytes for deposit_data.timestamp +
# 176 bytes for deposit_data.deposit_input
# It should match the deposit_data in the eth1.0 deposit contract
serialized_deposit_data = serialize(deposit.deposit_data)
# Deposits must be processed in order
assert deposit.index == state.deposit_index
# Verify the Merkle branch
merkle_branch_is_valid = verify_merkle_branch(
leaf=hash(serialized_deposit_data),
proof=deposit.proof,
depth=DEPOSIT_CONTRACT_TREE_DEPTH,
index=deposit.index,
root=state.latest_eth1_data.deposit_root,
)
assert merkle_branch_is_valid
# Increment the next deposit index we are expecting. Note that this
# needs to be done here because while the deposit contract will never
# create an invalid Merkle branch, it may admit an invalid deposit
# object, and we need to be able to skip over it
state.deposit_index += 1
# Verify the proof of possession
proof_is_valid = bls_verify(
pubkey=deposit_input.pubkey,
message_hash=signed_root(deposit_input, "proof_of_possession"),
@ -1315,12 +1370,13 @@ def exit_validator(state: BeaconState, index: ValidatorIndex) -> None:
    Note that this function mutates ``state``.
    """
    validator = state.validator_registry[index]
    delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
    # The following updates only occur if not previously exited
    if validator.exit_epoch <= get_delayed_activation_exit_epoch(get_current_epoch(state)):
    if validator.exit_epoch <= delayed_activation_exit_epoch:
        return
    validator.exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
    else:
        validator.exit_epoch = delayed_activation_exit_epoch
```
#### `slash_validator`
@ -1449,7 +1505,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit],
        validator_registry_update_epoch=GENESIS_EPOCH,
        # Randomness and committees
        latest_randao_mixes=[EMPTY_SIGNATURE for _ in range(LATEST_RANDAO_MIXES_LENGTH)],
        latest_randao_mixes=[ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)],
        previous_shuffling_start_shard=GENESIS_START_SHARD,
        current_shuffling_start_shard=GENESIS_START_SHARD,
        previous_shuffling_epoch=GENESIS_EPOCH,
@ -1474,7 +1530,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit],
        # Ethereum 1.0 chain data
        latest_eth1_data=latest_eth1_data,
        eth1_data_votes=[],
        deposit_index=len(genesis_validator_deposits)
        deposit_index=0,
    )
    # Process genesis deposits
@ -1627,59 +1683,123 @@ Below are the processing steps that happen at every `block`.
Verify that `len(block.body.proposer_slashings) <= MAX_PROPOSER_SLASHINGS`.
For each `proposer_slashing` in `block.body.proposer_slashings`:
For each `proposer_slashing` in `block.body.proposer_slashings`, run the following function:
* Let `proposer = state.validator_registry[proposer_slashing.proposer_index]`.
* Verify that `proposer_slashing.proposal_1.slot == proposer_slashing.proposal_2.slot`.
* Verify that `proposer_slashing.proposal_1.shard == proposer_slashing.proposal_2.shard`.
* Verify that `proposer_slashing.proposal_1.block_root != proposer_slashing.proposal_2.block_root`.
* Verify that `proposer.slashed == False`.
* Verify that `bls_verify(pubkey=proposer.pubkey, message_hash=signed_root(proposer_slashing.proposal_1, "signature"), signature=proposer_slashing.proposal_1.signature, domain=get_domain(state.fork, slot_to_epoch(proposer_slashing.proposal_1.slot), DOMAIN_PROPOSAL))`.
* Verify that `bls_verify(pubkey=proposer.pubkey, message_hash=signed_root(proposer_slashing.proposal_2, "signature"), signature=proposer_slashing.proposal_2.signature, domain=get_domain(state.fork, slot_to_epoch(proposer_slashing.proposal_2.slot), DOMAIN_PROPOSAL))`.
* Run `slash_validator(state, proposer_slashing.proposer_index)`.
```python
def process_proposer_slashing(state: BeaconState,
                              proposer_slashing: ProposerSlashing) -> None:
    """
    Process ``ProposerSlashing`` transaction.
    Note that this function mutates ``state``.
    """
    proposer = state.validator_registry[proposer_slashing.proposer_index]
    # Verify that the slot is the same
    assert proposer_slashing.proposal_1.slot == proposer_slashing.proposal_2.slot
    # Verify that the shard is the same (or that both proposals are beacon chain proposals)
    assert proposer_slashing.proposal_1.shard == proposer_slashing.proposal_2.shard
    # But the roots are different!
    assert proposer_slashing.proposal_1.block_root != proposer_slashing.proposal_2.block_root
    # Proposer is not yet slashed
    assert proposer.slashed is False
    # Signatures are valid
    for proposal in (proposer_slashing.proposal_1, proposer_slashing.proposal_2):
        assert bls_verify(
            pubkey=proposer.pubkey,
            message_hash=signed_root(proposal, "signature"),
            signature=proposal.signature,
            domain=get_domain(state.fork, slot_to_epoch(proposal.slot), DOMAIN_PROPOSAL)
        )
    slash_validator(state, proposer_slashing.proposer_index)
```
##### Attester slashings
Verify that `len(block.body.attester_slashings) <= MAX_ATTESTER_SLASHINGS`.
For each `attester_slashing` in `block.body.attester_slashings`:
For each `attester_slashing` in `block.body.attester_slashings`, run the following function:
* Let `slashable_attestation_1 = attester_slashing.slashable_attestation_1`.
* Let `slashable_attestation_2 = attester_slashing.slashable_attestation_2`.
* Verify that `slashable_attestation_1.data != slashable_attestation_2.data`.
* Verify that `is_double_vote(slashable_attestation_1.data, slashable_attestation_2.data)` or `is_surround_vote(slashable_attestation_1.data, slashable_attestation_2.data)`.
* Verify that `verify_slashable_attestation(state, slashable_attestation_1)`.
* Verify that `verify_slashable_attestation(state, slashable_attestation_2)`.
* Let `slashable_indices = [index for index in slashable_attestation_1.validator_indices if index in slashable_attestation_2.validator_indices and state.validator_registry[index].slashed == False]`.
* Verify that `len(slashable_indices) >= 1`.
* Run `slash_validator(state, index)` for each `index` in `slashable_indices`.
```python
def process_attester_slashing(state: BeaconState,
                              attester_slashing: AttesterSlashing) -> None:
    """
    Process ``AttesterSlashing`` transaction.
    Note that this function mutates ``state``.
    """
    attestation1 = attester_slashing.slashable_attestation_1
    attestation2 = attester_slashing.slashable_attestation_2
    # Check that the attestations are conflicting
    assert attestation1.data != attestation2.data
    assert (
        is_double_vote(attestation1.data, attestation2.data) or
        is_surround_vote(attestation1.data, attestation2.data)
    )
    assert verify_slashable_attestation(state, attestation1)
    assert verify_slashable_attestation(state, attestation2)
    slashable_indices = [
        index for index in attestation1.validator_indices
        if (
            index in attestation2.validator_indices and
            state.validator_registry[index].slashed is False
        )
    ]
    assert len(slashable_indices) >= 1
    for index in slashable_indices:
        slash_validator(state, index)
```
##### Attestations
Verify that `len(block.body.attestations) <= MAX_ATTESTATIONS`.
For each `attestation` in `block.body.attestations`:
* Verify that `attestation.data.slot >= GENESIS_SLOT`.
* Verify that `attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot`.
* Verify that `state.slot < attestation.data.slot + SLOTS_PER_EPOCH`.
* Verify that `attestation.data.justified_epoch` is equal to `state.justified_epoch if slot_to_epoch(attestation.data.slot + 1) >= get_current_epoch(state) else state.previous_justified_epoch`.
* Verify that `attestation.data.justified_block_root` is equal to `get_block_root(state, get_epoch_start_slot(attestation.data.justified_epoch))`.
* Verify that either (i) `state.latest_crosslinks[attestation.data.shard] == attestation.data.latest_crosslink` or (ii) `state.latest_crosslinks[attestation.data.shard] == Crosslink(crosslink_data_root=attestation.data.crosslink_data_root, epoch=slot_to_epoch(attestation.data.slot))`.
* Verify bitfields and aggregate signature:
For each `attestation` in `block.body.attestations`, run the following function:
```python
assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) # [TO BE REMOVED IN PHASE 1]
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    """
    Process ``Attestation`` transaction.
    Note that this function mutates ``state``.
    """
    # Can't submit attestations that are too far in history (or in prehistory)
    assert attestation.data.slot >= GENESIS_SLOT
    assert state.slot < attestation.data.slot + SLOTS_PER_EPOCH
    # Can't submit attestations too quickly
    assert attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot
    # Verify that the justified epoch is correct, case 1: current epoch attestations
    if slot_to_epoch(attestation.data.slot + 1) >= get_current_epoch(state):
        assert attestation.data.justified_epoch == state.justified_epoch
    # Case 2: previous epoch attestations
    else:
        assert attestation.data.justified_epoch == state.previous_justified_epoch
    # Check that the justified block root is correct
    assert attestation.data.justified_block_root == get_block_root(
        state, get_epoch_start_slot(attestation.data.justified_epoch)
    )
    # Check that the crosslink data is valid
    acceptable_crosslink_data = {
        # Case 1: Latest crosslink matches the one in the state
        attestation.data.latest_crosslink,
        # Case 2: State has already been updated, state's latest crosslink matches the crosslink
        # the attestation is trying to create
        Crosslink(
            crosslink_data_root=attestation.data.crosslink_data_root,
            epoch=slot_to_epoch(attestation.data.slot)
        )
    }
    assert state.latest_crosslinks[attestation.data.shard] in acceptable_crosslink_data
    # Attestation must be nonempty!
    assert attestation.aggregation_bitfield != b'\x00' * len(attestation.aggregation_bitfield)
    # Custody must be empty (to be removed in phase 1)
    assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield)
    # Get the committee for the specific shard that this attestation is for
    crosslink_committee = [
        committee for committee, shard in get_crosslink_committees_at_slot(state, attestation.data.slot)
        if shard == attestation.data.shard
    ][0]
    # Custody bitfield must be a subset of the attestation bitfield
    for i in range(len(crosslink_committee)):
        if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b0:
            assert get_bitfield_bit(attestation.custody_bitfield, i) == 0b0
    # Verify aggregate signature
    participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield)
    custody_bit_1_participants = get_attestation_participants(state, attestation.data, attestation.custody_bitfield)
    custody_bit_0_participants = [i for i in participants if i not in custody_bit_1_participants]
@ -1696,56 +1816,50 @@ For each `attestation` in `block.body.attestations`:
        signature=attestation.aggregate_signature,
        domain=get_domain(state.fork, slot_to_epoch(attestation.data.slot), DOMAIN_ATTESTATION),
    )
    # Crosslink data root is zero (to be removed in phase 1)
    assert attestation.data.crosslink_data_root == ZERO_HASH
    # Apply the attestation
    state.latest_attestations.append(PendingAttestation(
        data=attestation.data,
        aggregation_bitfield=attestation.aggregation_bitfield,
        custody_bitfield=attestation.custody_bitfield,
        inclusion_slot=state.slot)
    )
```
* [TO BE REMOVED IN PHASE 1] Verify that `attestation.data.crosslink_data_root == ZERO_HASH`.
* Append `PendingAttestation(data=attestation.data, aggregation_bitfield=attestation.aggregation_bitfield, custody_bitfield=attestation.custody_bitfield, inclusion_slot=state.slot)` to `state.latest_attestations`.
##### Deposits
Verify that `len(block.body.deposits) <= MAX_DEPOSITS`.
[TODO: update the call to `verify_merkle_branch` below if it needs to change after we process deposits in order]
For each `deposit` in `block.body.deposits`:
* Let `serialized_deposit_data` be the serialized form of `deposit.deposit_data`. It should be 8 bytes for `deposit_data.amount` followed by 8 bytes for `deposit_data.timestamp` and then the `DepositInput` bytes. That is, it should match `deposit_data` in the [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract), whose hash was placed into the Merkle tree.
* Verify that `deposit.index == state.deposit_index`.
* Verify that `verify_merkle_branch(hash(serialized_deposit_data), deposit.branch, DEPOSIT_CONTRACT_TREE_DEPTH, deposit.index, state.latest_eth1_data.deposit_root)` is `True`.
```python
def verify_merkle_branch(leaf: Bytes32, branch: List[Bytes32], depth: int, index: int, root: Bytes32) -> bool:
    """
    Verify that the given ``leaf`` is on the merkle branch ``branch``.
    """
    value = leaf
    for i in range(depth):
        if index // (2**i) % 2:
            value = hash(branch[i] + value)
        else:
            value = hash(value + branch[i])
    return value == root
```
* Run the following:
```python
process_deposit(state, deposit)
```
* Set `state.deposit_index += 1`.
For each `deposit` in `block.body.deposits`, run `process_deposit(state, deposit)`.
##### Voluntary exits
Verify that `len(block.body.voluntary_exits) <= MAX_VOLUNTARY_EXITS`.
For each `exit` in `block.body.voluntary_exits`:
For each `exit` in `block.body.voluntary_exits`, run the following function:
* Let `validator = state.validator_registry[exit.validator_index]`.
* Verify that `validator.exit_epoch > get_delayed_activation_exit_epoch(get_current_epoch(state))`.
* Verify that `get_current_epoch(state) >= exit.epoch`.
* Verify that `bls_verify(pubkey=validator.pubkey, message_hash=signed_root(exit, "signature"), signature=exit.signature, domain=get_domain(state.fork, exit.epoch, DOMAIN_EXIT))`.
* Run `initiate_validator_exit(state, exit.validator_index)`.
```python
def process_exit(state: BeaconState, exit: VoluntaryExit) -> None:
    """
    Process ``VoluntaryExit`` transaction.
    Note that this function mutates ``state``.
    """
    validator = state.validator_registry[exit.validator_index]
    # Verify the validator has not yet exited
    assert validator.exit_epoch > get_delayed_activation_exit_epoch(get_current_epoch(state))
    # Exits must specify an epoch when they become valid; they are not valid before then
    assert get_current_epoch(state) >= exit.epoch
    # Verify signature
    assert bls_verify(
        pubkey=validator.pubkey,
        message_hash=signed_root(exit, "signature"),
        signature=exit.signature,
        domain=get_domain(state.fork, exit.epoch, DOMAIN_EXIT)
    )
    # Run the exit
    initiate_validator_exit(state, exit.validator_index)
```
##### Transfers
@ -1753,18 +1867,46 @@ Note: Transfers are a temporary functionality for phases 0 and 1, to be removed
Verify that `len(block.body.transfers) <= MAX_TRANSFERS` and that all transfers are distinct.
For each `transfer` in `block.body.transfers`:
For each `transfer` in `block.body.transfers`, run the following function:
* Verify that `state.validator_balances[transfer.from] >= transfer.amount`.
* Verify that `state.validator_balances[transfer.from] >= transfer.fee`.
* Verify that `state.validator_balances[transfer.from] == transfer.amount + transfer.fee` or `state.validator_balances[transfer.from] >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT`.
* Verify that `state.slot == transfer.slot`.
* Verify that `get_current_epoch(state) >= state.validator_registry[transfer.from].withdrawable_epoch` or `state.validator_registry[transfer.from].activation_epoch == FAR_FUTURE_EPOCH`.
* Verify that `state.validator_registry[transfer.from].withdrawal_credentials == BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:]`.
* Verify that `bls_verify(pubkey=transfer.pubkey, message_hash=signed_root(transfer, "signature"), signature=transfer.signature, domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER))`.
* Set `state.validator_balances[transfer.from] -= transfer.amount + transfer.fee`.
* Set `state.validator_balances[transfer.to] += transfer.amount`.
* Set `state.validator_balances[get_beacon_proposer_index(state, state.slot)] += transfer.fee`.
```python
def process_transfer(state: BeaconState, transfer: Transfer) -> None:
    """
    Process ``Transfer`` transaction.
    Note that this function mutates ``state``.
    """
    # Verify the amount and fee aren't individually too big (for anti-overflow purposes)
    assert state.validator_balances[transfer.from] >= max(transfer.amount, transfer.fee)
    # Verify that we have enough ETH to send, and that after the transfer the balance will be either
    # exactly zero or at least MIN_DEPOSIT_AMOUNT
    assert (
        state.validator_balances[transfer.from] == transfer.amount + transfer.fee or
        state.validator_balances[transfer.from] >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT
    )
    # A transfer is valid in only one slot
    assert state.slot == transfer.slot
    # Only withdrawn or not-yet-deposited accounts can transfer
    assert (
        get_current_epoch(state) >= state.validator_registry[transfer.from].withdrawable_epoch or
        state.validator_registry[transfer.from].activation_epoch == FAR_FUTURE_EPOCH
    )
    # Verify that the pubkey is valid
    assert (
        state.validator_registry[transfer.from].withdrawal_credentials ==
        BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:]
    )
    # Verify that the signature is valid
    assert bls_verify(
        pubkey=transfer.pubkey,
        message_hash=signed_root(transfer, "signature"),
        signature=transfer.signature,
        domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER)
    )
    # Process the transfer
    state.validator_balances[transfer.from] -= transfer.amount + transfer.fee
    state.validator_balances[transfer.to] += transfer.amount
    state.validator_balances[get_beacon_proposer_index(state, state.slot)] += transfer.fee
```
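
As an illustration of the balance checks above, a non-normative sketch in Gwei, assuming `MIN_DEPOSIT_AMOUNT` is 1 ETH (10**9 Gwei); `transfer_balance_checks_pass` is a hypothetical helper, not part of the spec:

```python
MIN_DEPOSIT_AMOUNT = 10**9  # assumed value (1 ETH) for illustration

def transfer_balance_checks_pass(balance: int, amount: int, fee: int) -> bool:
    # Anti-overflow: amount and fee must each be individually covered
    if balance < max(amount, fee):
        return False
    # Afterwards the sender holds exactly 0 Gwei or at least MIN_DEPOSIT_AMOUNT
    return balance == amount + fee or balance >= amount + fee + MIN_DEPOSIT_AMOUNT

assert transfer_balance_checks_pass(32 * 10**9, 31 * 10**9, 10**9)       # leaves exactly 0
assert not transfer_balance_checks_pass(32 * 10**9, 31 * 10**9, 10**8)   # would leave dust below 1 ETH
```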
### Per-epoch processing
@ -1862,30 +2004,29 @@ Note: When applying penalties in the following balance recalculations implemente
##### Justification and finalization
Note: Rewards and penalties are for participation in the previous epoch, so the "active validator" set is drawn from `get_active_validator_indices(state.validator_registry, previous_epoch)`.
* Let `previous_active_validator_indices = get_active_validator_indices(state.validator_registry, previous_epoch)`
* Let `epochs_since_finality = next_epoch - state.finalized_epoch`.
Case 1: `epochs_since_finality <= 4` (a non-normative sketch of the FFG-source deltas follows this list):
* Expected FFG source:
* Any [validator](#dfn-validator) `index` in `previous_epoch_attester_indices` gains `base_reward(state, index) * previous_epoch_attesting_balance // previous_total_balance`.
* Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_attester_indices` loses `base_reward(state, index)`.
* Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_attester_indices` loses `base_reward(state, index)`.
* Expected FFG target:
* Any [validator](#dfn-validator) `index` in `previous_epoch_boundary_attester_indices` gains `base_reward(state, index) * previous_epoch_boundary_attesting_balance // previous_total_balance`.
* Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_boundary_attester_indices` loses `base_reward(state, index)`.
* Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_boundary_attester_indices` loses `base_reward(state, index)`.
* Expected beacon chain head:
* Any [validator](#dfn-validator) `index` in `previous_epoch_head_attester_indices` gains `base_reward(state, index) * previous_epoch_head_attesting_balance // previous_total_balance`.
* Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_head_attester_indices` loses `base_reward(state, index)`.
* Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_head_attester_indices` loses `base_reward(state, index)`.
* Inclusion distance:
* Any [validator](#dfn-validator) `index` in `previous_epoch_attester_indices` gains `base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_distance(state, index)`
Case 2: `epochs_since_finality > 4`:
* Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_attester_indices`, loses `inactivity_penalty(state, index, epochs_since_finality)`.
* Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_boundary_attester_indices`, loses `inactivity_penalty(state, index, epochs_since_finality)`.
* Any [active validator](#dfn-active-validator) `index` not in `previous_epoch_head_attester_indices`, loses `base_reward(state, index)`.
* Any [active validator](#dfn-active-validator) `index` with `validator.slashed == True`, loses `2 * inactivity_penalty(state, index, epochs_since_finality) + base_reward(state, index)`.
* Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_attester_indices`, loses `inactivity_penalty(state, index, epochs_since_finality)`.
* Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_boundary_attester_indices`, loses `inactivity_penalty(state, index, epochs_since_finality)`.
* Any [active validator](#dfn-active-validator) `index` from `previous_active_validator_indices` not in `previous_epoch_head_attester_indices`, loses `base_reward(state, index)`.
* Any [active validator](#dfn-active-validator) `index` with `validator.slashed is True`, loses `2 * inactivity_penalty(state, index, epochs_since_finality) + base_reward(state, index)`.
* Any [validator](#dfn-validator) `index` in `previous_epoch_attester_indices` loses `base_reward(state, index) - base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_distance(state, index)`
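
A non-normative sketch of the Case 1 expected-FFG-source deltas above; like the spec's `eligible` helper, it is not a pure function and reads `state` and the precomputed index sets and balances (`previous_epoch_attester_indices`, `previous_active_validator_indices`, `previous_epoch_attesting_balance`, `previous_total_balance`) from the surrounding scope:

```python
def get_ffg_source_deltas() -> Dict[ValidatorIndex, int]:
    # Positive delta = reward, negative delta = penalty (Case 1 only)
    deltas = {}
    for index in previous_epoch_attester_indices:
        # Reward scales with the fraction of the total balance that attested
        deltas[index] = base_reward(state, index) * previous_epoch_attesting_balance // previous_total_balance
    for index in previous_active_validator_indices:
        if index not in previous_epoch_attester_indices:
            deltas[index] = -base_reward(state, index)
    return deltas
```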
##### Attestation inclusion
@ -1964,7 +2105,7 @@ def update_validator_registry(state: BeaconState) -> None:
    # Exit validators within the allowable balance churn
    balance_churn = 0
    for index, validator in enumerate(state.validator_registry):
        if validator.activation_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit:
        if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit:
            # Check the balance churn would be within the allowance
            balance_churn += get_effective_balance(state, index)
            if balance_churn > max_balance_churn:
@ -1978,8 +2119,8 @@ def update_validator_registry(state: BeaconState) -> None:
and perform the following updates:
* Set `state.current_shuffling_epoch = next_epoch`
* Set `state.current_shuffling_start_shard = (state.current_shuffling_start_shard + get_current_epoch_committee_count(state)) % SHARD_COUNT`
* Set `state.current_shuffling_epoch = next_epoch`
* Set `state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch)`
If a validator registry update does _not_ happen do the following:
@ -2004,12 +2145,14 @@ def process_slashings(state: BeaconState) -> None:
    active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch)
    total_balance = sum(get_effective_balance(state, i) for i in active_validator_indices)
    # Compute `total_penalties`
    epoch_index = current_epoch % LATEST_SLASHED_EXIT_LENGTH
    total_at_start = state.latest_slashed_balances[(epoch_index + 1) % LATEST_SLASHED_EXIT_LENGTH]
    total_at_end = state.latest_slashed_balances[epoch_index]
    total_penalties = total_at_end - total_at_start
    for index, validator in enumerate(state.validator_registry):
        if validator.slashed and current_epoch == validator.withdrawable_epoch - LATEST_SLASHED_EXIT_LENGTH // 2:
            epoch_index = current_epoch % LATEST_SLASHED_EXIT_LENGTH
            total_at_start = state.latest_slashed_balances[(epoch_index + 1) % LATEST_SLASHED_EXIT_LENGTH]
            total_at_end = state.latest_slashed_balances[epoch_index]
            total_penalties = total_at_end - total_at_start
            penalty = max(
                get_effective_balance(state, index) * min(total_penalties * 3, total_balance) // total_balance,
                get_effective_balance(state, index) // MIN_PENALTY_QUOTIENT
@ -2044,7 +2187,7 @@ def process_exit_queue(state: BeaconState) -> None:
#### Final updates
* Set `state.latest_active_index_roots[(next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH] = hash_tree_root(get_active_validator_indices(state.validator_registry, next_epoch + ACTIVATION_EXIT_DELAY))`.
* Set `state.latest_slashed_balances[(next_epoch) % LATEST_SLASHED_EXIT_LENGTH] = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH]`.
* Set `state.latest_slashed_balances[next_epoch % LATEST_SLASHED_EXIT_LENGTH] = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH]`.
* Set `state.latest_randao_mixes[next_epoch % LATEST_RANDAO_MIXES_LENGTH] = get_randao_mix(state, current_epoch)`.
* Remove any `attestation` in `state.latest_attestations` such that `slot_to_epoch(attestation.data.slot) < current_epoch`.


@ -2,6 +2,8 @@
**NOTICE**: This document is a work-in-progress for researchers and implementers. It reflects recent spec changes and takes precedence over the [Python proof-of-concept implementation](https://github.com/ethereum/beacon_chain).
At the current stage, Phase 1, while fundamentally feature-complete, is still subject to change. Development teams with spare resources may consider starting on the "Shard chains and crosslink data" section; at least basic properties, such as the fact that a shard block can get created every slot and is dependent on both a parent block in the same shard and a beacon chain block at or before that same slot, are unlikely to change, though details are likely to undergo similar kinds of changes to what Phase 0 has undergone since the start of the year.
## Table of contents
<!-- TOC -->
@ -15,19 +17,20 @@
- [Time parameters](#time-parameters)
- [Max operations per block](#max-operations-per-block)
- [Signature domains](#signature-domains)
- [Helper functions](#helper-functions)
- [Shard chains and crosslink data](#shard-chains-and-crosslink-data)
- [Helper functions](#helper-functions)
- [`get_split_offset`](#get_split_offset)
- [`get_shuffled_committee`](#get_shuffled_committee)
- [`get_persistent_committee`](#get_persistent_committee)
- [`get_shard_proposer_index`](#get_shard_proposer_index)
- [Data Structures](#data-structures)
- [Data Structures](#data-structures)
- [Shard chain blocks](#shard-chain-blocks)
- [Shard block processing](#shard-block-processing)
- [Shard block processing](#shard-block-processing)
- [Verifying shard block data](#verifying-shard-block-data)
- [Verifying a crosslink](#verifying-a-crosslink)
- [Shard block fork choice rule](#shard-block-fork-choice-rule)
- [Updates to the beacon chain](#updates-to-the-beacon-chain)
- [Data structures](#data-structures)
- [Updates to the beacon chain](#updates-to-the-beacon-chain)
- [Data structures](#data-structures)
- [`Validator`](#validator)
- [`BeaconBlockBody`](#beaconblockbody)
- [`BranchChallenge`](#branchchallenge)
@ -35,20 +38,20 @@
- [`BranchChallengeRecord`](#branchchallengerecord)
- [`SubkeyReveal`](#subkeyreveal)
- [Helpers](#helpers)
- [`get_attestation_merkle_depth`](#get_attestation_merkle_depth)
- [`get_attestation_data_merkle_depth`](#get_attestation_data_merkle_depth)
- [`epoch_to_custody_period`](#epoch_to_custody_period)
- [`slot_to_custody_period`](#slot_to_custody_period)
- [`get_current_custody_period`](#get_current_custody_period)
- [`verify_custody_subkey_reveal`](#verify_custody_subkey_reveal)
- [`prepare_validator_for_withdrawal`](#prepare_validator_for_withdrawal)
- [`penalize_validator`](#penalize_validator)
- [Per-slot processing](#per-slot-processing)
- [Per-slot processing](#per-slot-processing)
- [Operations](#operations)
- [Branch challenges](#branch-challenges)
- [Branch responses](#branch-responses)
- [Subkey reveals](#subkey-reveals)
- [Per-epoch processing](#per-epoch-processing)
- [One-time phase 1 initiation transition](#one-time-phase-1-initiation-transition)
- [Per-epoch processing](#per-epoch-processing)
- [One-time phase 1 initiation transition](#one-time-phase-1-initiation-transition)
<!-- /TOC -->
@ -71,6 +74,9 @@ Phase 1 depends upon all of the constants defined in [Phase 0](0_beacon-chain.md
| `SHARD_CHUNK_SIZE` | 2**5 (= 32) | bytes |
| `SHARD_BLOCK_SIZE` | 2**14 (= 16,384) | bytes |
| `MINOR_REWARD_QUOTIENT` | 2**8 (= 256) | |
| `MAX_POC_RESPONSE_DEPTH` | 5 | |
| `ZERO_PUBKEY` | int_to_bytes48(0)| |
| `VALIDATOR_NULL` | 2**64 - 1 | |
#### Time parameters
@ -84,19 +90,25 @@ Phase 1 depends upon all of the constants defined in [Phase 0](0_beacon-chain.md
#### Max operations per block
| Name | Value |
|-------------------------------|---------------|
| `MAX_BRANCH_CHALLENGES` | 2**2 (= 4) |
| `MAX_BRANCH_RESPONSES` | 2**4 (= 16) |
| `MAX_EARLY_SUBKEY_REVEALS` | 2**4 (= 16) |
| Name | Value |
|----------------------------------------------------|---------------|
| `MAX_BRANCH_CHALLENGES` | 2**2 (= 4) |
| `MAX_BRANCH_RESPONSES` | 2**4 (= 16) |
| `MAX_EARLY_SUBKEY_REVEALS` | 2**4 (= 16) |
| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_INITIATIONS` | 2 |
| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_RESPONSES` | 16 |
| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_CONTINUATIONS`  | 16            |
#### Signature domains
| Name | Value |
|------------------------|-----------------|
| `DOMAIN_SHARD_PROPOSER`| 129 |
| `DOMAIN_SHARD_ATTESTER`| 130 |
| `DOMAIN_CUSTODY_SUBKEY`| 131 |
| Name | Value |
|------------------------------|-----------------|
| `DOMAIN_SHARD_PROPOSER` | 129 |
| `DOMAIN_SHARD_ATTESTER` | 130 |
| `DOMAIN_CUSTODY_SUBKEY` | 131 |
| `DOMAIN_CUSTODY_INTERACTIVE` | 132 |
# Shard chains and crosslink data
## Helper functions
@ -158,7 +170,6 @@ def get_persistent_committee(state: BeaconState,
        [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)]
    )))
```
#### `get_shard_proposer_index`
```python
@ -290,7 +301,7 @@ The `shard_chain_commitment` is only valid if it equals `compute_commitment(head
### Shard block fork choice rule
The fork choice rule for any shard is LMD GHOST using the shard chain attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the latest block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].crosslink_data_root`). Only blocks whose `beacon_chain_ref` is the block in the main beacon chain at the specified `slot` should be considered (if the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot).
The fork choice rule for any shard is LMD GHOST using the shard chain attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_chain_ref` is the block in the main beacon chain at the specified `slot` should be considered (if the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot).
# Updates to the beacon chain
@ -301,7 +312,6 @@ The fork choice rule for any shard is LMD GHOST using the shard chain attestatio
Add member values to the end of the `Validator` object:
```python
    'open_branch_challenges': [BranchChallengeRecord],
    'next_subkey_to_reveal': 'uint64',
    'reveal_max_periods_late': 'uint64',
```
@ -309,7 +319,6 @@ Add member values to the end of the `Validator` object:
And the initializers:
```python
    'open_branch_challenges': [],
    'next_subkey_to_reveal': get_current_custody_period(state),
    'reveal_max_periods_late': 0,
```
@ -322,6 +331,10 @@ Add member values to the `BeaconBlockBody` structure:
    'branch_challenges': [BranchChallenge],
    'branch_responses': [BranchResponse],
    'subkey_reveals': [SubkeyReveal],
    'interactive_custody_challenge_initiations': [InteractiveCustodyChallengeInitiation],
    'interactive_custody_challenge_responses': [InteractiveCustodyChallengeResponse],
    'interactive_custody_challenge_continuations': [InteractiveCustodyChallengeContinuation],
```
And initialize to the following:
@ -332,6 +345,17 @@ And initialize to the following:
    'subkey_reveals': [],
```
### `BeaconState`
Add member values to the `BeaconState` structure:
```python
    'branch_challenge_records': [BranchChallengeRecord],
    'next_branch_challenge_id': 'uint64',
    'custody_challenge_records': [InteractiveCustodyChallengeRecord],
    'next_custody_challenge_id': 'uint64',
```
### `BranchChallenge`
Define a `BranchChallenge` as follows:
@ -350,11 +374,10 @@ Define a `BranchResponse` as follows:
```python
{
    'responder_index': 'uint64',
    'challenge_id': 'uint64',
    'responding_to_custody_challenge': 'bool',
    'data': 'bytes32',
    'branch': ['bytes32'],
    'data_index': 'uint64',
    'root': 'bytes32',
}
```
@ -364,14 +387,75 @@ Define a `BranchChallengeRecord` as follows:
```python
{
    'challenge_id': 'uint64',
    'challenger_index': 'uint64',
    'responder_index': 'uint64',
    'root': 'bytes32',
    'depth': 'uint64',
    'inclusion_epoch': 'uint64',
    'deadline': 'uint64',
    'data_index': 'uint64',
}
```
### `InteractiveCustodyChallengeRecord`
```python
{
    'challenge_id': 'uint64',
    'challenger_index': 'uint64',
    'responder_index': 'uint64',
    # Initial data root
    'data_root': 'bytes32',
    # Initial custody bit
    'custody_bit': 'bool',
    # Responder subkey
    'responder_subkey': 'bytes96',
    # The hash in the PoC tree in the position that we are currently at
    'current_custody_tree_node': 'bytes32',
    # The position in the tree, in terms of depth and position offset
    'depth': 'uint64',
    'offset': 'uint64',
    # Max depth of the branch
    'max_depth': 'uint64',
    # Deadline to respond (as an epoch)
    'deadline': 'uint64',
}
```
### `InteractiveCustodyChallengeInitiation`
```python
{
    'attestation': SlashableAttestation,
    'responder_index': 'uint64',
    'challenger_index': 'uint64',
    'responder_subkey': 'bytes96',
    'signature': 'bytes96',
}
```
### `InteractiveCustodyChallengeResponse`
```python
{
    'challenge_id': 'uint64',
    'hashes': ['bytes32'],
    'signature': 'bytes96',
}
```
### `InteractiveCustodyChallengeContinuation`
```python
{
    'challenge_id': 'uint64',
    'sub_index': 'uint64',
    'new_custody_tree_node': 'bytes32',
    'proof': ['bytes32'],
    'signature': 'bytes96',
}
```
### `SubkeyReveal`
Define a `SubkeyReveal` as follows:
@ -388,6 +472,20 @@ Define a `SubkeyReveal` as follows:
## Helpers
### `get_branch_challenge_record_by_id`
```python
def get_branch_challenge_record_by_id(state: BeaconState, id: int) -> BranchChallengeRecord:
    return [c for c in state.branch_challenge_records if c.challenge_id == id][0]
```
### `get_custody_challenge_record_by_id`
```python
def get_custody_challenge_record_by_id(state: BeaconState, id: int) -> InteractiveCustodyChallengeRecord:
    return [c for c in state.custody_challenge_records if c.challenge_id == id][0]
```
### `get_attestation_merkle_depth`
```python
@ -453,6 +551,19 @@ def verify_custody_subkey_reveal(pubkey: bytes48,
)
```
### `verify_signed_challenge_message`
```python
def verify_signed_challenge_message(message: Any, pubkey: bytes48) -> bool:
    return bls_verify(
        message_hash=signed_root(message, 'signature'),
        pubkey=pubkey,
        signature=message.signature,
        domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_CUSTODY_INTERACTIVE)
    )
```
### `penalize_validator`
Change the definition of `penalize_validator` as follows:
@ -493,29 +604,88 @@ Add the following operations to the per-slot processing, in order the given belo
Verify that `len(block.body.branch_challenges) <= MAX_BRANCH_CHALLENGES`.
For each `challenge` in `block.body.branch_challenges`:
For each `challenge` in `block.body.branch_challenges`, run:
* Verify that `slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY`.
* Verify that `state.validator_registry[responder_index].exit_epoch >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY`.
* Verify that `verify_slashable_attestation(state, challenge.attestation)` returns `True`.
* Verify that `challenge.responder_index` is in `challenge.attestation.validator_indices`.
* Let `depth = get_attestation_merkle_depth(challenge.attestation)`. Verify that `challenge.data_index < 2**depth`.
* Verify that there does not exist a `BranchChallengeRecord` in `state.validator_registry[challenge.responder_index].open_branch_challenges` with `root == challenge.attestation.data.shard_chain_commitment` and `data_index == data_index`.
* Append to `state.validator_registry[challenge.responder_index].open_branch_challenges` the object `BranchChallengeRecord(challenger_index=get_beacon_proposer_index(state, state.slot), root=challenge.attestation.data.shard_chain_commitment, depth=depth, inclusion_epoch=get_current_epoch(state), data_index=data_index)`.
**Invariant**: the `open_branch_challenges` array will always stay sorted in order of `inclusion_epoch`.
```python
def process_branch_challenge(challenge: BranchChallenge,
                             state: BeaconState):
    # Check that it's not too late to challenge
    assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY
    assert state.validator_registry[challenge.responder_index].exit_epoch >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY
    # Check the attestation is valid
    assert verify_slashable_attestation(state, challenge.attestation)
    # Check that the responder participated
    assert challenge.responder_index in challenge.attestation.validator_indices
    # Check the challenge is not a duplicate
    assert [
        c for c in state.branch_challenge_records
        if c.root == challenge.attestation.data.shard_chain_commitment and
        c.data_index == challenge.data_index
    ] == []
    # Check validity of depth
    depth = get_attestation_merkle_depth(challenge.attestation)
    assert challenge.data_index < 2**depth
    # Add new challenge
    state.branch_challenge_records.append(BranchChallengeRecord(
        challenge_id=state.next_branch_challenge_id,
        challenger_index=get_beacon_proposer_index(state, state.slot),
        responder_index=challenge.responder_index,
        root=challenge.attestation.data.shard_chain_commitment,
        depth=depth,
        deadline=get_current_epoch(state) + CHALLENGE_RESPONSE_DEADLINE,
        data_index=challenge.data_index
    ))
    state.next_branch_challenge_id += 1
```
#### Branch responses
Verify that `len(block.body.branch_responses) <= MAX_BRANCH_RESPONSES`.
For each `response` in `block.body.branch_responses`:
For each `response` in `block.body.branch_responses`, if `response.responding_to_custody_challenge == False`, run:
* Find the `BranchChallengeRecord` in `state.validator_registry[response.responder_index].open_branch_challenges` whose (`root`, `data_index`) match the (`root`, `data_index`) of the `response`. Verify that one such record exists (it is not possible for there to be more than one), call it `record`.
* Verify that `verify_merkle_branch(leaf=response.data, branch=response.branch, depth=record.depth, index=record.data_index, root=record.root)` is True.
* Verify that `get_current_epoch(state) >= record.inclusion_epoch + ENTRY_EXIT_DELAY`.
* Remove the `record` from `state.validator_registry[response.responder_index].open_branch_challenges`
* Determine the proposer `proposer_index = get_beacon_proposer_index(state, state.slot)` and set `state.validator_balances[proposer_index] += base_reward(state, index) // MINOR_REWARD_QUOTIENT`.
```python
def process_branch_exploration_response(response: BranchResponse,
                                        state: BeaconState):
    challenge = get_branch_challenge_record_by_id(state, response.challenge_id)
    assert verify_merkle_branch(
        leaf=response.data,
        branch=response.branch,
        depth=challenge.depth,
        index=challenge.data_index,
        root=challenge.root
    )
    # Must wait at least ENTRY_EXIT_DELAY before responding to a branch challenge
    assert get_current_epoch(state) >= challenge.inclusion_epoch + ENTRY_EXIT_DELAY
    state.branch_challenge_records.remove(challenge)
    # Reward the proposer
    proposer_index = get_beacon_proposer_index(state, state.slot)
    state.validator_balances[proposer_index] += base_reward(state, challenge.responder_index) // MINOR_REWARD_QUOTIENT
```
If `response.responding_to_custody_challenge == True`, run:
```python
def process_branch_custody_response(response: BranchResponse,
                                    state: BeaconState):
    challenge = get_custody_challenge_record_by_id(state, response.challenge_id)
    responder = state.validator_registry[challenge.responder_index]
    # Verify we're not too late
    assert get_current_epoch(state) < responder.withdrawable_epoch
    # Verify the Merkle branch *of the data tree*
    assert verify_merkle_branch(
        leaf=response.data,
        branch=response.branch,
        depth=challenge.max_depth,
        index=challenge.offset,
        root=challenge.data_root
    )
    # Responder wins
    if hash(challenge.responder_subkey + response.data) == challenge.current_custody_tree_node:
        penalize_validator(state, challenge.challenger_index, challenge.responder_index)
    # Challenger wins
    else:
        penalize_validator(state, challenge.responder_index, challenge.challenger_index)
    state.custody_challenge_records.remove(challenge)
```
#### Subkey reveals
@ -541,6 +711,126 @@ In case (ii):
* Set `state.validator_registry[reveal.validator_index].next_subkey_to_reveal += 1`
* Set `state.validator_registry[reveal.validator_index].reveal_max_periods_late = max(state.validator_registry[reveal.validator_index].reveal_max_periods_late, get_current_period(state) - reveal.period)`.
#### Interactive custody challenge initiations
Verify that `len(block.body.interactive_custody_challenge_initiations) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_INITIATIONS`.
For each `initiation` in `block.body.interactive_custody_challenge_initiations`, use the following function to process it:
```python
def process_initiation(initiation: InteractiveCustodyChallengeInitiation,
                       state: BeaconState):
    challenger = state.validator_registry[initiation.challenger_index]
    responder = state.validator_registry[initiation.responder_index]
    # Verify the signature
    assert verify_signed_challenge_message(initiation, challenger.pubkey)
    # Verify the attestation
    assert verify_slashable_attestation(state, initiation.attestation)
    # Check that the responder actually participated in the attestation
    assert initiation.responder_index in initiation.attestation.validator_indices
    # Any validator can be a challenger or responder of max 1 challenge at a time
    for c in state.custody_challenge_records:
        assert c.challenger_index != initiation.challenger_index
        assert c.responder_index != initiation.responder_index
    # Can't challenge if you've been penalized
    assert challenger.penalized_epoch == FAR_FUTURE_EPOCH
    # Make sure the revealed subkey is valid
    assert verify_custody_subkey_reveal(
        pubkey=responder.pubkey,
        subkey=initiation.responder_subkey,
        period=slot_to_custody_period(initiation.attestation.data.slot)
    )
    # Verify that the attestation is still eligible for challenging
    min_challengeable_epoch = responder.exit_epoch - CUSTODY_PERIOD_LENGTH * (1 + responder.reveal_max_periods_late)
    assert min_challengeable_epoch <= slot_to_epoch(initiation.attestation.data.slot)
    # Create a new challenge object
    state.custody_challenge_records.append(InteractiveCustodyChallengeRecord(
        challenge_id=state.next_custody_challenge_id,
        challenger_index=initiation.challenger_index,
        responder_index=initiation.responder_index,
        data_root=initiation.attestation.custody_commitment,
        custody_bit=get_bitfield_bit(initiation.attestation.custody_bitfield, initiation.attestation.validator_indices.index(initiation.responder_index)),
        responder_subkey=initiation.responder_subkey,
        current_custody_tree_node=ZERO_HASH,
        depth=0,
        offset=0,
        max_depth=get_attestation_data_merkle_depth(initiation.attestation.data),
        deadline=get_current_epoch(state) + CHALLENGE_RESPONSE_DEADLINE
    ))
    state.next_custody_challenge_id += 1
    # Responder can't withdraw yet!
    responder.withdrawable_epoch = FAR_FUTURE_EPOCH
```
#### Interactive custody challenge responses
A response provides 32 hashes that sit under the currently known proof-of-custody tree node. Note that at the beginning the tree node is just one bit of the custody root, so we ask the responder to sign in order to commit to the top 5 levels of the tree and therefore the root hash; at all other stages in the game responses are self-verifying.
Verify that `len(block.body.interactive_custody_challenge_responses) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_RESPONSES`.
For each `response` in `block.body.interactive_custody_challenge_responses`, use the following function to process it:
```python
def process_response(response: InteractiveCustodyChallengeResponse,
                     state: BeaconState):
    challenge = get_custody_challenge_record_by_id(state, response.challenge_id)
    responder = state.validator_registry[challenge.responder_index]
    # Check that the right number of hashes was provided
    expected_depth = min(challenge.max_depth - challenge.depth, MAX_POC_RESPONSE_DEPTH)
    assert 2**expected_depth == len(response.hashes)
    # Must make some progress!
    assert expected_depth > 0
    # Check the hashes match the previously provided root
    root = merkle_root(response.hashes)
    # If this is the first response check the bit and the signature and set the root
    if challenge.depth == 0:
        assert get_bitfield_bit(root, 0) == challenge.custody_bit
        assert verify_signed_challenge_message(response, responder.pubkey)
        challenge.current_custody_tree_node = root
    # Otherwise just check the response against the root
    else:
        assert root == challenge.current_custody_tree_node
    # Update challenge data
    challenge.deadline = FAR_FUTURE_EPOCH
    responder.withdrawable_epoch = get_current_epoch(state) + MAX_POC_RESPONSE_DEPTH
```
#### Interactive custody challenge continuations
Once a response provides 32 hashes, the challenger has the right to choose any one of them that they feel is constructed incorrectly to continue the game. Note that eventually, the game will get to the point where the `new_custody_tree_node` is a leaf node.
Verify that `len(block.body.interactive_custody_challenge_continuations) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_CONTINUATIONS`.
For each `continuation` in `block.body.interactive_custody_challenge_continuations`, use the following function to process it:
```python
def process_continuation(continuation: InteractiveCustodyChallengeContinuation,
                         state: BeaconState):
    challenge = get_custody_challenge_record_by_id(state, continuation.challenge_id)
    challenger = state.validator_registry[challenge.challenger_index]
    responder = state.validator_registry[challenge.responder_index]
    expected_depth = min(challenge.max_depth - challenge.depth, MAX_POC_RESPONSE_DEPTH)
    # Verify we're not too late
    assert get_current_epoch(state) < responder.withdrawable_epoch
    # Verify the Merkle branch (the previous custody response provided the next level of hashes so the
    # challenger has the info to make any Merkle branch)
    assert verify_merkle_branch(
        leaf=continuation.new_custody_tree_node,
        branch=continuation.proof,
        depth=expected_depth,
        index=continuation.sub_index,
        root=challenge.current_custody_tree_node
    )
    # Verify signature
    assert verify_signed_challenge_message(continuation, challenger.pubkey)
    # Update the challenge data
    challenge.current_custody_tree_node = continuation.new_custody_tree_node
    challenge.depth += expected_depth
    challenge.deadline = get_current_epoch(state) + MAX_POC_RESPONSE_DEPTH
    responder.withdrawable_epoch = FAR_FUTURE_EPOCH
    challenge.offset = challenge.offset * 2**expected_depth + continuation.sub_index
```
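
To make the `(depth, offset)` bookkeeping above concrete, a non-normative trace of the game, assuming `MAX_POC_RESPONSE_DEPTH = 5` and a hypothetical `max_depth` of 12 with arbitrary challenger picks:

```python
MAX_POC_RESPONSE_DEPTH = 5
max_depth, depth, offset = 12, 0, 0
for sub_index in [7, 31, 1]:  # hypothetical challenger picks
    expected_depth = min(max_depth - depth, MAX_POC_RESPONSE_DEPTH)  # 5, then 5, then 2
    # The response supplied 2**expected_depth hashes; the challenger picks one of them
    assert 0 <= sub_index < 2**expected_depth
    offset = offset * 2**expected_depth + sub_index
    depth += expected_depth
# After three rounds the game has reached a leaf of the proof-of-custody tree
assert (depth, offset) == (12, (7 * 32 + 31) * 4 + 1)
```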
## Per-epoch processing
Add the following loop immediately below the `process_ejections` loop:
@ -548,12 +838,18 @@ Add the following loop immediately below the `process_ejections` loop:
```python
def process_challenge_absences(state: BeaconState) -> None:
    """
    Iterate through the validator registry
    Iterate through the challenge list
    and penalize validators with balance that did not answer challenges.
    """
    for index, validator in enumerate(state.validator_registry):
        if len(validator.open_branch_challenges) > 0 and get_current_epoch(state) > validator.open_branch_challenges[0].inclusion_epoch + CHALLENGE_RESPONSE_DEADLINE:
            penalize_validator(state, index, validator.open_branch_challenges[0].challenger_index)
    for c in state.branch_challenge_records:
        if get_current_epoch(state) > c.deadline:
            penalize_validator(state, c.responder_index, c.challenger_index)
    for c in state.custody_challenge_records:
        if get_current_epoch(state) > c.deadline:
            penalize_validator(state, c.responder_index, c.challenger_index)
        if get_current_epoch(state) > state.validator_registry[c.responder_index].withdrawable_epoch:
            penalize_validator(state, c.challenger_index, c.responder_index)
```
In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope):
@ -562,7 +858,7 @@ In `process_penalties_and_exits`, change the definition of `eligible` to the fol
def eligible(index):
    validator = state.validator_registry[index]
    # Cannot exit if there are still open branch challenges
    if len(validator.open_branch_challenges) > 0:
    if [c for c in state.branch_challenge_records if c.responder_index == index] != []:
        return False
    # Cannot exit if you have not revealed all of your subkeys
    elif validator.next_subkey_to_reveal <= epoch_to_custody_period(validator.exit_epoch):
@ -582,7 +878,15 @@ Run the following on the fork block after per-slot processing and before per-blo
For all `validator` in `ValidatorRegistry`, update it to the new format and fill the new member values with:
```python
    'open_branch_challenges': [],
    'next_subkey_to_reveal': get_current_custody_period(state),
    'reveal_max_periods_late': 0,
```
Update the `BeaconState` to the new format and fill the new member values with:
```python
    'branch_challenge_records': [],
    'next_branch_challenge_id': 0,
    'custody_challenge_records': [],
    'next_custody_challenge_id': 0,
```


@ -1,424 +1,124 @@
# [WIP] SimpleSerialize (SSZ) Spec
# SimpleSerialiZe (SSZ)
This is the **work in progress** document to describe `SimpleSerialize`, the
current selected serialization method for Ethereum 2.0 using the Beacon Chain.
This is a **work in progress** describing typing, serialization and Merkleization of Ethereum 2.0 objects.
This document specifies the general information for serializing and
deserializing objects and data types.
## Table of contents
## ToC
* [About](#about)
* [Variables and Functions](#variables-and-functions)
* [Constants](#constants)
* [Overview](#overview)
+ [Serialize/Encode](#serializeencode)
- [uintN](#uintn)
- [bool](#bool)
- [bytesN](#bytesn)
- [List/Vectors](#listvectors)
- [Container](#container)
+ [Deserialize/Decode](#deserializedecode)
- [uintN](#uintn-1)
- [bool](#bool-1)
- [bytesN](#bytesn-1)
- [List/Vectors](#listvectors-1)
- [Container](#container-1)
+ [Tree Hash](#tree-hash)
- [`uint8`..`uint256`, `bool`, `bytes1`..`bytes32`](#uint8uint256-bool-bytes1bytes32)
- [`uint264`..`uintN`, `bytes33`..`bytesN`](#uint264uintn-bytes33bytesn)
- [List/Vectors](#listvectors-2)
- [Container](#container-2)
+ [Signed Roots](#signed-roots)
* [Implementations](#implementations)
## About
`SimpleSerialize` was first proposed by Vitalik Buterin as the serialization
protocol for use in the Ethereum 2.0 Beacon Chain.
The core feature of `ssz` is the simplicity of the serialization with low
overhead.
## Variables and Functions
| Term | Definition |
|:-------------|:-----------------------------------------------------------------------------------------------|
| `little` | Little endian. |
| `byteorder` | Specifies [endianness](https://en.wikipedia.org/wiki/Endianness): big endian or little endian. |
| `len` | Length/number of bytes. |
| `to_bytes` | Convert to bytes. Should take parameters ``size`` and ``byteorder``. |
| `from_bytes` | Convert from bytes to object. Should take ``bytes`` and ``byteorder``. |
| `value` | The value to serialize. |
| `rawbytes` | Raw serialized bytes. |
| `deserialized_object` | The deserialized data in the data structure of your programming language. |
| `new_index` | An index to keep track the latest position where the `rawbytes` have been deserialized. |
- [Constants](#constants)
- [Typing](#typing)
- [Basic types](#basic-types)
- [Composite types](#composite-types)
- [Aliases](#aliases)
- [Serialization](#serialization)
- [`uintN`](#uintn)
- [`bool`](#bool)
- [Tuples, containers, lists](#tuples-containers-lists)
- [Deserialization](#deserialization)
- [Merkleization](#merkleization)
- [Self-signed containers](#self-signed-containers)
- [Implementations](#implementations)
## Constants
| Constant | Value | Definition |
|:------------------|:-----:|:--------------------------------------------------------------------------------------|
| `LENGTH_BYTES` | 4 | Number of bytes used for the length added before a variable-length serialized object. |
| `SSZ_CHUNK_SIZE` | 128 | Number of bytes for the chunk size of the Merkle tree leaf. |
| Name | Value | Description |
|-|-|-|
| `BYTES_PER_CHUNK` | `32` | Number of bytes per chunk. |
| `BYTES_PER_LENGTH_PREFIX` | `4` | Number of bytes per serialized length prefix. |
## Overview
## Typing
### Basic types
### Serialize/Encode
* `uintN`: `N`-bit unsigned integer (where `N in [8, 16, 32, 64, 128, 256]`)
* `bool`: `True` or `False`
### Composite types
* **container**: ordered heterogeneous collection of values
    * key-pair curly bracket notation `{}`, e.g. `{'foo': "uint64", 'bar': "bool"}`
* **tuple**: ordered fixed-length homogeneous collection of values
    * angle bracket notation `[N]`, e.g. `uint64[N]`
* **list**: ordered variable-length homogeneous collection of values
    * angle bracket notation `[]`, e.g. `uint64[]`
### Aliases
For convenience we alias:
* `byte` to `uint8`
* `bytes` to `byte[]`
* `bytesN` to `byte[N]`
## Serialization
We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `bytes`.
*Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, etc.) objects implicitly carry their type.
### `uintN`
```python
assert N in [8, 16, 32, 64, 128, 256]
return value.to_bytes(N // 8, 'little')
```
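For instance (a worked check, not spec text), the `uint16` value `1024` (`0x0400`) encodes to two little-endian bytes:
```python
# 1024 = 0x0400 serializes little-endian as b'\x00\x04'
assert (1024).to_bytes(2, 'little') == b'\x00\x04'
```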
### `bool`
```python
assert value in (True, False)
return b'\x01' if value is True else b'\x00'
```
### Tuples, containers, lists
If `value` is fixed-length (i.e. does not embed a list):
```python
return b''.join([serialize(element) for element in value])
```
If `value` is variable-length (i.e. embeds a list):
```python
serialized_bytes = b''.join([serialize(element) for element in value])
assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX)
serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little')
return serialized_length + serialized_bytes
```
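As a worked example (illustrative, not normative), a `uint64[]` list containing `[1, 2]` serializes to 16 payload bytes preceded by a 4-byte little-endian length prefix of `16`:
```python
# Two uint64 elements -> 16 payload bytes, prefixed by their byte length.
payload = (1).to_bytes(8, 'little') + (2).to_bytes(8, 'little')
encoded = len(payload).to_bytes(4, 'little') + payload  # BYTES_PER_LENGTH_PREFIX = 4
assert encoded.hex().startswith('10000000')  # prefix 16, little-endian
assert len(encoded) == 20
```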
## Deserialization
Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. Efficient algorithms for computing this object can be found in [the implementations](#implementations).
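The spec deliberately leaves decoding to implementations; as a minimal sketch (not part of the spec, assuming the caller tracks `current_index` into `rawbytes`), a `uintN` decoder might look like:
```python
from typing import Tuple

def deserialize_uint(rawbytes: bytes, current_index: int, N: int) -> Tuple[int, int]:
    # Illustrative only: read N // 8 little-endian bytes and advance the index.
    assert N in [8, 16, 32, 64, 128, 256]
    new_index = current_index + N // 8
    assert len(rawbytes) >= new_index  # ensure sufficient input remains
    value = int.from_bytes(rawbytes[current_index:new_index], 'little')
    return value, new_index
```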
## Merkleization
We first define helper functions:
* `pack`: Given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks.
* `merkleize`: Given ordered `BYTES_PER_CHUNK`-byte chunks, if necessary append zero chunks so that the number of chunks is a power of two, Merkleize the chunks, and return the root.
* `mix_in_length`: Given a Merkle root `root` and a length `length` (`uint256` little-endian serialization) return `hash(root + length)`.
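A compact sketch of these three helpers follows, assuming the `serialize` function defined above and the beacon chain spec's 32-byte `hash` are in scope (`hash` here is the spec's function, not Python's builtin); returning a single zero chunk for empty input is an illustrative choice, not a normative rule:
```python
from typing import List

BYTES_PER_CHUNK = 32

def next_power_of_two(x: int) -> int:
    return 1 if x == 0 else 2**(x - 1).bit_length()

def pack(values) -> List[bytes]:
    # Serialize, concatenate, and split into right-zero-padded 32-byte chunks.
    data = b''.join(serialize(v) for v in values)
    data += b'\x00' * (-len(data) % BYTES_PER_CHUNK)
    chunks = [data[i:i + BYTES_PER_CHUNK] for i in range(0, len(data), BYTES_PER_CHUNK)]
    return chunks or [b'\x00' * BYTES_PER_CHUNK]

def merkleize(chunks: List[bytes]) -> bytes:
    # Pad the chunk count to a power of two, then hash pairwise up to the root.
    chunks = chunks + [b'\x00' * BYTES_PER_CHUNK] * (next_power_of_two(len(chunks)) - len(chunks))
    while len(chunks) > 1:
        chunks = [hash(chunks[i] + chunks[i + 1]) for i in range(0, len(chunks), 2)]
    return chunks[0]

def mix_in_length(root: bytes, length: int) -> bytes:
    # The length is mixed in as its uint256 little-endian serialization.
    return hash(root + length.to_bytes(32, 'little'))
```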
We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:
* `merkleize(pack(value))` if `value` is a basic object or a tuple of basic objects
* `mix_in_length(merkleize(pack(value)), len(value))` if `value` is a list of basic objects
* `merkleize([hash_tree_root(element) for element in value])` if `value` is a tuple of composite objects or a container
* `mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value))` if `value` is a list of composite objects
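For example (informative, using the sketched helpers above), the root of the `uint64[]` value `[1, 2, 3]` packs into a single chunk, whose Merkle root is the chunk itself, before the length is mixed in:
```python
# Three uint64 values occupy 24 bytes, right-padded to one 32-byte chunk;
# merkleize of a single chunk is that chunk, then the length 3 is mixed in.
root = mix_in_length(merkleize(pack([1, 2, 3])), 3)
```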
## Self-signed containers
Let `value` be a self-signed container object. The convention is that the signature (e.g. a `bytes96` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signed_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`.
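A hedged sketch of this convention, assuming (for illustration only) that container types expose an ordered `fields` mapping; the exact introspection mechanism is implementation-defined:
```python
def truncate_last(value):
    # Hypothetical helper: view the container as the tuple of all fields
    # except the trailing signature field.
    field_names = list(type(value).fields.keys())[:-1]
    return tuple(getattr(value, name) for name in field_names)

def signed_root(value) -> bytes:
    # The signed message is the tree root of the signature-less remainder,
    # per the tuple-of-composites Merkleization rule above.
    return hash_tree_root(truncate_last(value))
```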
## Implementations
| Language | Project | Maintainer | Implementation |
|:-|:-|:-|:-|
| Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) |
| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz](https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz) |
| Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) |
| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/util/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) |
| Javascript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafeSystems/ssz-js/blob/master/src/index.js](https://github.com/ChainSafeSystems/ssz-js/blob/master/src/index.js) |
| Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) |
| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz](https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz) |
| Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) |
| C# | | Jordan Andrews | [https://github.com/codingupastorm/csharp-ssz](https://github.com/codingupastorm/csharp-ssz) |
| C++ | | | [https://github.com/NAKsir-melody/cpp_ssz](https://github.com/NAKsir-melody/cpp_ssz) |

View File

@ -50,7 +50,7 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers
- [Aggregation bitfield](#aggregation-bitfield)
- [Custody bitfield](#custody-bitfield)
- [Aggregate signature](#aggregate-signature)
- [Validator assignments](#validator-assignments)
- [Lookahead](#lookahead)
- [How to avoid slashing](#how-to-avoid-slashing)
- [Proposer slashing](#proposer-slashing)
@ -353,7 +353,7 @@ def get_committee_assignment(
a beacon block at the assigned slot.
"""
previous_epoch = get_previous_epoch(state)
next_epoch = get_current_epoch(state) + 1
assert previous_epoch <= epoch <= next_epoch
epoch_start_slot = get_epoch_start_slot(epoch)
@ -371,8 +371,7 @@ def get_committee_assignment(
if len(selected_committees) > 0:
validators = selected_committees[0][0]
shard = selected_committees[0][1]
is_proposer = validator_index == get_beacon_proposer_index(state, slot, registry_change=registry_change)
assignment = (validators, shard, slot, is_proposer)
return assignment