From 0759e170a7faafca0cee7b9b8929b0c6e15bcf77 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 7 Mar 2019 01:38:03 -0600 Subject: [PATCH 001/133] High/low balance separation See #685 for reasoning --- specs/core/0_beacon-chain.md | 95 +++++++++++++++++++++++++++--------- 1 file changed, 72 insertions(+), 23 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index bd709218a..3c2d90b69 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -64,6 +64,10 @@ - [`get_epoch_start_slot`](#get_epoch_start_slot) - [`is_active_validator`](#is_active_validator) - [`get_active_validator_indices`](#get_active_validator_indices) + - [`get_balance`](#get_balance) + - [`set_balance`](#set_balance) + - [`increase_balance`](#increase_balance) + - [`decrease_balance`](#decrease_balance) - [`get_permuted_index`](#get_permuted_index) - [`split`](#split) - [`get_epoch_committee_count`](#get_epoch_committee_count) @@ -205,10 +209,10 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | Name | Value | Unit | | - | - | :-: | -| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | +| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 2,000,000,000) | Gwei | | `MAX_DEPOSIT_AMOUNT` | `2**5 * 10**9` (= 32,000,000,000) | Gwei | -| `FORK_CHOICE_BALANCE_INCREMENT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | | `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei | +| `HIGH_BALANCE_INCREMENT` | `10 ** 9` (= 1,000,000,000) | Gwei | ### Initial values @@ -516,7 +520,7 @@ The following data structures are defined as [SimpleSerialize (SSZ)](https://git # Validator registry 'validator_registry': [Validator], - 'validator_balances': ['uint64'], + 'low_balances': ['uint32'], 'validator_registry_update_epoch': 'uint64', # Randomness and committees @@ -570,6 +574,8 @@ The following data structures are defined as [SimpleSerialize (SSZ)](https://git 'initiated_exit': 'bool', # Was the validator slashed 'slashed': 'bool', + # Rounded balance + 'high_balance': 'uint32' } ``` @@ -749,6 +755,45 @@ def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> L return [i for i, v in enumerate(validators) if is_active_validator(v, epoch)] ``` +### `get_balance` + +```python +def get_balance(state: BeaconState, index: int) -> int: + return ( + state.validator_registry[index].high_balance * HIGH_BALANCE_INCREMENT + + state.low_balances[index] + ) +``` +#### `set_balance` + +````python +def set_balance(state: BeaconState, index: int, new_balance: int) -> None: + validator = state.validator_registry[index] + HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 + if ( + validator.rounded_balance * HIGH_BALANCE_INCREMENT > new_balance or + validator.rounded_balance * HIGH_BALANCE_INCREMENT + HALF_INCREMENT * 3 < new_balance + ): + validator.rounded_balance = new_balance // HIGH_BALANCE_INCREMENT + state.validator_fractional_balances[index] = ( + new_balance - validator.rounded_balance * HIGH_BALANCE_INCREMENT + ) +```` + +#### `increase_balance` + +````python +def increase_balance(state: BeaconState, index: int, delta: int) -> None: + set_balance(state, index, get_balance(state, index) + delta) +```` + +#### `decrease_balance` + +````python +def decrease_balance(state: BeaconState, index: int, delta: int) -> None: + set_balance(state, index, get_balance(state, index) - delta) +```` + ### `get_permuted_index` ```python @@ -1105,7 +1150,7 @@ def get_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: """ Return the 
effective balance (also known as "balance at stake") for a validator with the given ``index``. """ - return min(state.validator_balances[index], MAX_DEPOSIT_AMOUNT) + return min(get_balance(state, index), MAX_DEPOSIT_AMOUNT) ``` ### `get_total_balance` @@ -1351,17 +1396,18 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: withdrawable_epoch=FAR_FUTURE_EPOCH, initiated_exit=False, slashed=False, + high_balance=0 ) # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. state.validator_registry.append(validator) - state.validator_balances.append(amount) + state.low_balances.append(0) + set_balance(state, len(state.validator_registry)-1, amount) else: # Increase balance by deposit amount index = validator_pubkeys.index(pubkey) assert state.validator_registry[index].withdrawal_credentials == withdrawal_credentials - - state.validator_balances[index] += amount + increase_balance(state, index, amount) ``` ### Routines for updating validator status @@ -1426,8 +1472,8 @@ def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: whistleblower_index = get_beacon_proposer_index(state, state.slot) whistleblower_reward = get_effective_balance(state, index) // WHISTLEBLOWER_REWARD_QUOTIENT - state.validator_balances[whistleblower_index] += whistleblower_reward - state.validator_balances[index] -= whistleblower_reward + increase_balance(state, whistleblower_index, whistleblower_reward) + decrease_balance(state, index, whistleblower_reward) validator.slashed = True validator.withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH ``` @@ -1545,7 +1591,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Validator registry validator_registry=[], - validator_balances=[], + low_balances=[], validator_registry_update_epoch=GENESIS_EPOCH, # Randomness and committees @@ -1657,9 +1703,12 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) for validator_index in active_validator_indices ] + # Use the rounded-balance-with-hysteresis supplied by the protocol for fork + # choice voting. This reduces the number of recomputations that need to be + # made for optimized implementations that precompute and save data def get_vote_count(block: BeaconBlock) -> int: return sum( - get_effective_balance(start_state.validator_balances[validator_index]) // FORK_CHOICE_BALANCE_INCREMENT + start_state.validator_registry[validator_index].high_balance for validator_index, target in attestation_targets if get_ancestor(store, target, block.slot) == block ) @@ -1956,12 +2005,12 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: Note that this function mutates ``state``. 
""" # Verify the amount and fee aren't individually too big (for anti-overflow purposes) - assert state.validator_balances[transfer.sender] >= max(transfer.amount, transfer.fee) + assert get_balance(state, transfer.sender) >= max(transfer.amount, transfer.fee) # Verify that we have enough ETH to send, and that after the transfer the balance will be either # exactly zero or at least MIN_DEPOSIT_AMOUNT assert ( - state.validator_balances[transfer.sender] == transfer.amount + transfer.fee or - state.validator_balances[transfer.sender] >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT + get_balance(state, transfer.sender) == transfer.amount + transfer.fee or + get_balance(state, transfer.sender) >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT ) # A transfer is valid in only one slot assert state.slot == transfer.slot @@ -1983,9 +2032,9 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER) ) # Process the transfer - state.validator_balances[transfer.sender] -= transfer.amount + transfer.fee - state.validator_balances[transfer.recipient] += transfer.amount - state.validator_balances[get_beacon_proposer_index(state, state.slot)] += transfer.fee + decrease_balance(state, transfer.sender, transfer.amount + transfer.fee) + increase_balance(state, transfer.recipient, transfer.amount) + increase_balance(state, get_beacon_proposer_index(state, state.slot), transfer.fee) ``` ### Per-epoch processing @@ -2320,10 +2369,10 @@ def apply_rewards(state: BeaconState) -> None: deltas1 = get_justification_and_finalization_deltas(state) deltas2 = get_crosslink_deltas(state) for i in range(len(state.validator_registry)): - state.validator_balances[i] = max( + set_balance(state, i, max( 0, - state.validator_balances[i] + deltas1[0][i] + deltas2[0][i] - deltas1[1][i] - deltas2[1][i] - ) + get_balance(state, i) + deltas1[0][i] + deltas2[0][i] - deltas1[1][i] - deltas2[1][i] + )) ``` #### Ejections @@ -2337,7 +2386,7 @@ def process_ejections(state: BeaconState) -> None: and eject active validators with balance below ``EJECTION_BALANCE``. 
""" for index in get_active_validator_indices(state.validator_registry, get_current_epoch(state)): - if state.validator_balances[index] < EJECTION_BALANCE: + if get_balance(state, index) < EJECTION_BALANCE: exit_validator(state, index) ``` @@ -2380,7 +2429,7 @@ def update_validator_registry(state: BeaconState) -> None: # Activate validators within the allowable balance churn balance_churn = 0 for index, validator in enumerate(state.validator_registry): - if validator.activation_epoch == FAR_FUTURE_EPOCH and state.validator_balances[index] >= MAX_DEPOSIT_AMOUNT: + if validator.activation_epoch == FAR_FUTURE_EPOCH and get_balance(state, index) >= MAX_DEPOSIT_AMOUNT: # Check the balance churn would be within the allowance balance_churn += get_effective_balance(state, index) if balance_churn > max_balance_churn: @@ -2461,7 +2510,7 @@ def process_slashings(state: BeaconState) -> None: get_effective_balance(state, index) * min(total_penalties * 3, total_balance) // total_balance, get_effective_balance(state, index) // MIN_PENALTY_QUOTIENT ) - state.validator_balances[index] -= penalty + decrease_balance(state, index, penalty) ``` ```python From be4b912373b9ee89851e217e8f500f444ef0e1fa Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 7 Mar 2019 04:02:53 -0600 Subject: [PATCH 002/133] Added underflow checking to decrease_balance --- specs/core/0_beacon-chain.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 3c2d90b69..b0b3dbb2a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -791,7 +791,8 @@ def increase_balance(state: BeaconState, index: int, delta: int) -> None: ````python def decrease_balance(state: BeaconState, index: int, delta: int) -> None: - set_balance(state, index, get_balance(state, index) - delta) + cur_balance = get_balance(state, index) + set_balance(state, index, cur_balance - delta if cur_balance >= delta else 0) ```` ### `get_permuted_index` From f9a07f7653890fd74c6c023182ccb56004b5579d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 7 Mar 2019 04:04:05 -0600 Subject: [PATCH 003/133] Fixed MIN_DEPOSIT_AMOUNT --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b0b3dbb2a..c548dbe14 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -209,10 +209,10 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| Name | Value | Unit | | - | - | :-: | -| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 2,000,000,000) | Gwei | +| `MIN_DEPOSIT_AMOUNT` | `10**9` (= 1,000,000,000) | Gwei | | `MAX_DEPOSIT_AMOUNT` | `2**5 * 10**9` (= 32,000,000,000) | Gwei | | `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei | -| `HIGH_BALANCE_INCREMENT` | `10 ** 9` (= 1,000,000,000) | Gwei | +| `HIGH_BALANCE_INCREMENT` | `10**9` (= 1,000,000,000) | Gwei | ### Initial values From bf6bdbb0210ee8020cac21b3f731178caad03ab7 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 11 Mar 2019 12:38:11 -0600 Subject: [PATCH 004/133] cleanup minor var errors --- specs/core/0_beacon-chain.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f5fa2128a..ec9eedb51 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -768,12 +768,12 @@ def set_balance(state: BeaconState, index: int, new_balance: int) -> None: validator = state.validator_registry[index] HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 if ( - validator.rounded_balance * HIGH_BALANCE_INCREMENT > new_balance or - validator.rounded_balance * HIGH_BALANCE_INCREMENT + HALF_INCREMENT * 3 < new_balance + validator.high_balance * HIGH_BALANCE_INCREMENT > new_balance or + validator.high_balance * HIGH_BALANCE_INCREMENT + HALF_INCREMENT * 3 < new_balance ): - validator.rounded_balance = new_balance // HIGH_BALANCE_INCREMENT - state.validator_fractional_balances[index] = ( - new_balance - validator.rounded_balance * HIGH_BALANCE_INCREMENT + validator.high_balance = new_balance // HIGH_BALANCE_INCREMENT + state.low_balances[index] = ( + new_balance - validator.high_balance * HIGH_BALANCE_INCREMENT ) ```` From 3459ea0838da4b82b5f0e9d2fd2662e2c76529a2 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 11 Mar 2019 22:07:34 +0000 Subject: [PATCH 005/133] Check proposer is not slashed --- specs/core/0_beacon-chain.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ceca50962..1bbab78dc 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2247,8 +2247,10 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: assert block.previous_block_root == hash_tree_root(state.latest_block_header) # Save current block as the new latest block state.latest_block_header = get_temporary_block_header(block) - # Verify proposer signature + # Verify proposer proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] + assert not proposer.slashed + # Verify proposer signature assert bls_verify( pubkey=proposer.pubkey, message_hash=signed_root(block), From 4410a55c4d77921effbfecef131f9327a6633887 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Mon, 11 Mar 2019 23:30:08 -0500 Subject: [PATCH 006/133] Mandatory deposits Resolves #675 point 5. 
--- specs/core/0_beacon-chain.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ceca50962..4aaf09b4b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -315,6 +315,8 @@ The types are defined topologically to aid in facilitating an executable version { # Root of the deposit tree 'deposit_root': 'bytes32', + # Total number of deposits + 'deposit_count': 'uint64', # Block hash 'block_hash': 'bytes32', } @@ -1456,6 +1458,7 @@ When sufficiently many full deposits have been made the deposit contract emits t * `genesis_time` equals `time` in the `Eth2Genesis` log * `latest_eth1_data.deposit_root` equals `deposit_root` in the `Eth2Genesis` log +* `latest_eth1_data.deposit_count` equals `deposit_count` in the `Eth2Genesis` log * `latest_eth1_data.block_hash` equals the hash of the block that included the log * `genesis_validator_deposits` is a list of `Deposit` objects built according to the `Deposit` logs up to the deposit that triggered the `Eth2Genesis` log, processed in the order in which they were emitted (oldest to newest) @@ -1479,6 +1482,7 @@ When enough full deposits have been made to the deposit contract, an `Eth2Genesi * Let `genesis_time` be the timestamp specified in the `Eth2Genesis` log. * Let `genesis_eth1_data` be the `Eth1Data` object where: * `genesis_eth1_data.deposit_root` is the `deposit_root` contained in the `Eth2Genesis` log. + * `genesis_eth1_data.deposit_count` is the `deposit_count` contained in the `Eth2Genesis` log. * `genesis_eth1_data.block_hash` is the hash of the Ethereum 1.0 block that emitted the `Eth2Genesis` log. * Let `genesis_state = get_genesis_beacon_state(genesis_validator_deposits, genesis_time, genesis_eth1_data)`. * Let `genesis_block = get_empty_block()`. @@ -1497,6 +1501,7 @@ def get_empty_block() -> BeaconBlock: randao_reveal=EMPTY_SIGNATURE, eth1_data=Eth1Data( deposit_root=ZERO_HASH, + deposit_count=0, block_hash=ZERO_HASH, ), proposer_slashings=[], @@ -2443,7 +2448,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ##### Deposits -Verify that `len(block.body.deposits) <= MAX_DEPOSITS`. +Verify that `len(block.body.deposits) <= MAX_DEPOSITS`. If `state.latest_eth1_data.deposit_count > state.deposit_index`, verify that `len(block.body.deposits) >= 1`. For each `deposit` in `block.body.deposits`, run `process_deposit(state, deposit)`. From 5266bbd378449d58746b505ddbe0b097fa3737e1 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Mon, 11 Mar 2019 23:38:22 -0500 Subject: [PATCH 007/133] Honest validator doc update for mandatory deposits Co-requisite with #758. --- specs/validator/0_beacon-chain-validator.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 0c95fb446..e1ccc9b31 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -182,15 +182,15 @@ epoch_signature = bls_sign( * Let `D` be the set of `Eth1DataVote` objects `vote` in `state.eth1_data_votes` where: * `vote.eth1_data.block_hash` is the hash of an eth1.0 block that is (i) part of the canonical chain, (ii) >= `ETH1_FOLLOW_DISTANCE` blocks behind the head, and (iii) newer than `state.latest_eth1_data.block_data`. + * `vote.eth1_data.deposit_count` is the deposit count of the eth1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`. 
* `vote.eth1_data.deposit_root` is the deposit root of the eth1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`. * If `D` is empty: * Let `block_hash` be the block hash of the `ETH1_FOLLOW_DISTANCE`'th ancestor of the head of the canonical eth1.0 chain. - * Let `deposit_root` be the deposit root of the eth1.0 deposit contract in the post-state of the block referenced by `block_hash` + * Let `deposit_root` and `deposit_count` be the deposit root and deposit count of the eth1.0 deposit contract in the post-state of the block referenced by `block_hash` + * Let `best_vote_data = Eth1Data(block_hash=block_hash, deposit_root=deposit_root, deposit_count=deposit_count)`. * If `D` is nonempty: - * Let `best_vote` be the member of `D` that has the highest `vote.vote_count`, breaking ties by favoring block hashes with higher associated block height. - * Let `block_hash = best_vote.eth1_data.block_hash`. - * Let `deposit_root = best_vote.eth1_data.deposit_root`. -* Set `block.eth1_data = Eth1Data(deposit_root=deposit_root, block_hash=block_hash)`. + * Let `best_vote_data` be the `eth1_data` of the member of `D` that has the highest `vote.vote_count`, breaking ties by favoring block hashes with higher associated block height. +* Set `block.eth1_data = best_vote_data`. ##### Signature From a7544864d5de8eaa27f4630d2740f4acc8383d99 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 12 Mar 2019 10:02:52 +0000 Subject: [PATCH 008/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ec9eedb51..7d59a9e6d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -203,10 +203,10 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| Name | Value | Unit | | - | - | :-: | -| `MIN_DEPOSIT_AMOUNT` | `10**9` (= 1,000,000,000) | Gwei | +| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | | `MAX_DEPOSIT_AMOUNT` | `2**5 * 10**9` (= 32,000,000,000) | Gwei | | `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei | -| `HIGH_BALANCE_INCREMENT` | `10**9` (= 1,000,000,000) | Gwei | +| `HIGH_BALANCE_INCREMENT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | ### Initial values @@ -440,7 +440,7 @@ The types are defined topologically to aid in facilitating an executable version # Was the validator slashed 'slashed': 'bool', # Rounded balance - 'high_balance': 'uint32' + 'high_balance': 'uint64' } ``` @@ -756,25 +756,17 @@ def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> L ```python def get_balance(state: BeaconState, index: int) -> int: - return ( - state.validator_registry[index].high_balance * HIGH_BALANCE_INCREMENT + - state.low_balances[index] - ) + return state.validator_registry[index].high_balance + state.low_balances[index] ``` #### `set_balance` ````python -def set_balance(state: BeaconState, index: int, new_balance: int) -> None: +def set_balance(state: BeaconState, index: int, balance: int) -> None: validator = state.validator_registry[index] HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 - if ( - validator.high_balance * HIGH_BALANCE_INCREMENT > new_balance or - validator.high_balance * HIGH_BALANCE_INCREMENT + HALF_INCREMENT * 3 < new_balance - ): - validator.high_balance = new_balance // HIGH_BALANCE_INCREMENT - state.low_balances[index] = ( - new_balance - validator.high_balance * HIGH_BALANCE_INCREMENT - ) + if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: + validator.high_balance = balance - balance % HIGH_BALANCE_INCREMENT + state.low_balances[index] = balance - validator.high_balance ```` #### `increase_balance` From 6c359340607af3b6680268a75489780896225a32 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 12 Mar 2019 10:34:24 +0000 Subject: [PATCH 009/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4aaf09b4b..8edc3f232 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2448,7 +2448,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ##### Deposits -Verify that `len(block.body.deposits) <= MAX_DEPOSITS`. If `state.latest_eth1_data.deposit_count > state.deposit_index`, verify that `len(block.body.deposits) >= 1`. +Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. For each `deposit` in `block.body.deposits`, run `process_deposit(state, deposit)`. 
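The deposit-inclusion rule above reduces to a single equality check against the block body. Here is a minimal sketch of that check, assuming `latest_eth1_data` refers to `state.latest_eth1_data` and that `state.deposit_index` counts deposits already processed; the helper name `verify_deposit_count` is illustrative rather than part of the spec.

```python
def verify_deposit_count(state: BeaconState, block: BeaconBlock) -> None:
    # Deposits are mandatory: a block must include every deposit known to the
    # beacon chain but not yet processed, capped at MAX_DEPOSITS per block.
    pending_deposits = state.latest_eth1_data.deposit_count - state.deposit_index
    assert len(block.body.deposits) == min(MAX_DEPOSITS, pending_deposits)
```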
From 0a349f8bdc31d08f2c6f4a5b8e98427845c1716e Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 12 Mar 2019 15:58:31 +0000 Subject: [PATCH 010/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 7d59a9e6d..63737962d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -601,7 +601,7 @@ The types are defined topologically to aid in facilitating an executable version # Validator registry 'validator_registry': [Validator], - 'low_balances': ['uint32'], + 'balances': ['uint64'], 'validator_registry_update_epoch': 'uint64', # Randomness and committees @@ -756,7 +756,7 @@ def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> L ```python def get_balance(state: BeaconState, index: int) -> int: - return state.validator_registry[index].high_balance + state.low_balances[index] + return state.balances[index] ``` #### `set_balance` @@ -766,7 +766,7 @@ def set_balance(state: BeaconState, index: int, balance: int) -> None: HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: validator.high_balance = balance - balance % HIGH_BALANCE_INCREMENT - state.low_balances[index] = balance - validator.high_balance + state.balances[index] = balance ```` #### `increase_balance` @@ -1377,7 +1377,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. state.validator_registry.append(validator) - state.low_balances.append(0) + state.balances.append(0) set_balance(state, len(state.validator_registry)-1, amount) else: # Increase balance by deposit amount @@ -1567,7 +1567,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Validator registry validator_registry=[], - low_balances=[], + balances=[], validator_registry_update_epoch=GENESIS_EPOCH, # Randomness and committees From e4a1ef16e6a42424d7f617d342183c2d29ba9b56 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Tue, 12 Mar 2019 13:46:58 -0700 Subject: [PATCH 011/133] Add networking specs --- specs/networking/messaging.md | 41 ++++ specs/networking/node-identification.md | 32 +++ specs/networking/rpc-interface.md | 246 ++++++++++++++++++++++++ 3 files changed, 319 insertions(+) create mode 100644 specs/networking/messaging.md create mode 100644 specs/networking/node-identification.md create mode 100644 specs/networking/rpc-interface.md diff --git a/specs/networking/messaging.md b/specs/networking/messaging.md new file mode 100644 index 000000000..e88116f46 --- /dev/null +++ b/specs/networking/messaging.md @@ -0,0 +1,41 @@ +ETH 2.0 Networking Spec - Messaging +=== + +# Abstract + +This specification describes how individual Ethereum 2.0 messages are represented on the wire. + +The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL”, NOT", “SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document are to be interpreted as described in RFC 2119. + +# Motivation + +This specification seeks to define a messaging protocol that is flexible enough to be changed easily as the ETH 2.0 specification evolves. + +# Specification + +## Message Structure + +An ETH 2.0 message consists of a single byte representing the message version followed by the encoded, potentially compressed body. 
We separate the message's version from the version included in the `libp2p` protocol path in order to allow encoding and compression schemes to be updated independently of the `libp2p` protocols themselves. + +It is unlikely that more than 255 message versions will need to be supported, so a single byte should suffice. + +Visually, a message looks like this: + +``` ++--------------------------+ +| version byte | ++--------------------------+ +| | +| body | +| | ++--------------------------+ +``` + +Clients MUST ignore messages with mal-formed bodies. The `version` byte MUST be one of the below values: + +## Version Byte Values + +### `0x01` + +- **Encoding Scheme:** SSZ +- **Compression Scheme:** Snappy diff --git a/specs/networking/node-identification.md b/specs/networking/node-identification.md new file mode 100644 index 000000000..27c1ebf9d --- /dev/null +++ b/specs/networking/node-identification.md @@ -0,0 +1,32 @@ +ETH 2.0 Networking Spec - Node Identification +=== + +# Abstract + +This specification describes how Ethereum 2.0 nodes identify and address each other on the network. + +The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119. + +# Specification + +Clients use Ethereum Node Records (as described in [EIP-778](http://eips.ethereum.org/EIPS/eip-778)) to discover one another. Each ENR includes, among other things, the following keys: + +- The node's IP. +- The node's TCP port. +- The node's public key. + +For clients to be addressable, their ENR responses MUST contain all of the above keys. Client MUST verify the signature of any received ENRs, and disconnect from peers whose ENR signatures are invalid. Each node's public key MUST be unique. + +The keys above are enough to construct a [multiaddr](https://github.com/multiformats/multiaddr) for use with the rest of the `libp2p` stack. + +It is RECOMMENDED that clients set their TCP port to the default of `9000`. + +## Peer ID Generation + +The `libp2p` networking stack identifies peers via a "peer ID." Simply put, a node's Peer ID is the SHA2-256 `multihash` of the node's public key. `go-libp2p-crypto` contains the canonical implementation of how to hash `secp256k1` keys for use as a peer ID. + +# See Also + +- [multiaddr](https://github.com/multiformats/multiaddr) +- [multihash](https://multiformats.io/multihash/) +- [go-libp2p-crypto](https://github.com/libp2p/go-libp2p-crypto) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md new file mode 100644 index 000000000..fdc9a11b3 --- /dev/null +++ b/specs/networking/rpc-interface.md @@ -0,0 +1,246 @@ +ETH 2.0 Networking Spec - RPC Interface +=== + +# Abstract + +The Ethereum 2.0 networking stack uses two modes of communication: a broadcast protocol that gossips information to interested parties via GossipSub, and an RPC protocol that retrieves information from specific clients. This specification defines the RPC protocol. + +The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119. + +# Dependencies + +This specification assumes familiarity with the [Messaging](./messaging.md), [Node Identification](./node-identification), and [Beacon Chain](../core/0_beacon-chain.md) specifications. 
+ +# Specification + +## Message Schemas + +Message body schemas are notated like this: + +``` +( + field_name_1: type + field_name_2: type +) +``` + +SSZ serialization is field-order dependent. Therefore, fields MUST be encoded and decoded according to the order described in this document. The encoded values of each field are concatenated to form the final encoded message body. Embedded structs are serialized as Containers unless otherwise noted. + +All referenced data structures can be found in the [0-beacon-chain](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#data-structures) specification. + +## `libp2p` Protocol Names + +A "Protocol Name" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. A client's supported protocol paths are negotiated by the `libp2p` stack at connection time; as such they are not part of individual message bodies. + +## RPC-Over-`libp2p` + +To facilitate RPC-over-`libp2p`, a single protocol path is used: `/eth/serenity/rpc/1.0.0`. Remote method calls are wrapped in a "request" structure: + +``` +( + id: uint64 + method_id: uint16 + body: Request +) +``` + +and their corresponding responses are wrapped in a "response" structure: + +``` +( + id: uint64 + result: Response +) +``` + +If an error occurs, a variant of the response structure is returned: + +``` +( + id: uint64 + error: ( + code: uint16 + data: bytes + ) +) +``` + +The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](https://www.jsonrpc.org/specification). Specifically: + +1. The `id` member is REQUIRED. +2. The `id` member in the response MUST be the same as the value of the `id` in the request. +3. The `method_id` member is REQUIRED. +4. The `result` member is required on success, and MUST NOT exist if there was an error. +5. The `error` member is REQUIRED on errors, and MUST NOT exist if there wasn't an error. + +Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. + +The "method ID" fields in the below messages refer to the `method` field in the request structure above. + +The first 1,000 values in `error.code` are reserved for system use. The following error codes are predefined: + +1. `0`: Parse error. +2. `10`: Invalid request. +3. `20`: Method not found. +4. `30`: Server error. + +## Messages + +### Hello + +**Method ID:** `0` + +**Body**: + +``` +( + network_id: uint8 + latest_finalized_root: bytes32 + latest_finalized_epoch: uint64 + best_root: bytes32 + best_slot: uint64 +) +``` + +Clients exchange `hello` messages upon connection, forming a two-phase handshake. The first message the initiating client sends MUST be the `hello` message. In response, the receiving client MUST respond with its own `hello` message. + +Clients SHOULD immediately disconnect from one another following the handshake above under the following conditions: + +1. If `network_id` belongs to a different chain, since the client definitionally cannot sync with this client. +2. If the `latest_finalized_root` shared by the peer is not in the client's chain at the expected epoch. 
For example, if Peer 1 in the diagram below has `(root, epoch)` of `(A, 5)` and Peer 2 has `(B, 3)`, Peer 1 would disconnect because it knows that `B` is not the root in their chain at epoch 3: + +``` + Root A + + +---+ + |xxx| +----+ Epoch 5 + +-+-+ + ^ + | + +-+-+ + | | +----+ Epoch 4 + +-+-+ +Root B ^ + | ++---+ +-+-+ +|xxx+<---+--->+ | +----+ Epoch 3 ++---+ | +---+ + | + +-+-+ + | | +-----------+ Epoch 2 + +-+-+ + ^ + | + +-+-+ + | | +-----------+ Epoch 1 + +---+ +``` + +Once the handshake completes, the client with the higher `latest_finalized_epoch` or `best_slot` (if the clients have equal `latest_finalized_epoch`s) SHOULD send beacon block roots to its counterparty via `beacon_block_roots` (i.e., RPC method `10`). + +### Goodbye + +**Method ID:** `1` + +**Body:** + +``` +( + reason: uint64 +) +``` + +Client MAY send `goodbye` messages upon disconnection. The reason field MUST be one of the following values: + +- `1`: Client shut down. +- `2`: Irrelevant network. +- `3`: Irrelevant shard. + +### Provide Beacon Block Roots + +**Method ID:** `10` + +**Body:** + +``` +# BlockRootSlot +( + block_root: HashTreeRoot + slot: uint64 +) + +( + roots: []BlockRootSlot +) +``` + +Send a list of block roots and slots to the peer. + +### Beacon Block Headers + +**Method ID:** `11` + +**Request Body** + +``` +( + start_root: HashTreeRoot + start_slot: uint64 + max_headers: uint64 + skip_slots: uint64 +) +``` + +**Response Body:** + +``` +( + headers: []BlockHeader +) +``` + +Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `2` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is undefined for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were undefined in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further undefined, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. + +The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Client could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. + +### Beacon Block Bodies + +**Method ID:** `12` + +**Request Body:** + +``` +( + block_roots: []HashTreeRoot +) +``` + +**Response Body:** + +``` +( + block_bodies: []BeaconBlockBody +) +``` + +Requests the `block_bodies` associated with the provided `block_roots` from the peer. Responses MUST return `block_roots` in the order provided in the request. If the receiver does not have a particular `block_root`, it must return a zero-value `block_body` (i.e., a `block_body` container with all zero fields). + +### Beacon Chain State + +**Note:** This section is preliminary, pending the definition of the data structures to be transferred over the wire during fast sync operations. + +**Method ID:** `13` + +**Request Body:** + +``` +( + hashes: []HashTreeRoot +) +``` + +**Response Body:** TBD + +Requests contain the hashes of Merkle tree nodes that when merkelized yield the block's `state_root`. 
+ +The response will contain the values that, when hashed, yield the hashes inside the request body. From 34cd96be54ba59c5715900b9b55f16e069b42daa Mon Sep 17 00:00:00 2001 From: Vitalik Buterin Date: Wed, 13 Mar 2019 02:47:29 -0500 Subject: [PATCH 012/133] Added light client related files --- specs/light_client/merkle_proofs.md | 134 ++++++++++++++++++++++ specs/light_client/sync_protocol.md | 172 ++++++++++++++++++++++++++++ 2 files changed, 306 insertions(+) create mode 100644 specs/light_client/merkle_proofs.md create mode 100644 specs/light_client/sync_protocol.md diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md new file mode 100644 index 000000000..cf4dad2e3 --- /dev/null +++ b/specs/light_client/merkle_proofs.md @@ -0,0 +1,134 @@ +### Generalized Merkle tree index + +In a binary Merkle tree, we define a "generalized index" of a node as `2**depth + index`. Visually, this looks as follows: + +``` + 1 + 2 3 +4 5 6 7 + ... +``` + +Note that the generalized index has the convenient property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function: + +```python +def merkle_tree(leaves): + o = [0] * len(leaves) + leaves + for i in range(len(leaves)-1, 0, -1): + o[i] = hash(o[i*2] + o[i*2+1]) + return o +``` + +We will define Merkle proofs in terms of generalized indices. + +### SSZ object to index + +We can describe the hash tree of any SSZ object, rooted in `hash_tree_root(object)`, as a binary Merkle tree whose depth may vary. For example, an object `{x: bytes32, y: List[uint64]}` would look as follows: + +``` + root + / \ + x y_root + / \ +y_data_root len(y) + / \ + /\ /\ + ....... +``` + +We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo[5]`. We'll describe paths as lists: in these three cases they are `["x"]`, `["y", "len"]` and `["y", 5]` respectively. We can now define a function `get_generalized_indices(object: Any, path: List[str OR int], root=1: int) -> int` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access. 
+ +```python +def get_generalized_indices(obj: Any, path: List[str or int], root=1) -> List[int]: + if len(path) == 0: + return [root] + elif isinstance(obj, StaticList): + items_per_chunk = (32 // len(serialize(x))) if isinstance(x, int) else 1 + new_root = root * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk + return get_generalized_indices(obj[path[0]], path[1:], new_root) + elif isinstance(obj, DynamicList) and path[0] == "len": + return [root * 2 + 1] + elif isinstance(obj, DynamicList) and isinstance(path[0], int): + assert path[0] < len(obj) + items_per_chunk = (32 // len(serialize(x))) if isinstance(x, int) else 1 + new_root = root * 2 * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk + return [root *2 + 1] + get_generalized_indices(obj[path[0]], path[1:], new_root) + elif hasattr(obj, "fields"): + index = list(fields.keys()).index(path[0]) + new_root = root * next_power_of_2(len(fields)) + index + return get_generalized_indices(getattr(obj, path[0]), path[1:], new_root) + else: + raise Exception("Unknown type / path") +``` + +### Merkle multiproofs + +We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (ie. generalized indices 8, 9, 14): + +``` + . + . . + . * * . +x x . . . . x * +``` + +. are unused nodes, * are used nodes, x are the values we are trying to prove. Notice how despite being a multiproof for 3 values, it requires only 3 auxiliary nodes, only one node more than would be required to prove a single value. Normally the efficiency gains are not quite that extreme, but the savings relative to individual Merkle proofs are still significant. As a rule of thumb, a multiproof for k nodes at the same level of an n-node tree has size `k * (n/k + log(n/k))`. + +Here is code for creating and verifying a multiproof. First a helper: + +```python +def log2(x): + return 0 if x == 1 else 1 + log2(x//2) +``` + +First, a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: + +```python +def get_proof_indices(tree_indices: List[int]) -> List[int]: + # Get all indices touched by the proof + maximal_indices = set({}) + for i in tree_indices: + x = i + while x > 1: + maximal_indices.add(x ^ 1) + x //= 2 + maximal_indices = tree_indices + sorted(list(maximal_indices))[::-1] + # Get indices that cannot be recalculated from earlier indices + redundant_indices = set({}) + proof = [] + for index in maximal_indices: + if index not in redundant_indices: + proof.append(index) + while index > 1: + redundant_indices.add(index) + if (index ^ 1) not in redundant_indices: + break + index //= 2 + return [i for i in proof if i not in tree_indices] +```` + +Generating a proof is simply a matter of taking the node of the SSZ hash tree with the union of the given generalized indices for each index given by `get_proof_indices`, and outputting the list of nodes in the same order. 
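Proof generation needs no machinery beyond the helper above: a minimal sketch, assuming the full hash tree is available as a flat list indexed by generalized index (the layout produced by `merkle_tree` earlier); the name `get_multi_proof` is illustrative only. Verification is given below.

```python
def get_multi_proof(tree: List[bytes], indices: List[int]) -> List[bytes]:
    # tree[i] is the node at generalized index i, e.g. the output of merkle_tree()
    return [tree[i] for i in get_proof_indices(indices)]
```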
+ +```python +def verify_multi_proof(root, indices, leaves, proof): + tree = {} + for index, leaf in zip(indices, leaves): + tree[index] = leaf + for index, proofitem in zip(get_proof_indices(indices), proof): + tree[index] = proofitem + indexqueue = sorted(tree.keys())[:-1] + i = 0 + while i < len(indexqueue): + index = indexqueue[i] + if index >= 2 and index^1 in tree: + tree[index//2] = hash(tree[index - index%2] + tree[index - index%2 + 1]) + indexqueue.append(index//2) + i += 1 + return (indices == []) or (1 in tree and tree[1] == root) +``` + +#### Proofs for execution + +We define `MerklePartial(f, arg1, arg2...)` as being a list of Merkle multiproofs of the sets of nodes in the hash trees of the SSZ objects that are needed to authenticate the values needed to compute some function `f(arg1, arg2...)`. An individual Merkle multiproof is given as a dynamic sized list of `bytes32` values, a `MerklePartial` is a fixed-size list of objects `{proof: ["bytes32"], value: "bytes32"}`, one for each `arg` to `f` (if some `arg` is a base type, then the multiproof is empty). + +Ideally, any function which accepts an SSZ object should also be able to accept a `MerklePartial` object as a substitute. diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md new file mode 100644 index 000000000..8878545bb --- /dev/null +++ b/specs/light_client/sync_protocol.md @@ -0,0 +1,172 @@ +# Beacon chain light client syncing + +One of the design goals of the eth2 beacon chain is light-client friendlines, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains. + +### Preliminaries + +We define an "expansion" of an object as an object where a field in an object that is meant to represent the `hash_tree_root` of another object is replaced by the object. Note that defining expansions is not a consensus-layer-change; it is merely a "re-interpretation" of the object. Particularly, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (eg. `BeaconBlockHeader` is a summary of `BeaconBlock`). + +We define two expansions: + +* `ExtendedBeaconBlock`, which is identical to a `BeaconBlock` except `state_root` is replaced with the corresponding `state: ExtendedBeaconState` +* `ExtendedBeaconState`, which is identical to a `BeaconState` except `latest_active_index_roots: List[Bytes32]` is replaced by `latest_active_indices: List[List[ValidatorIndex]]`, where `BeaconState.latest_active_index_roots[i] = hash_tree_root(ExtendedBeaconState.latest_active_indices[i])` + +Note that there is now a new way to compute `get_active_validator_indices`: + +```python +def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[ValidatorIndex]: + return state.latest_active_indices[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH] +``` + +Note that it takes `state` instead of `state.validator_registry` as an argument. This does not affect its use in `get_shuffled_committee`, because `get_shuffled_committee` has access to the full `state` as one of its arguments. + +A `MerklePartial(f, *args)` is an object that contains a minimal Merkle proof needed to compute `f(*args)`. 
A `MerklePartial` can be used in place of a regular SSZ object, though a computation would return an error if it attempts to access part of the object that is not contained in the proof. + +We add a data type `PeriodData` and four helpers: + +```python +{ + 'validator_count': 'uint64', + 'seed': 'bytes32', + 'committee': [Validator] +} +``` + +```python +def get_earlier_start_epoch(slot: Slot) -> int: + return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD * 2 + +def get_later_start_epoch(slot: Slot) -> int: + return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD + +def get_earlier_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData: + period_start = get_earlier_start_epoch(header.slot) + validator_count = len(get_active_validator_indices(state, period_start)) + committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 + indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count) + return PeriodData( + validator_count, + generate_seed(block.state, period_start), + [block.state.validator_registry[i] for i in indices] + ) + +def get_later_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData: + period_start = get_later_start_epoch(header.slot) + validator_count = len(get_active_validator_indices(state, period_start)) + committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 + indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count) + return PeriodData( + validator_count, + generate_seed(block.state, period_start), + [block.state.validator_registry[i] for i in indices] + ) +``` + +### Light client state + +A light client will keep track of: + +* A random `shard_id` in `[0...SHARD_COUNT-1]` (selected once and retained forever) +* A block header that they consider to be finalized (`finalized_header`) and do not expect to revert. +* `later_period_data = get_maximal_later_committee(finalized_header, shard_id)` +* `earlier_period_data = get_maximal_earlier_committee(finalized_header, shard_id)` + +We use the struct `validator_memory` to keep track of these variables. + +### Updating the shuffled committee + +If a client's `validator_memory.finalized_header` changes so that `header.slot // PERSISTENT_COMMITTEE_PERIOD` increases, then the client can ask the network for a `new_committee_proof = MerklePartial(get_maximal_later_committee, validator_memory.finalized_header, shard_id)`. It can then compute: + +```python +earlier_period_data = later_period_data +later_period_data = get_later_period_data(new_committee_proof, finalized_header, shard_id) +``` + +The maximum size of a proof is `128 * ((22-7) * 32 + 110) = 75520` bytes for validator records and `(22-7) * 32 + 128 * 8 = 1504` for the active index proof (much smaller because the relevant active indices are all beside each other in the Merkle tree). This needs to be done once per `PERSISTENT_COMMITTEE_PERIOD` epochs (2048 epochs / 9 days), or ~38 bytes per epoch. 
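As a quick check of the arithmetic above (constants copied from the prose; this is bookkeeping, not consensus logic):

```python
validator_records_proof = 128 * ((22 - 7) * 32 + 110)  # = 75520 bytes
active_index_proof = (22 - 7) * 32 + 128 * 8           # = 1504 bytes
# Amortized over one PERSISTENT_COMMITTEE_PERIOD of 2048 epochs
bytes_per_epoch = (validator_records_proof + active_index_proof) / 2048  # ~37.6
```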
+ +### Computing the current committee + +Here is a helper to compute the committee at a slot given the maximal earlier and later committees: + +```python +def compute_committee(header: BeaconBlockHeader, + validator_memory: ValidatorMemory): + + earlier_validator_count = validator_memory.earlier_period_data.validator_count + later_validator_count = validator_memory.later_period_data.validator_count + earlier_committee = validator_memory.earlier_period_data.committee + later_committee = validator_memory.later_period_data.committee + earlier_start_epoch = get_earlier_start_epoch(header.slot) + later_start_epoch = get_later_start_epoch(header.slot) + epoch = slot_to_epoch(header.slot) + + actual_committee_count = max( + earlier_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), + later_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), + ) + 1 + + def get_offset(count, end:bool): + return get_split_offset(count, + SHARD_COUNT * committee_count, + validator_memory.shard_id * committee_count + (1 if end else 0)) + + actual_earlier_committee = maximal_earlier_committee[ + 0:get_offset(earlier_validator_count, True) - get_offset(earlier_validator_count, False) + ] + actual_later_committee = maximal_later_committee[ + 0:get_offset(later_validator_count, True) - get_offset(later_validator_count, False) + ] + def get_switchover_epoch(index): + return ( + bytes_to_int(hash(validator_memory.earlier_period_data.seed + bytes3(index))[0:8]) % + PERSISTENT_COMMITTEE_PERIOD + ) + + # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from + # later committee; return a sorted list of the union of the two, deduplicated + return sorted(list(set( + [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] + + [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] + ))) + +``` + +Note that this method makes use of the fact that the committee for any given shard always starts and ends at the same validator index independently of the committee count (this is because the validator set is split into `SHARD_COUNT * committee_count` slices but the first slice of a shard is a multiple `committee_count * i`, so the start of the slice is `n * committee_count * i // (SHARD_COUNT * committee_count) = n * i // SHARD_COUNT`, using the slightly nontrivial algebraic identity `(x * a) // ab == x // b`). 
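The algebraic identity invoked above is easy to sanity-check numerically. A throwaway snippet (values chosen arbitrarily, with `SHARD_COUNT = 1024` as in the phase 0 spec) confirming that a shard's start offset does not depend on `committee_count`:

```python
SHARD_COUNT = 1024
n = 1000003  # arbitrary validator count
for i in (0, 1, 7, 1023):  # shard ids
    for committee_count in (1, 2, 5, 13):
        start = n * (committee_count * i) // (SHARD_COUNT * committee_count)
        assert start == n * i // SHARD_COUNT  # (x * a) // (a * b) == x // b
```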
+ +### Verifying blocks + +If a client wants to update its `finalized_header` it asks the network for a `BlockValidityProof`, which is simply: + +```python +{ + 'header': BlockHeader, + 'shard_aggregate_signature': 'bytes96', + 'shard_bitfield': 'bytes', + 'shard_parent_block': ShardBlock +} +``` + +The verification procedure is as follows: + +```python +def verify_block_validity_proof(proof: BlockValidityProof, validator_memory: ValidatorMemory) -> bool: + assert proof.shard_parent_block.beacon_chain_ref == hash_tree_root(proof.header) + committee = compute_committee(proof.header, validator_memory) + # Verify that we have >=50% support + support_balance = sum([c.high_balance for i, c in enumerate(committee) if get_bitfield_bit(proof.shard_bitfield, i) is True]) + total_balance = sum([c.high_balance for i, c in enumerate(committee)] + assert support_balance * 2 > total_balance + # Verify shard attestations + group_public_key = bls_aggregate_pubkeys([ + v.pubkey for v, index in enumerate(committee) if + get_bitfield_bit(proof.shard_bitfield, i) is True + ]) + assert bls_verify( + pubkey=group_public_key, + message_hash=hash_tree_root(shard_parent_block), + signature=shard_aggregate_signature, + domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_ATTESTER) + ) +``` + +The size of this proof is only 200 (header) + 96 (signature) + 16 (bitfield) + 352 (shard block) = 664 bytes. It can be reduced further by replacing `ShardBlock` with `MerklePartial(lambda x: x.beacon_chain_ref, ShardBlock)`, which would cut off ~220 bytes. From b40236685c1fd8bf016761dfa861e05686dc6d1f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 13 Mar 2019 09:04:12 -0600 Subject: [PATCH 013/133] phase 1 nitpicks --- specs/core/1_shard-data-chains.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index db68591e7..d8efe0c85 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -556,7 +556,7 @@ def verify_custody_subkey_reveal(pubkey: bytes48, ```python def verify_signed_challenge_message(message: Any, pubkey: bytes48) -> bool: return bls_verify( - message_hash=signed_root(message, 'signature'), + message_hash=signed_root(message), pubkey=pubkey, signature=message.signature, domain=get_domain(state, get_current_epoch(state), DOMAIN_CUSTODY_INTERACTIVE) @@ -607,8 +607,8 @@ Verify that `len(block.body.branch_challenges) <= MAX_BRANCH_CHALLENGES`. For each `challenge` in `block.body.branch_challenges`, run: ```python -def process_branch_challenge(challenge: BranchChallenge, - state: BeaconState): +def process_branch_challenge(state: BeaconState, + challenge: BranchChallenge): # Check that it's not too late to challenge assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY assert state.validator_registry[responder_index].exit_epoch >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY @@ -643,8 +643,8 @@ Verify that `len(block.body.branch_responses) <= MAX_BRANCH_RESPONSES`. 
For each `response` in `block.body.branch_responses`, if `response.responding_to_custody_challenge == False`, run: ```python -def process_branch_exploration_response(response: BranchResponse, - state: BeaconState): +def process_branch_exploration_response(state: BeaconState, + response: BranchResponse): challenge = get_branch_challenge_record_by_id(response.challenge_id) assert verify_merkle_branch( leaf=response.data, @@ -664,8 +664,8 @@ def process_branch_exploration_response(response: BranchResponse, If `response.responding_to_custody_challenge == True`, run: ```python -def process_branch_custody_response(response: BranchResponse, - state: BeaconState): +def process_branch_custody_response(state: BeaconState, + response: BranchResponse): challenge = get_custody_challenge_record_by_id(response.challenge_id) responder = state.validator_registry[challenge.responder_index] # Verify we're not too late @@ -718,8 +718,8 @@ Verify that `len(block.body.interactive_custody_challenge_initiations) <= MAX_IN For each `initiation` in `block.body.interactive_custody_challenge_initiations`, use the following function to process it: ```python -def process_initiation(initiation: InteractiveCustodyChallengeInitiation, - state: BeaconState): +def process_initiation(state: BeaconState, + initiation: InteractiveCustodyChallengeInitiation): challenger = state.validator_registry[initiation.challenger_index] responder = state.validator_registry[initiation.responder_index] # Verify the signature @@ -771,8 +771,8 @@ Verify that `len(block.body.interactive_custody_challenge_responses) <= MAX_INTE For each `response` in `block.body.interactive_custody_challenge_responses`, use the following function to process it: ```python -def process_response(response: InteractiveCustodyChallengeResponse, - state: State): +def process_response(state: BeaconState, + response: InteractiveCustodyChallengeResponse): challenge = get_custody_challenge_record_by_id(state, response.challenge_id) responder = state.validator_registry[challenge.responder_index] # Check that the right number of hashes was provided @@ -804,8 +804,8 @@ Verify that `len(block.body.interactive_custody_challenge_continuations) <= MAX_ For each `continuation` in `block.body.interactive_custody_challenge_continuations`, use the following function to process it: ```python -def process_continuation(continuation: InteractiveCustodyChallengeContinuation, - state: State): +def process_continuation(state: BeaconState, + continuation: InteractiveCustodyChallengeContinuation): challenge = get_custody_challenge_record_by_id(state, continuation.challenge_id) challenger = state.validator_registry[challenge.challenger_index] responder = state.validator_registry[challenge.responder_index] From cdd59ae2309dddbe211418e03e9ccc459b4a7ddb Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 13 Mar 2019 09:11:35 -0600 Subject: [PATCH 014/133] add return types to phase 1 functions Co-Authored-By: djrtwo --- specs/core/1_shard-data-chains.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index d8efe0c85..1713c6cbf 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -608,7 +608,7 @@ For each `challenge` in `block.body.branch_challenges`, run: ```python def process_branch_challenge(state: BeaconState, - challenge: BranchChallenge): + challenge: BranchChallenge) -> None: # Check that it's not too late to challenge assert 
slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY assert state.validator_registry[responder_index].exit_epoch >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY @@ -644,7 +644,7 @@ For each `response` in `block.body.branch_responses`, if `response.responding_to ```python def process_branch_exploration_response(state: BeaconState, - response: BranchResponse): + response: BranchResponse) -> None: challenge = get_branch_challenge_record_by_id(response.challenge_id) assert verify_merkle_branch( leaf=response.data, @@ -665,7 +665,7 @@ If `response.responding_to_custody_challenge == True`, run: ```python def process_branch_custody_response(state: BeaconState, - response: BranchResponse): + response: BranchResponse) -> None: challenge = get_custody_challenge_record_by_id(response.challenge_id) responder = state.validator_registry[challenge.responder_index] # Verify we're not too late @@ -719,7 +719,7 @@ For each `initiation` in `block.body.interactive_custody_challenge_initiations`, ```python def process_initiation(state: BeaconState, - initiation: InteractiveCustodyChallengeInitiation): + initiation: InteractiveCustodyChallengeInitiation) -> None: challenger = state.validator_registry[initiation.challenger_index] responder = state.validator_registry[initiation.responder_index] # Verify the signature @@ -772,7 +772,7 @@ For each `response` in `block.body.interactive_custody_challenge_responses`, use ```python def process_response(state: BeaconState, - response: InteractiveCustodyChallengeResponse): + response: InteractiveCustodyChallengeResponse) -> None: challenge = get_custody_challenge_record_by_id(state, response.challenge_id) responder = state.validator_registry[challenge.responder_index] # Check that the right number of hashes was provided @@ -805,7 +805,7 @@ For each `continuation` in `block.body.interactive_custody_challenge_continuatio ```python def process_continuation(state: BeaconState, - continuation: InteractiveCustodyChallengeContinuation): + continuation: InteractiveCustodyChallengeContinuation) -> None: challenge = get_custody_challenge_record_by_id(state, continuation.challenge_id) challenger = state.validator_registry[challenge.challenger_index] responder = state.validator_registry[challenge.responder_index] From 0e837c3386bbb919610665fe70ca747fb4fb9afe Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 13 Mar 2019 12:17:21 -0600 Subject: [PATCH 015/133] update v-guide to v0.5.0 --- specs/validator/0_beacon-chain-validator.md | 78 ++++++++++----------- 1 file changed, 37 insertions(+), 41 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 0c95fb446..7293675f1 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -101,8 +101,7 @@ In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW To submit a deposit: * Pack the validator's [initialization parameters](#initialization) into `deposit_input`, a [`DepositInput`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#depositinput) SSZ object. -* Set `deposit_input.proof_of_possession = EMPTY_SIGNATURE`. -* Let `proof_of_possession` be the result of `bls_sign` of the `hash_tree_root(deposit_input)` with `domain=DOMAIN_DEPOSIT`. +* Let `proof_of_possession` be the result of `bls_sign` of the `signed_root(deposit_input)` with `domain=DOMAIN_DEPOSIT`. 
* Set `deposit_input.proof_of_possession = proof_of_possession`. * Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT`. * Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit` along with `serialize(deposit_input)` as the singular `bytes` input along with a deposit `amount` in Gwei. @@ -121,11 +120,12 @@ Once a validator has been processed and added to the beacon state's `validator_r In normal operation, the validator is quickly activated at which point the validator is added to the shuffling and begins validation after an additional `ACTIVATION_EXIT_DELAY` epochs (25.6 minutes). -The function [`is_active_validator`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given epoch. Usage is as follows: +The function [`is_active_validator`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given shuffling epoch. Note that the `BeaconState` contains a field `current_shuffling_epoch` which dictates from which epoch the current active validators are taken. Usage is as follows: ```python +shuffling_epoch = state.current_shuffling_epoch validator = state.validator_registry[validator_index] -is_active = is_active_validator(validator, epoch) +is_active = is_active_validator(validator, shuffling_epoch) ``` Once a validator is activated, the validator is assigned [responsibilities](#beacon-chain-responsibilities) until exited. @@ -138,7 +138,7 @@ A validator has two primary responsibilities to the beacon chain -- [proposing b ### Block proposal -A validator is expected to propose a [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function). +A validator is expected to propose a [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function). There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (eg. at 312500 validators = 10 million ETH, that's once per ~3 weeks). @@ -152,13 +152,13 @@ _Note:_ there might be "skipped" slots between the `parent` and `block`. These s ##### Parent root -Set `block.parent_root = hash_tree_root(parent)`. 
+Set `block.previous_block_root = hash_tree_root(parent)`. ##### State root Set `block.state_root = hash_tree_root(state)` of the resulting `state` of the `parent -> block` state transition. -_Note_: To calculate `state_root`, the validator should first run the state transition function on an unsigned `block` containing a stub for the `state_root`. It is useful to be able to run a state transition function that does _not_ validate signatures for this purpose. +_Note_: To calculate `state_root`, the validator should first run the state transition function on an unsigned `block` containing a stub for the `state_root`. It is useful to be able to run a state transition function that does _not_ validate signatures or state root for this purpose. ##### Randao reveal @@ -166,8 +166,8 @@ Set `block.randao_reveal = epoch_signature` where `epoch_signature` is defined a ```python epoch_signature = bls_sign( - privkey=validator.privkey, # privkey store locally, not in state - message_hash=int_to_bytes32(slot_to_epoch(block.slot)), + privkey=validator.privkey, # privkey stored locally, not in state + message_hash=hash_tree_root(slot_to_epoch(block.slot)), domain=get_domain( fork=fork, # `fork` is the fork object at the slot `block.slot` epoch=slot_to_epoch(block.slot), @@ -194,23 +194,16 @@ epoch_signature = bls_sign( ##### Signature -Set `block.signature = signed_proposal_data` where `signed_proposal_data` is defined as: +Set `block.signature = block_signature` where `block_signature` is defined as: ```python -proposal_data = ProposalSignedData( - slot=slot, - shard=BEACON_CHAIN_SHARD_NUMBER, - block_root=hash_tree_root(block), # where `block.sigature == EMPTY_SIGNATURE -) -proposal_root = hash_tree_root(proposal_data) - -signed_proposal_data = bls_sign( +block_signature = bls_sign( privkey=validator.privkey, # privkey store locally, not in state - message_hash=proposal_root, + message_hash=signed_root(block), domain=get_domain( fork=fork, # `fork` is the fork object at the slot `block.slot` epoch=slot_to_epoch(block.slot), - domain_type=DOMAIN_PROPOSAL, + domain_type=DOMAIN_BEACON_BLOCK, ) ) ``` @@ -227,12 +220,14 @@ Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](https://github.com/ethereum/ ##### Attestations -Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations-1). To maximize profit, the validator should attempt to create aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. +Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations-1). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. ##### Deposits Up to `MAX_DEPOSITS` [`Deposit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) objects can be included in the `block`. 
These deposits are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposits-1). +The `proof` for each deposit must be constructed against the deposit root contained in `state.latest_eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `latest_eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation. + ##### Voluntary exits Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#exits-1). @@ -247,9 +242,12 @@ A validator should create and broadcast the attestation halfway through the `slo First the validator should construct `attestation_data`, an [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot. +* Let `head_block` be the result of running the fork choice during the assigned slot. +* Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot. + ##### Slot -Set `attestation_data.slot = slot` where `slot` is the current slot of which the validator is a member of a committee. +Set `attestation_data.slot = head_state.slot`. ##### Shard @@ -257,15 +255,15 @@ Set `attestation_data.shard = shard` where `shard` is the shard associated with ##### Beacon block root -Set `attestation_data.beacon_block_root = hash_tree_root(head)` where `head` is the validator's view of the `head` block of the beacon chain during `slot`. +Set `attestation_data.beacon_block_root = hash_tree_root(head_block)`. -##### Epoch boundary root +##### Target root -Set `attestation_data.epoch_boundary_root = hash_tree_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary in the chain defined by `head` -- i.e. the `BeaconBlock` where `block.slot == get_epoch_start_slot(slot_to_epoch(head.slot))`. +Set `attestation_data.target_root = hash_tree_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. _Note:_ This can be looked up in the state using: -* Let `epoch_start_slot = get_epoch_start_slot(slot_to_epoch(head.slot))`. -* Set `epoch_boundary_root = hash_tree_root(head) if epoch_start_slot == head.slot else get_block_root(state, epoch_start_slot)`. +* Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`. +* Set `epoch_boundary = head if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`. ##### Crosslink data root @@ -275,17 +273,15 @@ _Note:_ This is a stub for phase 0. 
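As an aside, the target root lookup described above can be collected into a small helper. This is a non-normative sketch: `get_target_root` is an illustrative name rather than a spec function, and the phase 0 helpers `get_epoch_start_slot`, `get_current_epoch`, `get_block_root`, and `hash_tree_root` are assumed to be in scope.

```python
def get_target_root(head_block: BeaconBlock, head_state: BeaconState) -> Bytes32:
    # Root of the block at the most recent epoch boundary in the chain of `head_block`
    epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))
    if epoch_start_slot == head_state.slot:
        return hash_tree_root(head_block)
    else:
        return get_block_root(head_state, epoch_start_slot)
```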
##### Latest crosslink -Set `attestation_data.latest_crosslink = state.latest_crosslinks[shard]` where `state` is the beacon state at `head` and `shard` is the validator's assigned shard. +Set `attestation_data.previous_crosslink = head_state.latest_crosslinks[shard]`. -##### Justified epoch +##### Source epoch -Set `attestation_data.justified_epoch = state.justified_epoch` where `state` is the beacon state at `head`. +Set `attestation_data.source_epoch = head_state.justified_epoch`. -##### Justified block root +##### Source root -Set `attestation_data.justified_block_root = hash_tree_root(justified_block)` where `justified_block` is the block at the slot `get_epoch_start_slot(state.justified_epoch)` in the chain defined by `head`. - -_Note:_ This can be looked up in the state using `get_block_root(state, get_epoch_start_slot(state.justified_epoch))`. +Set `attestation_data.source_root = head_state.current_justified_root`. #### Construct attestation @@ -320,11 +316,11 @@ attestation_data_and_custody_bit = AttestationDataAndCustodyBit( data=attestation.data, custody_bit=0b0, ) -attestation_message_to_sign = hash_tree_root(attestation_data_and_custody_bit) +attestation_message = hash_tree_root(attestation_data_and_custody_bit) signed_attestation_data = bls_sign( - privkey=validator.privkey, # privkey store locally, not in state - message_hash=attestation_message_to_sign, + privkey=validator.privkey, # privkey stored locally, not in state + message_hash=attestation_message, domain=get_domain( fork=fork, # `fork` is the fork object at the slot, `attestation_data.slot` epoch=slot_to_epoch(attestation_data.slot), @@ -402,12 +398,12 @@ _Note_: Signed data must be within a sequential `Fork` context to conflict. Mess ### Proposer slashing -To avoid "proposer slashings", a validator must not sign two conflicting [`ProposalSignedData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposalsigneddata) where conflicting is defined as having the same `slot` and `shard` but a different `block_root`. In phase 0, proposals are only made for the beacon chain (`shard == BEACON_CHAIN_SHARD_NUMBER`). +To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposalsigneddata) where conflicting is defined as two distinct blocks within the same epoch. -_In phase 0, as long as the validator does not sign two different beacon chain proposals for the same slot, the validator is safe against proposer slashings._ +_In phase 0, as long as the validator does not sign two different beacon blocks for the same epoch, the validator is safe against proposer slashings._ Specifically, when signing an `BeaconBlock`, a validator should perform the following steps in the following order: -1. Save a record to hard disk that an beacon block has been signed for the `slot=slot` and `shard=BEACON_CHAIN_SHARD_NUMBER`. +1. Save a record to hard disk that an beacon block has been signed for the `epoch=slot_to_epoch(block.slot)`. 2. Generate and broadcast the block. If the software crashes at some point within this routine, then when the validator comes back online the hard disk has the record of the _potentially_ signed/broadcast block and can effectively avoid slashing. 
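One possible shape for the record kept in step 1 is sketched below. It is only an illustration of the "persist before signing" pattern: the file name, the JSON encoding, and the `sign_fn` callback are assumptions made for the example, and `slot_to_epoch` is assumed to be the phase 0 helper.

```python
import json
import os

RECORD_PATH = "signed_block_epochs.json"  # illustrative location, not part of the spec

def load_signed_epochs() -> set:
    # Read the set of epochs for which a block has already been signed
    if not os.path.exists(RECORD_PATH):
        return set()
    with open(RECORD_PATH) as f:
        return set(json.load(f))

def record_then_sign_block(block, sign_fn):
    epoch = slot_to_epoch(block.slot)  # assumes the spec's `slot_to_epoch`
    signed_epochs = load_signed_epochs()
    if epoch in signed_epochs:
        raise ValueError("refusing to sign a second block for epoch {}".format(epoch))
    # Step 1: persist the record *before* signing/broadcasting
    with open(RECORD_PATH, "w") as f:
        json.dump(sorted(signed_epochs | {epoch}), f)
    # Step 2: only then generate the signature
    return sign_fn(block)
```

The attestation case described next can keep an analogous record of `(source_epoch, target_epoch)` pairs.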
@@ -417,7 +413,7 @@ If the software crashes at some point within this routine, then when the validat To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) objects where conflicting is defined as a set of two attestations that satisfy either [`is_double_vote`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_double_vote) or [`is_surround_vote`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_surround_vote). Specifically, when signing an `Attestation`, a validator should perform the following steps in the following order: -1. Save a record to hard disk that an attestation has been signed for source -- `attestation_data.justified_epoch` -- and target -- `slot_to_epoch(attestation_data.slot)`. +1. Save a record to hard disk that an attestation has been signed for source -- `attestation_data.source_epoch` -- and target -- `slot_to_epoch(attestation_data.slot)`. 2. Generate and broadcast attestation. If the software crashes at some point within this routine, then when the validator comes back online the hard disk has the record of the _potentially_ signed/broadcast attestation and can effectively avoid slashing. From 9774a3d5811396c609ffa1b74d0f6e4f3a642d02 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Wed, 13 Mar 2019 17:01:47 -0700 Subject: [PATCH 016/133] Helper function returns correct type of `Gwei` instead of indices --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index bd0187a9f..daa1bc108 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1719,7 +1719,7 @@ def get_attesting_indices(state: BeaconState, attestations: List[PendingAttestat ``` ```python -def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: +def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: return get_total_balance(state, get_attesting_indices(state, attestations)) ``` From c30018a71657b62f6c088f19eb85a721f0641305 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Wed, 13 Mar 2019 18:45:52 -0700 Subject: [PATCH 017/133] Update 0_beacon-chain-validator.md --- specs/validator/0_beacon-chain-validator.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 7293675f1..be3008227 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -40,11 +40,11 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers - [Slot](#slot-1) - [Shard](#shard) - [Beacon block root](#beacon-block-root) - - [Epoch boundary root](#epoch-boundary-root) + - [Target root](#target-root) - [Crosslink data root](#crosslink-data-root) - [Latest crosslink](#latest-crosslink) - - [Justified epoch](#justified-epoch) - - [Justified block root](#justified-block-root) + - [Source epoch](#source-epoch) + - [Source root](#source-root) - [Construct attestation](#construct-attestation) - [Data](#data) - [Aggregation bitfield](#aggregation-bitfield) From 4442dfffb97c04a0697dd84bb85ba692613a3fff Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 13 Mar 2019 21:42:49 -0500 Subject: [PATCH 018/133] Fair proposer selection probability 
Note that as a side effect, proposer selection becomes less predictable, but I don't feel like this is a large downside. --- specs/core/0_beacon-chain.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..5ca59c66b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1014,7 +1014,13 @@ def get_beacon_proposer_index(state: BeaconState, assert previous_epoch <= epoch <= next_epoch - first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] + first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] i = 0 + while i < len(first_committee): + rand_byte = hash(generate_seed(get_current_epoch(state)) + int_to_bytes8(i // 32))[i % 32] + candidate = first_committee[(epoch % i) % len(first_committee)] + if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: + return candidate + i += 1 return first_committee[epoch % len(first_committee)] ``` From 29caafc7567096325c14e7961550c4ba6f7c046b Mon Sep 17 00:00:00 2001 From: jannikluhn Date: Wed, 13 Mar 2019 21:52:25 -0700 Subject: [PATCH 019/133] Update specs/networking/rpc-interface.md Co-Authored-By: mslipper --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index fdc9a11b3..e59f6a6b1 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -199,7 +199,7 @@ Send a list of block roots and slots to the peer. ) ``` -Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `2` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is undefined for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were undefined in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further undefined, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. +Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `2` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Client could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. 
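As a rough illustration of the `skip_slots` semantics above, a responding client might select the headers to return as sketched below. This is a non-normative sketch: `canonical_slot_to_root` is an assumed local index from filled canonical slots to block roots, not part of the wire protocol, and the `start_root` disambiguation is omitted.

```python
def select_header_roots(start_slot: int, max_headers: int, skip_slots: int,
                        canonical_slot_to_root: dict) -> list:
    # Candidate slots are spaced `skip_slots` apart, capped at `max_headers` entries
    step = skip_slots if skip_slots > 0 else 1
    roots = []
    for slot in range(start_slot, start_slot + max_headers * step, step):
        # For an empty slot, fall back to the closest previous filled slot
        while slot >= 0 and slot not in canonical_slot_to_root:
            slot -= 1
        if slot < 0:
            continue
        root = canonical_slot_to_root[slot]
        if root not in roots:  # duplicate blocks are collapsed
            roots.append(root)
    return roots
```

With `max_headers = 5`, `skip_slots = 2`, and slots 3 and 4 empty, this returns the roots of the blocks at slots `[2, 6, 8, 10]`, matching the collapsing example above.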
From f3bddee7a5dcc8df1dfe0deeea9c875df0911415 Mon Sep 17 00:00:00 2001 From: jannikluhn Date: Wed, 13 Mar 2019 21:55:48 -0700 Subject: [PATCH 020/133] Update specs/networking/rpc-interface.md Co-Authored-By: mslipper --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index e59f6a6b1..e087abe96 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -165,7 +165,7 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MUST be ``` # BlockRootSlot ( - block_root: HashTreeRoot + block_root: bytes32 slot: uint64 ) From 5a9ef0fd982f7c23c55afcfd43e07a022a2878b9 Mon Sep 17 00:00:00 2001 From: jannikluhn Date: Wed, 13 Mar 2019 21:55:59 -0700 Subject: [PATCH 021/133] Update specs/networking/rpc-interface.md Co-Authored-By: mslipper --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index e087abe96..e69f60801 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -201,7 +201,7 @@ Send a list of block roots and slots to the peer. Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `2` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. -The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Client could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. +The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. ### Beacon Block Bodies From 22e6212e6f08581aeca48dd6efee5e3c81c78f9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Wed, 13 Mar 2019 21:56:47 -0700 Subject: [PATCH 022/133] Update specs/networking/node-identification.md Co-Authored-By: mslipper --- specs/networking/node-identification.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/node-identification.md b/specs/networking/node-identification.md index 27c1ebf9d..0f1f9832b 100644 --- a/specs/networking/node-identification.md +++ b/specs/networking/node-identification.md @@ -23,7 +23,7 @@ It is RECOMMENDED that clients set their TCP port to the default of `9000`. ## Peer ID Generation -The `libp2p` networking stack identifies peers via a "peer ID." 
Simply put, a node's Peer ID is the SHA2-256 `multihash` of the node's public key. `go-libp2p-crypto` contains the canonical implementation of how to hash `secp256k1` keys for use as a peer ID. +The `libp2p` networking stack identifies peers via a "peer ID." Simply put, a node's Peer ID is the SHA2-256 `multihash` of the node's public key struct (serialized in protobuf, refer to the [Peer ID spec](https://github.com/libp2p/specs/pull/100)). `go-libp2p-crypto` contains the canonical implementation of how to hash `secp256k1` keys for use as a peer ID. # See Also From 863f85c45ab2e3327c8c2e5f620af040b239fb40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Wed, 13 Mar 2019 21:57:29 -0700 Subject: [PATCH 023/133] Update specs/networking/rpc-interface.md Co-Authored-By: mslipper --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index e69f60801..d07e728c9 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -30,7 +30,7 @@ All referenced data structures can be found in the [0-beacon-chain](https://gith ## `libp2p` Protocol Names -A "Protocol Name" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. A client's supported protocol paths are negotiated by the `libp2p` stack at connection time; as such they are not part of individual message bodies. +A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. Peers exchange supported protocol IDs via the `Identify` protocol upon connection. When opening a new stream, peers pin a particular protocol ID to it, and the stream remains contextualised thereafter. Since messages are sent inside a stream, they do not need to bear the protocol ID. ## RPC-Over-`libp2p` From 23d15f51a799fa9fc3c2a7aa5493b05fcad81568 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 14 Mar 2019 18:57:17 +0000 Subject: [PATCH 024/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 5ca59c66b..d0be14e47 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1014,14 +1014,14 @@ def get_beacon_proposer_index(state: BeaconState, assert previous_epoch <= epoch <= next_epoch - first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] i = 0 - while i < len(first_committee): + first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] + i = 0 + while True: rand_byte = hash(generate_seed(get_current_epoch(state)) + int_to_bytes8(i // 32))[i % 32] candidate = first_committee[(epoch % i) % len(first_committee)] if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: return candidate i += 1 - return first_committee[epoch % len(first_committee)] ``` ### `verify_merkle_branch` From bbc51391153169e2d8071ab059b9b5001b5da072 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 14 Mar 2019 19:01:32 +0000 Subject: [PATCH 025/133] Update 0_beacon-chain.md Assuming `epoch % i` is a bug, and you meant `epoch + i`. 
@vbuterin --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d0be14e47..f2d06472b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1018,7 +1018,7 @@ def get_beacon_proposer_index(state: BeaconState, i = 0 while True: rand_byte = hash(generate_seed(get_current_epoch(state)) + int_to_bytes8(i // 32))[i % 32] - candidate = first_committee[(epoch % i) % len(first_committee)] + candidate = first_committee[(epoch + i) % len(first_committee)] if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: return candidate i += 1 From 24468de23bf1e0e4059ff5eeb5f9a5c621bc4f75 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 14 Mar 2019 20:28:44 -0500 Subject: [PATCH 026/133] Change get_shuffling to compute_committee See #729 and #774 The behavior now is that the first committee will consist of `get_permuted_index(0..n-1)`, the second committee `get_permuted_index(n....2n-1)`, etc. --- specs/core/0_beacon-chain.md | 47 +++++++++++++++--------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..206aebf76 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -63,7 +63,7 @@ - [`get_permuted_index`](#get_permuted_index) - [`split`](#split) - [`get_epoch_committee_count`](#get_epoch_committee_count) - - [`get_shuffling`](#get_shuffling) + - [`compute_committee`](#compute_committee) - [`get_previous_epoch_committee_count`](#get_previous_epoch_committee_count) - [`get_current_epoch_committee_count`](#get_current_epoch_committee_count) - [`get_next_epoch_committee_count`](#get_next_epoch_committee_count) @@ -803,28 +803,26 @@ def get_epoch_committee_count(active_validator_count: int) -> int: ) * SLOTS_PER_EPOCH ``` -### `get_shuffling` +### `compute_committee` ```python -def get_shuffling(seed: Bytes32, - validators: List[Validator], - epoch: Epoch) -> List[List[ValidatorIndex]]: +def compute_committee(validator_indices: [int], + seed: Bytes32, + index: int, + total_committees: int) -> List[ValidatorIndex]: """ - Shuffle active validators and split into crosslink committees. - Return a list of committees (each a list of validator indices). + Return the index'th shuffled committee out of a total `total_committees` + using the given validator_indices and seed """ - # Shuffle active validator indices - active_validator_indices = get_active_validator_indices(validators, epoch) - length = len(active_validator_indices) - shuffled_indices = [active_validator_indices[get_permuted_index(i, length, seed)] for i in range(length)] - - # Split the shuffled active validator indices - return split(shuffled_indices, get_epoch_committee_count(length)) + start_offset = get_split_offset(len(validator_indices), total_committees, index) + end_offset = get_split_offset(len(validator_indices), total_committees, index + 1) + return [ + validator_indices[get_permuted_index(i, len(validator_indices), seed)] + for i in range(start_offset, end_offset) + ] ``` -**Invariant**: if `get_shuffling(seed, validators, epoch)` returns some value `x` for some `epoch <= get_current_epoch(state) + ACTIVATION_EXIT_DELAY`, it should return the same value `x` for the same `seed` and `epoch` and possible future modifications of `validators` forever in phase 0, and until the ~1 year deletion delay in phase 2 and in the future. 
- -**Note**: this definition and the next few definitions make heavy use of repetitive computing. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. +**Note**: this definition and the next few definitions are highly inefficient as algorithms as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. ### `get_previous_epoch_committee_count` @@ -916,22 +914,17 @@ def get_crosslink_committees_at_slot(state: BeaconState, shuffling_epoch = state.current_shuffling_epoch shuffling_start_shard = state.current_shuffling_start_shard - shuffling = get_shuffling( - seed, - state.validator_registry, - shuffling_epoch, - ) - offset = slot % SLOTS_PER_EPOCH - committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH - slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % SHARD_COUNT - + indices = get_active_validator_indices(state.validator_registry, shuffling_epoch) + committee_count = get_epoch_committee_count(len(indices)) + committees_per_slot = committee_count // EPOCH_LENGTH return [ ( - shuffling[committees_per_slot * offset + i], + compute_committee(indices, seed, committees_per_slot * offset + i, committee_count) (slot_start_shard + i) % SHARD_COUNT, ) for i in range(committees_per_slot) ] + ``` ### `get_block_root` From 5d327b63646d8831412853e7c972f866b78e628e Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 09:43:38 +0000 Subject: [PATCH 027/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 206aebf76..2dfeb7d69 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -806,7 +806,7 @@ def get_epoch_committee_count(active_validator_count: int) -> int: ### `compute_committee` ```python -def compute_committee(validator_indices: [int], +def compute_committee(validator_indices: List[ValidatorIndex], seed: Bytes32, index: int, total_committees: int) -> List[ValidatorIndex]: From 68d1c74784b8d5a1daa05b8098fa8bfb2e17b009 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 09:45:20 +0000 Subject: [PATCH 028/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2dfeb7d69..be3544ab8 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -811,8 +811,8 @@ def compute_committee(validator_indices: List[ValidatorIndex], index: int, total_committees: int) -> List[ValidatorIndex]: """ - Return the index'th shuffled committee out of a total `total_committees` - using the given validator_indices and seed + Return the ``index``'th shuffled committee out of a total ``total_committees`` + using ``validator_indices`` and ``seed``. """ start_offset = get_split_offset(len(validator_indices), total_committees, index) end_offset = get_split_offset(len(validator_indices), total_committees, index + 1) From 4a5ef988138772f7c1851b4c72634af217142d2f Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 10:51:08 +0000 Subject: [PATCH 029/133] Move to SHA256 SHA256 is de facto blockchain standard. Standardisation of the hash function is a prerequisite for [full standardisation of BLS12-381 signatures](https://github.com/ethereum/eth2.0-specs/issues/605). 
Blockchain projects are likely to provide a cheap SHA256 opcods/precompile, and unlikely to provide a Keccak256 equivelent. (Even WASM-enabled blockchains are likely to provide a SHA256 opcode/precompile since WASM does *not* natively support optimised SHA256 CPU instructions.) With Ethereum 2.0 embracing SHA256 the wider industry is more likely to converge towards a unified cross-blockchain communication scheme via Merkle receipts. There are no security blockers with SHA256 (see comments by Dan Boneh [here](https://github.com/ethereum/eth2.0-specs/issues/612#issuecomment-470452562)). --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..1d474f618 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -660,7 +660,7 @@ def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32: ### `hash` -The hash function is denoted by `hash`. In Phase 0 the beacon chain is deployed with the same hash function as Ethereum 1.0, i.e. Keccak-256 (also incorrectly known as SHA3). +The `hash` function is SHA256. Note: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethereum 2.0 deployment phase. From dac43eb564a3da19bf878364295486d0b7c03fb2 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 11:18:06 +0000 Subject: [PATCH 030/133] Simplify deposits Fix #760 --- specs/core/0_beacon-chain.md | 49 +++++++++++------------------------- 1 file changed, 14 insertions(+), 35 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..2d168cfc2 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -29,7 +29,6 @@ - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - [`SlashableAttestation`](#slashableattestation) - - [`DepositInput`](#depositinput) - [`DepositData`](#depositdata) - [`BeaconBlockHeader`](#beaconblockheader) - [`Validator`](#validator) @@ -377,7 +376,7 @@ The types are defined topologically to aid in facilitating an executable version } ``` -#### `DepositInput` +#### `DepositData` ```python { @@ -385,21 +384,10 @@ The types are defined topologically to aid in facilitating an executable version 'pubkey': 'bytes48', # Withdrawal credentials 'withdrawal_credentials': 'bytes32', - # A BLS signature of this `DepositInput` - 'proof_of_possession': 'bytes96', -} -``` - -#### `DepositData` - -```python -{ # Amount in Gwei 'amount': 'uint64', - # Timestamp from deposit contract - 'timestamp': 'uint64', - # Deposit input - 'deposit_input': DepositInput, + # Container self-signature + 'proof_of_possession': 'bytes96', } ``` @@ -512,7 +500,7 @@ The types are defined topologically to aid in facilitating an executable version # Index in the deposit tree 'index': 'uint64', # Data - 'deposit_data': DepositData, + 'data': DepositData, } ``` @@ -1278,19 +1266,12 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: Process a deposit from Ethereum 1.0. Note that this function mutates ``state``. 
""" - deposit_input = deposit.deposit_data.deposit_input - - # Should equal 8 bytes for deposit_data.amount + - # 8 bytes for deposit_data.timestamp + - # 176 bytes for deposit_data.deposit_input - # It should match the deposit_data in the eth1.0 deposit contract - serialized_deposit_data = serialize(deposit.deposit_data) # Deposits must be processed in order assert deposit.index == state.deposit_index # Verify the Merkle branch merkle_branch_is_valid = verify_merkle_branch( - leaf=hash(serialized_deposit_data), + leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialisation proof=deposit.proof, depth=DEPOSIT_CONTRACT_TREE_DEPTH, index=deposit.index, @@ -1305,16 +1286,14 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: state.deposit_index += 1 validator_pubkeys = [v.pubkey for v in state.validator_registry] - pubkey = deposit_input.pubkey - amount = deposit.deposit_data.amount - withdrawal_credentials = deposit_input.withdrawal_credentials + pubkey = deposit.data.pubkey if pubkey not in validator_pubkeys: # Verify the proof of possession proof_is_valid = bls_verify( - pubkey=deposit_input.pubkey, - message_hash=signed_root(deposit_input), - signature=deposit_input.proof_of_possession, + pubkey=pubkey, + message_hash=signed_root(deposit.data), + signature=deposit.data.proof_of_possession, domain=get_domain( state.fork, get_current_epoch(state), @@ -1327,7 +1306,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Add new validator validator = Validator( pubkey=pubkey, - withdrawal_credentials=withdrawal_credentials, + withdrawal_credentials=deposit.data.withdrawal_credentials, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, @@ -1337,10 +1316,10 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. state.validator_registry.append(validator) - state.validator_balances.append(amount) + state.validator_balances.append(deposit.data.amount) else: # Increase balance by deposit amount - state.validator_balances[validator_pubkeys.index(pubkey)] += amount + state.validator_balances[validator_pubkeys.index(pubkey)] += deposit.data.amount ``` ### Routines for updating validator status @@ -1430,11 +1409,11 @@ The initial deployment phases of Ethereum 2.0 are implemented without consensus ### Deposit arguments -The deposit contract has a single `deposit` function which takes as argument a SimpleSerialize'd `DepositInput`. +The deposit contract has a single `deposit` function which takes as argument a SimpleSerialize'd `DepositData`. ### Withdrawal credentials -One of the `DepositInput` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. As of now the only expected format is as follows: +One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. 
As of now the only expected format is as follows: * `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE` * `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]` where `withdrawal_pubkey` is a BLS pubkey From 58603f276e3dc137599d6684b7e47650f03871b7 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 11:28:55 +0000 Subject: [PATCH 031/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2d168cfc2..9f8bec933 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1271,7 +1271,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Verify the Merkle branch merkle_branch_is_valid = verify_merkle_branch( - leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialisation + leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization proof=deposit.proof, depth=DEPOSIT_CONTRACT_TREE_DEPTH, index=deposit.index, From 96ab535704fb18b3bbcf585159bf499a87d277bf Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 12:40:52 +0000 Subject: [PATCH 032/133] Simplify and cleanup process_attestation Improve readability and testability (by avoiding untriggerable `assert`). Fix #753. --- specs/core/0_beacon-chain.md | 86 +++++++++++++----------------------- 1 file changed, 30 insertions(+), 56 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..53695aeea 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2375,65 +2375,39 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: Process ``Attestation`` transaction. Note that this function mutates ``state``. """ - # Can't submit attestations that are too far in history (or in prehistory) - assert attestation.data.slot >= GENESIS_SLOT - assert state.slot <= attestation.data.slot + SLOTS_PER_EPOCH - # Can't submit attestations too quickly - assert attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot - # Verify that the justified epoch and root is correct - if slot_to_epoch(attestation.data.slot) >= get_current_epoch(state): - # Case 1: current epoch attestations - assert attestation.data.source_epoch == state.current_justified_epoch - assert attestation.data.source_root == state.current_justified_root - else: - # Case 2: previous epoch attestations - assert attestation.data.source_epoch == state.previous_justified_epoch - assert attestation.data.source_root == state.previous_justified_root - # Check that the crosslink data is valid - acceptable_crosslink_data = { - # Case 1: Latest crosslink matches the one in the state - attestation.data.previous_crosslink, - # Case 2: State has already been updated, state's latest crosslink matches the crosslink - # the attestation is trying to create - Crosslink( - crosslink_data_root=attestation.data.crosslink_data_root, - epoch=slot_to_epoch(attestation.data.slot) - ) - } - assert state.latest_crosslinks[attestation.data.shard] in acceptable_crosslink_data - # Attestation must be nonempty! 
- assert attestation.aggregation_bitfield != b'\x00' * len(attestation.aggregation_bitfield) - # Custody must be empty (to be removed in phase 1) - assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) - # Get the committee for the specific shard that this attestation is for - crosslink_committee = [ - committee for committee, shard in get_crosslink_committees_at_slot(state, attestation.data.slot) - if shard == attestation.data.shard - ][0] - # Custody bitfield must be a subset of the attestation bitfield - for i in range(len(crosslink_committee)): - if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b0: - assert get_bitfield_bit(attestation.custody_bitfield, i) == 0b0 - # Verify aggregate signature - participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) - custody_bit_1_participants = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) - custody_bit_0_participants = [i for i in participants if i not in custody_bit_1_participants] + assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= attestation.data.slot + assert attestation.data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY - assert bls_verify_multiple( - pubkeys=[ - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_0_participants]), - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_participants]), - ], - message_hashes=[ - hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), - hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b1)), - ], + # Check source epoch and root match current or previous justified epoch and root + assert (slot_to_epoch(attestation.data.slot), attestation.data.source_epoch, attestation.data.source_root) in { + (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), + (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), + } + + # Check crosslink data + assert attestation.data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1] + assert state.latest_crosslinks[attestation.data.shard] in { + attestation.data.previous_crosslink, # Case 1: latest crosslink matches previous crosslink + Crosslink( # Case 2: latest crosslink matches current crosslink + crosslink_data_root=attestation.data.crosslink_data_root, + epoch=slot_to_epoch(attestation.data.slot), + ), + } + + # Check custody bits [to be generalised in phase 1] + assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) + + # Check aggregate signature [to be generalised in phase 1] + participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + assert len(participants) != 0 + assert bls_verify( + pubkey=bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in participants]), + message_hash=hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), signature=attestation.aggregate_signature, domain=get_domain(state.fork, slot_to_epoch(attestation.data.slot), DOMAIN_ATTESTATION), ) - # Crosslink data root is zero (to be removed in phase 1) - assert attestation.data.crosslink_data_root == ZERO_HASH - # Apply the attestation + + # Cache pending attestation pending_attestation = PendingAttestation( data=attestation.data, aggregation_bitfield=attestation.aggregation_bitfield, @@ -2442,7 +2416,7 @@ def process_attestation(state: 
BeaconState, attestation: Attestation) -> None: ) if slot_to_epoch(attestation.data.slot) == get_current_epoch(state): state.current_epoch_attestations.append(pending_attestation) - elif slot_to_epoch(attestation.data.slot) == get_previous_epoch(state): + else: state.previous_epoch_attestations.append(pending_attestation) ``` From d8d653dd949e92e4baf368040afa0b8216922a55 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 12:51:46 +0000 Subject: [PATCH 033/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 53695aeea..766bdf53c 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2378,8 +2378,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= attestation.data.slot assert attestation.data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY - # Check source epoch and root match current or previous justified epoch and root - assert (slot_to_epoch(attestation.data.slot), attestation.data.source_epoch, attestation.data.source_root) in { + # Check target epoch, source epoch, and source root + target_epoch = slot_to_epoch(attestation.data.slot) + assert (target_epoch, attestation.data.source_epoch, attestation.data.source_root) in { (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), } @@ -2390,7 +2391,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: attestation.data.previous_crosslink, # Case 1: latest crosslink matches previous crosslink Crosslink( # Case 2: latest crosslink matches current crosslink crosslink_data_root=attestation.data.crosslink_data_root, - epoch=slot_to_epoch(attestation.data.slot), + epoch=target_epoch, ), } @@ -2404,7 +2405,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: pubkey=bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in participants]), message_hash=hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), signature=attestation.aggregate_signature, - domain=get_domain(state.fork, slot_to_epoch(attestation.data.slot), DOMAIN_ATTESTATION), + domain=get_domain(state.fork, target_epoch, DOMAIN_ATTESTATION), ) # Cache pending attestation @@ -2414,7 +2415,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: custody_bitfield=attestation.custody_bitfield, inclusion_slot=state.slot ) - if slot_to_epoch(attestation.data.slot) == get_current_epoch(state): + if target_epoch == get_current_epoch(state): state.current_epoch_attestations.append(pending_attestation) else: state.previous_epoch_attestations.append(pending_attestation) From 4a8d748c55aecbdf91170677321abb46dca4fc4b Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 13:05:46 +0000 Subject: [PATCH 034/133] Milder ejections See item 22 in https://github.com/ethereum/eth2.0-specs/issues/675. Also partially addresses https://github.com/ethereum/eth2.0-specs/issues/527. 
--- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..0c06972ff 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2050,7 +2050,7 @@ def process_ejections(state: BeaconState) -> None: """ for index in get_active_validator_indices(state.validator_registry, get_current_epoch(state)): if state.validator_balances[index] < EJECTION_BALANCE: - exit_validator(state, index) + initiate_validator_exit(state, index) ``` #### Validator registry and shuffling seed data From e912ed7fca1d62c16979074b28c79fc1072df019 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 13:12:03 +0000 Subject: [PATCH 035/133] Include recently slashed churn in churn queue Addresses #527 in combination with #784. --- specs/core/0_beacon-chain.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..36d45b31b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2102,7 +2102,9 @@ def update_validator_registry(state: BeaconState) -> None: activate_validator(state, index, is_genesis=False) # Exit validators within the allowable balance churn - balance_churn = 0 + total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] + total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] + balance_churn = total_at_end - total_at_start for index, validator in enumerate(state.validator_registry): if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: # Check the balance churn would be within the allowance From 4b461838d27647b80a487b9543ffbf64e610adac Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 13:32:24 +0000 Subject: [PATCH 036/133] `GENESIS_EPOCH - 1` for `previous_shuffling_epoch` and `previous_justified_epoch` See item 26 in #675. 
--- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..54741c1e0 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1531,7 +1531,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], latest_randao_mixes=[ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)], previous_shuffling_start_shard=GENESIS_START_SHARD, current_shuffling_start_shard=GENESIS_START_SHARD, - previous_shuffling_epoch=GENESIS_EPOCH, + previous_shuffling_epoch=GENESIS_EPOCH - 1, current_shuffling_epoch=GENESIS_EPOCH, previous_shuffling_seed=ZERO_HASH, current_shuffling_seed=ZERO_HASH, @@ -1539,7 +1539,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Finality previous_epoch_attestations=[], current_epoch_attestations=[], - previous_justified_epoch=GENESIS_EPOCH, + previous_justified_epoch=GENESIS_EPOCH - 1, current_justified_epoch=GENESIS_EPOCH, previous_justified_root=ZERO_HASH, current_justified_root=ZERO_HASH, From 1236e8e1fa8c9ba235f316f6739aa55672ffcd45 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 13:53:24 +0000 Subject: [PATCH 037/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 36d45b31b..ff8b09071 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -595,6 +595,7 @@ The types are defined topologically to aid in facilitating an executable version 'validator_registry': [Validator], 'validator_balances': ['uint64'], 'validator_registry_update_epoch': 'uint64', + 'validator_registry_update_slashed_balances': 'uint64', # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], @@ -2116,6 +2117,7 @@ def update_validator_registry(state: BeaconState) -> None: exit_validator(state, index) state.validator_registry_update_epoch = current_epoch + state.validator_registry_update_slashed_balances = total_at_end ``` Run the following function: @@ -2164,7 +2166,7 @@ def process_slashings(state: BeaconState) -> None: total_balance = get_total_balance(state, active_validator_indices) # Compute `total_penalties` - total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] + total_at_start = state.validator_registry_update_slashed_balances total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] total_penalties = total_at_end - total_at_start From 709e0df39f4161e63a1a7877a133b1e121fcb174 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 13:54:33 +0000 Subject: [PATCH 038/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ff8b09071..36c6023e1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2103,7 +2103,7 @@ def update_validator_registry(state: BeaconState) -> None: activate_validator(state, index, is_genesis=False) # Exit validators within the allowable balance churn - total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] + total_at_start = state.validator_registry_update_slashed_balances total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] balance_churn = total_at_end - 
total_at_start for index, validator in enumerate(state.validator_registry): @@ -2166,7 +2166,7 @@ def process_slashings(state: BeaconState) -> None: total_balance = get_total_balance(state, active_validator_indices) # Compute `total_penalties` - total_at_start = state.validator_registry_update_slashed_balances + total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] total_penalties = total_at_end - total_at_start From 3944fd4b1554ea928b625c6e7ae47fc6d6076737 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 18:18:37 +0000 Subject: [PATCH 039/133] Clarify empty sums in BLS spec (#782) Fix #775. --- specs/bls_signature.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/bls_signature.md b/specs/bls_signature.md index b0490b7ae..14a4f1cb7 100644 --- a/specs/bls_signature.md +++ b/specs/bls_signature.md @@ -110,11 +110,11 @@ def modular_squareroot(value: Fq2) -> Fq2: ### `bls_aggregate_pubkeys` -Let `bls_aggregate_pubkeys(pubkeys: List[Bytes48]) -> Bytes48` return `pubkeys[0] + .... + pubkeys[len(pubkeys)-1]`, where `+` is the elliptic curve addition operation over the G1 curve. +Let `bls_aggregate_pubkeys(pubkeys: List[Bytes48]) -> Bytes48` return `pubkeys[0] + .... + pubkeys[len(pubkeys)-1]`, where `+` is the elliptic curve addition operation over the G1 curve. (When `len(pubkeys) == 0` the empty sum is the G1 point at infinity.) ### `bls_aggregate_signatures` -Let `bls_aggregate_signatures(signatures: List[Bytes96]) -> Bytes96` return `signatures[0] + .... + signatures[len(signatures)-1]`, where `+` is the elliptic curve addition operation over the G2 curve. +Let `bls_aggregate_signatures(signatures: List[Bytes96]) -> Bytes96` return `signatures[0] + .... + signatures[len(signatures)-1]`, where `+` is the elliptic curve addition operation over the G2 curve. (When `len(signatures) == 0` the empty sum is the G2 point at infinity.) 
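Editorial note, not part of the patch: the clarified empty-sum convention means aggregation can be read as a fold over elliptic-curve addition that starts from the identity element (the point at infinity). A minimal sketch of that reading; `ec_add` and `identity` are placeholders for a real BLS12-381 backend, not functions defined in this repository:

```python
from functools import reduce
from typing import Callable, List, TypeVar

Point = TypeVar("Point")

def aggregate(points: List[Point],
              ec_add: Callable[[Point, Point], Point],
              identity: Point) -> Point:
    # For an empty list this returns `identity` (the point at infinity),
    # matching the empty-sum behaviour clarified above.
    return reduce(ec_add, points, identity)

# Stand-in check with integers under addition as the "group":
assert aggregate([], lambda a, b: a + b, 0) == 0
assert aggregate([1, 2, 3], lambda a, b: a + b, 0) == 6
```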
## Signature verification From 6b118d2398d5506fdd5d1659e85b93b1cf9e2bc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=B6ren=20Steiger?= Date: Sat, 16 Mar 2019 05:16:47 +0100 Subject: [PATCH 040/133] Add trailing comma (#789) --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..f2e639a96 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -628,7 +628,7 @@ The types are defined topologically to aid in facilitating an executable version # Ethereum 1.0 chain data 'latest_eth1_data': Eth1Data, 'eth1_data_votes': [Eth1DataVote], - 'deposit_index': 'uint64' + 'deposit_index': 'uint64', } ``` From e5ff0d59ad22a9bf42acaa0bcf1e7ba646d4b41d Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 16 Mar 2019 11:23:41 +0000 Subject: [PATCH 041/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 36c6023e1..26f579233 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -595,7 +595,6 @@ The types are defined topologically to aid in facilitating an executable version 'validator_registry': [Validator], 'validator_balances': ['uint64'], 'validator_registry_update_epoch': 'uint64', - 'validator_registry_update_slashed_balances': 'uint64', # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], @@ -2103,21 +2102,23 @@ def update_validator_registry(state: BeaconState) -> None: activate_validator(state, index, is_genesis=False) # Exit validators within the allowable balance churn - total_at_start = state.validator_registry_update_slashed_balances - total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] - balance_churn = total_at_end - total_at_start - for index, validator in enumerate(state.validator_registry): - if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: - # Check the balance churn would be within the allowance - balance_churn += get_effective_balance(state, index) - if balance_churn > max_balance_churn: - break + if state.current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: + balance_churn = ( + state.latest_slashed_balances[state.validator_registry_update_epoch % LATEST_SLASHED_EXIT_LENGTH] - + state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] + ) - # Exit validator - exit_validator(state, index) + for index, validator in enumerate(state.validator_registry): + if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: + # Check the balance churn would be within the allowance + balance_churn += get_effective_balance(state, index) + if balance_churn > max_balance_churn: + break + + # Exit validator + exit_validator(state, index) state.validator_registry_update_epoch = current_epoch - state.validator_registry_update_slashed_balances = total_at_end ``` Run the following function: From 1a0938169bce4388c4443809a221862a259d9b69 Mon Sep 17 00:00:00 2001 From: NIC Lin Date: Sat, 16 Mar 2019 20:45:39 +0800 Subject: [PATCH 042/133] Fix `get_split_offset` (#790) --- specs/core/1_shard-data-chains.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 1713c6cbf..b2f567ed8 100644 --- a/specs/core/1_shard-data-chains.md +++ 
b/specs/core/1_shard-data-chains.md @@ -118,9 +118,9 @@ Phase 1 depends upon all of the constants defined in [Phase 0](0_beacon-chain.md def get_split_offset(list_size: int, chunks: int, index: int) -> int: """ Returns a value such that for a list L, chunk count k and index i, - split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k+1, i)] + split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] """ - return (len(list_size) * index) // chunks + return (list_size * index) // chunks ```` #### `get_shuffled_committee` From 919b99e0aea2a8338cafcdd984a5531cfbfe08fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=B6ren=20Steiger?= Date: Sat, 16 Mar 2019 13:46:45 +0100 Subject: [PATCH 043/133] Add missing word (#788) --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f2e639a96..1d53f1c3f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -186,7 +186,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | -* For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) +* For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) ### Deposit contract From 65162e037110be66dd793a37ecbc0a285e36e8b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=B6ren=20Steiger?= Date: Sat, 16 Mar 2019 21:56:10 +0100 Subject: [PATCH 044/133] Update 0_beacon-chain.md (#791) --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1d53f1c3f..5ab3da052 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1042,7 +1042,7 @@ def get_attestation_participants(state: BeaconState, attestation_data: AttestationData, bitfield: bytes) -> List[ValidatorIndex]: """ - Return the participant indices at for the ``attestation_data`` and ``bitfield``. + Return the participant indices corresponding to ``attestation_data`` and ``bitfield``. """ # Find the committee in the list with the desired shard crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) From 506fdf40424e05876dfbd32bc2cece4895330185 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 17 Mar 2019 06:33:19 -0500 Subject: [PATCH 045/133] Added FixedSizeList wrappers (#777) * Added FixedSizeList wrappers Requires corresponding changes to the spec pythonizer. 
* FixedSizeList -> Vector --- specs/core/0_beacon-chain.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 5ab3da052..454cf105b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1528,7 +1528,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], validator_registry_update_epoch=GENESIS_EPOCH, # Randomness and committees - latest_randao_mixes=[ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)], + latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), previous_shuffling_start_shard=GENESIS_START_SHARD, current_shuffling_start_shard=GENESIS_START_SHARD, previous_shuffling_epoch=GENESIS_EPOCH, @@ -1548,11 +1548,11 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], finalized_root=ZERO_HASH, # Recent state - latest_crosslinks=[Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)], - latest_block_roots=[ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)], - latest_state_roots=[ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)], - latest_active_index_roots=[ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)], - latest_slashed_balances=[0 for _ in range(LATEST_SLASHED_EXIT_LENGTH)], + latest_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + latest_block_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), + latest_state_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), + latest_active_index_roots=Vector([ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)]), + latest_slashed_balances=Vector([0 for _ in range(LATEST_SLASHED_EXIT_LENGTH)]), latest_block_header=get_temporary_block_header(get_empty_block()), historical_roots=[], From 390ece7fbeb09e285f3bd79d89fcf6d9a5f75dc4 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 17 Mar 2019 11:33:29 +0000 Subject: [PATCH 046/133] Rename "vector" to "tuple" in SSZ spec (#794) To be done in combination with #777. Also: * Define "fixed-size" and "variable-size" more rigorously * Use `"` vs `'` consistently * Add missing `"` --- specs/simple-serialize.md | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 862d13edf..378a1a7cb 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -12,7 +12,7 @@ This is a **work in progress** describing typing, serialization and Merkleizatio - [Serialization](#serialization) - [`"uintN"`](#uintn) - [`"bool"`](#bool) - - [Tuples, containers, lists](#tuples-containers-lists) + - [Vectors, containers, lists](#vectors-containers-lists) - [Deserialization](#deserialization) - [Merkleization](#merkleization) - [Self-signed containers](#self-signed-containers) @@ -34,12 +34,14 @@ This is a **work in progress** describing typing, serialization and Merkleizatio ### Composite types * **container**: ordered heterogenous collection of values - * key-pair curly bracket notation `{}`, e.g. `{'foo': "uint64", 'bar': "bool"}` -* **tuple**: ordered fixed-length homogeneous collection of values + * key-pair curly bracket notation `{}`, e.g. `{"foo": "uint64", "bar": "bool"}` +* **vector**: ordered fixed-length homogeneous collection of values * angle bracket notation `[type, N]`, e.g. 
`["uint64", N]` * **list**: ordered variable-length homogenous collection of values * angle bracket notation `[type]`, e.g. `["uint64"]` +We recursively define "variable-size" types to be lists and all types that contains a variable-size type. All other types are said to be "fixed-size". + ### Aliases For convenience we alias: @@ -54,34 +56,34 @@ We recursively define the `serialize` function which consumes an object `value` *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, etc.) objects implicitly carry their type. -### `uintN` +### `"uintN"` ```python assert N in [8, 16, 32, 64, 128, 256] -return value.to_bytes(N // 8, 'little') +return value.to_bytes(N // 8, "little") ``` -### `bool` +### `"bool"` ```python assert value in (True, False) -return b'\x01' if value is True else b'\x00' +return b"\x01" if value is True else b"\x00" ``` -### Tuples, containers, lists +### Vectors, containers, lists -If `value` is fixed-length (i.e. does not embed a list): +If `value` is fixed-size: ```python -return ''.join([serialize(element) for element in value]) +return "".join([serialize(element) for element in value]) ``` -If `value` is variable-length (i.e. embeds a list): +If `value` is variable-size: ```python -serialized_bytes = ''.join([serialize(element) for element in value]) +serialized_bytes = "".join([serialize(element) for element in value]) assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX) -serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little') +serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, "little") return serialized_length + serialized_bytes ``` @@ -99,9 +101,9 @@ We first define helper functions: We now define Merkleization `hash_tree_root(value)` of an object `value` recursively: -* `merkleize(pack(value))` if `value` is a basic object or a tuple of basic objects +* `merkleize(pack(value))` if `value` is a basic object or a vector of basic objects * `mix_in_length(merkleize(pack(value)), len(value))` if `value` is a list of basic objects -* `merkleize([hash_tree_root(element) for element in value])` if `value` is a tuple of composite objects or a container +* `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container * `mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value))` if `value` is a list of composite objects ## Self-signed containers From 6b82f5e9995acc7ed9f2e24bb8edf213767c60e7 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 17 Mar 2019 19:33:42 +0800 Subject: [PATCH 047/133] Set `GENESIS_FORK_VERSION` to a `bytes4` constant (#792) * Set `GENESIS_FORK_VERSION` to a `bytes4` constant * Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 454cf105b..a631bf2fc 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -208,7 +208,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| Name | Value | | - | - | -| `GENESIS_FORK_VERSION` | `0` | +| `GENESIS_FORK_VERSION` | `int_to_bytes4(0)` | | `GENESIS_SLOT` | `2**32` | | `GENESIS_EPOCH` | `slot_to_epoch(GENESIS_SLOT)` | | `GENESIS_START_SHARD` | `0` | @@ -1517,8 +1517,8 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], slot=GENESIS_SLOT, genesis_time=genesis_time, fork=Fork( - previous_version=int_to_bytes4(GENESIS_FORK_VERSION), - current_version=int_to_bytes4(GENESIS_FORK_VERSION), + previous_version=GENESIS_FORK_VERSION, + current_version=GENESIS_FORK_VERSION, epoch=GENESIS_EPOCH, ), From 91a0c1ba5f6c4439345b4476c8a1637140b48f28 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 17 Mar 2019 06:44:19 -0500 Subject: [PATCH 048/133] Persistent committee size per slot reduced to max 128 (#734) * Persistent committee size per slot target 128 max 256 Cuts down the cost of verifying a shard chain and aggregating signatures for a shard chain, and also makes the shard chain signatures more usable by light clients for verification as they would only need to keep track of a max 256-sized committee. --- specs/core/1_shard-data-chains.md | 85 +++++++++++++++++++++---------- 1 file changed, 58 insertions(+), 27 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index b2f567ed8..c76f9ba08 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -17,41 +17,51 @@ At the current stage, Phase 1, while fundamentally feature-complete, is still su - [Time parameters](#time-parameters) - [Max operations per block](#max-operations-per-block) - [Signature domains](#signature-domains) - - [Shard chains and crosslink data](#shard-chains-and-crosslink-data) - - [Helper functions](#helper-functions) +- [Shard chains and crosslink data](#shard-chains-and-crosslink-data) + - [Helper functions](#helper-functions) - [`get_split_offset`](#get_split_offset) - [`get_shuffled_committee`](#get_shuffled_committee) - [`get_persistent_committee`](#get_persistent_committee) - [`get_shard_proposer_index`](#get_shard_proposer_index) - - [Data Structures](#data-structures) + - [Data Structures](#data-structures) - [Shard chain blocks](#shard-chain-blocks) - - [Shard block processing](#shard-block-processing) + - [Shard block processing](#shard-block-processing) - [Verifying shard block data](#verifying-shard-block-data) - [Verifying a crosslink](#verifying-a-crosslink) - [Shard block fork choice rule](#shard-block-fork-choice-rule) - - [Updates to the beacon chain](#updates-to-the-beacon-chain) - - [Data structures](#data-structures) +- [Updates to the beacon chain](#updates-to-the-beacon-chain) + - [Data structures](#data-structures) - [`Validator`](#validator) - [`BeaconBlockBody`](#beaconblockbody) + - [`BeaconState`](#beaconstate) - [`BranchChallenge`](#branchchallenge) - [`BranchResponse`](#branchresponse) - [`BranchChallengeRecord`](#branchchallengerecord) + - [`InteractiveCustodyChallengeRecord`](#interactivecustodychallengerecord) + - [`InteractiveCustodyChallengeInitiation`](#interactivecustodychallengeinitiation) + - [`InteractiveCustodyChallengeResponse`](#interactivecustodychallengeresponse) + - [`InteractiveCustodyChallengeContinuation`](#interactivecustodychallengecontinuation) - [`SubkeyReveal`](#subkeyreveal) - [Helpers](#helpers) - - [`get_attestation_data_merkle_depth`](#get_attestation_data_merkle_depth) + - [`get_branch_challenge_record_by_id`](#get_branch_challenge_record_by_id) + - 
[`get_custody_challenge_record_by_id`](#get_custody_challenge_record_by_id) + - [`get_attestation_merkle_depth`](#get_attestation_merkle_depth) - [`epoch_to_custody_period`](#epoch_to_custody_period) - [`slot_to_custody_period`](#slot_to_custody_period) - [`get_current_custody_period`](#get_current_custody_period) - [`verify_custody_subkey_reveal`](#verify_custody_subkey_reveal) - - [`prepare_validator_for_withdrawal`](#prepare_validator_for_withdrawal) + - [`verify_signed_challenge_message`](#verify_signed_challenge_message) - [`penalize_validator`](#penalize_validator) - - [Per-slot processing](#per-slot-processing) + - [Per-slot processing](#per-slot-processing) - [Operations](#operations) - [Branch challenges](#branch-challenges) - [Branch responses](#branch-responses) - [Subkey reveals](#subkey-reveals) - - [Per-epoch processing](#per-epoch-processing) - - [One-time phase 1 initiation transition](#one-time-phase-1-initiation-transition) + - [Interactive custody challenge initiations](#interactive-custody-challenge-initiations) + - [Interactive custody challenge responses](#interactive-custody-challenge-responses) + - [Interactive custody challenge continuations](#interactive-custody-challenge-continuations) + - [Per-epoch processing](#per-epoch-processing) + - [One-time phase 1 initiation transition](#one-time-phase-1-initiation-transition) @@ -128,16 +138,27 @@ def get_split_offset(list_size: int, chunks: int, index: int) -> int: ```python def get_shuffled_committee(state: BeaconState, shard: Shard, - committee_start_epoch: Epoch) -> List[ValidatorIndex]: + committee_start_epoch: Epoch, + index: int, + committee_count: int) -> List[ValidatorIndex]: """ Return shuffled committee. """ - validator_indices = get_active_validator_indices(state.validators, committee_start_epoch) + active_validator_indices = get_active_validator_indices(state.validator_registry, committee_start_epoch) + length = len(active_validator_indices) seed = generate_seed(state, committee_start_epoch) - start_offset = get_split_offset(len(validator_indices), SHARD_COUNT, shard) - end_offset = get_split_offset(len(validator_indices), SHARD_COUNT, shard + 1) + start_offset = get_split_offset( + length, + SHARD_COUNT * committee_count, + shard * committee_count + index, + ) + end_offset = get_split_offset( + length, + SHARD_COUNT * committee_count, + shard * committee_count + index + 1, + ) return [ - validator_indices[get_permuted_index(i, len(validator_indices), seed)] + active_validator_indices[get_permuted_index(i, length, seed)] for i in range(start_offset, end_offset) ] ``` @@ -147,15 +168,24 @@ def get_shuffled_committee(state: BeaconState, ```python def get_persistent_committee(state: BeaconState, shard: Shard, - epoch: Epoch) -> List[ValidatorIndex]: + slot: Slot) -> List[ValidatorIndex]: """ - Return the persistent committee for the given ``shard`` at the given ``epoch``. + Return the persistent committee for the given ``shard`` at the given ``slot``. 
""" - earlier_committee_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 - earlier_committee = get_shuffled_committee(state, shard, earlier_committee_start_epoch) + + earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 + later_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD - later_committee_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD - later_committee = get_shuffled_committee(state, shard, later_committee_start_epoch) + committee_count = max( + len(get_active_validator_indices(state.validator_registry, earlier_start_epoch)) // + (SHARD_COUNT * TARGET_COMMITTEE_SIZE), + len(get_active_validator_indices(state.validator_registry, later_start_epoch)) // + (SHARD_COUNT * TARGET_COMMITTEE_SIZE), + ) + 1 + + index = slot % committee_count + earlier_committee = get_shuffled_committee(state, shard, earlier_start_epoch, index, committee_count) + later_committee = get_shuffled_committee(state, shard, later_start_epoch, index, committee_count) def get_switchover_epoch(index): return ( @@ -170,6 +200,7 @@ def get_persistent_committee(state: BeaconState, [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] ))) ``` + #### `get_shard_proposer_index` ```python @@ -181,14 +212,14 @@ def get_shard_proposer_index(state: BeaconState, int_to_bytes8(shard) + int_to_bytes8(slot) ) - persistent_committee = get_persistent_committee(state, shard, slot_to_epoch(slot)) + persistent_committee = get_persistent_committee(state, shard, slot) # Default proposer index = bytes_to_int(seed[0:8]) % len(persistent_committee) # If default proposer exits, try the other proposers in order; if all are exited # return None (ie. no block can be proposed) validators_to_try = persistent_committee[index:] + persistent_committee[:index] for index in validators_to_try: - if is_active_validator(state.validators[index], get_current_epoch(state)): + if is_active_validator(state.validator_registry[index], get_current_epoch(state)): return index return None ``` @@ -233,14 +264,14 @@ To validate a block header on shard `shard_block.shard_id`, compute as follows: * Verify that `shard_block.beacon_chain_ref` is the hash of a block in the (canonical) beacon chain with slot less than or equal to `slot`. * Verify that `shard_block.beacon_chain_ref` is equal to or a descendant of the `shard_block.beacon_chain_ref` specified in the `ShardBlock` pointed to by `shard_block.parent_root`. * Let `state` be the state of the beacon chain block referred to by `shard_block.beacon_chain_ref`. -* Let `persistent_committee = get_persistent_committee(state, shard_block.shard_id, slot_to_epoch(shard_block.slot))`. +* Let `persistent_committee = get_persistent_committee(state, shard_block.shard_id, shard_block.slot)`. 
* Assert `verify_bitfield(shard_block.participation_bitfield, len(persistent_committee))` -* For every `i in range(len(persistent_committee))` where `is_active_validator(state.validators[persistent_committee[i]], get_current_epoch(state))` returns `False`, verify that `get_bitfield_bit(shard_block.participation_bitfield, i) == 0` +* For every `i in range(len(persistent_committee))` where `is_active_validator(state.validator_registry[persistent_committee[i]], get_current_epoch(state))` returns `False`, verify that `get_bitfield_bit(shard_block.participation_bitfield, i) == 0` * Let `proposer_index = get_shard_proposer_index(state, shard_block.shard_id, shard_block.slot)`. * Verify that `proposer_index` is not `None`. * Let `msg` be the `shard_block` but with `shard_block.signature` set to `[0, 0]`. * Verify that `bls_verify(pubkey=validators[proposer_index].pubkey, message_hash=hash(msg), signature=shard_block.signature, domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_PROPOSER))` passes. -* Let `group_public_key = bls_aggregate_pubkeys([state.validators[index].pubkey for i, index in enumerate(persistent_committee) if get_bitfield_bit(shard_block.participation_bitfield, i) is True])`. +* Let `group_public_key = bls_aggregate_pubkeys([state.validator_registry[index].pubkey for i, index in enumerate(persistent_committee) if get_bitfield_bit(shard_block.participation_bitfield, i) is True])`. * Verify that `bls_verify(pubkey=group_public_key, message_hash=shard_block.parent_root, sig=shard_block.aggregate_signature, domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_ATTESTER))` passes. ### Verifying shard block data From d25c18b320ac9acac0d825f2d1977e313613c1d5 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 17 Mar 2019 11:48:47 +0000 Subject: [PATCH 049/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1bbab78dc..03a9df2a1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2247,7 +2247,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: assert block.previous_block_root == hash_tree_root(state.latest_block_header) # Save current block as the new latest block state.latest_block_header = get_temporary_block_header(block) - # Verify proposer + # Verify proposer is not slashed proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] assert not proposer.slashed # Verify proposer signature From fba333c79185f8eaa84cad816f82dc124c581988 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Sun, 17 Mar 2019 21:19:12 -0700 Subject: [PATCH 050/133] Updates from review --- specs/networking/rpc-interface.md | 35 ++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index d07e728c9..f505a4663 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -24,7 +24,7 @@ Message body schemas are notated like this: ) ``` -SSZ serialization is field-order dependent. Therefore, fields MUST be encoded and decoded according to the order described in this document. The encoded values of each field are concatenated to form the final encoded message body. Embedded structs are serialized as Containers unless otherwise noted. +Embedded types are serialized as SSZ Containers unless otherwise noted. 
All referenced data structures can be found in the [0-beacon-chain](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#data-structures) specification. @@ -34,7 +34,7 @@ A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp ## RPC-Over-`libp2p` -To facilitate RPC-over-`libp2p`, a single protocol path is used: `/eth/serenity/rpc/1.0.0`. Remote method calls are wrapped in a "request" structure: +To facilitate RPC-over-`libp2p`, a single protocol path is used: `/eth/serenity/beacon/rpc/1.0.0`. Remote method calls are wrapped in a "request" structure: ``` ( @@ -49,6 +49,7 @@ and their corresponding responses are wrapped in a "response" structure: ``` ( id: uint64 + is_error: boolean result: Response ) ``` @@ -58,7 +59,8 @@ If an error occurs, a variant of the response structure is returned: ``` ( id: uint64 - error: ( + is_error: boolean + result: ( code: uint16 data: bytes ) @@ -69,11 +71,13 @@ The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](http 1. The `id` member is REQUIRED. 2. The `id` member in the response MUST be the same as the value of the `id` in the request. -3. The `method_id` member is REQUIRED. -4. The `result` member is required on success, and MUST NOT exist if there was an error. -5. The `error` member is REQUIRED on errors, and MUST NOT exist if there wasn't an error. +3. The `id` member MUST be unique within the context of a single connection. Monotonically increasing `id`s are RECOMMENDED. +4. The `method_id` member is REQUIRED. +5. The `result` member is required on success, and MUST NOT exist if there was an error. +6. The `error` member is REQUIRED on errors, and MUST NOT exist if there wasn't an error. +7. `is_error` MUST be `true` on errors, or `false` otherwise. -Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. +Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. Note that this implies that responses MAY arrive in a different order than requests. The "method ID" fields in the below messages refer to the `method` field in the request structure above. @@ -136,7 +140,7 @@ Root B ^ +---+ ``` -Once the handshake completes, the client with the higher `latest_finalized_epoch` or `best_slot` (if the clients have equal `latest_finalized_epoch`s) SHOULD send beacon block roots to its counterparty via `beacon_block_roots` (i.e., RPC method `10`). +Once the handshake completes, the client with the higher `latest_finalized_epoch` or `best_slot` (if the clients have equal `latest_finalized_epoch`s) SHOULD request beacon block roots from its counterparty via `beacon_block_roots` (i.e., RPC method `10`). ### Goodbye @@ -154,13 +158,20 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MUST be - `1`: Client shut down. - `2`: Irrelevant network. -- `3`: Irrelevant shard. +- `3`: Too many peers. +- `4`: Fault/error. -### Provide Beacon Block Roots +### Request Beacon Block Roots **Method ID:** `10` -**Body:** +**Request Body** + +``` +() +``` + +**Response Body:** ``` # BlockRootSlot @@ -174,7 +185,7 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MUST be ) ``` -Send a list of block roots and slots to the peer. +Send a list of block roots and slots to the requesting peer. 
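Editorial sketch, not part of the patch: the wrapper structures and the method-10 exchange above, expressed with the minimal `SSZType`/`serialize` helpers introduced later in this series (`build/utils/minimal_ssz.py`), assuming they are importable. `HashTreeRoot` is rendered as `bytes32`, and the method-specific `body`/`result` fields are shown here as raw SSZ bytes for simplicity, whereas the schemas above embed the typed body directly:

```python
from build.utils.minimal_ssz import SSZType, serialize

BlockRootSlot = SSZType({
    'block_root': 'bytes32',
    'slot': 'uint64',
})

BeaconBlockRootsResponse = SSZType({
    'roots': [BlockRootSlot],
})

Request = SSZType({
    'id': 'uint64',
    'method_id': 'uint16',
    'body': 'bytes',    # SSZ-encoded method-specific request body
})

Response = SSZType({
    'id': 'uint64',
    'is_error': 'bool',
    'result': 'bytes',  # SSZ-encoded method-specific response body
})

# Requesting peer asks for block roots: method_id 10 with an empty body ...
req = Request(id=1, method_id=10, body=b'')

# ... and the counterparty answers with its known (root, slot) pairs.
resp = Response(id=1, is_error=False, result=serialize(
    BeaconBlockRootsResponse(roots=[
        BlockRootSlot(block_root=b'\x11' * 32, slot=2**32),  # slot value is GENESIS_SLOT
    ])
))
```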
### Beacon Block Headers From 828dd455ba158aaf187c2805a9561d90bbd0b20e Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 18 Mar 2019 10:18:57 -0600 Subject: [PATCH 051/133] add basic dependencies and build script for phase0 testing --- .gitignore | 5 + Makefile | 15 +++ requirements.txt | 6 + scripts/__init__.py | 0 scripts/phase0/__init__.py | 0 scripts/phase0/bls_stub.py | 12 ++ scripts/phase0/build_spec.py | 43 +++++++ scripts/phase0/function_puller.py | 46 +++++++ scripts/phase0/minimal_ssz.py | 190 +++++++++++++++++++++++++++++ scripts/phase0/monkey_patches.py | 29 +++++ scripts/phase0/state_transition.py | 84 +++++++++++++ tests/phase0/conftest.py | 6 + 12 files changed, 436 insertions(+) create mode 100644 .gitignore create mode 100644 Makefile create mode 100644 requirements.txt create mode 100644 scripts/__init__.py create mode 100644 scripts/phase0/__init__.py create mode 100644 scripts/phase0/bls_stub.py create mode 100644 scripts/phase0/build_spec.py create mode 100644 scripts/phase0/function_puller.py create mode 100644 scripts/phase0/minimal_ssz.py create mode 100644 scripts/phase0/monkey_patches.py create mode 100644 scripts/phase0/state_transition.py create mode 100644 tests/phase0/conftest.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..36c14f343 --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +*.pyc +/__pycache__ +/venv + +/build \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..724a0392e --- /dev/null +++ b/Makefile @@ -0,0 +1,15 @@ +SPEC_DIR = ./specs +SCRIPT_DIR = ./scripts +BUILD_DIR = ./build + +.PHONY: clean all + + +clean: + rm -rf $(BUILD_DIR) + + +$(BUILD_DIR)/phase0: + mkdir -p $@ + python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $(SCRIPT_DIR)/phase0/minimal_ssz.py \ + $(SCRIPT_DIR)/phase0/bls_stub.py $(SCRIPT_DIR)/phase0/state_transition.py $(SCRIPT_DIR)/phase0/monkey_patches.py > $@/spec.py diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..9145e951e --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +eth-utils>=1.3.0,<2 +eth-typing>=2.1.0,<3.0.0 +oyaml==0.7 +pycryptodome==3.7.3 +py_ecc>=1.6.0 +pytest>=3.6,<3.7 diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/phase0/__init__.py b/scripts/phase0/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/phase0/bls_stub.py b/scripts/phase0/bls_stub.py new file mode 100644 index 000000000..7e3a6a308 --- /dev/null +++ b/scripts/phase0/bls_stub.py @@ -0,0 +1,12 @@ + + +def bls_verify(pubkey, message_hash, signature, domain): + return True + + +def bls_verify_multiple(pubkeys, message_hashes, signature, domain): + return True + + +def bls_aggregate_pubkeys(pubkeys): + return b'\x42'*96 diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py new file mode 100644 index 000000000..c4f8ab38c --- /dev/null +++ b/scripts/phase0/build_spec.py @@ -0,0 +1,43 @@ +import sys +import function_puller + +code_lines = [] + +for i in (1, 2, 3, 4, 8, 32, 48, 96): + code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i)) +code_lines.append("SLOTS_PER_EPOCH = 64") # stub, will get overwritten by real var +code_lines.append("def slot_to_epoch(x): return x // SLOTS_PER_EPOCH") + +code_lines.append(""" +from typing import ( + Any, + Callable, + List, + NewType, + Tuple, +) + + +Slot = NewType('Slot', int) # uint64 +Epoch = NewType('Epoch', int) # 
uint64 +Shard = NewType('Shard', int) # uint64 +ValidatorIndex = NewType('ValidatorIndex', int) # uint64 +Gwei = NewType('Gwei', int) # uint64 +Bytes32 = NewType('Bytes32', bytes) # bytes32 +BLSPubkey = NewType('BLSPubkey', bytes) # bytes48 +BLSSignature = NewType('BLSSignature', bytes) # bytes96 +Any = None +Store = None +""") + + +code_lines += function_puller.get_lines(sys.argv[1]) + +print(open(sys.argv[2]).read()) +print(open(sys.argv[3]).read()) + +for line in code_lines: + print(line) + +print(open(sys.argv[4]).read()) +print(open(sys.argv[5]).read()) diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py new file mode 100644 index 000000000..8d1c1a0cc --- /dev/null +++ b/scripts/phase0/function_puller.py @@ -0,0 +1,46 @@ +import sys + + +def get_lines(file_name): + code_lines = [] + pulling_from = None + current_name = None + processing_typedef = False + for linenum, line in enumerate(open(sys.argv[1]).readlines()): + line = line.rstrip() + if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`': + current_name = line[line[:-1].rfind('`')+1: -1] + if line[:9] == '```python': + assert pulling_from is None + pulling_from = linenum + 1 + elif line[:3] == '```': + if pulling_from is None: + pulling_from = linenum + else: + if processing_typedef: + assert code_lines[-1] == '}' + code_lines[-1] = '})' + pulling_from = None + processing_typedef = False + else: + if pulling_from == linenum and line == '{': + code_lines.append('%s = SSZType({' % current_name) + processing_typedef = True + elif pulling_from is not None: + code_lines.append(line) + elif pulling_from is None and len(line) > 0 and line[0] == '|': + row = line[1:].split('|') + if len(row) >= 2: + for i in range(2): + row[i] = row[i].strip().strip('`') + if '`' in row[i]: + row[i] = row[i][:row[i].find('`')] + eligible = True + if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_': + eligible = False + for c in row[0]: + if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789': + eligible = False + if eligible: + code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123567890123456789012357890'))) + return code_lines diff --git a/scripts/phase0/minimal_ssz.py b/scripts/phase0/minimal_ssz.py new file mode 100644 index 000000000..5caaf8f09 --- /dev/null +++ b/scripts/phase0/minimal_ssz.py @@ -0,0 +1,190 @@ +from utils.hash import hash + + +BYTES_PER_CHUNK = 32 +BYTES_PER_LENGTH_PREFIX = 4 +ZERO_CHUNK = b'\x00' * BYTES_PER_CHUNK + +def SSZType(fields): + class SSZObject(): + def __init__(self, **kwargs): + for f in fields: + if f not in kwargs: + raise Exception("Missing constructor argument: %s" % f) + setattr(self, f, kwargs[f]) + + def __eq__(self, other): + return ( + self.fields == other.fields and + self.serialize() == other.serialize() + ) + + def __hash__(self): + return int.from_bytes(self.hash_tree_root(), byteorder="little") + + def __str__(self): + output = [] + for field in self.fields: + output.append(f'{field}: {getattr(self, field)}') + return "\n".join(output) + + def serialize(self): + return serialize_value(self, self.__class__) + + def hash_tree_root(self): + return hash_tree_root(self, self.__class__) + + SSZObject.fields = fields + return SSZObject + +class Vector(list): + def __init__(self, x): + list.__init__(self, x) + self.length = len(x) + + def append(*args): + raise Exception("Cannot change the length of a vector") + + remove = clear = extend = pop = insert = append + +def is_basic(typ): + return isinstance(typ, str) and (typ[:4] in 
('uint', 'bool') or typ == 'byte') + +def is_constant_sized(typ): + if is_basic(typ): + return True + elif isinstance(typ, list) and len(typ) == 1: + return is_constant_sized(typ[0]) + elif isinstance(typ, list) and len(typ) == 2: + return False + elif isinstance(typ, str) and typ[:5] == 'bytes': + return len(typ) > 5 + elif hasattr(typ, 'fields'): + for subtype in typ.fields.values(): + if not is_constant_sized(subtype): + return False + return True + else: + raise Exception("Type not recognized") + +def coerce_to_bytes(x): + if isinstance(x, str): + o = x.encode('utf-8') + assert len(o) == len(x) + return o + elif isinstance(x, bytes): + return x + else: + raise Exception("Expecting bytes") + +def serialize_value(value, typ=None): + if typ is None: + typ = infer_type(value) + if isinstance(typ, str) and typ[:4] == 'uint': + length = int(typ[4:]) + assert length in (8, 16, 32, 64, 128, 256) + return value.to_bytes(length // 8, 'little') + elif typ == 'bool': + assert value in (True, False) + return b'\x01' if value is True else b'\x00' + elif (isinstance(typ, list) and len(typ) == 1) or typ == 'bytes': + serialized_bytes = coerce_to_bytes(value) if typ == 'bytes' else b''.join([serialize_value(element, typ[0]) for element in value]) + assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX) + serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little') + return serialized_length + serialized_bytes + elif isinstance(typ, list) and len(typ) == 2: + assert len(value) == typ[1] + return b''.join([serialize_value(element, typ[0]) for element in value]) + elif isinstance(typ, str) and len(typ) > 5 and typ[:5] == 'bytes': + assert len(value) == int(typ[5:]), (value, int(typ[5:])) + return coerce_to_bytes(value) + elif hasattr(typ, 'fields'): + serialized_bytes = b''.join([serialize_value(getattr(value, field), subtype) for field, subtype in typ.fields.items()]) + if is_constant_sized(typ): + return serialized_bytes + else: + assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX) + serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little') + return serialized_length + serialized_bytes + else: + print(value, typ) + raise Exception("Type not recognized") + +def chunkify(bytez): + bytez += b'\x00' * (-len(bytez) % BYTES_PER_CHUNK) + return [bytez[i:i+32] for i in range(0, len(bytez), 32)] + +def pack(values, subtype): + return chunkify(b''.join([serialize_value(value, subtype) for value in values])) + +def is_power_of_two(x): + return x > 0 and x & (x-1) == 0 + +def merkleize(chunks): + tree = chunks[::] + while not is_power_of_two(len(tree)): + tree.append(ZERO_CHUNK) + tree = [ZERO_CHUNK] * len(tree) + tree + for i in range(len(tree)//2-1, 0, -1): + tree[i] = hash(tree[i*2] + tree[i*2+1]) + return tree[1] + +def mix_in_length(root, length): + return hash(root + length.to_bytes(32, 'little')) + +def infer_type(value): + if hasattr(value.__class__, 'fields'): + return value.__class__ + elif isinstance(value, Vector): + return [infer_type(value[0]) if len(value) > 0 else 'uint64', len(value)] + elif isinstance(value, list): + return [infer_type(value[0])] if len(value) > 0 else ['uint64'] + elif isinstance(value, (bytes, str)): + return 'bytes' + elif isinstance(value, int): + return 'uint64' + else: + raise Exception("Failed to infer type") + +def hash_tree_root(value, typ=None): + if typ is None: + typ = infer_type(value) + if is_basic(typ): + return merkleize(pack([value], typ)) + elif isinstance(typ, list) and len(typ) == 1 and 
is_basic(typ[0]): + return mix_in_length(merkleize(pack(value, typ[0])), len(value)) + elif isinstance(typ, list) and len(typ) == 1 and not is_basic(typ[0]): + return mix_in_length(merkleize([hash_tree_root(element, typ[0]) for element in value]), len(value)) + elif isinstance(typ, list) and len(typ) == 2 and is_basic(typ[0]): + assert len(value) == typ[1] + return merkleize(pack(value, typ[0])) + elif typ == 'bytes': + return mix_in_length(merkleize(chunkify(coerce_to_bytes(value))), len(value)) + elif isinstance(typ, str) and typ[:5] == 'bytes' and len(typ) > 5: + assert len(value) == int(typ[5:]) + return merkleize(chunkify(coerce_to_bytes(value))) + elif isinstance(typ, list) and len(typ) == 2 and not is_basic(typ[0]): + return merkleize([hash_tree_root(element, typ[0]) for element in value]) + elif hasattr(typ, 'fields'): + return merkleize([hash_tree_root(getattr(value, field), subtype) for field, subtype in typ.fields.items()]) + else: + raise Exception("Type not recognized") + +def truncate(container): + field_keys = list(container.fields.keys()) + truncated_fields = { + key: container.fields[key] + for key in field_keys[:-1] + } + truncated_class = SSZType(truncated_fields) + kwargs = { + field: getattr(container, field) + for field in field_keys[:-1] + } + return truncated_class(**kwargs) + +def signed_root(container): + return hash_tree_root(truncate(container)) + +def serialize(ssz_object): + return getattr(ssz_object, 'serialize')() diff --git a/scripts/phase0/monkey_patches.py b/scripts/phase0/monkey_patches.py new file mode 100644 index 000000000..8a35b8f27 --- /dev/null +++ b/scripts/phase0/monkey_patches.py @@ -0,0 +1,29 @@ +# Monkey patch validator shuffling cache +_get_shuffling = get_shuffling +shuffling_cache = {} +def get_shuffling(seed: Bytes32, + validators: List[Validator], + epoch: Epoch) -> List[List[ValidatorIndex]]: + + param_hash = (seed, hash_tree_root(validators, [Validator]), epoch) + + if param_hash in shuffling_cache: + # print("Cache hit, epoch={0}".format(epoch)) + return shuffling_cache[param_hash] + else: + # print("Cache miss, epoch={0}".format(epoch)) + ret = _get_shuffling(seed, validators, epoch) + shuffling_cache[param_hash] = ret + return ret + + +# Monkey patch hash cache +_hash = hash +hash_cache = {} +def hash(x): + if x in hash_cache: + return hash_cache[x] + else: + ret = _hash(x) + hash_cache[x] = ret + return ret diff --git a/scripts/phase0/state_transition.py b/scripts/phase0/state_transition.py new file mode 100644 index 000000000..f78119cf2 --- /dev/null +++ b/scripts/phase0/state_transition.py @@ -0,0 +1,84 @@ + + +def process_transaction_type(state: BeaconState, + transactions: List[Any], + max_transactions: int, + tx_fn: Callable[[BeaconState, Any], None]) -> None: + assert len(transactions) <= max_transactions + for transaction in transactions: + tx_fn(state, transaction) + + +def process_transactions(state: BeaconState, block: BeaconBlock) -> None: + process_transaction_type( + state, + block.body.proposer_slashings, + MAX_PROPOSER_SLASHINGS, + process_proposer_slashing, + ) + process_transaction_type( + state, + block.body.attester_slashings, + MAX_ATTESTER_SLASHINGS, + process_attester_slashing, + ) + process_transaction_type( + state, + block.body.attestations, + MAX_ATTESTATIONS, + process_attestation, + ) + process_transaction_type( + state, + block.body.deposits, + MAX_DEPOSITS, + process_deposit, + ) + process_transaction_type( + state, + block.body.voluntary_exits, + MAX_VOLUNTARY_EXITS, + process_voluntary_exit, + ) + 
assert len(block.body.transfers) == len(set(block.body.transfers)) + process_transaction_type( + state, + block.body.transfers, + MAX_TRANSFERS, + process_transfer, + ) + + +def process_block(state: BeaconState, + block: BeaconBlock, + verify_state_root: bool=False) -> None: + process_block_header(state, block) + process_randao(state, block) + process_eth1_data(state, block) + process_transactions(state, block) + if verify_state_root: + verify_block_state_root(state, block) + + +def process_epoch_transition(state: BeaconState) -> None: + update_justification_and_finalization(state) + process_crosslinks(state) + maybe_reset_eth1_period(state) + apply_rewards(state) + process_ejections(state) + update_registry_and_shuffling_data(state) + process_slashings(state) + process_exit_queue(state) + finish_epoch_update(state) + + +def state_transition(state: BeaconState, + block: BeaconBlock, + verify_state_root: bool=False) -> BeaconState: + while state.slot < block.slot: + cache_state(state) + if (state.slot + 1) % SLOTS_PER_EPOCH == 0: + process_epoch_transition(state) + advance_slot(state) + if block.slot == state.slot: + process_block(state, block) diff --git a/tests/phase0/conftest.py b/tests/phase0/conftest.py new file mode 100644 index 000000000..d3ebabaa2 --- /dev/null +++ b/tests/phase0/conftest.py @@ -0,0 +1,6 @@ +import pytest +from build.phase0 import spec + + +# @pytest.fixture(autouse=True) +# def build_clean(): \ No newline at end of file From 839590b5f41f4b07a755ef00f7a96563858b3e7a Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 18 Mar 2019 12:51:52 -0600 Subject: [PATCH 052/133] initial pytests passing --- .gitignore | 3 +- Makefile | 8 +- build/__init__.py | 0 build/phase0/__init__.py | 0 build/phase0/spec.py | 1620 +++++++++++++++++ {scripts => build}/phase0/state_transition.py | 72 +- build/utils/__init__.py | 0 {scripts/phase0 => build/utils}/bls_stub.py | 0 build/utils/hash_function.py | 6 + build/utils/merkle_minimal.py | 28 + .../phase0 => build/utils}/minimal_ssz.py | 2 +- .../phase0 => build/utils}/monkey_patches.py | 0 scripts/phase0/build_spec.py | 64 +- tests/__init__.py | 0 tests/conftest.py | 0 tests/phase0/conftest.py | 132 +- tests/phase0/test_sanity.py | 632 +++++++ 17 files changed, 2514 insertions(+), 53 deletions(-) create mode 100644 build/__init__.py create mode 100644 build/phase0/__init__.py create mode 100644 build/phase0/spec.py rename {scripts => build}/phase0/state_transition.py (56%) create mode 100644 build/utils/__init__.py rename {scripts/phase0 => build/utils}/bls_stub.py (100%) create mode 100644 build/utils/hash_function.py create mode 100644 build/utils/merkle_minimal.py rename {scripts/phase0 => build/utils}/minimal_ssz.py (99%) rename {scripts/phase0 => build/utils}/monkey_patches.py (100%) create mode 100644 tests/__init__.py create mode 100644 tests/conftest.py create mode 100644 tests/phase0/test_sanity.py diff --git a/.gitignore b/.gitignore index 36c14f343..5e19cd2a5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,4 @@ *.pyc /__pycache__ /venv - -/build \ No newline at end of file +/.pytest_cache diff --git a/Makefile b/Makefile index 724a0392e..745f8f901 100644 --- a/Makefile +++ b/Makefile @@ -5,11 +5,7 @@ BUILD_DIR = ./build .PHONY: clean all -clean: - rm -rf $(BUILD_DIR) - - $(BUILD_DIR)/phase0: mkdir -p $@ - python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $(SCRIPT_DIR)/phase0/minimal_ssz.py \ - $(SCRIPT_DIR)/phase0/bls_stub.py $(SCRIPT_DIR)/phase0/state_transition.py 
$(SCRIPT_DIR)/phase0/monkey_patches.py > $@/spec.py + python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@/spec.py + touch $(BUILD_DIR)/__init__.py $(BUILD_DIR)/phase0/__init__.py diff --git a/build/__init__.py b/build/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build/phase0/__init__.py b/build/phase0/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/build/phase0/spec.py b/build/phase0/spec.py new file mode 100644 index 000000000..8c05b1208 --- /dev/null +++ b/build/phase0/spec.py @@ -0,0 +1,1620 @@ +from build.utils.minimal_ssz import * +from build.utils.bls_stub import * +def int_to_bytes1(x): return x.to_bytes(1, 'little') +def int_to_bytes2(x): return x.to_bytes(2, 'little') +def int_to_bytes3(x): return x.to_bytes(3, 'little') +def int_to_bytes4(x): return x.to_bytes(4, 'little') +def int_to_bytes8(x): return x.to_bytes(8, 'little') +def int_to_bytes32(x): return x.to_bytes(32, 'little') +def int_to_bytes48(x): return x.to_bytes(48, 'little') +def int_to_bytes96(x): return x.to_bytes(96, 'little') +SLOTS_PER_EPOCH = 64 +def slot_to_epoch(x): return x // SLOTS_PER_EPOCH + +from typing import ( + Any, + Callable, + List, + NewType, + Tuple, +) + + +Slot = NewType('Slot', int) # uint64 +Epoch = NewType('Epoch', int) # uint64 +Shard = NewType('Shard', int) # uint64 +ValidatorIndex = NewType('ValidatorIndex', int) # uint64 +Gwei = NewType('Gwei', int) # uint64 +Bytes32 = NewType('Bytes32', bytes) # bytes32 +BLSPubkey = NewType('BLSPubkey', bytes) # bytes48 +BLSSignature = NewType('BLSSignature', bytes) # bytes96 +Any = None +Store = None + +SHARD_COUNT = 2**10 +TARGET_COMMITTEE_SIZE = 2**7 +MAX_BALANCE_CHURN_QUOTIENT = 2**5 +MAX_INDICES_PER_SLASHABLE_VOTE = 2**12 +MAX_EXIT_DEQUEUES_PER_EPOCH = 2**2 +SHUFFLE_ROUND_COUNT = 90 +DEPOSIT_CONTRACT_ADDRESS = 0x1234567890123567890123456789012357890 +DEPOSIT_CONTRACT_TREE_DEPTH = 2**5 +MIN_DEPOSIT_AMOUNT = 2**0 * 10**9 +MAX_DEPOSIT_AMOUNT = 2**5 * 10**9 +FORK_CHOICE_BALANCE_INCREMENT = 2**0 * 10**9 +EJECTION_BALANCE = 2**4 * 10**9 +GENESIS_FORK_VERSION = 0 +GENESIS_SLOT = 2**32 +GENESIS_EPOCH = slot_to_epoch(GENESIS_SLOT) +GENESIS_START_SHARD = 0 +FAR_FUTURE_EPOCH = 2**64 - 1 +ZERO_HASH = int_to_bytes32(0) +EMPTY_SIGNATURE = int_to_bytes96(0) +BLS_WITHDRAWAL_PREFIX_BYTE = int_to_bytes1(0) +SECONDS_PER_SLOT = 6 +MIN_ATTESTATION_INCLUSION_DELAY = 2**2 +SLOTS_PER_EPOCH = 2**6 +MIN_SEED_LOOKAHEAD = 2**0 +ACTIVATION_EXIT_DELAY = 2**2 +EPOCHS_PER_ETH1_VOTING_PERIOD = 2**4 +SLOTS_PER_HISTORICAL_ROOT = 2**13 +MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8 +PERSISTENT_COMMITTEE_PERIOD = 2**11 +LATEST_RANDAO_MIXES_LENGTH = 2**13 +LATEST_ACTIVE_INDEX_ROOTS_LENGTH = 2**13 +LATEST_SLASHED_EXIT_LENGTH = 2**13 +BASE_REWARD_QUOTIENT = 2**5 +WHISTLEBLOWER_REWARD_QUOTIENT = 2**9 +ATTESTATION_INCLUSION_REWARD_QUOTIENT = 2**3 +INACTIVITY_PENALTY_QUOTIENT = 2**24 +MIN_PENALTY_QUOTIENT = 2**5 +MAX_PROPOSER_SLASHINGS = 2**4 +MAX_ATTESTER_SLASHINGS = 2**0 +MAX_ATTESTATIONS = 2**7 +MAX_DEPOSITS = 2**4 +MAX_VOLUNTARY_EXITS = 2**4 +MAX_TRANSFERS = 2**4 +DOMAIN_BEACON_BLOCK = 0 +DOMAIN_RANDAO = 1 +DOMAIN_ATTESTATION = 2 +DOMAIN_DEPOSIT = 3 +DOMAIN_VOLUNTARY_EXIT = 4 +DOMAIN_TRANSFER = 5 +Fork = SSZType({ + # Previous fork version + 'previous_version': 'bytes4', + # Current fork version + 'current_version': 'bytes4', + # Fork epoch number + 'epoch': 'uint64', +}) +Crosslink = SSZType({ + # Epoch number + 'epoch': 'uint64', + # Shard data since the previous crosslink + 'crosslink_data_root': 'bytes32', 
+}) +Eth1Data = SSZType({ + # Root of the deposit tree + 'deposit_root': 'bytes32', + # Block hash + 'block_hash': 'bytes32', +}) +Eth1DataVote = SSZType({ + # Data being voted for + 'eth1_data': Eth1Data, + # Vote count + 'vote_count': 'uint64', +}) +AttestationData = SSZType({ + # LMD GHOST vote + 'slot': 'uint64', + 'beacon_block_root': 'bytes32', + + # FFG vote + 'source_epoch': 'uint64', + 'source_root': 'bytes32', + 'target_root': 'bytes32', + + # Crosslink vote + 'shard': 'uint64', + 'previous_crosslink': Crosslink, + 'crosslink_data_root': 'bytes32', +}) +AttestationDataAndCustodyBit = SSZType({ + # Attestation data + 'data': AttestationData, + # Custody bit + 'custody_bit': 'bool', +}) +SlashableAttestation = SSZType({ + # Validator indices + 'validator_indices': ['uint64'], + # Attestation data + 'data': AttestationData, + # Custody bitfield + 'custody_bitfield': 'bytes', + # Aggregate signature + 'aggregate_signature': 'bytes96', +}) +DepositInput = SSZType({ + # BLS pubkey + 'pubkey': 'bytes48', + # Withdrawal credentials + 'withdrawal_credentials': 'bytes32', + # A BLS signature of this `DepositInput` + 'proof_of_possession': 'bytes96', +}) +DepositData = SSZType({ + # Amount in Gwei + 'amount': 'uint64', + # Timestamp from deposit contract + 'timestamp': 'uint64', + # Deposit input + 'deposit_input': DepositInput, +}) +BeaconBlockHeader = SSZType({ + 'slot': 'uint64', + 'previous_block_root': 'bytes32', + 'state_root': 'bytes32', + 'block_body_root': 'bytes32', + 'signature': 'bytes96', +}) +Validator = SSZType({ + # BLS public key + 'pubkey': 'bytes48', + # Withdrawal credentials + 'withdrawal_credentials': 'bytes32', + # Epoch when validator activated + 'activation_epoch': 'uint64', + # Epoch when validator exited + 'exit_epoch': 'uint64', + # Epoch when validator is eligible to withdraw + 'withdrawable_epoch': 'uint64', + # Did the validator initiate an exit + 'initiated_exit': 'bool', + # Was the validator slashed + 'slashed': 'bool', +}) +PendingAttestation = SSZType({ + # Attester aggregation bitfield + 'aggregation_bitfield': 'bytes', + # Attestation data + 'data': AttestationData, + # Custody bitfield + 'custody_bitfield': 'bytes', + # Inclusion slot + 'inclusion_slot': 'uint64', +}) +HistoricalBatch = SSZType({ + # Block roots + 'block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], + # State roots + 'state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], +}) +ProposerSlashing = SSZType({ + # Proposer index + 'proposer_index': 'uint64', + # First block header + 'header_1': BeaconBlockHeader, + # Second block header + 'header_2': BeaconBlockHeader, +}) +AttesterSlashing = SSZType({ + # First slashable attestation + 'slashable_attestation_1': SlashableAttestation, + # Second slashable attestation + 'slashable_attestation_2': SlashableAttestation, +}) +Attestation = SSZType({ + # Attester aggregation bitfield + 'aggregation_bitfield': 'bytes', + # Attestation data + 'data': AttestationData, + # Custody bitfield + 'custody_bitfield': 'bytes', + # BLS aggregate signature + 'aggregate_signature': 'bytes96', +}) +Deposit = SSZType({ + # Branch in the deposit tree + 'proof': ['bytes32', DEPOSIT_CONTRACT_TREE_DEPTH], + # Index in the deposit tree + 'index': 'uint64', + # Data + 'deposit_data': DepositData, +}) +VoluntaryExit = SSZType({ + # Minimum epoch for processing exit + 'epoch': 'uint64', + # Index of the exiting validator + 'validator_index': 'uint64', + # Validator signature + 'signature': 'bytes96', +}) +Transfer = SSZType({ + # Sender index + 'sender': 'uint64', + # 
Recipient index + 'recipient': 'uint64', + # Amount in Gwei + 'amount': 'uint64', + # Fee in Gwei for block proposer + 'fee': 'uint64', + # Inclusion slot + 'slot': 'uint64', + # Sender withdrawal pubkey + 'pubkey': 'bytes48', + # Sender signature + 'signature': 'bytes96', +}) +BeaconBlockBody = SSZType({ + 'randao_reveal': 'bytes96', + 'eth1_data': Eth1Data, + 'proposer_slashings': [ProposerSlashing], + 'attester_slashings': [AttesterSlashing], + 'attestations': [Attestation], + 'deposits': [Deposit], + 'voluntary_exits': [VoluntaryExit], + 'transfers': [Transfer], +}) +BeaconBlock = SSZType({ + # Header + 'slot': 'uint64', + 'previous_block_root': 'bytes32', + 'state_root': 'bytes32', + 'body': BeaconBlockBody, + 'signature': 'bytes96', +}) +BeaconState = SSZType({ + # Misc + 'slot': 'uint64', + 'genesis_time': 'uint64', + 'fork': Fork, # For versioning hard forks + + # Validator registry + 'validator_registry': [Validator], + 'validator_balances': ['uint64'], + 'validator_registry_update_epoch': 'uint64', + + # Randomness and committees + 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], + 'previous_shuffling_start_shard': 'uint64', + 'current_shuffling_start_shard': 'uint64', + 'previous_shuffling_epoch': 'uint64', + 'current_shuffling_epoch': 'uint64', + 'previous_shuffling_seed': 'bytes32', + 'current_shuffling_seed': 'bytes32', + + # Finality + 'previous_epoch_attestations': [PendingAttestation], + 'current_epoch_attestations': [PendingAttestation], + 'previous_justified_epoch': 'uint64', + 'current_justified_epoch': 'uint64', + 'previous_justified_root': 'bytes32', + 'current_justified_root': 'bytes32', + 'justification_bitfield': 'uint64', + 'finalized_epoch': 'uint64', + 'finalized_root': 'bytes32', + + # Recent state + 'latest_crosslinks': [Crosslink, SHARD_COUNT], + 'latest_block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], + 'latest_state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], + 'latest_active_index_roots': ['bytes32', LATEST_ACTIVE_INDEX_ROOTS_LENGTH], + 'latest_slashed_balances': ['uint64', LATEST_SLASHED_EXIT_LENGTH], # Balances slashed at every withdrawal period + 'latest_block_header': BeaconBlockHeader, # `latest_block_header.state_root == ZERO_HASH` temporarily + 'historical_roots': ['bytes32'], + + # Ethereum 1.0 chain data + 'latest_eth1_data': Eth1Data, + 'eth1_data_votes': [Eth1DataVote], + 'deposit_index': 'uint64' +}) +def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32: + return bytes(a ^ b for a, b in zip(bytes1, bytes2)) +def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader: + """ + Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``. + """ + return BeaconBlockHeader( + slot=block.slot, + previous_block_root=block.previous_block_root, + state_root=ZERO_HASH, + block_body_root=hash_tree_root(block.body), + signature=block.signature, + ) +def slot_to_epoch(slot: Slot) -> Epoch: + """ + Return the epoch number of the given ``slot``. + """ + return slot // SLOTS_PER_EPOCH +def get_previous_epoch(state: BeaconState) -> Epoch: + """` + Return the previous epoch of the given ``state``. + """ + return get_current_epoch(state) - 1 +def get_current_epoch(state: BeaconState) -> Epoch: + """ + Return the current epoch of the given ``state``. + """ + return slot_to_epoch(state.slot) +def get_epoch_start_slot(epoch: Epoch) -> Slot: + """ + Return the starting slot of the given ``epoch``. 
+ """ + return epoch * SLOTS_PER_EPOCH +def is_active_validator(validator: Validator, epoch: Epoch) -> bool: + """ + Check if ``validator`` is active. + """ + return validator.activation_epoch <= epoch < validator.exit_epoch +def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> List[ValidatorIndex]: + """ + Get indices of active validators from ``validators``. + """ + return [i for i, v in enumerate(validators) if is_active_validator(v, epoch)] +def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: + """ + Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. + + Utilizes 'swap or not' shuffling found in + https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf + See the 'generalized domain' algorithm on page 3. + """ + assert index < list_size + assert list_size <= 2**40 + + for round in range(SHUFFLE_ROUND_COUNT): + pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size + flip = (pivot - index) % list_size + position = max(index, flip) + source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position // 256)) + byte = source[(position % 256) // 8] + bit = (byte >> (position % 8)) % 2 + index = flip if bit else index + + return index +def split(values: List[Any], split_count: int) -> List[List[Any]]: + """ + Splits ``values`` into ``split_count`` pieces. + """ + list_length = len(values) + return [ + values[(list_length * i // split_count): (list_length * (i + 1) // split_count)] + for i in range(split_count) + ] +def get_epoch_committee_count(active_validator_count: int) -> int: + """ + Return the number of committees in one epoch. + """ + return max( + 1, + min( + SHARD_COUNT // SLOTS_PER_EPOCH, + active_validator_count // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, + ) + ) * SLOTS_PER_EPOCH +def get_shuffling(seed: Bytes32, + validators: List[Validator], + epoch: Epoch) -> List[List[ValidatorIndex]]: + """ + Shuffle active validators and split into crosslink committees. + Return a list of committees (each a list of validator indices). + """ + # Shuffle active validator indices + active_validator_indices = get_active_validator_indices(validators, epoch) + length = len(active_validator_indices) + shuffled_indices = [active_validator_indices[get_permuted_index(i, length, seed)] for i in range(length)] + + # Split the shuffled active validator indices + return split(shuffled_indices, get_epoch_committee_count(length)) +def get_previous_epoch_committee_count(state: BeaconState) -> int: + """ + Return the number of committees in the previous epoch of the given ``state``. + """ + previous_active_validators = get_active_validator_indices( + state.validator_registry, + state.previous_shuffling_epoch, + ) + return get_epoch_committee_count(len(previous_active_validators)) +def get_current_epoch_committee_count(state: BeaconState) -> int: + """ + Return the number of committees in the current epoch of the given ``state``. + """ + current_active_validators = get_active_validator_indices( + state.validator_registry, + state.current_shuffling_epoch, + ) + return get_epoch_committee_count(len(current_active_validators)) +def get_next_epoch_committee_count(state: BeaconState) -> int: + """ + Return the number of committees in the next epoch of the given ``state``. 
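+    Based on the validators active at ``get_current_epoch(state) + 1``.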
+ """ + next_active_validators = get_active_validator_indices( + state.validator_registry, + get_current_epoch(state) + 1, + ) + return get_epoch_committee_count(len(next_active_validators)) +def get_crosslink_committees_at_slot(state: BeaconState, + slot: Slot, + registry_change: bool=False) -> List[Tuple[List[ValidatorIndex], Shard]]: + """ + Return the list of ``(committee, shard)`` tuples for the ``slot``. + + Note: There are two possible shufflings for crosslink committees for a + ``slot`` in the next epoch -- with and without a `registry_change` + """ + epoch = slot_to_epoch(slot) + current_epoch = get_current_epoch(state) + previous_epoch = get_previous_epoch(state) + next_epoch = current_epoch + 1 + + assert previous_epoch <= epoch <= next_epoch + + if epoch == current_epoch: + committees_per_epoch = get_current_epoch_committee_count(state) + seed = state.current_shuffling_seed + shuffling_epoch = state.current_shuffling_epoch + shuffling_start_shard = state.current_shuffling_start_shard + elif epoch == previous_epoch: + committees_per_epoch = get_previous_epoch_committee_count(state) + seed = state.previous_shuffling_seed + shuffling_epoch = state.previous_shuffling_epoch + shuffling_start_shard = state.previous_shuffling_start_shard + elif epoch == next_epoch: + epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch + if registry_change: + committees_per_epoch = get_next_epoch_committee_count(state) + seed = generate_seed(state, next_epoch) + shuffling_epoch = next_epoch + current_committees_per_epoch = get_current_epoch_committee_count(state) + shuffling_start_shard = (state.current_shuffling_start_shard + current_committees_per_epoch) % SHARD_COUNT + elif epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update): + committees_per_epoch = get_next_epoch_committee_count(state) + seed = generate_seed(state, next_epoch) + shuffling_epoch = next_epoch + shuffling_start_shard = state.current_shuffling_start_shard + else: + committees_per_epoch = get_current_epoch_committee_count(state) + seed = state.current_shuffling_seed + shuffling_epoch = state.current_shuffling_epoch + shuffling_start_shard = state.current_shuffling_start_shard + + shuffling = get_shuffling( + seed, + state.validator_registry, + shuffling_epoch, + ) + offset = slot % SLOTS_PER_EPOCH + committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH + slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % SHARD_COUNT + + return [ + ( + shuffling[committees_per_slot * offset + i], + (slot_start_shard + i) % SHARD_COUNT, + ) + for i in range(committees_per_slot) + ] +def get_block_root(state: BeaconState, + slot: Slot) -> Bytes32: + """ + Return the block root at a recent ``slot``. + """ + assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT + return state.latest_block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] +def get_state_root(state: BeaconState, + slot: Slot) -> Bytes32: + """ + Return the state root at a recent ``slot``. + """ + assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT + return state.latest_state_roots[slot % SLOTS_PER_HISTORICAL_ROOT] +def get_randao_mix(state: BeaconState, + epoch: Epoch) -> Bytes32: + """ + Return the randao mix at a recent ``epoch``. 
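+    ``epoch`` must satisfy ``get_current_epoch(state) - LATEST_RANDAO_MIXES_LENGTH < epoch <= get_current_epoch(state)``.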
+ """ + assert get_current_epoch(state) - LATEST_RANDAO_MIXES_LENGTH < epoch <= get_current_epoch(state) + return state.latest_randao_mixes[epoch % LATEST_RANDAO_MIXES_LENGTH] +def get_active_index_root(state: BeaconState, + epoch: Epoch) -> Bytes32: + """ + Return the index root at a recent ``epoch``. + """ + assert get_current_epoch(state) - LATEST_ACTIVE_INDEX_ROOTS_LENGTH + ACTIVATION_EXIT_DELAY < epoch <= get_current_epoch(state) + ACTIVATION_EXIT_DELAY + return state.latest_active_index_roots[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH] +def generate_seed(state: BeaconState, + epoch: Epoch) -> Bytes32: + """ + Generate a seed for the given ``epoch``. + """ + return hash( + get_randao_mix(state, epoch - MIN_SEED_LOOKAHEAD) + + get_active_index_root(state, epoch) + + int_to_bytes32(epoch) + ) +def get_beacon_proposer_index(state: BeaconState, + slot: Slot, + registry_change: bool=False) -> ValidatorIndex: + """ + Return the beacon proposer index for the ``slot``. + """ + epoch = slot_to_epoch(slot) + current_epoch = get_current_epoch(state) + previous_epoch = get_previous_epoch(state) + next_epoch = current_epoch + 1 + + assert previous_epoch <= epoch <= next_epoch + + first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] + return first_committee[epoch % len(first_committee)] +def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: int, root: Bytes32) -> bool: + """ + Verify that the given ``leaf`` is on the merkle branch ``proof`` + starting with the given ``root``. + """ + value = leaf + for i in range(depth): + if index // (2**i) % 2: + value = hash(proof[i] + value) + else: + value = hash(value + proof[i]) + return value == root +def get_attestation_participants(state: BeaconState, + attestation_data: AttestationData, + bitfield: bytes) -> List[ValidatorIndex]: + """ + Return the participant indices at for the ``attestation_data`` and ``bitfield``. + """ + # Find the committee in the list with the desired shard + crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) + + assert attestation_data.shard in [shard for _, shard in crosslink_committees] + crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] + + assert verify_bitfield(bitfield, len(crosslink_committee)) + + # Find the participating attesters in the committee + participants = [] + for i, validator_index in enumerate(crosslink_committee): + aggregation_bit = get_bitfield_bit(bitfield, i) + if aggregation_bit == 0b1: + participants.append(validator_index) + return participants +def is_power_of_two(value: int) -> bool: + """ + Check if ``value`` is a power of two integer. + """ + return (value > 0) and (value & (value - 1) == 0) +def bytes_to_int(data: bytes) -> int: + return int.from_bytes(data, 'little') +def get_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: + """ + Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. + """ + return min(state.validator_balances[index], MAX_DEPOSIT_AMOUNT) +def get_total_balance(state: BeaconState, validators: List[ValidatorIndex]) -> Gwei: + """ + Return the combined effective balance of an array of ``validators``. + """ + return sum([get_effective_balance(state, i) for i in validators]) +def get_fork_version(fork: Fork, + epoch: Epoch) -> bytes: + """ + Return the fork version of the given ``epoch``. 
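+    Returns ``previous_version`` for epochs before ``fork.epoch``, else ``current_version``.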
+ """ + if epoch < fork.epoch: + return fork.previous_version + else: + return fork.current_version +def get_domain(fork: Fork, + epoch: Epoch, + domain_type: int) -> int: + """ + Get the domain number that represents the fork meta and signature domain. + """ + return bytes_to_int(get_fork_version(fork, epoch) + int_to_bytes4(domain_type)) +def get_bitfield_bit(bitfield: bytes, i: int) -> int: + """ + Extract the bit in ``bitfield`` at position ``i``. + """ + return (bitfield[i // 8] >> (i % 8)) % 2 +def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: + """ + Verify ``bitfield`` against the ``committee_size``. + """ + if len(bitfield) != (committee_size + 7) // 8: + return False + + # Check `bitfield` is padded with zero bits only + for i in range(committee_size, len(bitfield) * 8): + if get_bitfield_bit(bitfield, i) == 0b1: + return False + + return True +def verify_slashable_attestation(state: BeaconState, slashable_attestation: SlashableAttestation) -> bool: + """ + Verify validity of ``slashable_attestation`` fields. + """ + if slashable_attestation.custody_bitfield != b'\x00' * len(slashable_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] + return False + + if len(slashable_attestation.validator_indices) == 0: + return False + + for i in range(len(slashable_attestation.validator_indices) - 1): + if slashable_attestation.validator_indices[i] >= slashable_attestation.validator_indices[i + 1]: + return False + + if not verify_bitfield(slashable_attestation.custody_bitfield, len(slashable_attestation.validator_indices)): + return False + + if len(slashable_attestation.validator_indices) > MAX_INDICES_PER_SLASHABLE_VOTE: + return False + + custody_bit_0_indices = [] + custody_bit_1_indices = [] + for i, validator_index in enumerate(slashable_attestation.validator_indices): + if get_bitfield_bit(slashable_attestation.custody_bitfield, i) == 0b0: + custody_bit_0_indices.append(validator_index) + else: + custody_bit_1_indices.append(validator_index) + + return bls_verify_multiple( + pubkeys=[ + bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_0_indices]), + bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_indices]), + ], + message_hashes=[ + hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b0)), + hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b1)), + ], + signature=slashable_attestation.aggregate_signature, + domain=get_domain(state.fork, slot_to_epoch(slashable_attestation.data.slot), DOMAIN_ATTESTATION), + ) +def is_double_vote(attestation_data_1: AttestationData, + attestation_data_2: AttestationData) -> bool: + """ + Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. + """ + target_epoch_1 = slot_to_epoch(attestation_data_1.slot) + target_epoch_2 = slot_to_epoch(attestation_data_2.slot) + return target_epoch_1 == target_epoch_2 +def is_surround_vote(attestation_data_1: AttestationData, + attestation_data_2: AttestationData) -> bool: + """ + Check if ``attestation_data_1`` surrounds ``attestation_data_2``. 
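+    That is, ``source_epoch_1 < source_epoch_2`` and ``target_epoch_2 < target_epoch_1``.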
+ """ + source_epoch_1 = attestation_data_1.source_epoch + source_epoch_2 = attestation_data_2.source_epoch + target_epoch_1 = slot_to_epoch(attestation_data_1.slot) + target_epoch_2 = slot_to_epoch(attestation_data_2.slot) + + return source_epoch_1 < source_epoch_2 and target_epoch_2 < target_epoch_1 +def integer_squareroot(n: int) -> int: + """ + The largest integer ``x`` such that ``x**2`` is less than or equal to ``n``. + """ + assert n >= 0 + x = n + y = (x + 1) // 2 + while y < x: + x = y + y = (x + n // x) // 2 + return x +def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: + """ + Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. + """ + return epoch + 1 + ACTIVATION_EXIT_DELAY +def process_deposit(state: BeaconState, deposit: Deposit) -> None: + """ + Process a deposit from Ethereum 1.0. + Note that this function mutates ``state``. + """ + deposit_input = deposit.deposit_data.deposit_input + + # Should equal 8 bytes for deposit_data.amount + + # 8 bytes for deposit_data.timestamp + + # 176 bytes for deposit_data.deposit_input + # It should match the deposit_data in the eth1.0 deposit contract + serialized_deposit_data = serialize(deposit.deposit_data) + # Deposits must be processed in order + assert deposit.index == state.deposit_index + + # Verify the Merkle branch + merkle_branch_is_valid = verify_merkle_branch( + leaf=hash(serialized_deposit_data), + proof=deposit.proof, + depth=DEPOSIT_CONTRACT_TREE_DEPTH, + index=deposit.index, + root=state.latest_eth1_data.deposit_root, + ) + assert merkle_branch_is_valid + + # Increment the next deposit index we are expecting. Note that this + # needs to be done here because while the deposit contract will never + # create an invalid Merkle branch, it may admit an invalid deposit + # object, and we need to be able to skip over it + state.deposit_index += 1 + + validator_pubkeys = [v.pubkey for v in state.validator_registry] + pubkey = deposit_input.pubkey + amount = deposit.deposit_data.amount + withdrawal_credentials = deposit_input.withdrawal_credentials + + if pubkey not in validator_pubkeys: + # Verify the proof of possession + proof_is_valid = bls_verify( + pubkey=deposit_input.pubkey, + message_hash=signed_root(deposit_input), + signature=deposit_input.proof_of_possession, + domain=get_domain( + state.fork, + get_current_epoch(state), + DOMAIN_DEPOSIT, + ) + ) + if not proof_is_valid: + return + + # Add new validator + validator = Validator( + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + activation_epoch=FAR_FUTURE_EPOCH, + exit_epoch=FAR_FUTURE_EPOCH, + withdrawable_epoch=FAR_FUTURE_EPOCH, + initiated_exit=False, + slashed=False, + ) + + # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. + state.validator_registry.append(validator) + state.validator_balances.append(amount) + else: + # Increase balance by deposit amount + state.validator_balances[validator_pubkeys.index(pubkey)] += amount +def activate_validator(state: BeaconState, index: ValidatorIndex, is_genesis: bool) -> None: + """ + Activate the validator of the given ``index``. + Note that this function mutates ``state``. + """ + validator = state.validator_registry[index] + + validator.activation_epoch = GENESIS_EPOCH if is_genesis else get_delayed_activation_exit_epoch(get_current_epoch(state)) +def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: + """ + Initiate the validator of the given ``index``. 
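+    Only sets the ``initiated_exit`` flag; the exit epoch is assigned later during the registry update.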
+ Note that this function mutates ``state``. + """ + validator = state.validator_registry[index] + validator.initiated_exit = True +def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: + """ + Exit the validator of the given ``index``. + Note that this function mutates ``state``. + """ + validator = state.validator_registry[index] + delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) + + # The following updates only occur if not previous exited + if validator.exit_epoch <= delayed_activation_exit_epoch: + return + else: + validator.exit_epoch = delayed_activation_exit_epoch +def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: + """ + Slash the validator with index ``index``. + Note that this function mutates ``state``. + """ + validator = state.validator_registry[index] + assert state.slot < get_epoch_start_slot(validator.withdrawable_epoch) # [TO BE REMOVED IN PHASE 2] + exit_validator(state, index) + state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += get_effective_balance(state, index) + + whistleblower_index = get_beacon_proposer_index(state, state.slot) + whistleblower_reward = get_effective_balance(state, index) // WHISTLEBLOWER_REWARD_QUOTIENT + state.validator_balances[whistleblower_index] += whistleblower_reward + state.validator_balances[index] -= whistleblower_reward + validator.slashed = True + validator.withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH +def prepare_validator_for_withdrawal(state: BeaconState, index: ValidatorIndex) -> None: + """ + Set the validator with the given ``index`` as withdrawable + ``MIN_VALIDATOR_WITHDRAWABILITY_DELAY`` after the current epoch. + Note that this function mutates ``state``. + """ + validator = state.validator_registry[index] + validator.withdrawable_epoch = get_current_epoch(state) + MIN_VALIDATOR_WITHDRAWABILITY_DELAY +def get_empty_block() -> BeaconBlock: + """ + Get an empty ``BeaconBlock``. + """ + return BeaconBlock( + slot=GENESIS_SLOT, + previous_block_root=ZERO_HASH, + state_root=ZERO_HASH, + body=BeaconBlockBody( + randao_reveal=EMPTY_SIGNATURE, + eth1_data=Eth1Data( + deposit_root=ZERO_HASH, + block_hash=ZERO_HASH, + ), + proposer_slashings=[], + attester_slashings=[], + attestations=[], + deposits=[], + voluntary_exits=[], + transfers=[], + ), + signature=EMPTY_SIGNATURE, + ) +def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], + genesis_time: int, + genesis_eth1_data: Eth1Data) -> BeaconState: + """ + Get the genesis ``BeaconState``. 
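+    Processes ``genesis_validator_deposits`` and activates validators whose effective balance reaches MAX_DEPOSIT_AMOUNT.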
+ """ + state = BeaconState( + # Misc + slot=GENESIS_SLOT, + genesis_time=genesis_time, + fork=Fork( + previous_version=int_to_bytes4(GENESIS_FORK_VERSION), + current_version=int_to_bytes4(GENESIS_FORK_VERSION), + epoch=GENESIS_EPOCH, + ), + + # Validator registry + validator_registry=[], + validator_balances=[], + validator_registry_update_epoch=GENESIS_EPOCH, + + # Randomness and committees + latest_randao_mixes=[ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)], + previous_shuffling_start_shard=GENESIS_START_SHARD, + current_shuffling_start_shard=GENESIS_START_SHARD, + previous_shuffling_epoch=GENESIS_EPOCH, + current_shuffling_epoch=GENESIS_EPOCH, + previous_shuffling_seed=ZERO_HASH, + current_shuffling_seed=ZERO_HASH, + + # Finality + previous_epoch_attestations=[], + current_epoch_attestations=[], + previous_justified_epoch=GENESIS_EPOCH, + current_justified_epoch=GENESIS_EPOCH, + previous_justified_root=ZERO_HASH, + current_justified_root=ZERO_HASH, + justification_bitfield=0, + finalized_epoch=GENESIS_EPOCH, + finalized_root=ZERO_HASH, + + # Recent state + latest_crosslinks=[Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)], + latest_block_roots=[ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)], + latest_state_roots=[ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)], + latest_active_index_roots=[ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)], + latest_slashed_balances=[0 for _ in range(LATEST_SLASHED_EXIT_LENGTH)], + latest_block_header=get_temporary_block_header(get_empty_block()), + historical_roots=[], + + # Ethereum 1.0 chain data + latest_eth1_data=genesis_eth1_data, + eth1_data_votes=[], + deposit_index=0, + ) + + # Process genesis deposits + for deposit in genesis_validator_deposits: + process_deposit(state, deposit) + + # Process genesis activations + for validator_index, _ in enumerate(state.validator_registry): + if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: + activate_validator(state, validator_index, is_genesis=True) + + genesis_active_index_root = hash_tree_root(get_active_validator_indices(state.validator_registry, GENESIS_EPOCH)) + for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): + state.latest_active_index_roots[index] = genesis_active_index_root + state.current_shuffling_seed = generate_seed(state, GENESIS_EPOCH) + + return state +def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock: + """ + Get the ancestor of ``block`` with slot number ``slot``; return ``None`` if not found. + """ + if block.slot == slot: + return block + elif block.slot < slot: + return None + else: + return get_ancestor(store, store.get_parent(block), slot) +def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) -> BeaconBlock: + """ + Execute the LMD-GHOST algorithm to find the head ``BeaconBlock``. 
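+    Walks the block tree from ``start_block``, at each step selecting the child with the highest attesting balance (ties broken by hash).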
+ """ + validators = start_state.validator_registry + active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot)) + attestation_targets = [ + (validator_index, get_latest_attestation_target(store, validator_index)) + for validator_index in active_validator_indices + ] + + def get_vote_count(block: BeaconBlock) -> int: + return sum( + get_effective_balance(start_state.validator_balances[validator_index]) // FORK_CHOICE_BALANCE_INCREMENT + for validator_index, target in attestation_targets + if get_ancestor(store, target, block.slot) == block + ) + + head = start_block + while 1: + children = get_children(store, head) + if len(children) == 0: + return head + head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x))) +def cache_state(state: BeaconState) -> None: + previous_slot_state_root = hash_tree_root(state) + + # store the previous slot's post state transition root + state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_slot_state_root + + # cache state root in stored latest_block_header if empty + if state.latest_block_header.state_root == ZERO_HASH: + state.latest_block_header.state_root = previous_slot_state_root + + # store latest known block for previous slot + state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = hash_tree_root(state.latest_block_header) +def get_current_total_balance(state: BeaconState) -> Gwei: + return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_current_epoch(state))) +def get_previous_total_balance(state: BeaconState) -> Gwei: + return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_previous_epoch(state))) +def get_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: + output = set() + for a in attestations: + output = output.union(get_attestation_participants(state, a.data, a.aggregation_bitfield)) + return sorted(list(output)) +def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: + return get_total_balance(state, get_attesting_indices(state, attestations)) +def get_current_epoch_boundary_attestations(state: BeaconState) -> List[PendingAttestation]: + return [ + a for a in state.current_epoch_attestations + if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_current_epoch(state))) + ] +def get_previous_epoch_boundary_attestations(state: BeaconState) -> List[PendingAttestation]: + return [ + a for a in state.previous_epoch_attestations + if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_previous_epoch(state))) + ] +def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[PendingAttestation]: + return [ + a for a in state.previous_epoch_attestations + if a.data.beacon_block_root == get_block_root(state, a.data.slot) + ] +def get_winning_root_and_participants(state: BeaconState, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: + all_attestations = state.current_epoch_attestations + state.previous_epoch_attestations + valid_attestations = [ + a for a in all_attestations if a.data.previous_crosslink == state.latest_crosslinks[shard] + ] + all_roots = [a.data.crosslink_data_root for a in valid_attestations] + + # handle when no attestations for shard available + if len(all_roots) == 0: + return ZERO_HASH, [] + + def get_attestations_for(root) -> List[PendingAttestation]: + return [a for a in valid_attestations if a.data.crosslink_data_root == 
root] + + # Winning crosslink root is the root with the most votes for it, ties broken in favor of + # lexicographically higher hash + winning_root = max(all_roots, key=lambda r: (get_attesting_balance(state, get_attestations_for(r)), r)) + + return winning_root, get_attesting_indices(state, get_attestations_for(winning_root)) +def earliest_attestation(state: BeaconState, validator_index: ValidatorIndex) -> PendingAttestation: + return min([ + a for a in state.previous_epoch_attestations if + validator_index in get_attestation_participants(state, a.data, a.aggregation_bitfield) + ], key=lambda a: a.inclusion_slot) +def inclusion_slot(state: BeaconState, validator_index: ValidatorIndex) -> Slot: + return earliest_attestation(state, validator_index).inclusion_slot +def inclusion_distance(state: BeaconState, validator_index: ValidatorIndex) -> int: + attestation = earliest_attestation(state, validator_index) + return attestation.inclusion_slot - attestation.data.slot +def update_justification_and_finalization(state: BeaconState) -> None: + new_justified_epoch = state.current_justified_epoch + new_finalized_epoch = state.finalized_epoch + + # Rotate the justification bitfield up one epoch to make room for the current epoch + state.justification_bitfield <<= 1 + # If the previous epoch gets justified, fill the second last bit + previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) + if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: + new_justified_epoch = get_current_epoch(state) - 1 + state.justification_bitfield |= 2 + # If the current epoch gets justified, fill the last bit + current_boundary_attesting_balance = get_attesting_balance(state, get_current_epoch_boundary_attestations(state)) + if current_boundary_attesting_balance * 3 >= get_current_total_balance(state) * 2: + new_justified_epoch = get_current_epoch(state) + state.justification_bitfield |= 1 + + # Process finalizations + bitfield = state.justification_bitfield + current_epoch = get_current_epoch(state) + # The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source + if (bitfield >> 1) % 8 == 0b111 and state.previous_justified_epoch == current_epoch - 3: + new_finalized_epoch = state.previous_justified_epoch + # The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source + if (bitfield >> 1) % 4 == 0b11 and state.previous_justified_epoch == current_epoch - 2: + new_finalized_epoch = state.previous_justified_epoch + # The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3rd as source + if (bitfield >> 0) % 8 == 0b111 and state.current_justified_epoch == current_epoch - 2: + new_finalized_epoch = state.current_justified_epoch + # The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source + if (bitfield >> 0) % 4 == 0b11 and state.current_justified_epoch == current_epoch - 1: + new_finalized_epoch = state.current_justified_epoch + + # Update state jusification/finality fields + state.previous_justified_epoch = state.current_justified_epoch + state.previous_justified_root = state.current_justified_root + if new_justified_epoch != state.current_justified_epoch: + state.current_justified_epoch = new_justified_epoch + state.current_justified_root = get_block_root(state, get_epoch_start_slot(new_justified_epoch)) + if new_finalized_epoch != state.finalized_epoch: + state.finalized_epoch = new_finalized_epoch + state.finalized_root = 
get_block_root(state, get_epoch_start_slot(new_finalized_epoch)) +def process_crosslinks(state: BeaconState) -> None: + current_epoch = get_current_epoch(state) + previous_epoch = current_epoch - 1 + next_epoch = current_epoch + 1 + for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): + for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): + winning_root, participants = get_winning_root_and_participants(state, shard) + participating_balance = get_total_balance(state, participants) + total_balance = get_total_balance(state, crosslink_committee) + if 3 * participating_balance >= 2 * total_balance: + state.latest_crosslinks[shard] = Crosslink( + epoch=slot_to_epoch(slot), + crosslink_data_root=winning_root + ) +def maybe_reset_eth1_period(state: BeaconState) -> None: + if (get_current_epoch(state) + 1) % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: + for eth1_data_vote in state.eth1_data_votes: + # If a majority of all votes were for a particular eth1_data value, + # then set that as the new canonical value + if eth1_data_vote.vote_count * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: + state.latest_eth1_data = eth1_data_vote.eth1_data + state.eth1_data_votes = [] +def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: + if get_previous_total_balance(state) == 0: + return 0 + + adjusted_quotient = integer_squareroot(get_previous_total_balance(state)) // BASE_REWARD_QUOTIENT + return get_effective_balance(state, index) // adjusted_quotient // 5 +def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_since_finality: int) -> Gwei: + return ( + get_base_reward(state, index) + + get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 + ) +def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: + epochs_since_finality = get_current_epoch(state) + 1 - state.finalized_epoch + if epochs_since_finality <= 4: + return compute_normal_justification_and_finalization_deltas(state) + else: + return compute_inactivity_leak_deltas(state) +def compute_normal_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: + # deltas[0] for rewards + # deltas[1] for penalties + deltas = [ + [0 for index in range(len(state.validator_registry))], + [0 for index in range(len(state.validator_registry))] + ] + # Some helper variables + boundary_attestations = get_previous_epoch_boundary_attestations(state) + boundary_attesting_balance = get_attesting_balance(state, boundary_attestations) + total_balance = get_previous_total_balance(state) + total_attesting_balance = get_attesting_balance(state, state.previous_epoch_attestations) + matching_head_attestations = get_previous_epoch_matching_head_attestations(state) + matching_head_balance = get_attesting_balance(state, matching_head_attestations) + # Process rewards or penalties for all validators + for index in get_active_validator_indices(state.validator_registry, get_previous_epoch(state)): + # Expected FFG source + if index in get_attesting_indices(state, state.previous_epoch_attestations): + deltas[0][index] += get_base_reward(state, index) * total_attesting_balance // total_balance + # Inclusion speed bonus + deltas[0][index] += ( + get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // + inclusion_distance(state, index) + ) + else: + deltas[1][index] += get_base_reward(state, index) + # Expected FFG target + if index in 
get_attesting_indices(state, boundary_attestations): + deltas[0][index] += get_base_reward(state, index) * boundary_attesting_balance // total_balance + else: + deltas[1][index] += get_base_reward(state, index) + # Expected head + if index in get_attesting_indices(state, matching_head_attestations): + deltas[0][index] += get_base_reward(state, index) * matching_head_balance // total_balance + else: + deltas[1][index] += get_base_reward(state, index) + # Proposer bonus + if index in get_attesting_indices(state, state.previous_epoch_attestations): + proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) + deltas[0][proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT + return deltas +def compute_inactivity_leak_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: + # deltas[0] for rewards + # deltas[1] for penalties + deltas = [ + [0 for index in range(len(state.validator_registry))], + [0 for index in range(len(state.validator_registry))] + ] + boundary_attestations = get_previous_epoch_boundary_attestations(state) + matching_head_attestations = get_previous_epoch_matching_head_attestations(state) + active_validator_indices = get_active_validator_indices(state.validator_registry, get_previous_epoch(state)) + epochs_since_finality = get_current_epoch(state) + 1 - state.finalized_epoch + for index in active_validator_indices: + if index not in get_attesting_indices(state, state.previous_epoch_attestations): + deltas[1][index] += get_inactivity_penalty(state, index, epochs_since_finality) + else: + # If a validator did attest, apply a small penalty for getting attestations included late + deltas[0][index] += ( + get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // + inclusion_distance(state, index) + ) + deltas[1][index] += get_base_reward(state, index) + if index not in get_attesting_indices(state, boundary_attestations): + deltas[1][index] += get_inactivity_penalty(state, index, epochs_since_finality) + if index not in get_attesting_indices(state, matching_head_attestations): + deltas[1][index] += get_base_reward(state, index) + # Penalize slashed-but-inactive validators as though they were active but offline + for index in range(len(state.validator_registry)): + eligible = ( + index not in active_validator_indices and + state.validator_registry[index].slashed and + get_current_epoch(state) < state.validator_registry[index].withdrawable_epoch + ) + if eligible: + deltas[1][index] += ( + 2 * get_inactivity_penalty(state, index, epochs_since_finality) + + get_base_reward(state, index) + ) + return deltas +def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: + # deltas[0] for rewards + # deltas[1] for penalties + deltas = [ + [0 for index in range(len(state.validator_registry))], + [0 for index in range(len(state.validator_registry))] + ] + previous_epoch_start_slot = get_epoch_start_slot(get_previous_epoch(state)) + current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) + for slot in range(previous_epoch_start_slot, current_epoch_start_slot): + for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): + winning_root, participants = get_winning_root_and_participants(state, shard) + participating_balance = get_total_balance(state, participants) + total_balance = get_total_balance(state, crosslink_committee) + for index in crosslink_committee: + if index in participants: + deltas[0][index] += get_base_reward(state, index) * participating_balance 
// total_balance + else: + deltas[1][index] += get_base_reward(state, index) + return deltas +def apply_rewards(state: BeaconState) -> None: + deltas1 = get_justification_and_finalization_deltas(state) + deltas2 = get_crosslink_deltas(state) + for i in range(len(state.validator_registry)): + state.validator_balances[i] = max( + 0, + state.validator_balances[i] + deltas1[0][i] + deltas2[0][i] - deltas1[1][i] - deltas2[1][i] + ) +def process_ejections(state: BeaconState) -> None: + """ + Iterate through the validator registry + and eject active validators with balance below ``EJECTION_BALANCE``. + """ + for index in get_active_validator_indices(state.validator_registry, get_current_epoch(state)): + if state.validator_balances[index] < EJECTION_BALANCE: + exit_validator(state, index) +def should_update_validator_registry(state: BeaconState) -> bool: + # Must have finalized a new block + if state.finalized_epoch <= state.validator_registry_update_epoch: + return False + # Must have processed new crosslinks on all shards of the current epoch + shards_to_check = [ + (state.current_shuffling_start_shard + i) % SHARD_COUNT + for i in range(get_current_epoch_committee_count(state)) + ] + for shard in shards_to_check: + if state.latest_crosslinks[shard].epoch <= state.validator_registry_update_epoch: + return False + return True +def update_validator_registry(state: BeaconState) -> None: + """ + Update validator registry. + Note that this function mutates ``state``. + """ + current_epoch = get_current_epoch(state) + # The active validators + active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) + # The total effective balance of active validators + total_balance = get_total_balance(state, active_validator_indices) + + # The maximum balance churn in Gwei (for deposits and exits separately) + max_balance_churn = max( + MAX_DEPOSIT_AMOUNT, + total_balance // (2 * MAX_BALANCE_CHURN_QUOTIENT) + ) + + # Activate validators within the allowable balance churn + balance_churn = 0 + for index, validator in enumerate(state.validator_registry): + if validator.activation_epoch == FAR_FUTURE_EPOCH and state.validator_balances[index] >= MAX_DEPOSIT_AMOUNT: + # Check the balance churn would be within the allowance + balance_churn += get_effective_balance(state, index) + if balance_churn > max_balance_churn: + break + + # Activate validator + activate_validator(state, index, is_genesis=False) + + # Exit validators within the allowable balance churn + balance_churn = 0 + for index, validator in enumerate(state.validator_registry): + if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: + # Check the balance churn would be within the allowance + balance_churn += get_effective_balance(state, index) + if balance_churn > max_balance_churn: + break + + # Exit validator + exit_validator(state, index) + + state.validator_registry_update_epoch = current_epoch +def update_registry_and_shuffling_data(state: BeaconState) -> None: + # First set previous shuffling data to current shuffling data + state.previous_shuffling_epoch = state.current_shuffling_epoch + state.previous_shuffling_start_shard = state.current_shuffling_start_shard + state.previous_shuffling_seed = state.current_shuffling_seed + current_epoch = get_current_epoch(state) + next_epoch = current_epoch + 1 + # Check if we should update, and if so, update + if should_update_validator_registry(state): + update_validator_registry(state) + # If we update the registry, update the shuffling data and shards as 
well
+        state.current_shuffling_epoch = next_epoch
+        state.current_shuffling_start_shard = (
+            state.current_shuffling_start_shard +
+            get_current_epoch_committee_count(state)
+        ) % SHARD_COUNT
+        state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch)
+    else:
+        # If processing at least one crosslink keeps failing, then reshuffle every power of two,
+        # but don't update the current_shuffling_start_shard
+        epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch
+        if epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update):
+            state.current_shuffling_epoch = next_epoch
+            state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch)
+def process_slashings(state: BeaconState) -> None:
+    """
+    Process the slashings.
+    Note that this function mutates ``state``.
+    """
+    current_epoch = get_current_epoch(state)
+    active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch)
+    total_balance = get_total_balance(state, active_validator_indices)
+
+    # Compute `total_penalties`
+    total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH]
+    total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH]
+    total_penalties = total_at_end - total_at_start
+
+    for index, validator in enumerate(state.validator_registry):
+        if validator.slashed and current_epoch == validator.withdrawable_epoch - LATEST_SLASHED_EXIT_LENGTH // 2:
+            penalty = max(
+                get_effective_balance(state, index) * min(total_penalties * 3, total_balance) // total_balance,
+                get_effective_balance(state, index) // MIN_PENALTY_QUOTIENT
+            )
+            state.validator_balances[index] -= penalty
+def process_exit_queue(state: BeaconState) -> None:
+    """
+    Process the exit queue.
+    Note that this function mutates ``state``.
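+    At most MAX_EXIT_DEQUEUES_PER_EPOCH validators are dequeued per call, ordered by exit epoch and then by validator index.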
+ """ + def eligible(index): + validator = state.validator_registry[index] + # Filter out dequeued validators + if validator.withdrawable_epoch != FAR_FUTURE_EPOCH: + return False + # Dequeue if the minimum amount of time has passed + else: + return get_current_epoch(state) >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + + eligible_indices = filter(eligible, list(range(len(state.validator_registry)))) + # Sort in order of exit epoch, and validators that exit within the same epoch exit in order of validator index + sorted_indices = sorted(eligible_indices, key=lambda index: state.validator_registry[index].exit_epoch) + for dequeues, index in enumerate(sorted_indices): + if dequeues >= MAX_EXIT_DEQUEUES_PER_EPOCH: + break + prepare_validator_for_withdrawal(state, index) +def finish_epoch_update(state: BeaconState) -> None: + current_epoch = get_current_epoch(state) + next_epoch = current_epoch + 1 + # Set active index root + index_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH + state.latest_active_index_roots[index_root_position] = hash_tree_root( + get_active_validator_indices(state.validator_registry, next_epoch + ACTIVATION_EXIT_DELAY) + ) + # Set total slashed balances + state.latest_slashed_balances[next_epoch % LATEST_SLASHED_EXIT_LENGTH] = ( + state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] + ) + # Set randao mix + state.latest_randao_mixes[next_epoch % LATEST_RANDAO_MIXES_LENGTH] = get_randao_mix(state, current_epoch) + # Set historical root accumulator + if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: + historical_batch = HistoricalBatch( + block_roots=state.latest_block_roots, + state_roots=state.latest_state_roots, + ) + state.historical_roots.append(hash_tree_root(historical_batch)) + # Rotate current/previous epoch attestations + state.previous_epoch_attestations = state.current_epoch_attestations + state.current_epoch_attestations = [] +def advance_slot(state: BeaconState) -> None: + state.slot += 1 +def process_block_header(state: BeaconState, block: BeaconBlock) -> None: + # Verify that the slots match + assert block.slot == state.slot + # Verify that the parent matches + assert block.previous_block_root == hash_tree_root(state.latest_block_header) + # Save current block as the new latest block + state.latest_block_header = get_temporary_block_header(block) + # Verify proposer signature + proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] + assert bls_verify( + pubkey=proposer.pubkey, + message_hash=signed_root(block), + signature=block.signature, + domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_BEACON_BLOCK) + ) +def process_randao(state: BeaconState, block: BeaconBlock) -> None: + proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] + # Verify that the provided randao value is valid + assert bls_verify( + pubkey=proposer.pubkey, + message_hash=hash_tree_root(get_current_epoch(state)), + signature=block.body.randao_reveal, + domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO) + ) + # Mix it in + state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = ( + xor(get_randao_mix(state, get_current_epoch(state)), + hash(block.body.randao_reveal)) + ) +def process_eth1_data(state: BeaconState, block: BeaconBlock) -> None: + for eth1_data_vote in state.eth1_data_votes: + # If someone else has already voted for the same hash, add to its counter + if 
eth1_data_vote.eth1_data == block.body.eth1_data: + eth1_data_vote.vote_count += 1 + return + # If we're seeing this hash for the first time, make a new counter + state.eth1_data_votes.append(Eth1DataVote(eth1_data=block.body.eth1_data, vote_count=1)) +def process_proposer_slashing(state: BeaconState, + proposer_slashing: ProposerSlashing) -> None: + """ + Process ``ProposerSlashing`` transaction. + Note that this function mutates ``state``. + """ + proposer = state.validator_registry[proposer_slashing.proposer_index] + # Verify that the epoch is the same + assert slot_to_epoch(proposer_slashing.header_1.slot) == slot_to_epoch(proposer_slashing.header_2.slot) + # But the headers are different + assert proposer_slashing.header_1 != proposer_slashing.header_2 + # Proposer is not yet slashed + assert proposer.slashed is False + # Signatures are valid + for header in (proposer_slashing.header_1, proposer_slashing.header_2): + assert bls_verify( + pubkey=proposer.pubkey, + message_hash=signed_root(header), + signature=header.signature, + domain=get_domain(state.fork, slot_to_epoch(header.slot), DOMAIN_BEACON_BLOCK) + ) + slash_validator(state, proposer_slashing.proposer_index) +def process_attester_slashing(state: BeaconState, + attester_slashing: AttesterSlashing) -> None: + """ + Process ``AttesterSlashing`` transaction. + Note that this function mutates ``state``. + """ + attestation1 = attester_slashing.slashable_attestation_1 + attestation2 = attester_slashing.slashable_attestation_2 + # Check that the attestations are conflicting + assert attestation1.data != attestation2.data + assert ( + is_double_vote(attestation1.data, attestation2.data) or + is_surround_vote(attestation1.data, attestation2.data) + ) + assert verify_slashable_attestation(state, attestation1) + assert verify_slashable_attestation(state, attestation2) + slashable_indices = [ + index for index in attestation1.validator_indices + if ( + index in attestation2.validator_indices and + state.validator_registry[index].slashed is False + ) + ] + assert len(slashable_indices) >= 1 + for index in slashable_indices: + slash_validator(state, index) +def process_attestation(state: BeaconState, attestation: Attestation) -> None: + """ + Process ``Attestation`` transaction. + Note that this function mutates ``state``. 
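+    The attestation must target a sufficiently recent slot, reference the correct justified checkpoint, and carry a valid aggregate signature.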
+ """ + # Can't submit attestations that are too far in history (or in prehistory) + assert attestation.data.slot >= GENESIS_SLOT + assert state.slot <= attestation.data.slot + SLOTS_PER_EPOCH + # Can't submit attestations too quickly + assert attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot + # Verify that the justified epoch and root is correct + if slot_to_epoch(attestation.data.slot) >= get_current_epoch(state): + # Case 1: current epoch attestations + assert attestation.data.source_epoch == state.current_justified_epoch + assert attestation.data.source_root == state.current_justified_root + else: + # Case 2: previous epoch attestations + assert attestation.data.source_epoch == state.previous_justified_epoch + assert attestation.data.source_root == state.previous_justified_root + # Check that the crosslink data is valid + acceptable_crosslink_data = { + # Case 1: Latest crosslink matches the one in the state + attestation.data.previous_crosslink, + # Case 2: State has already been updated, state's latest crosslink matches the crosslink + # the attestation is trying to create + Crosslink( + crosslink_data_root=attestation.data.crosslink_data_root, + epoch=slot_to_epoch(attestation.data.slot) + ) + } + assert state.latest_crosslinks[attestation.data.shard] in acceptable_crosslink_data + # Attestation must be nonempty! + assert attestation.aggregation_bitfield != b'\x00' * len(attestation.aggregation_bitfield) + # Custody must be empty (to be removed in phase 1) + assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) + # Get the committee for the specific shard that this attestation is for + crosslink_committee = [ + committee for committee, shard in get_crosslink_committees_at_slot(state, attestation.data.slot) + if shard == attestation.data.shard + ][0] + # Custody bitfield must be a subset of the attestation bitfield + for i in range(len(crosslink_committee)): + if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b0: + assert get_bitfield_bit(attestation.custody_bitfield, i) == 0b0 + # Verify aggregate signature + participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + custody_bit_1_participants = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) + custody_bit_0_participants = [i for i in participants if i not in custody_bit_1_participants] + + assert bls_verify_multiple( + pubkeys=[ + bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_0_participants]), + bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_participants]), + ], + message_hashes=[ + hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), + hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b1)), + ], + signature=attestation.aggregate_signature, + domain=get_domain(state.fork, slot_to_epoch(attestation.data.slot), DOMAIN_ATTESTATION), + ) + # Crosslink data root is zero (to be removed in phase 1) + assert attestation.data.crosslink_data_root == ZERO_HASH + # Apply the attestation + pending_attestation = PendingAttestation( + data=attestation.data, + aggregation_bitfield=attestation.aggregation_bitfield, + custody_bitfield=attestation.custody_bitfield, + inclusion_slot=state.slot + ) + if slot_to_epoch(attestation.data.slot) == get_current_epoch(state): + state.current_epoch_attestations.append(pending_attestation) + elif slot_to_epoch(attestation.data.slot) == 
get_previous_epoch(state): + state.previous_epoch_attestations.append(pending_attestation) +def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: + """ + Process ``VoluntaryExit`` transaction. + Note that this function mutates ``state``. + """ + validator = state.validator_registry[exit.validator_index] + # Verify the validator has not yet exited + assert validator.exit_epoch == FAR_FUTURE_EPOCH + # Verify the validator has not initiated an exit + assert validator.initiated_exit is False + # Exits must specify an epoch when they become valid; they are not valid before then + assert get_current_epoch(state) >= exit.epoch + # Must have been in the validator set long enough + assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD + # Verify signature + assert bls_verify( + pubkey=validator.pubkey, + message_hash=signed_root(exit), + signature=exit.signature, + domain=get_domain(state.fork, exit.epoch, DOMAIN_VOLUNTARY_EXIT) + ) + # Run the exit + initiate_validator_exit(state, exit.validator_index) +def process_transfer(state: BeaconState, transfer: Transfer) -> None: + """ + Process ``Transfer`` transaction. + Note that this function mutates ``state``. + """ + # Verify the amount and fee aren't individually too big (for anti-overflow purposes) + assert state.validator_balances[transfer.sender] >= max(transfer.amount, transfer.fee) + # Verify that we have enough ETH to send, and that after the transfer the balance will be either + # exactly zero or at least MIN_DEPOSIT_AMOUNT + assert ( + state.validator_balances[transfer.sender] == transfer.amount + transfer.fee or + state.validator_balances[transfer.sender] >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT + ) + # A transfer is valid in only one slot + assert state.slot == transfer.slot + # Only withdrawn or not-yet-deposited accounts can transfer + assert ( + get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or + state.validator_registry[transfer.sender].activation_epoch == FAR_FUTURE_EPOCH + ) + # Verify that the pubkey is valid + assert ( + state.validator_registry[transfer.sender].withdrawal_credentials == + BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:] + ) + # Verify that the signature is valid + assert bls_verify( + pubkey=transfer.pubkey, + message_hash=signed_root(transfer), + signature=transfer.signature, + domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER) + ) + # Process the transfer + state.validator_balances[transfer.sender] -= transfer.amount + transfer.fee + state.validator_balances[transfer.recipient] += transfer.amount + state.validator_balances[get_beacon_proposer_index(state, state.slot)] += transfer.fee +def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: + assert block.state_root == hash_tree_root(state) + +# Monkey patch validator shuffling cache +_get_shuffling = get_shuffling +shuffling_cache = {} +def get_shuffling(seed: Bytes32, + validators: List[Validator], + epoch: Epoch) -> List[List[ValidatorIndex]]: + + param_hash = (seed, hash_tree_root(validators, [Validator]), epoch) + + if param_hash in shuffling_cache: + # print("Cache hit, epoch={0}".format(epoch)) + return shuffling_cache[param_hash] + else: + # print("Cache miss, epoch={0}".format(epoch)) + ret = _get_shuffling(seed, validators, epoch) + shuffling_cache[param_hash] = ret + return ret + + +# Monkey patch hash cache +_hash = hash +hash_cache = {} +def hash(x): + if x in hash_cache: + return 
hash_cache[x] + else: + ret = _hash(x) + hash_cache[x] = ret + return ret + \ No newline at end of file diff --git a/scripts/phase0/state_transition.py b/build/phase0/state_transition.py similarity index 56% rename from scripts/phase0/state_transition.py rename to build/phase0/state_transition.py index f78119cf2..2bd33f6d6 100644 --- a/scripts/phase0/state_transition.py +++ b/build/phase0/state_transition.py @@ -1,3 +1,18 @@ +import build.phase0.spec as spec + + +from typing import ( + Any, + Callable, + List, + NewType, + Tuple, +) + +from build.phase0.spec import ( + BeaconState, + BeaconBlock, +) def process_transaction_type(state: BeaconState, @@ -13,72 +28,73 @@ def process_transactions(state: BeaconState, block: BeaconBlock) -> None: process_transaction_type( state, block.body.proposer_slashings, - MAX_PROPOSER_SLASHINGS, - process_proposer_slashing, + spec.MAX_PROPOSER_SLASHINGS, + spec.process_proposer_slashing, ) process_transaction_type( state, block.body.attester_slashings, - MAX_ATTESTER_SLASHINGS, - process_attester_slashing, + spec.MAX_ATTESTER_SLASHINGS, + spec.process_attester_slashing, ) process_transaction_type( state, block.body.attestations, - MAX_ATTESTATIONS, - process_attestation, + spec.MAX_ATTESTATIONS, + spec.process_attestation, ) process_transaction_type( state, block.body.deposits, - MAX_DEPOSITS, - process_deposit, + spec.MAX_DEPOSITS, + spec.process_deposit, ) process_transaction_type( state, block.body.voluntary_exits, - MAX_VOLUNTARY_EXITS, - process_voluntary_exit, + spec.MAX_VOLUNTARY_EXITS, + spec.process_voluntary_exit, ) assert len(block.body.transfers) == len(set(block.body.transfers)) process_transaction_type( state, block.body.transfers, - MAX_TRANSFERS, - process_transfer, + spec.MAX_TRANSFERS, + spec.process_transfer, ) def process_block(state: BeaconState, block: BeaconBlock, verify_state_root: bool=False) -> None: - process_block_header(state, block) - process_randao(state, block) - process_eth1_data(state, block) + spec.process_block_header(state, block) + spec.process_randao(state, block) + spec.process_eth1_data(state, block) + process_transactions(state, block) if verify_state_root: - verify_block_state_root(state, block) + spec.verify_block_state_root(state, block) def process_epoch_transition(state: BeaconState) -> None: - update_justification_and_finalization(state) - process_crosslinks(state) - maybe_reset_eth1_period(state) - apply_rewards(state) - process_ejections(state) - update_registry_and_shuffling_data(state) - process_slashings(state) - process_exit_queue(state) - finish_epoch_update(state) + spec.update_justification_and_finalization(state) + spec.process_crosslinks(state) + spec.maybe_reset_eth1_period(state) + spec.apply_rewards(state) + spec.process_ejections(state) + spec.update_registry_and_shuffling_data(state) + spec.process_slashings(state) + spec.process_exit_queue(state) + spec.finish_epoch_update(state) def state_transition(state: BeaconState, block: BeaconBlock, verify_state_root: bool=False) -> BeaconState: while state.slot < block.slot: - cache_state(state) - if (state.slot + 1) % SLOTS_PER_EPOCH == 0: + spec.cache_state(state) + if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0: process_epoch_transition(state) - advance_slot(state) + spec.advance_slot(state) if block.slot == state.slot: process_block(state, block) diff --git a/build/utils/__init__.py b/build/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/phase0/bls_stub.py b/build/utils/bls_stub.py similarity index 100% 
rename from scripts/phase0/bls_stub.py rename to build/utils/bls_stub.py diff --git a/build/utils/hash_function.py b/build/utils/hash_function.py new file mode 100644 index 000000000..da5b4d979 --- /dev/null +++ b/build/utils/hash_function.py @@ -0,0 +1,6 @@ +from hashlib import sha256 +from eth_utils import keccak + + +# def hash(x): return sha256(x).digest() +def hash(x): return keccak(x) diff --git a/build/utils/merkle_minimal.py b/build/utils/merkle_minimal.py new file mode 100644 index 000000000..a811350ce --- /dev/null +++ b/build/utils/merkle_minimal.py @@ -0,0 +1,28 @@ +from .hash_function import hash + + +zerohashes = [b'\x00' * 32] +for i in range(1, 32): + zerohashes.append(hash(zerohashes[i-1] + zerohashes[i-1])) + +# Compute a Merkle root of a right-zerobyte-padded 2**32 sized tree +def calc_merkle_tree_from_leaves(values): + values = list(values) + tree = [values[::]] + for h in range(32): + if len(values) % 2 == 1: + values.append(zerohashes[h]) + # print(values) + values = [hash(values[i] + values[i+1]) for i in range(0, len(values), 2)] + tree.append(values[::]) + return tree + +def get_merkle_root(values): + return calc_merkle_tree_from_leaves(values)[-1][0] + +def get_merkle_proof(tree, item_index): + proof = [] + for i in range(32): + subindex = (item_index//2**i)^1 + proof.append(tree[i][subindex] if subindex < len(tree[i]) else zerohashes[i]) + return proof diff --git a/scripts/phase0/minimal_ssz.py b/build/utils/minimal_ssz.py similarity index 99% rename from scripts/phase0/minimal_ssz.py rename to build/utils/minimal_ssz.py index 5caaf8f09..845de18c3 100644 --- a/scripts/phase0/minimal_ssz.py +++ b/build/utils/minimal_ssz.py @@ -1,4 +1,4 @@ -from utils.hash import hash +from .hash_function import hash BYTES_PER_CHUNK = 32 diff --git a/scripts/phase0/monkey_patches.py b/build/utils/monkey_patches.py similarity index 100% rename from scripts/phase0/monkey_patches.py rename to build/utils/monkey_patches.py diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index c4f8ab38c..8b5941b62 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -1,14 +1,18 @@ import sys import function_puller -code_lines = [] -for i in (1, 2, 3, 4, 8, 32, 48, 96): - code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i)) -code_lines.append("SLOTS_PER_EPOCH = 64") # stub, will get overwritten by real var -code_lines.append("def slot_to_epoch(x): return x // SLOTS_PER_EPOCH") +def build_spec(sourcefile, outfile): + code_lines = [] -code_lines.append(""" + code_lines.append("from build.utils.minimal_ssz import *") + code_lines.append("from build.utils.bls_stub import *") + for i in (1, 2, 3, 4, 8, 32, 48, 96): + code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i)) + code_lines.append("SLOTS_PER_EPOCH = 64") # stub, will get overwritten by real var + code_lines.append("def slot_to_epoch(x): return x // SLOTS_PER_EPOCH") + + code_lines.append(""" from typing import ( Any, Callable, @@ -28,16 +32,48 @@ BLSPubkey = NewType('BLSPubkey', bytes) # bytes48 BLSSignature = NewType('BLSSignature', bytes) # bytes96 Any = None Store = None -""") + """) -code_lines += function_puller.get_lines(sys.argv[1]) + code_lines += function_puller.get_lines(sourcefile) -print(open(sys.argv[2]).read()) -print(open(sys.argv[3]).read()) + code_lines.append(""" +# Monkey patch validator shuffling cache +_get_shuffling = get_shuffling +shuffling_cache = {} +def get_shuffling(seed: Bytes32, + validators: 
List[Validator], + epoch: Epoch) -> List[List[ValidatorIndex]]: -for line in code_lines: - print(line) + param_hash = (seed, hash_tree_root(validators, [Validator]), epoch) -print(open(sys.argv[4]).read()) -print(open(sys.argv[5]).read()) + if param_hash in shuffling_cache: + # print("Cache hit, epoch={0}".format(epoch)) + return shuffling_cache[param_hash] + else: + # print("Cache miss, epoch={0}".format(epoch)) + ret = _get_shuffling(seed, validators, epoch) + shuffling_cache[param_hash] = ret + return ret + + +# Monkey patch hash cache +_hash = hash +hash_cache = {} +def hash(x): + if x in hash_cache: + return hash_cache[x] + else: + ret = _hash(x) + hash_cache[x] = ret + return ret + """) + + with open(outfile, 'w') as out: + out.write("\n".join(code_lines)) + + +if __name__ == '__main__': + if len(sys.argv) < 3: + print("Error: spec source and outfile must defined") + build_spec(sys.argv[1], sys.argv[2]) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/phase0/conftest.py b/tests/phase0/conftest.py index d3ebabaa2..7e2800afd 100644 --- a/tests/phase0/conftest.py +++ b/tests/phase0/conftest.py @@ -1,6 +1,134 @@ import pytest + +from py_ecc import bls + from build.phase0 import spec +from build.utils.merkle_minimal import ( + calc_merkle_tree_from_leaves, + get_merkle_proof, + get_merkle_root, +) +from build.phase0.spec import ( + Deposit, + DepositData, + DepositInput, + Eth1Data, + get_genesis_beacon_state, + verify_merkle_branch, + hash, +) -# @pytest.fixture(autouse=True) -# def build_clean(): \ No newline at end of file + +privkeys_list = [i+1 for i in range(1000)] +pubkeys_list = [bls.privtopub(privkey) for privkey in privkeys_list] +pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys_list, pubkeys_list)} + + +@pytest.fixture +def privkeys(): + return privkeys_list + + +@pytest.fixture +def pubkeys(): + return pubkeys_list + + +def overwrite_spec_config(config): + for field in config: + setattr(spec, field, config[field]) + if field == "LATEST_RANDAO_MIXES_LENGTH": + spec.BeaconState.fields['latest_randao_mixes'][1] = config[field] + elif field == "SHARD_COUNT": + spec.BeaconState.fields['latest_crosslinks'][1] = config[field] + elif field == "SLOTS_PER_HISTORICAL_ROOT": + spec.BeaconState.fields['latest_block_roots'][1] = config[field] + spec.BeaconState.fields['latest_state_roots'][1] = config[field] + spec.HistoricalBatch.fields['block_roots'][1] = config[field] + spec.HistoricalBatch.fields['state_roots'][1] = config[field] + elif field == "LATEST_ACTIVE_INDEX_ROOTS_LENGTH": + spec.BeaconState.fields['latest_active_index_roots'][1] = config[field] + elif field == "LATEST_SLASHED_EXIT_LENGTH": + spec.BeaconState.fields['latest_slashed_balances'][1] = config[field] + + +@pytest.fixture +def config(): + return { + "SHARD_COUNT": 8, + "MIN_ATTESTATION_INCLUSION_DELAY": 2, + "TARGET_COMMITTEE_SIZE": 4, + "SLOTS_PER_EPOCH": 8, + "GENESIS_EPOCH": spec.GENESIS_SLOT // 8, + "SLOTS_PER_HISTORICAL_ROOT": 64, + "LATEST_RANDAO_MIXES_LENGTH": 64, + "LATEST_ACTIVE_INDEX_ROOTS_LENGTH": 64, + "LATEST_SLASHED_EXIT_LENGTH": 64, + } + + +@pytest.fixture(autouse=True) +def overwrite_config(config): + overwrite_spec_config(config) + + +def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves): + deposit_timestamp = 0 + proof_of_possession = b'\x33' * 96 + + deposit_data_list = [] + for i in 
range(num_validators): + pubkey = pubkeys_list[i] + privkey = pubkey_to_privkey[pubkey] + deposit_data = DepositData( + amount=spec.MAX_DEPOSIT_AMOUNT, + timestamp=deposit_timestamp, + deposit_input=DepositInput( + pubkey=pubkey, + withdrawal_credentials=privkey.to_bytes(32, byteorder='big'), + proof_of_possession=proof_of_possession, + ), + ) + item = hash(deposit_data.serialize()) + deposit_data_leaves.append(item) + tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) + root = get_merkle_root((tuple(deposit_data_leaves))) + proof = list(get_merkle_proof(tree, item_index=i)) + assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, i, root) + deposit_data_list.append(deposit_data) + + genesis_validator_deposits = [] + for i in range(num_validators): + genesis_validator_deposits.append(Deposit( + proof=list(get_merkle_proof(tree, item_index=i)), + index=i, + deposit_data=deposit_data_list[i] + )) + return genesis_validator_deposits, root + + +def create_genesis_state(num_validators, deposit_data_leaves): + initial_deposits, deposit_root = create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves) + return get_genesis_beacon_state( + initial_deposits, + genesis_time=0, + genesis_eth1_data=Eth1Data( + deposit_root=deposit_root, + block_hash=spec.ZERO_HASH, + ), + ) + +@pytest.fixture +def num_validators(): + return 100 + + +@pytest.fixture +def deposit_data_leaves(): + return list() + + +@pytest.fixture +def state(num_validators, deposit_data_leaves): + return create_genesis_state(num_validators, deposit_data_leaves) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py new file mode 100644 index 000000000..573c3ba21 --- /dev/null +++ b/tests/phase0/test_sanity.py @@ -0,0 +1,632 @@ +import os +import sys +import time + +from copy import deepcopy +from py_ecc import bls +import build.phase0.spec as spec + +from build.utils.minimal_ssz import signed_root +from build.phase0.spec import ( + # SSZ + Attestation, + AttestationData, + AttestationDataAndCustodyBit, + BeaconBlockHeader, + Deposit, + DepositData, + DepositInput, + Eth1Data, + Transfer, + ProposerSlashing, + Validator, + VoluntaryExit, + # functions + int_to_bytes32, + int_to_bytes48, + get_active_validator_indices, + get_attestation_participants, + get_block_root, + get_crosslink_committees_at_slot, + get_current_epoch, + get_domain, + get_empty_block, + get_epoch_start_slot, + get_genesis_beacon_state, + get_state_root, + advance_slot, + slot_to_epoch, + cache_state, + verify_merkle_branch, + hash, +) +from build.phase0.state_transition import ( + state_transition, +) +from build.utils.merkle_minimal import ( + calc_merkle_tree_from_leaves, + get_merkle_proof, + get_merkle_root, +) +# from state_test_gen import ( + # generate_from_test, + # dump_json, + # dump_yaml, +# ) + + +def get_empty_root(): + return get_merkle_root((spec.ZERO_HASH,)) + + +def construct_empty_block_for_next_slot(state): + empty_block = get_empty_block() + empty_block.slot = state.slot + 1 + previous_block_header = deepcopy(state.latest_block_header) + if previous_block_header.state_root == spec.ZERO_HASH: + previous_block_header.state_root = state.hash_tree_root() + empty_block.previous_block_root = previous_block_header.hash_tree_root() + return empty_block + + +def create_deposit_data(state, pubkey, privkey, amount): + deposit_input = DepositInput( + pubkey=pubkey, + withdrawal_credentials=privkey.to_bytes(32, byteorder='big'), + proof_of_possession=b'00'*96, + ) + proof_of_possession = 
bls.sign(
+        message_hash=signed_root(deposit_input),
+        privkey=privkey,
+        domain=get_domain(
+            state.fork,
+            get_current_epoch(state),
+            spec.DOMAIN_DEPOSIT,
+        )
+    )
+    deposit_input.proof_of_possession = proof_of_possession
+    deposit_data = DepositData(
+        amount=amount,
+        timestamp=0,
+        deposit_input=deposit_input,
+    )
+    return deposit_data
+
+
+def build_attestation_data(state, slot, shard):
+    assert state.slot >= slot
+
+    block_root = construct_empty_block_for_next_slot(state).previous_block_root
+
+    epoch_start_slot = get_epoch_start_slot(get_current_epoch(state))
+    if epoch_start_slot == slot:
+        epoch_boundary_root = block_root
+    else:
+        epoch_boundary_root = get_block_root(state, epoch_start_slot)
+
+    if slot < epoch_start_slot:
+        justified_block_root = state.previous_justified_root
+    else:
+        justified_block_root = state.current_justified_root
+
+    return AttestationData(
+        slot=slot,
+        shard=shard,
+        beacon_block_root=block_root,
+        source_epoch=state.current_justified_epoch,
+        source_root=justified_block_root,
+        target_root=epoch_boundary_root,
+        crosslink_data_root=spec.ZERO_HASH,
+        previous_crosslink=deepcopy(state.latest_crosslinks[shard]),
+    )
+
+
+def test_slot_transition(state):
+    test_state = deepcopy(state)
+    cache_state(test_state)
+    advance_slot(test_state)
+    assert test_state.slot == state.slot + 1
+    assert get_state_root(test_state, state.slot) == state.hash_tree_root()
+    return test_state
+
+
+def test_empty_block_transition(state):
+    test_state = deepcopy(state)
+
+    block = construct_empty_block_for_next_slot(test_state)
+    state_transition(test_state, block)
+
+    assert len(test_state.eth1_data_votes) == len(state.eth1_data_votes) + 1
+    assert get_block_root(test_state, state.slot) == block.previous_block_root
+
+    return state, [block], test_state
+
+
+def test_skipped_slots(state):
+    test_state = deepcopy(state)
+    block = construct_empty_block_for_next_slot(test_state)
+    block.slot += 3
+
+    state_transition(test_state, block)
+
+    assert test_state.slot == block.slot
+    for slot in range(state.slot, test_state.slot):
+        assert get_block_root(test_state, slot) == block.previous_block_root
+
+    return state, [block], test_state
+
+
+def test_empty_epoch_transition(state):
+    test_state = deepcopy(state)
+    block = construct_empty_block_for_next_slot(test_state)
+    block.slot += spec.SLOTS_PER_EPOCH
+
+    state_transition(test_state, block)
+
+    assert test_state.slot == block.slot
+    for slot in range(state.slot, test_state.slot):
+        assert get_block_root(test_state, slot) == block.previous_block_root
+
+    return state, [block], test_state
+
+
+def test_empty_epoch_transition_not_finalizing(state):
+    test_state = deepcopy(state)
+    block = construct_empty_block_for_next_slot(test_state)
+    block.slot += spec.SLOTS_PER_EPOCH * 5
+
+    state_transition(test_state, block)
+
+    assert test_state.slot == block.slot
+    assert test_state.finalized_epoch < get_current_epoch(test_state) - 4
+
+    return state, [block], test_state
+
+
+def test_proposer_slashing(state, pubkeys, privkeys):
+    test_state = deepcopy(state)
+    current_epoch = get_current_epoch(test_state)
+    validator_index = get_active_validator_indices(test_state.validator_registry, current_epoch)[-1]
+    pubkey = pubkeys[validator_index]
+    privkey = privkeys[validator_index]
+    slot = spec.GENESIS_SLOT
+    header_1 = BeaconBlockHeader(
+        slot=slot,
+        previous_block_root=b'\x00'*32,
+        state_root=b'\x00'*32,
+        block_body_root=b'\x00'*32,
+        signature=b'\x00'*96
+    )
+    header_2 = deepcopy(header_1)
+    header_2.previous_block_root = b'\x02'*32
+    header_2.slot = slot + 1
+
+
domain = get_domain( + fork=test_state.fork, + epoch=get_current_epoch(test_state), + domain_type=spec.DOMAIN_BEACON_BLOCK, + ) + header_1.signature = bls.sign( + message_hash=signed_root(header_1), + privkey=privkey, + domain=domain, + ) + header_2.signature = bls.sign( + message_hash=signed_root(header_2), + privkey=privkey, + domain=domain, + ) + + proposer_slashing = ProposerSlashing( + proposer_index=validator_index, + header_1=header_1, + header_2=header_2, + ) + + # + # Add to state via block transition + # + block = construct_empty_block_for_next_slot(test_state) + block.body.proposer_slashings.append(proposer_slashing) + state_transition(test_state, block) + + assert not state.validator_registry[validator_index].initiated_exit + assert not state.validator_registry[validator_index].slashed + + slashed_validator = test_state.validator_registry[validator_index] + assert not slashed_validator.initiated_exit + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + # lost whistleblower reward + assert test_state.validator_balances[validator_index] < state.validator_balances[validator_index] + + return state, [block], test_state + + +def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): + pre_state = deepcopy(state) + test_deposit_data_leaves = deepcopy(deposit_data_leaves) + + index = len(test_deposit_data_leaves) + pubkey = pubkeys[index] + privkey = privkeys[index] + deposit_data = create_deposit_data(pre_state, pubkey, privkey, spec.MAX_DEPOSIT_AMOUNT) + + item = hash(deposit_data.serialize()) + test_deposit_data_leaves.append(item) + tree = calc_merkle_tree_from_leaves(tuple(test_deposit_data_leaves)) + root = get_merkle_root((tuple(test_deposit_data_leaves))) + proof = list(get_merkle_proof(tree, item_index=index)) + assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root) + + deposit = Deposit( + proof=list(proof), + index=index, + deposit_data=deposit_data, + ) + + pre_state.latest_eth1_data.deposit_root = root + post_state = deepcopy(pre_state) + block = construct_empty_block_for_next_slot(post_state) + block.body.deposits.append(deposit) + + state_transition(post_state, block) + assert len(post_state.validator_registry) == len(state.validator_registry) + 1 + assert len(post_state.validator_balances) == len(state.validator_balances) + 1 + assert post_state.validator_registry[index].pubkey == pubkeys[index] + + return pre_state, [block], post_state + + +def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): + pre_state = deepcopy(state) + test_deposit_data_leaves = deepcopy(deposit_data_leaves) + + validator_index = 0 + amount = spec.MAX_DEPOSIT_AMOUNT // 4 + pubkey = pubkeys[validator_index] + privkey = privkeys[validator_index] + deposit_data = create_deposit_data(pre_state, pubkey, privkey, amount) + + merkle_index = len(test_deposit_data_leaves) + item = hash(deposit_data.serialize()) + test_deposit_data_leaves.append(item) + tree = calc_merkle_tree_from_leaves(tuple(test_deposit_data_leaves)) + root = get_merkle_root((tuple(test_deposit_data_leaves))) + proof = list(get_merkle_proof(tree, item_index=merkle_index)) + assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, merkle_index, root) + + deposit = Deposit( + proof=list(proof), + index=merkle_index, + deposit_data=deposit_data, + ) + + pre_state.latest_eth1_data.deposit_root = root + block = 
construct_empty_block_for_next_slot(pre_state) + block.body.deposits.append(deposit) + + pre_balance = pre_state.validator_balances[validator_index] + post_state = deepcopy(pre_state) + state_transition(post_state, block) + assert len(post_state.validator_registry) == len(pre_state.validator_registry) + assert len(post_state.validator_balances) == len(pre_state.validator_balances) + assert post_state.validator_balances[validator_index] == pre_balance + amount + + return pre_state, [block], post_state + + +def test_attestation(state, pubkeys, privkeys): + test_state = deepcopy(state) + slot = state.slot + shard = state.current_shuffling_start_shard + attestation_data = build_attestation_data(state, slot, shard) + + crosslink_committees = get_crosslink_committees_at_slot(state, slot) + crosslink_committee = [committee for committee, _shard in crosslink_committees if _shard == attestation_data.shard][0] + + committee_size = len(crosslink_committee) + bitfield_length = (committee_size + 7) // 8 + aggregation_bitfield = b'\x01' + b'\x00' * (bitfield_length - 1) + custody_bitfield = b'\x00' * bitfield_length + attestation = Attestation( + aggregation_bitfield=aggregation_bitfield, + data=attestation_data, + custody_bitfield=custody_bitfield, + aggregate_signature=b'\x00'*96, + ) + participants = get_attestation_participants( + test_state, + attestation.data, + attestation.aggregation_bitfield, + ) + assert len(participants) == 1 + + validator_index = participants[0] + pubkey = pubkeys[validator_index] + privkey = privkeys[validator_index] + + message_hash = AttestationDataAndCustodyBit( + data=attestation.data, + custody_bit=0b0, + ).hash_tree_root() + + attestation.aggregation_signature = bls.sign( + message_hash=message_hash, + privkey=privkey, + domain=get_domain( + fork=test_state.fork, + epoch=get_current_epoch(test_state), + domain_type=spec.DOMAIN_ATTESTATION, + ) + ) + + # + # Add to state via block transition + # + attestation_block = construct_empty_block_for_next_slot(test_state) + attestation_block.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + attestation_block.body.attestations.append(attestation) + state_transition(test_state, attestation_block) + + assert len(test_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1 + + # + # Epoch transition should move to previous_epoch_attestations + # + pre_current_epoch_attestations = deepcopy(test_state.current_epoch_attestations) + + epoch_block = construct_empty_block_for_next_slot(test_state) + epoch_block.slot += spec.SLOTS_PER_EPOCH + state_transition(test_state, epoch_block) + + assert len(test_state.current_epoch_attestations) == 0 + assert test_state.previous_epoch_attestations == pre_current_epoch_attestations + + return state, [attestation_block, epoch_block], test_state + + +def test_voluntary_exit(state, pubkeys, privkeys): + pre_state = deepcopy(state) + validator_index = get_active_validator_indices(pre_state.validator_registry, get_current_epoch(pre_state))[-1] + pubkey = pubkeys[validator_index] + + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit + pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + # artificially trigger registry update at next epoch transition + pre_state.validator_registry_update_epoch -= 1 + + post_state = deepcopy(pre_state) + + voluntary_exit = VoluntaryExit( + epoch=get_current_epoch(pre_state), + validator_index=validator_index, + signature=b'\x00'*96, + ) + voluntary_exit.signature = bls.sign( + 
message_hash=signed_root(voluntary_exit), + privkey=privkeys[validator_index], + domain=get_domain( + fork=pre_state.fork, + epoch=get_current_epoch(pre_state), + domain_type=spec.DOMAIN_VOLUNTARY_EXIT, + ) + ) + + # + # Add to state via block transition + # + initiate_exit_block = construct_empty_block_for_next_slot(post_state) + initiate_exit_block.body.voluntary_exits.append(voluntary_exit) + state_transition(post_state, initiate_exit_block) + + assert not pre_state.validator_registry[validator_index].initiated_exit + assert post_state.validator_registry[validator_index].initiated_exit + assert post_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + + # + # Process within epoch transition + # + exit_block = construct_empty_block_for_next_slot(post_state) + exit_block.slot += spec.SLOTS_PER_EPOCH + state_transition(post_state, exit_block) + + assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH + + return pre_state, [initiate_exit_block, exit_block], post_state + + +def test_transfer(state, pubkeys, privkeys): + pre_state = deepcopy(state) + current_epoch = get_current_epoch(pre_state) + sender_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[-1] + recipient_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + transfer_pubkey = pubkeys[-1] + transfer_privkey = privkeys[-1] + amount = pre_state.validator_balances[sender_index] + pre_transfer_recipient_balance = pre_state.validator_balances[recipient_index] + transfer = Transfer( + sender=sender_index, + recipient=recipient_index, + amount=amount, + fee=0, + slot=pre_state.slot + 1, + pubkey=transfer_pubkey, + signature=b'\x00'*96, + ) + transfer.signature = bls.sign( + message_hash=signed_root(transfer), + privkey=transfer_privkey, + domain=get_domain( + fork=pre_state.fork, + epoch=get_current_epoch(pre_state), + domain_type=spec.DOMAIN_TRANSFER, + ) + ) + + # ensure withdrawal_credentials reproducable + pre_state.validator_registry[sender_index].withdrawal_credentials = ( + spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer_pubkey)[1:] + ) + # un-activate so validator can transfer + pre_state.validator_registry[sender_index].activation_epoch = spec.FAR_FUTURE_EPOCH + + post_state = deepcopy(pre_state) + # + # Add to state via block transition + # + block = construct_empty_block_for_next_slot(post_state) + block.body.transfers.append(transfer) + state_transition(post_state, block) + + sender_balance = post_state.validator_balances[sender_index] + recipient_balance = post_state.validator_balances[recipient_index] + assert sender_balance == 0 + assert recipient_balance == pre_transfer_recipient_balance + amount + + return pre_state, [block], post_state + + +def test_ejection(state): + pre_state = deepcopy(state) + + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[-1] + + assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + + # set validator balance to below ejection threshold + pre_state.validator_balances[validator_index] = spec.EJECTION_BALANCE - 1 + + post_state = deepcopy(pre_state) + # + # trigger epoch transition + # + block = construct_empty_block_for_next_slot(post_state) + block.slot += spec.SLOTS_PER_EPOCH + state_transition(post_state, block) + + assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH + + return pre_state, [block], post_state 
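# --- Illustrative sketch (hypothetical helper, not part of the patch above) ---
# Each sanity test returns a (pre_state, blocks, post_state) triple so that a
# generator such as `generate_from_test` (commented out in the imports) can dump
# it to JSON/YAML fixtures. A minimal sketch of replaying such a triple for
# cross-checking is given below; `replay_test_case` is an assumed name, and only
# `deepcopy`, `state_transition`, and the SSZ objects' `hash_tree_root()` method
# already used in this module are relied upon.
def replay_test_case(pre_state, blocks, expected_post_state):
    # Re-run the collected blocks through the state transition on a copy of the
    # pre-state and compare the result to the recorded post-state by root.
    state = deepcopy(pre_state)
    for block in blocks:
        state_transition(state, block)
    assert state.hash_tree_root() == expected_post_state.hash_tree_root()
    return state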
+ + +def test_historical_batch(state): + pre_state = deepcopy(state) + pre_state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (pre_state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1 + + post_state = deepcopy(pre_state) + + block = construct_empty_block_for_next_slot(post_state) + + state_transition(post_state, block) + + assert post_state.slot == block.slot + assert get_current_epoch(post_state) % (spec.SLOTS_PER_HISTORICAL_ROOT // spec.SLOTS_PER_EPOCH) == 0 + assert len(post_state.historical_roots) == len(pre_state.historical_roots) + 1 + + return pre_state, [block], post_state + + +def sanity_tests(num_validators=100, config=None): + print(f"Buidling state with {num_validators} validators...") + if config: + overwrite_spec_config(config) + genesis_state = create_genesis_state(num_validators=num_validators) + print("done!") + print() + + test_cases = [] + + print("Running some sanity check tests...\n") + test_slot_transition(genesis_state) + print("Passed slot transition test\n") + test_cases.append( + generate_from_test(test_empty_block_transition, genesis_state, config=config, fields=['slot']) + ) + print("Passed empty block transition test\n") + test_cases.append( + generate_from_test(test_skipped_slots, genesis_state, config=config, fields=['slot', 'latest_block_roots']) + ) + print("Passed skipped slot test\n") + test_cases.append( + generate_from_test(test_empty_epoch_transition, genesis_state, config=config, fields=['slot', 'latest_block_roots']) + ) + print("Passed empty epoch transition test\n") + test_cases.append( + generate_from_test(test_empty_epoch_transition_not_finalizing, genesis_state, config=config, fields=['slot', 'finalized_epoch']) + ) + print("Passed non-finalizing epoch test\n") + test_cases.append( + generate_from_test(test_proposer_slashing, genesis_state, config=config, fields=['validator_registry', 'validator_balances']) + ) + print("Passed proposer slashing test\n") + test_cases.append( + generate_from_test(test_attestation, genesis_state, config=config, fields=['previous_epoch_attestations', 'current_epoch_attestations']) + ) + print("Passed attestation test\n") + test_cases.append( + generate_from_test(test_deposit_in_block, genesis_state, config=config, fields=['validator_registry', 'validator_balances']) + ) + print("Passed deposit test\n") + test_cases.append( + generate_from_test(test_deposit_top_up, genesis_state, config=config, fields=['validator_registry', 'validator_balances']) + ) + print("Passed deposit top up test\n") + test_cases.append( + generate_from_test(test_voluntary_exit, genesis_state, config=config, fields=['validator_registry']) + ) + print("Passed voluntary exit test\n") + test_cases.append( + generate_from_test(test_transfer, genesis_state, config=config, fields=['validator_balances']) + ) + print("Passed transfer test\n") + test_cases.append( + generate_from_test(test_ejection, genesis_state, config=config, fields=['validator_registry']) + ) + print("Passed ejection test\n") + test_cases.append( + generate_from_test(test_historical_batch, genesis_state, config=config, fields=['historical_roots']) + ) + print("Passed historical batch test\n") + print("done!") + + return test_cases + + +if __name__ == "__main__": + config = { + "SHARD_COUNT": 8, + "MIN_ATTESTATION_INCLUSION_DELAY": 2, + "TARGET_COMMITTEE_SIZE": 4, + "SLOTS_PER_EPOCH": 8, + "GENESIS_EPOCH": spec.GENESIS_SLOT // 8, + "SLOTS_PER_HISTORICAL_ROOT": 64, + "LATEST_RANDAO_MIXES_LENGTH": 64, + "LATEST_ACTIVE_INDEX_ROOTS_LENGTH": 64, + "LATEST_SLASHED_EXIT_LENGTH": 64, + } + + 
test_cases = sanity_tests(32, config) + # uncomment below to run/generate against the default config + # test_cases = sanity_tests(100) + + test = {} + metadata = {} + metadata['title'] = "Sanity tests" + metadata['summary'] = "Basic sanity checks from phase 0 spec pythonization. All tests are run with `verify_signatures` as set to False." + metadata['test_suite'] = "beacon_state" + metadata['fork'] = "tchaikovsky" + metadata['version'] = "v0.5.0" + test['metadata'] = metadata + test['test_cases'] = test_cases + + if '--output-json' in sys.argv: + os.makedirs('output', exist_ok=True) + with open("output/sanity_check_tests.json", "w+") as outfile: + dump_json(test, outfile) + if '--output-yaml' in sys.argv: + os.makedirs('output', exist_ok=True) + with open("output/sanity_check_tests.yaml", "w+") as outfile: + dump_yaml(test, outfile) From f41caa713b167d986acfdbeb61fb9f1bb8d1ce81 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 18 Mar 2019 13:06:45 -0600 Subject: [PATCH 053/133] add circleci config --- .circleci/config.yml | 46 ++++++++++++++++++++++++++++++++++++++++++++ .gitignore | 2 ++ Makefile | 4 +--- 3 files changed, 49 insertions(+), 3 deletions(-) create mode 100644 .circleci/config.yml diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 000000000..98d2367c9 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,46 @@ +# Python CircleCI 2.0 configuration file +version: 2 +jobs: + build: + docker: + - image: circleci/python:3.6 + working_directory: ~/repo + + steps: + - checkout + # Download and cache dependencies + - restore_cache: + keys: + - v1-dependencies-{{ checksum "requirements.txt" }} + # fallback to using the latest cache if no exact match is found + - v1-dependencies- + + - run: + name: install dependencies + command: | + python3 -m venv venv + . venv/bin/activate + pip install -r requirements.txt + - run: + name: build phase0 spec + command: make build/phase0 + + - save_cache: + paths: + - ./venv + key: v1-dependencies-{{ checksum "requirements.txt" }} + + # run tests! + # this example uses Django's built-in test-runner + # other common Python testing frameworks include pytest and nose + # https://pytest.org + # https://nose.readthedocs.io + - run: + name: run tests + command: | + . 
venv/bin/activate + pytest tests + + - store_artifacts: + path: test-reports + destination: test-reports \ No newline at end of file diff --git a/.gitignore b/.gitignore index 5e19cd2a5..f9a966cea 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,5 @@ /__pycache__ /venv /.pytest_cache + +build/phase0/spec.py diff --git a/Makefile b/Makefile index 745f8f901..8be65fd44 100644 --- a/Makefile +++ b/Makefile @@ -2,10 +2,8 @@ SPEC_DIR = ./specs SCRIPT_DIR = ./scripts BUILD_DIR = ./build -.PHONY: clean all +.PHONY: clean all $(BUILD_DIR)/phase0 $(BUILD_DIR)/phase0: - mkdir -p $@ python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@/spec.py - touch $(BUILD_DIR)/__init__.py $(BUILD_DIR)/phase0/__init__.py From 003961362887a2e05dd7af55840da57aedfa41dd Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 18 Mar 2019 19:08:41 +0000 Subject: [PATCH 054/133] Simplify exit_validator Minor cleanup --- specs/core/0_beacon-chain.md | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a631bf2fc..434f2b680 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1377,17 +1377,14 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: ```python def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: """ - Exit the validator of the given ``index``. + Exit the validator with the given ``index``. Note that this function mutates ``state``. """ validator = state.validator_registry[index] - delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) - # The following updates only occur if not previous exited - if validator.exit_epoch <= delayed_activation_exit_epoch: - return - else: - validator.exit_epoch = delayed_activation_exit_epoch + # Update validator exit epoch if not previously exited + if validator.exit_epoch == FAR_FUTURE_EPOCH: + validator.exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) ``` #### `slash_validator` From 6715a0d4cc0c7b4842dac7cd88264de0f46ccde6 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 18 Mar 2019 14:14:26 -0600 Subject: [PATCH 055/133] reconfigure build a bit --- .circleci/config.yml | 7 +- .gitignore | 2 +- Makefile | 6 ++ build/phase0/spec.py | 4 +- build/phase0/state_transition.py | 4 +- build/utils/monkey_patches.py | 29 ----- scripts/phase0/build_spec.py | 4 +- tests/phase0/conftest.py | 2 +- tests/phase0/test_sanity.py | 4 +- {build => utils}/__init__.py | 0 {build/utils => utils/phase0}/__init__.py | 0 {build/utils => utils/phase0}/bls_stub.py | 0 .../utils => utils/phase0}/hash_function.py | 0 .../utils => utils/phase0}/merkle_minimal.py | 0 {build/utils => utils/phase0}/minimal_ssz.py | 0 utils/phase0/state_transition.py | 100 ++++++++++++++++++ 16 files changed, 117 insertions(+), 45 deletions(-) delete mode 100644 build/utils/monkey_patches.py rename {build => utils}/__init__.py (100%) rename {build/utils => utils/phase0}/__init__.py (100%) rename {build/utils => utils/phase0}/bls_stub.py (100%) rename {build/utils => utils/phase0}/hash_function.py (100%) rename {build/utils => utils/phase0}/merkle_minimal.py (100%) rename {build/utils => utils/phase0}/minimal_ssz.py (100%) create mode 100644 utils/phase0/state_transition.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 98d2367c9..02871530e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -30,11 +30,6 @@ jobs: - ./venv key: v1-dependencies-{{ checksum 
"requirements.txt" }} - # run tests! - # this example uses Django's built-in test-runner - # other common Python testing frameworks include pytest and nose - # https://pytest.org - # https://nose.readthedocs.io - run: name: run tests command: | @@ -43,4 +38,4 @@ jobs: - store_artifacts: path: test-reports - destination: test-reports \ No newline at end of file + destination: test-reports diff --git a/.gitignore b/.gitignore index f9a966cea..dfb38d170 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,4 @@ /venv /.pytest_cache -build/phase0/spec.py +build/ diff --git a/Makefile b/Makefile index 8be65fd44..593ea8bf4 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,15 @@ SPEC_DIR = ./specs SCRIPT_DIR = ./scripts BUILD_DIR = ./build +UTILS_DIR = ./utils .PHONY: clean all $(BUILD_DIR)/phase0 $(BUILD_DIR)/phase0: + mkdir -p $@ python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@/spec.py + mkdir -p $@/utils + cp $(UTILS_DIR)/phase0/* $@/utils + cp $(UTILS_DIR)/phase0/state_transition.py $@ + touch $@/__init__.py $@/utils/__init__.py diff --git a/build/phase0/spec.py b/build/phase0/spec.py index 8c05b1208..3fd82c33f 100644 --- a/build/phase0/spec.py +++ b/build/phase0/spec.py @@ -1,5 +1,5 @@ -from build.utils.minimal_ssz import * -from build.utils.bls_stub import * +from build.phase0.utils.minimal_ssz import * +from build.phase0.utils.bls_stub import * def int_to_bytes1(x): return x.to_bytes(1, 'little') def int_to_bytes2(x): return x.to_bytes(2, 'little') def int_to_bytes3(x): return x.to_bytes(3, 'little') diff --git a/build/phase0/state_transition.py b/build/phase0/state_transition.py index 2bd33f6d6..170f647ab 100644 --- a/build/phase0/state_transition.py +++ b/build/phase0/state_transition.py @@ -1,4 +1,4 @@ -import build.phase0.spec as spec +from . 
import spec from typing import ( @@ -9,7 +9,7 @@ from typing import ( Tuple, ) -from build.phase0.spec import ( +from .spec import ( BeaconState, BeaconBlock, ) diff --git a/build/utils/monkey_patches.py b/build/utils/monkey_patches.py deleted file mode 100644 index 8a35b8f27..000000000 --- a/build/utils/monkey_patches.py +++ /dev/null @@ -1,29 +0,0 @@ -# Monkey patch validator shuffling cache -_get_shuffling = get_shuffling -shuffling_cache = {} -def get_shuffling(seed: Bytes32, - validators: List[Validator], - epoch: Epoch) -> List[List[ValidatorIndex]]: - - param_hash = (seed, hash_tree_root(validators, [Validator]), epoch) - - if param_hash in shuffling_cache: - # print("Cache hit, epoch={0}".format(epoch)) - return shuffling_cache[param_hash] - else: - # print("Cache miss, epoch={0}".format(epoch)) - ret = _get_shuffling(seed, validators, epoch) - shuffling_cache[param_hash] = ret - return ret - - -# Monkey patch hash cache -_hash = hash -hash_cache = {} -def hash(x): - if x in hash_cache: - return hash_cache[x] - else: - ret = _hash(x) - hash_cache[x] = ret - return ret diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index 8b5941b62..eb4f580bd 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -5,8 +5,8 @@ import function_puller def build_spec(sourcefile, outfile): code_lines = [] - code_lines.append("from build.utils.minimal_ssz import *") - code_lines.append("from build.utils.bls_stub import *") + code_lines.append("from build.phase0.utils.minimal_ssz import *") + code_lines.append("from build.phase0.utils.bls_stub import *") for i in (1, 2, 3, 4, 8, 32, 48, 96): code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i)) code_lines.append("SLOTS_PER_EPOCH = 64") # stub, will get overwritten by real var diff --git a/tests/phase0/conftest.py b/tests/phase0/conftest.py index 7e2800afd..7d372f164 100644 --- a/tests/phase0/conftest.py +++ b/tests/phase0/conftest.py @@ -4,7 +4,7 @@ from py_ecc import bls from build.phase0 import spec -from build.utils.merkle_minimal import ( +from build.phase0.utils.merkle_minimal import ( calc_merkle_tree_from_leaves, get_merkle_proof, get_merkle_root, diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 573c3ba21..0e04df5dd 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -6,7 +6,7 @@ from copy import deepcopy from py_ecc import bls import build.phase0.spec as spec -from build.utils.minimal_ssz import signed_root +from build.phase0.utils.minimal_ssz import signed_root from build.phase0.spec import ( # SSZ Attestation, @@ -43,7 +43,7 @@ from build.phase0.spec import ( from build.phase0.state_transition import ( state_transition, ) -from build.utils.merkle_minimal import ( +from build.phase0.utils.merkle_minimal import ( calc_merkle_tree_from_leaves, get_merkle_proof, get_merkle_root, diff --git a/build/__init__.py b/utils/__init__.py similarity index 100% rename from build/__init__.py rename to utils/__init__.py diff --git a/build/utils/__init__.py b/utils/phase0/__init__.py similarity index 100% rename from build/utils/__init__.py rename to utils/phase0/__init__.py diff --git a/build/utils/bls_stub.py b/utils/phase0/bls_stub.py similarity index 100% rename from build/utils/bls_stub.py rename to utils/phase0/bls_stub.py diff --git a/build/utils/hash_function.py b/utils/phase0/hash_function.py similarity index 100% rename from build/utils/hash_function.py rename to utils/phase0/hash_function.py diff --git 
a/build/utils/merkle_minimal.py b/utils/phase0/merkle_minimal.py similarity index 100% rename from build/utils/merkle_minimal.py rename to utils/phase0/merkle_minimal.py diff --git a/build/utils/minimal_ssz.py b/utils/phase0/minimal_ssz.py similarity index 100% rename from build/utils/minimal_ssz.py rename to utils/phase0/minimal_ssz.py diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py new file mode 100644 index 000000000..170f647ab --- /dev/null +++ b/utils/phase0/state_transition.py @@ -0,0 +1,100 @@ +from . import spec + + +from typing import ( + Any, + Callable, + List, + NewType, + Tuple, +) + +from .spec import ( + BeaconState, + BeaconBlock, +) + + +def process_transaction_type(state: BeaconState, + transactions: List[Any], + max_transactions: int, + tx_fn: Callable[[BeaconState, Any], None]) -> None: + assert len(transactions) <= max_transactions + for transaction in transactions: + tx_fn(state, transaction) + + +def process_transactions(state: BeaconState, block: BeaconBlock) -> None: + process_transaction_type( + state, + block.body.proposer_slashings, + spec.MAX_PROPOSER_SLASHINGS, + spec.process_proposer_slashing, + ) + process_transaction_type( + state, + block.body.attester_slashings, + spec.MAX_ATTESTER_SLASHINGS, + spec.process_attester_slashing, + ) + process_transaction_type( + state, + block.body.attestations, + spec.MAX_ATTESTATIONS, + spec.process_attestation, + ) + process_transaction_type( + state, + block.body.deposits, + spec.MAX_DEPOSITS, + spec.process_deposit, + ) + process_transaction_type( + state, + block.body.voluntary_exits, + spec.MAX_VOLUNTARY_EXITS, + spec.process_voluntary_exit, + ) + assert len(block.body.transfers) == len(set(block.body.transfers)) + process_transaction_type( + state, + block.body.transfers, + spec.MAX_TRANSFERS, + spec.process_transfer, + ) + + +def process_block(state: BeaconState, + block: BeaconBlock, + verify_state_root: bool=False) -> None: + spec.process_block_header(state, block) + spec.process_randao(state, block) + spec.process_eth1_data(state, block) + + process_transactions(state, block) + if verify_state_root: + spec.verify_block_state_root(state, block) + + +def process_epoch_transition(state: BeaconState) -> None: + spec.update_justification_and_finalization(state) + spec.process_crosslinks(state) + spec.maybe_reset_eth1_period(state) + spec.apply_rewards(state) + spec.process_ejections(state) + spec.update_registry_and_shuffling_data(state) + spec.process_slashings(state) + spec.process_exit_queue(state) + spec.finish_epoch_update(state) + + +def state_transition(state: BeaconState, + block: BeaconBlock, + verify_state_root: bool=False) -> BeaconState: + while state.slot < block.slot: + spec.cache_state(state) + if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0: + process_epoch_transition(state) + spec.advance_slot(state) + if block.slot == state.slot: + process_block(state, block) From d9ac06e8edb814ba12128ae1fe0164496320a77c Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 18 Mar 2019 14:16:04 -0600 Subject: [PATCH 056/133] remove build from git --- build/phase0/__init__.py | 0 build/phase0/spec.py | 1620 ------------------------------ build/phase0/state_transition.py | 100 -- 3 files changed, 1720 deletions(-) delete mode 100644 build/phase0/__init__.py delete mode 100644 build/phase0/spec.py delete mode 100644 build/phase0/state_transition.py diff --git a/build/phase0/__init__.py b/build/phase0/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/build/phase0/spec.py b/build/phase0/spec.py deleted file mode 100644 index 3fd82c33f..000000000 --- a/build/phase0/spec.py +++ /dev/null @@ -1,1620 +0,0 @@ -from build.phase0.utils.minimal_ssz import * -from build.phase0.utils.bls_stub import * -def int_to_bytes1(x): return x.to_bytes(1, 'little') -def int_to_bytes2(x): return x.to_bytes(2, 'little') -def int_to_bytes3(x): return x.to_bytes(3, 'little') -def int_to_bytes4(x): return x.to_bytes(4, 'little') -def int_to_bytes8(x): return x.to_bytes(8, 'little') -def int_to_bytes32(x): return x.to_bytes(32, 'little') -def int_to_bytes48(x): return x.to_bytes(48, 'little') -def int_to_bytes96(x): return x.to_bytes(96, 'little') -SLOTS_PER_EPOCH = 64 -def slot_to_epoch(x): return x // SLOTS_PER_EPOCH - -from typing import ( - Any, - Callable, - List, - NewType, - Tuple, -) - - -Slot = NewType('Slot', int) # uint64 -Epoch = NewType('Epoch', int) # uint64 -Shard = NewType('Shard', int) # uint64 -ValidatorIndex = NewType('ValidatorIndex', int) # uint64 -Gwei = NewType('Gwei', int) # uint64 -Bytes32 = NewType('Bytes32', bytes) # bytes32 -BLSPubkey = NewType('BLSPubkey', bytes) # bytes48 -BLSSignature = NewType('BLSSignature', bytes) # bytes96 -Any = None -Store = None - -SHARD_COUNT = 2**10 -TARGET_COMMITTEE_SIZE = 2**7 -MAX_BALANCE_CHURN_QUOTIENT = 2**5 -MAX_INDICES_PER_SLASHABLE_VOTE = 2**12 -MAX_EXIT_DEQUEUES_PER_EPOCH = 2**2 -SHUFFLE_ROUND_COUNT = 90 -DEPOSIT_CONTRACT_ADDRESS = 0x1234567890123567890123456789012357890 -DEPOSIT_CONTRACT_TREE_DEPTH = 2**5 -MIN_DEPOSIT_AMOUNT = 2**0 * 10**9 -MAX_DEPOSIT_AMOUNT = 2**5 * 10**9 -FORK_CHOICE_BALANCE_INCREMENT = 2**0 * 10**9 -EJECTION_BALANCE = 2**4 * 10**9 -GENESIS_FORK_VERSION = 0 -GENESIS_SLOT = 2**32 -GENESIS_EPOCH = slot_to_epoch(GENESIS_SLOT) -GENESIS_START_SHARD = 0 -FAR_FUTURE_EPOCH = 2**64 - 1 -ZERO_HASH = int_to_bytes32(0) -EMPTY_SIGNATURE = int_to_bytes96(0) -BLS_WITHDRAWAL_PREFIX_BYTE = int_to_bytes1(0) -SECONDS_PER_SLOT = 6 -MIN_ATTESTATION_INCLUSION_DELAY = 2**2 -SLOTS_PER_EPOCH = 2**6 -MIN_SEED_LOOKAHEAD = 2**0 -ACTIVATION_EXIT_DELAY = 2**2 -EPOCHS_PER_ETH1_VOTING_PERIOD = 2**4 -SLOTS_PER_HISTORICAL_ROOT = 2**13 -MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8 -PERSISTENT_COMMITTEE_PERIOD = 2**11 -LATEST_RANDAO_MIXES_LENGTH = 2**13 -LATEST_ACTIVE_INDEX_ROOTS_LENGTH = 2**13 -LATEST_SLASHED_EXIT_LENGTH = 2**13 -BASE_REWARD_QUOTIENT = 2**5 -WHISTLEBLOWER_REWARD_QUOTIENT = 2**9 -ATTESTATION_INCLUSION_REWARD_QUOTIENT = 2**3 -INACTIVITY_PENALTY_QUOTIENT = 2**24 -MIN_PENALTY_QUOTIENT = 2**5 -MAX_PROPOSER_SLASHINGS = 2**4 -MAX_ATTESTER_SLASHINGS = 2**0 -MAX_ATTESTATIONS = 2**7 -MAX_DEPOSITS = 2**4 -MAX_VOLUNTARY_EXITS = 2**4 -MAX_TRANSFERS = 2**4 -DOMAIN_BEACON_BLOCK = 0 -DOMAIN_RANDAO = 1 -DOMAIN_ATTESTATION = 2 -DOMAIN_DEPOSIT = 3 -DOMAIN_VOLUNTARY_EXIT = 4 -DOMAIN_TRANSFER = 5 -Fork = SSZType({ - # Previous fork version - 'previous_version': 'bytes4', - # Current fork version - 'current_version': 'bytes4', - # Fork epoch number - 'epoch': 'uint64', -}) -Crosslink = SSZType({ - # Epoch number - 'epoch': 'uint64', - # Shard data since the previous crosslink - 'crosslink_data_root': 'bytes32', -}) -Eth1Data = SSZType({ - # Root of the deposit tree - 'deposit_root': 'bytes32', - # Block hash - 'block_hash': 'bytes32', -}) -Eth1DataVote = SSZType({ - # Data being voted for - 'eth1_data': Eth1Data, - # Vote count - 'vote_count': 'uint64', -}) -AttestationData = SSZType({ - # LMD GHOST vote - 'slot': 'uint64', - 'beacon_block_root': 'bytes32', - - # FFG vote - 'source_epoch': 'uint64', - 'source_root': 
'bytes32', - 'target_root': 'bytes32', - - # Crosslink vote - 'shard': 'uint64', - 'previous_crosslink': Crosslink, - 'crosslink_data_root': 'bytes32', -}) -AttestationDataAndCustodyBit = SSZType({ - # Attestation data - 'data': AttestationData, - # Custody bit - 'custody_bit': 'bool', -}) -SlashableAttestation = SSZType({ - # Validator indices - 'validator_indices': ['uint64'], - # Attestation data - 'data': AttestationData, - # Custody bitfield - 'custody_bitfield': 'bytes', - # Aggregate signature - 'aggregate_signature': 'bytes96', -}) -DepositInput = SSZType({ - # BLS pubkey - 'pubkey': 'bytes48', - # Withdrawal credentials - 'withdrawal_credentials': 'bytes32', - # A BLS signature of this `DepositInput` - 'proof_of_possession': 'bytes96', -}) -DepositData = SSZType({ - # Amount in Gwei - 'amount': 'uint64', - # Timestamp from deposit contract - 'timestamp': 'uint64', - # Deposit input - 'deposit_input': DepositInput, -}) -BeaconBlockHeader = SSZType({ - 'slot': 'uint64', - 'previous_block_root': 'bytes32', - 'state_root': 'bytes32', - 'block_body_root': 'bytes32', - 'signature': 'bytes96', -}) -Validator = SSZType({ - # BLS public key - 'pubkey': 'bytes48', - # Withdrawal credentials - 'withdrawal_credentials': 'bytes32', - # Epoch when validator activated - 'activation_epoch': 'uint64', - # Epoch when validator exited - 'exit_epoch': 'uint64', - # Epoch when validator is eligible to withdraw - 'withdrawable_epoch': 'uint64', - # Did the validator initiate an exit - 'initiated_exit': 'bool', - # Was the validator slashed - 'slashed': 'bool', -}) -PendingAttestation = SSZType({ - # Attester aggregation bitfield - 'aggregation_bitfield': 'bytes', - # Attestation data - 'data': AttestationData, - # Custody bitfield - 'custody_bitfield': 'bytes', - # Inclusion slot - 'inclusion_slot': 'uint64', -}) -HistoricalBatch = SSZType({ - # Block roots - 'block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], - # State roots - 'state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], -}) -ProposerSlashing = SSZType({ - # Proposer index - 'proposer_index': 'uint64', - # First block header - 'header_1': BeaconBlockHeader, - # Second block header - 'header_2': BeaconBlockHeader, -}) -AttesterSlashing = SSZType({ - # First slashable attestation - 'slashable_attestation_1': SlashableAttestation, - # Second slashable attestation - 'slashable_attestation_2': SlashableAttestation, -}) -Attestation = SSZType({ - # Attester aggregation bitfield - 'aggregation_bitfield': 'bytes', - # Attestation data - 'data': AttestationData, - # Custody bitfield - 'custody_bitfield': 'bytes', - # BLS aggregate signature - 'aggregate_signature': 'bytes96', -}) -Deposit = SSZType({ - # Branch in the deposit tree - 'proof': ['bytes32', DEPOSIT_CONTRACT_TREE_DEPTH], - # Index in the deposit tree - 'index': 'uint64', - # Data - 'deposit_data': DepositData, -}) -VoluntaryExit = SSZType({ - # Minimum epoch for processing exit - 'epoch': 'uint64', - # Index of the exiting validator - 'validator_index': 'uint64', - # Validator signature - 'signature': 'bytes96', -}) -Transfer = SSZType({ - # Sender index - 'sender': 'uint64', - # Recipient index - 'recipient': 'uint64', - # Amount in Gwei - 'amount': 'uint64', - # Fee in Gwei for block proposer - 'fee': 'uint64', - # Inclusion slot - 'slot': 'uint64', - # Sender withdrawal pubkey - 'pubkey': 'bytes48', - # Sender signature - 'signature': 'bytes96', -}) -BeaconBlockBody = SSZType({ - 'randao_reveal': 'bytes96', - 'eth1_data': Eth1Data, - 'proposer_slashings': [ProposerSlashing], - 
'attester_slashings': [AttesterSlashing], - 'attestations': [Attestation], - 'deposits': [Deposit], - 'voluntary_exits': [VoluntaryExit], - 'transfers': [Transfer], -}) -BeaconBlock = SSZType({ - # Header - 'slot': 'uint64', - 'previous_block_root': 'bytes32', - 'state_root': 'bytes32', - 'body': BeaconBlockBody, - 'signature': 'bytes96', -}) -BeaconState = SSZType({ - # Misc - 'slot': 'uint64', - 'genesis_time': 'uint64', - 'fork': Fork, # For versioning hard forks - - # Validator registry - 'validator_registry': [Validator], - 'validator_balances': ['uint64'], - 'validator_registry_update_epoch': 'uint64', - - # Randomness and committees - 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], - 'previous_shuffling_start_shard': 'uint64', - 'current_shuffling_start_shard': 'uint64', - 'previous_shuffling_epoch': 'uint64', - 'current_shuffling_epoch': 'uint64', - 'previous_shuffling_seed': 'bytes32', - 'current_shuffling_seed': 'bytes32', - - # Finality - 'previous_epoch_attestations': [PendingAttestation], - 'current_epoch_attestations': [PendingAttestation], - 'previous_justified_epoch': 'uint64', - 'current_justified_epoch': 'uint64', - 'previous_justified_root': 'bytes32', - 'current_justified_root': 'bytes32', - 'justification_bitfield': 'uint64', - 'finalized_epoch': 'uint64', - 'finalized_root': 'bytes32', - - # Recent state - 'latest_crosslinks': [Crosslink, SHARD_COUNT], - 'latest_block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], - 'latest_state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], - 'latest_active_index_roots': ['bytes32', LATEST_ACTIVE_INDEX_ROOTS_LENGTH], - 'latest_slashed_balances': ['uint64', LATEST_SLASHED_EXIT_LENGTH], # Balances slashed at every withdrawal period - 'latest_block_header': BeaconBlockHeader, # `latest_block_header.state_root == ZERO_HASH` temporarily - 'historical_roots': ['bytes32'], - - # Ethereum 1.0 chain data - 'latest_eth1_data': Eth1Data, - 'eth1_data_votes': [Eth1DataVote], - 'deposit_index': 'uint64' -}) -def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32: - return bytes(a ^ b for a, b in zip(bytes1, bytes2)) -def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader: - """ - Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``. - """ - return BeaconBlockHeader( - slot=block.slot, - previous_block_root=block.previous_block_root, - state_root=ZERO_HASH, - block_body_root=hash_tree_root(block.body), - signature=block.signature, - ) -def slot_to_epoch(slot: Slot) -> Epoch: - """ - Return the epoch number of the given ``slot``. - """ - return slot // SLOTS_PER_EPOCH -def get_previous_epoch(state: BeaconState) -> Epoch: - """` - Return the previous epoch of the given ``state``. - """ - return get_current_epoch(state) - 1 -def get_current_epoch(state: BeaconState) -> Epoch: - """ - Return the current epoch of the given ``state``. - """ - return slot_to_epoch(state.slot) -def get_epoch_start_slot(epoch: Epoch) -> Slot: - """ - Return the starting slot of the given ``epoch``. - """ - return epoch * SLOTS_PER_EPOCH -def is_active_validator(validator: Validator, epoch: Epoch) -> bool: - """ - Check if ``validator`` is active. - """ - return validator.activation_epoch <= epoch < validator.exit_epoch -def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> List[ValidatorIndex]: - """ - Get indices of active validators from ``validators``. 
- """ - return [i for i, v in enumerate(validators) if is_active_validator(v, epoch)] -def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: - """ - Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. - - Utilizes 'swap or not' shuffling found in - https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf - See the 'generalized domain' algorithm on page 3. - """ - assert index < list_size - assert list_size <= 2**40 - - for round in range(SHUFFLE_ROUND_COUNT): - pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size - flip = (pivot - index) % list_size - position = max(index, flip) - source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position // 256)) - byte = source[(position % 256) // 8] - bit = (byte >> (position % 8)) % 2 - index = flip if bit else index - - return index -def split(values: List[Any], split_count: int) -> List[List[Any]]: - """ - Splits ``values`` into ``split_count`` pieces. - """ - list_length = len(values) - return [ - values[(list_length * i // split_count): (list_length * (i + 1) // split_count)] - for i in range(split_count) - ] -def get_epoch_committee_count(active_validator_count: int) -> int: - """ - Return the number of committees in one epoch. - """ - return max( - 1, - min( - SHARD_COUNT // SLOTS_PER_EPOCH, - active_validator_count // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, - ) - ) * SLOTS_PER_EPOCH -def get_shuffling(seed: Bytes32, - validators: List[Validator], - epoch: Epoch) -> List[List[ValidatorIndex]]: - """ - Shuffle active validators and split into crosslink committees. - Return a list of committees (each a list of validator indices). - """ - # Shuffle active validator indices - active_validator_indices = get_active_validator_indices(validators, epoch) - length = len(active_validator_indices) - shuffled_indices = [active_validator_indices[get_permuted_index(i, length, seed)] for i in range(length)] - - # Split the shuffled active validator indices - return split(shuffled_indices, get_epoch_committee_count(length)) -def get_previous_epoch_committee_count(state: BeaconState) -> int: - """ - Return the number of committees in the previous epoch of the given ``state``. - """ - previous_active_validators = get_active_validator_indices( - state.validator_registry, - state.previous_shuffling_epoch, - ) - return get_epoch_committee_count(len(previous_active_validators)) -def get_current_epoch_committee_count(state: BeaconState) -> int: - """ - Return the number of committees in the current epoch of the given ``state``. - """ - current_active_validators = get_active_validator_indices( - state.validator_registry, - state.current_shuffling_epoch, - ) - return get_epoch_committee_count(len(current_active_validators)) -def get_next_epoch_committee_count(state: BeaconState) -> int: - """ - Return the number of committees in the next epoch of the given ``state``. - """ - next_active_validators = get_active_validator_indices( - state.validator_registry, - get_current_epoch(state) + 1, - ) - return get_epoch_committee_count(len(next_active_validators)) -def get_crosslink_committees_at_slot(state: BeaconState, - slot: Slot, - registry_change: bool=False) -> List[Tuple[List[ValidatorIndex], Shard]]: - """ - Return the list of ``(committee, shard)`` tuples for the ``slot``. 
- - Note: There are two possible shufflings for crosslink committees for a - ``slot`` in the next epoch -- with and without a `registry_change` - """ - epoch = slot_to_epoch(slot) - current_epoch = get_current_epoch(state) - previous_epoch = get_previous_epoch(state) - next_epoch = current_epoch + 1 - - assert previous_epoch <= epoch <= next_epoch - - if epoch == current_epoch: - committees_per_epoch = get_current_epoch_committee_count(state) - seed = state.current_shuffling_seed - shuffling_epoch = state.current_shuffling_epoch - shuffling_start_shard = state.current_shuffling_start_shard - elif epoch == previous_epoch: - committees_per_epoch = get_previous_epoch_committee_count(state) - seed = state.previous_shuffling_seed - shuffling_epoch = state.previous_shuffling_epoch - shuffling_start_shard = state.previous_shuffling_start_shard - elif epoch == next_epoch: - epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch - if registry_change: - committees_per_epoch = get_next_epoch_committee_count(state) - seed = generate_seed(state, next_epoch) - shuffling_epoch = next_epoch - current_committees_per_epoch = get_current_epoch_committee_count(state) - shuffling_start_shard = (state.current_shuffling_start_shard + current_committees_per_epoch) % SHARD_COUNT - elif epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update): - committees_per_epoch = get_next_epoch_committee_count(state) - seed = generate_seed(state, next_epoch) - shuffling_epoch = next_epoch - shuffling_start_shard = state.current_shuffling_start_shard - else: - committees_per_epoch = get_current_epoch_committee_count(state) - seed = state.current_shuffling_seed - shuffling_epoch = state.current_shuffling_epoch - shuffling_start_shard = state.current_shuffling_start_shard - - shuffling = get_shuffling( - seed, - state.validator_registry, - shuffling_epoch, - ) - offset = slot % SLOTS_PER_EPOCH - committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH - slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % SHARD_COUNT - - return [ - ( - shuffling[committees_per_slot * offset + i], - (slot_start_shard + i) % SHARD_COUNT, - ) - for i in range(committees_per_slot) - ] -def get_block_root(state: BeaconState, - slot: Slot) -> Bytes32: - """ - Return the block root at a recent ``slot``. - """ - assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT - return state.latest_block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] -def get_state_root(state: BeaconState, - slot: Slot) -> Bytes32: - """ - Return the state root at a recent ``slot``. - """ - assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT - return state.latest_state_roots[slot % SLOTS_PER_HISTORICAL_ROOT] -def get_randao_mix(state: BeaconState, - epoch: Epoch) -> Bytes32: - """ - Return the randao mix at a recent ``epoch``. - """ - assert get_current_epoch(state) - LATEST_RANDAO_MIXES_LENGTH < epoch <= get_current_epoch(state) - return state.latest_randao_mixes[epoch % LATEST_RANDAO_MIXES_LENGTH] -def get_active_index_root(state: BeaconState, - epoch: Epoch) -> Bytes32: - """ - Return the index root at a recent ``epoch``. - """ - assert get_current_epoch(state) - LATEST_ACTIVE_INDEX_ROOTS_LENGTH + ACTIVATION_EXIT_DELAY < epoch <= get_current_epoch(state) + ACTIVATION_EXIT_DELAY - return state.latest_active_index_roots[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH] -def generate_seed(state: BeaconState, - epoch: Epoch) -> Bytes32: - """ - Generate a seed for the given ``epoch``. 
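The accessors above (`get_block_root`, `get_state_root`, `get_randao_mix`) all read from fixed-length circular buffers, which is why each one asserts that the requested slot or epoch is still inside the retention window. A small sketch of that pattern, assuming a buffer length of 8 for readability:

```python
# Circular-buffer sketch of the historical-root accessors.
SLOTS_PER_HISTORICAL_ROOT = 8  # assumed small value for illustration
latest_block_roots = [b''] * SLOTS_PER_HISTORICAL_ROOT


def store_root(slot: int, root: bytes) -> None:
    latest_block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] = root


def get_block_root(current_slot: int, slot: int) -> bytes:
    # Only the most recent SLOTS_PER_HISTORICAL_ROOT slots are still in the buffer.
    assert slot < current_slot <= slot + SLOTS_PER_HISTORICAL_ROOT
    return latest_block_roots[slot % SLOTS_PER_HISTORICAL_ROOT]


for s in range(20):
    store_root(s, s.to_bytes(32, 'little'))

assert get_block_root(20, 15) == (15).to_bytes(32, 'little')  # still retained
# get_block_root(20, 11) would fail the assertion: slot 11's entry was
# overwritten by slot 19, which shares the same buffer index.
```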
- """ - return hash( - get_randao_mix(state, epoch - MIN_SEED_LOOKAHEAD) + - get_active_index_root(state, epoch) + - int_to_bytes32(epoch) - ) -def get_beacon_proposer_index(state: BeaconState, - slot: Slot, - registry_change: bool=False) -> ValidatorIndex: - """ - Return the beacon proposer index for the ``slot``. - """ - epoch = slot_to_epoch(slot) - current_epoch = get_current_epoch(state) - previous_epoch = get_previous_epoch(state) - next_epoch = current_epoch + 1 - - assert previous_epoch <= epoch <= next_epoch - - first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] - return first_committee[epoch % len(first_committee)] -def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: int, root: Bytes32) -> bool: - """ - Verify that the given ``leaf`` is on the merkle branch ``proof`` - starting with the given ``root``. - """ - value = leaf - for i in range(depth): - if index // (2**i) % 2: - value = hash(proof[i] + value) - else: - value = hash(value + proof[i]) - return value == root -def get_attestation_participants(state: BeaconState, - attestation_data: AttestationData, - bitfield: bytes) -> List[ValidatorIndex]: - """ - Return the participant indices at for the ``attestation_data`` and ``bitfield``. - """ - # Find the committee in the list with the desired shard - crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) - - assert attestation_data.shard in [shard for _, shard in crosslink_committees] - crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] - - assert verify_bitfield(bitfield, len(crosslink_committee)) - - # Find the participating attesters in the committee - participants = [] - for i, validator_index in enumerate(crosslink_committee): - aggregation_bit = get_bitfield_bit(bitfield, i) - if aggregation_bit == 0b1: - participants.append(validator_index) - return participants -def is_power_of_two(value: int) -> bool: - """ - Check if ``value`` is a power of two integer. - """ - return (value > 0) and (value & (value - 1) == 0) -def bytes_to_int(data: bytes) -> int: - return int.from_bytes(data, 'little') -def get_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: - """ - Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. - """ - return min(state.validator_balances[index], MAX_DEPOSIT_AMOUNT) -def get_total_balance(state: BeaconState, validators: List[ValidatorIndex]) -> Gwei: - """ - Return the combined effective balance of an array of ``validators``. - """ - return sum([get_effective_balance(state, i) for i in validators]) -def get_fork_version(fork: Fork, - epoch: Epoch) -> bytes: - """ - Return the fork version of the given ``epoch``. - """ - if epoch < fork.epoch: - return fork.previous_version - else: - return fork.current_version -def get_domain(fork: Fork, - epoch: Epoch, - domain_type: int) -> int: - """ - Get the domain number that represents the fork meta and signature domain. - """ - return bytes_to_int(get_fork_version(fork, epoch) + int_to_bytes4(domain_type)) -def get_bitfield_bit(bitfield: bytes, i: int) -> int: - """ - Extract the bit in ``bitfield`` at position ``i``. - """ - return (bitfield[i // 8] >> (i % 8)) % 2 -def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: - """ - Verify ``bitfield`` against the ``committee_size``. 
- """ - if len(bitfield) != (committee_size + 7) // 8: - return False - - # Check `bitfield` is padded with zero bits only - for i in range(committee_size, len(bitfield) * 8): - if get_bitfield_bit(bitfield, i) == 0b1: - return False - - return True -def verify_slashable_attestation(state: BeaconState, slashable_attestation: SlashableAttestation) -> bool: - """ - Verify validity of ``slashable_attestation`` fields. - """ - if slashable_attestation.custody_bitfield != b'\x00' * len(slashable_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] - return False - - if len(slashable_attestation.validator_indices) == 0: - return False - - for i in range(len(slashable_attestation.validator_indices) - 1): - if slashable_attestation.validator_indices[i] >= slashable_attestation.validator_indices[i + 1]: - return False - - if not verify_bitfield(slashable_attestation.custody_bitfield, len(slashable_attestation.validator_indices)): - return False - - if len(slashable_attestation.validator_indices) > MAX_INDICES_PER_SLASHABLE_VOTE: - return False - - custody_bit_0_indices = [] - custody_bit_1_indices = [] - for i, validator_index in enumerate(slashable_attestation.validator_indices): - if get_bitfield_bit(slashable_attestation.custody_bitfield, i) == 0b0: - custody_bit_0_indices.append(validator_index) - else: - custody_bit_1_indices.append(validator_index) - - return bls_verify_multiple( - pubkeys=[ - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_0_indices]), - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_indices]), - ], - message_hashes=[ - hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b0)), - hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b1)), - ], - signature=slashable_attestation.aggregate_signature, - domain=get_domain(state.fork, slot_to_epoch(slashable_attestation.data.slot), DOMAIN_ATTESTATION), - ) -def is_double_vote(attestation_data_1: AttestationData, - attestation_data_2: AttestationData) -> bool: - """ - Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. - """ - target_epoch_1 = slot_to_epoch(attestation_data_1.slot) - target_epoch_2 = slot_to_epoch(attestation_data_2.slot) - return target_epoch_1 == target_epoch_2 -def is_surround_vote(attestation_data_1: AttestationData, - attestation_data_2: AttestationData) -> bool: - """ - Check if ``attestation_data_1`` surrounds ``attestation_data_2``. - """ - source_epoch_1 = attestation_data_1.source_epoch - source_epoch_2 = attestation_data_2.source_epoch - target_epoch_1 = slot_to_epoch(attestation_data_1.slot) - target_epoch_2 = slot_to_epoch(attestation_data_2.slot) - - return source_epoch_1 < source_epoch_2 and target_epoch_2 < target_epoch_1 -def integer_squareroot(n: int) -> int: - """ - The largest integer ``x`` such that ``x**2`` is less than or equal to ``n``. - """ - assert n >= 0 - x = n - y = (x + 1) // 2 - while y < x: - x = y - y = (x + n // x) // 2 - return x -def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: - """ - Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. - """ - return epoch + 1 + ACTIVATION_EXIT_DELAY -def process_deposit(state: BeaconState, deposit: Deposit) -> None: - """ - Process a deposit from Ethereum 1.0. - Note that this function mutates ``state``. 
- """ - deposit_input = deposit.deposit_data.deposit_input - - # Should equal 8 bytes for deposit_data.amount + - # 8 bytes for deposit_data.timestamp + - # 176 bytes for deposit_data.deposit_input - # It should match the deposit_data in the eth1.0 deposit contract - serialized_deposit_data = serialize(deposit.deposit_data) - # Deposits must be processed in order - assert deposit.index == state.deposit_index - - # Verify the Merkle branch - merkle_branch_is_valid = verify_merkle_branch( - leaf=hash(serialized_deposit_data), - proof=deposit.proof, - depth=DEPOSIT_CONTRACT_TREE_DEPTH, - index=deposit.index, - root=state.latest_eth1_data.deposit_root, - ) - assert merkle_branch_is_valid - - # Increment the next deposit index we are expecting. Note that this - # needs to be done here because while the deposit contract will never - # create an invalid Merkle branch, it may admit an invalid deposit - # object, and we need to be able to skip over it - state.deposit_index += 1 - - validator_pubkeys = [v.pubkey for v in state.validator_registry] - pubkey = deposit_input.pubkey - amount = deposit.deposit_data.amount - withdrawal_credentials = deposit_input.withdrawal_credentials - - if pubkey not in validator_pubkeys: - # Verify the proof of possession - proof_is_valid = bls_verify( - pubkey=deposit_input.pubkey, - message_hash=signed_root(deposit_input), - signature=deposit_input.proof_of_possession, - domain=get_domain( - state.fork, - get_current_epoch(state), - DOMAIN_DEPOSIT, - ) - ) - if not proof_is_valid: - return - - # Add new validator - validator = Validator( - pubkey=pubkey, - withdrawal_credentials=withdrawal_credentials, - activation_epoch=FAR_FUTURE_EPOCH, - exit_epoch=FAR_FUTURE_EPOCH, - withdrawable_epoch=FAR_FUTURE_EPOCH, - initiated_exit=False, - slashed=False, - ) - - # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. - state.validator_registry.append(validator) - state.validator_balances.append(amount) - else: - # Increase balance by deposit amount - state.validator_balances[validator_pubkeys.index(pubkey)] += amount -def activate_validator(state: BeaconState, index: ValidatorIndex, is_genesis: bool) -> None: - """ - Activate the validator of the given ``index``. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - - validator.activation_epoch = GENESIS_EPOCH if is_genesis else get_delayed_activation_exit_epoch(get_current_epoch(state)) -def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: - """ - Initiate the validator of the given ``index``. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - validator.initiated_exit = True -def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: - """ - Exit the validator of the given ``index``. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) - - # The following updates only occur if not previous exited - if validator.exit_epoch <= delayed_activation_exit_epoch: - return - else: - validator.exit_epoch = delayed_activation_exit_epoch -def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: - """ - Slash the validator with index ``index``. - Note that this function mutates ``state``. 
- """ - validator = state.validator_registry[index] - assert state.slot < get_epoch_start_slot(validator.withdrawable_epoch) # [TO BE REMOVED IN PHASE 2] - exit_validator(state, index) - state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += get_effective_balance(state, index) - - whistleblower_index = get_beacon_proposer_index(state, state.slot) - whistleblower_reward = get_effective_balance(state, index) // WHISTLEBLOWER_REWARD_QUOTIENT - state.validator_balances[whistleblower_index] += whistleblower_reward - state.validator_balances[index] -= whistleblower_reward - validator.slashed = True - validator.withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH -def prepare_validator_for_withdrawal(state: BeaconState, index: ValidatorIndex) -> None: - """ - Set the validator with the given ``index`` as withdrawable - ``MIN_VALIDATOR_WITHDRAWABILITY_DELAY`` after the current epoch. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - validator.withdrawable_epoch = get_current_epoch(state) + MIN_VALIDATOR_WITHDRAWABILITY_DELAY -def get_empty_block() -> BeaconBlock: - """ - Get an empty ``BeaconBlock``. - """ - return BeaconBlock( - slot=GENESIS_SLOT, - previous_block_root=ZERO_HASH, - state_root=ZERO_HASH, - body=BeaconBlockBody( - randao_reveal=EMPTY_SIGNATURE, - eth1_data=Eth1Data( - deposit_root=ZERO_HASH, - block_hash=ZERO_HASH, - ), - proposer_slashings=[], - attester_slashings=[], - attestations=[], - deposits=[], - voluntary_exits=[], - transfers=[], - ), - signature=EMPTY_SIGNATURE, - ) -def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], - genesis_time: int, - genesis_eth1_data: Eth1Data) -> BeaconState: - """ - Get the genesis ``BeaconState``. 
- """ - state = BeaconState( - # Misc - slot=GENESIS_SLOT, - genesis_time=genesis_time, - fork=Fork( - previous_version=int_to_bytes4(GENESIS_FORK_VERSION), - current_version=int_to_bytes4(GENESIS_FORK_VERSION), - epoch=GENESIS_EPOCH, - ), - - # Validator registry - validator_registry=[], - validator_balances=[], - validator_registry_update_epoch=GENESIS_EPOCH, - - # Randomness and committees - latest_randao_mixes=[ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)], - previous_shuffling_start_shard=GENESIS_START_SHARD, - current_shuffling_start_shard=GENESIS_START_SHARD, - previous_shuffling_epoch=GENESIS_EPOCH, - current_shuffling_epoch=GENESIS_EPOCH, - previous_shuffling_seed=ZERO_HASH, - current_shuffling_seed=ZERO_HASH, - - # Finality - previous_epoch_attestations=[], - current_epoch_attestations=[], - previous_justified_epoch=GENESIS_EPOCH, - current_justified_epoch=GENESIS_EPOCH, - previous_justified_root=ZERO_HASH, - current_justified_root=ZERO_HASH, - justification_bitfield=0, - finalized_epoch=GENESIS_EPOCH, - finalized_root=ZERO_HASH, - - # Recent state - latest_crosslinks=[Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)], - latest_block_roots=[ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)], - latest_state_roots=[ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)], - latest_active_index_roots=[ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)], - latest_slashed_balances=[0 for _ in range(LATEST_SLASHED_EXIT_LENGTH)], - latest_block_header=get_temporary_block_header(get_empty_block()), - historical_roots=[], - - # Ethereum 1.0 chain data - latest_eth1_data=genesis_eth1_data, - eth1_data_votes=[], - deposit_index=0, - ) - - # Process genesis deposits - for deposit in genesis_validator_deposits: - process_deposit(state, deposit) - - # Process genesis activations - for validator_index, _ in enumerate(state.validator_registry): - if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: - activate_validator(state, validator_index, is_genesis=True) - - genesis_active_index_root = hash_tree_root(get_active_validator_indices(state.validator_registry, GENESIS_EPOCH)) - for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): - state.latest_active_index_roots[index] = genesis_active_index_root - state.current_shuffling_seed = generate_seed(state, GENESIS_EPOCH) - - return state -def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock: - """ - Get the ancestor of ``block`` with slot number ``slot``; return ``None`` if not found. - """ - if block.slot == slot: - return block - elif block.slot < slot: - return None - else: - return get_ancestor(store, store.get_parent(block), slot) -def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) -> BeaconBlock: - """ - Execute the LMD-GHOST algorithm to find the head ``BeaconBlock``. 
- """ - validators = start_state.validator_registry - active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot)) - attestation_targets = [ - (validator_index, get_latest_attestation_target(store, validator_index)) - for validator_index in active_validator_indices - ] - - def get_vote_count(block: BeaconBlock) -> int: - return sum( - get_effective_balance(start_state.validator_balances[validator_index]) // FORK_CHOICE_BALANCE_INCREMENT - for validator_index, target in attestation_targets - if get_ancestor(store, target, block.slot) == block - ) - - head = start_block - while 1: - children = get_children(store, head) - if len(children) == 0: - return head - head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x))) -def cache_state(state: BeaconState) -> None: - previous_slot_state_root = hash_tree_root(state) - - # store the previous slot's post state transition root - state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_slot_state_root - - # cache state root in stored latest_block_header if empty - if state.latest_block_header.state_root == ZERO_HASH: - state.latest_block_header.state_root = previous_slot_state_root - - # store latest known block for previous slot - state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = hash_tree_root(state.latest_block_header) -def get_current_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_current_epoch(state))) -def get_previous_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_previous_epoch(state))) -def get_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: - output = set() - for a in attestations: - output = output.union(get_attestation_participants(state, a.data, a.aggregation_bitfield)) - return sorted(list(output)) -def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: - return get_total_balance(state, get_attesting_indices(state, attestations)) -def get_current_epoch_boundary_attestations(state: BeaconState) -> List[PendingAttestation]: - return [ - a for a in state.current_epoch_attestations - if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_current_epoch(state))) - ] -def get_previous_epoch_boundary_attestations(state: BeaconState) -> List[PendingAttestation]: - return [ - a for a in state.previous_epoch_attestations - if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_previous_epoch(state))) - ] -def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[PendingAttestation]: - return [ - a for a in state.previous_epoch_attestations - if a.data.beacon_block_root == get_block_root(state, a.data.slot) - ] -def get_winning_root_and_participants(state: BeaconState, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: - all_attestations = state.current_epoch_attestations + state.previous_epoch_attestations - valid_attestations = [ - a for a in all_attestations if a.data.previous_crosslink == state.latest_crosslinks[shard] - ] - all_roots = [a.data.crosslink_data_root for a in valid_attestations] - - # handle when no attestations for shard available - if len(all_roots) == 0: - return ZERO_HASH, [] - - def get_attestations_for(root) -> List[PendingAttestation]: - return [a for a in valid_attestations if a.data.crosslink_data_root == 
root] - - # Winning crosslink root is the root with the most votes for it, ties broken in favor of - # lexicographically higher hash - winning_root = max(all_roots, key=lambda r: (get_attesting_balance(state, get_attestations_for(r)), r)) - - return winning_root, get_attesting_indices(state, get_attestations_for(winning_root)) -def earliest_attestation(state: BeaconState, validator_index: ValidatorIndex) -> PendingAttestation: - return min([ - a for a in state.previous_epoch_attestations if - validator_index in get_attestation_participants(state, a.data, a.aggregation_bitfield) - ], key=lambda a: a.inclusion_slot) -def inclusion_slot(state: BeaconState, validator_index: ValidatorIndex) -> Slot: - return earliest_attestation(state, validator_index).inclusion_slot -def inclusion_distance(state: BeaconState, validator_index: ValidatorIndex) -> int: - attestation = earliest_attestation(state, validator_index) - return attestation.inclusion_slot - attestation.data.slot -def update_justification_and_finalization(state: BeaconState) -> None: - new_justified_epoch = state.current_justified_epoch - new_finalized_epoch = state.finalized_epoch - - # Rotate the justification bitfield up one epoch to make room for the current epoch - state.justification_bitfield <<= 1 - # If the previous epoch gets justified, fill the second last bit - previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) - if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: - new_justified_epoch = get_current_epoch(state) - 1 - state.justification_bitfield |= 2 - # If the current epoch gets justified, fill the last bit - current_boundary_attesting_balance = get_attesting_balance(state, get_current_epoch_boundary_attestations(state)) - if current_boundary_attesting_balance * 3 >= get_current_total_balance(state) * 2: - new_justified_epoch = get_current_epoch(state) - state.justification_bitfield |= 1 - - # Process finalizations - bitfield = state.justification_bitfield - current_epoch = get_current_epoch(state) - # The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source - if (bitfield >> 1) % 8 == 0b111 and state.previous_justified_epoch == current_epoch - 3: - new_finalized_epoch = state.previous_justified_epoch - # The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source - if (bitfield >> 1) % 4 == 0b11 and state.previous_justified_epoch == current_epoch - 2: - new_finalized_epoch = state.previous_justified_epoch - # The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3rd as source - if (bitfield >> 0) % 8 == 0b111 and state.current_justified_epoch == current_epoch - 2: - new_finalized_epoch = state.current_justified_epoch - # The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source - if (bitfield >> 0) % 4 == 0b11 and state.current_justified_epoch == current_epoch - 1: - new_finalized_epoch = state.current_justified_epoch - - # Update state jusification/finality fields - state.previous_justified_epoch = state.current_justified_epoch - state.previous_justified_root = state.current_justified_root - if new_justified_epoch != state.current_justified_epoch: - state.current_justified_epoch = new_justified_epoch - state.current_justified_root = get_block_root(state, get_epoch_start_slot(new_justified_epoch)) - if new_finalized_epoch != state.finalized_epoch: - state.finalized_epoch = new_finalized_epoch - state.finalized_root = 
get_block_root(state, get_epoch_start_slot(new_finalized_epoch)) -def process_crosslinks(state: BeaconState) -> None: - current_epoch = get_current_epoch(state) - previous_epoch = current_epoch - 1 - next_epoch = current_epoch + 1 - for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): - for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, participants = get_winning_root_and_participants(state, shard) - participating_balance = get_total_balance(state, participants) - total_balance = get_total_balance(state, crosslink_committee) - if 3 * participating_balance >= 2 * total_balance: - state.latest_crosslinks[shard] = Crosslink( - epoch=slot_to_epoch(slot), - crosslink_data_root=winning_root - ) -def maybe_reset_eth1_period(state: BeaconState) -> None: - if (get_current_epoch(state) + 1) % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: - for eth1_data_vote in state.eth1_data_votes: - # If a majority of all votes were for a particular eth1_data value, - # then set that as the new canonical value - if eth1_data_vote.vote_count * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: - state.latest_eth1_data = eth1_data_vote.eth1_data - state.eth1_data_votes = [] -def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: - if get_previous_total_balance(state) == 0: - return 0 - - adjusted_quotient = integer_squareroot(get_previous_total_balance(state)) // BASE_REWARD_QUOTIENT - return get_effective_balance(state, index) // adjusted_quotient // 5 -def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_since_finality: int) -> Gwei: - return ( - get_base_reward(state, index) + - get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 - ) -def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - epochs_since_finality = get_current_epoch(state) + 1 - state.finalized_epoch - if epochs_since_finality <= 4: - return compute_normal_justification_and_finalization_deltas(state) - else: - return compute_inactivity_leak_deltas(state) -def compute_normal_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - # deltas[0] for rewards - # deltas[1] for penalties - deltas = [ - [0 for index in range(len(state.validator_registry))], - [0 for index in range(len(state.validator_registry))] - ] - # Some helper variables - boundary_attestations = get_previous_epoch_boundary_attestations(state) - boundary_attesting_balance = get_attesting_balance(state, boundary_attestations) - total_balance = get_previous_total_balance(state) - total_attesting_balance = get_attesting_balance(state, state.previous_epoch_attestations) - matching_head_attestations = get_previous_epoch_matching_head_attestations(state) - matching_head_balance = get_attesting_balance(state, matching_head_attestations) - # Process rewards or penalties for all validators - for index in get_active_validator_indices(state.validator_registry, get_previous_epoch(state)): - # Expected FFG source - if index in get_attesting_indices(state, state.previous_epoch_attestations): - deltas[0][index] += get_base_reward(state, index) * total_attesting_balance // total_balance - # Inclusion speed bonus - deltas[0][index] += ( - get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // - inclusion_distance(state, index) - ) - else: - deltas[1][index] += get_base_reward(state, index) - # Expected FFG target - if index in 
get_attesting_indices(state, boundary_attestations): - deltas[0][index] += get_base_reward(state, index) * boundary_attesting_balance // total_balance - else: - deltas[1][index] += get_base_reward(state, index) - # Expected head - if index in get_attesting_indices(state, matching_head_attestations): - deltas[0][index] += get_base_reward(state, index) * matching_head_balance // total_balance - else: - deltas[1][index] += get_base_reward(state, index) - # Proposer bonus - if index in get_attesting_indices(state, state.previous_epoch_attestations): - proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) - deltas[0][proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT - return deltas -def compute_inactivity_leak_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - # deltas[0] for rewards - # deltas[1] for penalties - deltas = [ - [0 for index in range(len(state.validator_registry))], - [0 for index in range(len(state.validator_registry))] - ] - boundary_attestations = get_previous_epoch_boundary_attestations(state) - matching_head_attestations = get_previous_epoch_matching_head_attestations(state) - active_validator_indices = get_active_validator_indices(state.validator_registry, get_previous_epoch(state)) - epochs_since_finality = get_current_epoch(state) + 1 - state.finalized_epoch - for index in active_validator_indices: - if index not in get_attesting_indices(state, state.previous_epoch_attestations): - deltas[1][index] += get_inactivity_penalty(state, index, epochs_since_finality) - else: - # If a validator did attest, apply a small penalty for getting attestations included late - deltas[0][index] += ( - get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // - inclusion_distance(state, index) - ) - deltas[1][index] += get_base_reward(state, index) - if index not in get_attesting_indices(state, boundary_attestations): - deltas[1][index] += get_inactivity_penalty(state, index, epochs_since_finality) - if index not in get_attesting_indices(state, matching_head_attestations): - deltas[1][index] += get_base_reward(state, index) - # Penalize slashed-but-inactive validators as though they were active but offline - for index in range(len(state.validator_registry)): - eligible = ( - index not in active_validator_indices and - state.validator_registry[index].slashed and - get_current_epoch(state) < state.validator_registry[index].withdrawable_epoch - ) - if eligible: - deltas[1][index] += ( - 2 * get_inactivity_penalty(state, index, epochs_since_finality) + - get_base_reward(state, index) - ) - return deltas -def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - # deltas[0] for rewards - # deltas[1] for penalties - deltas = [ - [0 for index in range(len(state.validator_registry))], - [0 for index in range(len(state.validator_registry))] - ] - previous_epoch_start_slot = get_epoch_start_slot(get_previous_epoch(state)) - current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) - for slot in range(previous_epoch_start_slot, current_epoch_start_slot): - for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, participants = get_winning_root_and_participants(state, shard) - participating_balance = get_total_balance(state, participants) - total_balance = get_total_balance(state, crosslink_committee) - for index in crosslink_committee: - if index in participants: - deltas[0][index] += get_base_reward(state, index) * participating_balance 
// total_balance - else: - deltas[1][index] += get_base_reward(state, index) - return deltas -def apply_rewards(state: BeaconState) -> None: - deltas1 = get_justification_and_finalization_deltas(state) - deltas2 = get_crosslink_deltas(state) - for i in range(len(state.validator_registry)): - state.validator_balances[i] = max( - 0, - state.validator_balances[i] + deltas1[0][i] + deltas2[0][i] - deltas1[1][i] - deltas2[1][i] - ) -def process_ejections(state: BeaconState) -> None: - """ - Iterate through the validator registry - and eject active validators with balance below ``EJECTION_BALANCE``. - """ - for index in get_active_validator_indices(state.validator_registry, get_current_epoch(state)): - if state.validator_balances[index] < EJECTION_BALANCE: - exit_validator(state, index) -def should_update_validator_registry(state: BeaconState) -> bool: - # Must have finalized a new block - if state.finalized_epoch <= state.validator_registry_update_epoch: - return False - # Must have processed new crosslinks on all shards of the current epoch - shards_to_check = [ - (state.current_shuffling_start_shard + i) % SHARD_COUNT - for i in range(get_current_epoch_committee_count(state)) - ] - for shard in shards_to_check: - if state.latest_crosslinks[shard].epoch <= state.validator_registry_update_epoch: - return False - return True -def update_validator_registry(state: BeaconState) -> None: - """ - Update validator registry. - Note that this function mutates ``state``. - """ - current_epoch = get_current_epoch(state) - # The active validators - active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) - # The total effective balance of active validators - total_balance = get_total_balance(state, active_validator_indices) - - # The maximum balance churn in Gwei (for deposits and exits separately) - max_balance_churn = max( - MAX_DEPOSIT_AMOUNT, - total_balance // (2 * MAX_BALANCE_CHURN_QUOTIENT) - ) - - # Activate validators within the allowable balance churn - balance_churn = 0 - for index, validator in enumerate(state.validator_registry): - if validator.activation_epoch == FAR_FUTURE_EPOCH and state.validator_balances[index] >= MAX_DEPOSIT_AMOUNT: - # Check the balance churn would be within the allowance - balance_churn += get_effective_balance(state, index) - if balance_churn > max_balance_churn: - break - - # Activate validator - activate_validator(state, index, is_genesis=False) - - # Exit validators within the allowable balance churn - balance_churn = 0 - for index, validator in enumerate(state.validator_registry): - if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: - # Check the balance churn would be within the allowance - balance_churn += get_effective_balance(state, index) - if balance_churn > max_balance_churn: - break - - # Exit validator - exit_validator(state, index) - - state.validator_registry_update_epoch = current_epoch -def update_registry_and_shuffling_data(state: BeaconState) -> None: - # First set previous shuffling data to current shuffling data - state.previous_shuffling_epoch = state.current_shuffling_epoch - state.previous_shuffling_start_shard = state.current_shuffling_start_shard - state.previous_shuffling_seed = state.current_shuffling_seed - current_epoch = get_current_epoch(state) - next_epoch = current_epoch + 1 - # Check if we should update, and if so, update - if should_update_validator_registry(state): - update_validator_registry(state) - # If we update the registry, update the shuffling data and shards as 
well - state.current_shuffling_epoch = next_epoch - state.current_shuffling_start_shard = ( - state.current_shuffling_start_shard + - get_current_epoch_committee_count(state) % SHARD_COUNT - ) - state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch) - else: - # If processing at least one crosslink keeps failing, then reshuffle every power of two, - # but don't update the current_shuffling_start_shard - epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch - if epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update): - state.current_shuffling_epoch = next_epoch - state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch) -def process_slashings(state: BeaconState) -> None: - """ - Process the slashings. - Note that this function mutates ``state``. - """ - current_epoch = get_current_epoch(state) - active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) - total_balance = get_total_balance(state, active_validator_indices) - - # Compute `total_penalties` - total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] - total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] - total_penalties = total_at_end - total_at_start - - for index, validator in enumerate(state.validator_registry): - if validator.slashed and current_epoch == validator.withdrawable_epoch - LATEST_SLASHED_EXIT_LENGTH // 2: - penalty = max( - get_effective_balance(state, index) * min(total_penalties * 3, total_balance) // total_balance, - get_effective_balance(state, index) // MIN_PENALTY_QUOTIENT - ) - state.validator_balances[index] -= penalty -def process_exit_queue(state: BeaconState) -> None: - """ - Process the exit queue. - Note that this function mutates ``state``. 
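A worked example of the penalty formula in `process_slashings`, assuming `MIN_PENALTY_QUOTIENT = 32` and illustrative balance figures; when only a small fraction of the total stake was slashed in the trailing window, the proportional term falls below the floor and the minimum penalty applies:

```python
# Worked slashing-penalty arithmetic under assumed constants and balances.
MIN_PENALTY_QUOTIENT = 32
GWEI = 10**9

effective_balance = 32 * GWEI
total_balance = 1_000_000 * GWEI   # total active stake (illustrative)
total_penalties = 500 * GWEI       # stake slashed in the trailing window (illustrative)

penalty = max(
    effective_balance * min(total_penalties * 3, total_balance) // total_balance,
    effective_balance // MIN_PENALTY_QUOTIENT,
)
# Only 0.05% of the stake was slashed (0.15% after the 3x multiplier), so the
# proportional term (0.048 ETH) is below the effective_balance / 32 floor and
# the 1 ETH minimum penalty applies.
assert penalty == 1 * GWEI
```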
- """ - def eligible(index): - validator = state.validator_registry[index] - # Filter out dequeued validators - if validator.withdrawable_epoch != FAR_FUTURE_EPOCH: - return False - # Dequeue if the minimum amount of time has passed - else: - return get_current_epoch(state) >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY - - eligible_indices = filter(eligible, list(range(len(state.validator_registry)))) - # Sort in order of exit epoch, and validators that exit within the same epoch exit in order of validator index - sorted_indices = sorted(eligible_indices, key=lambda index: state.validator_registry[index].exit_epoch) - for dequeues, index in enumerate(sorted_indices): - if dequeues >= MAX_EXIT_DEQUEUES_PER_EPOCH: - break - prepare_validator_for_withdrawal(state, index) -def finish_epoch_update(state: BeaconState) -> None: - current_epoch = get_current_epoch(state) - next_epoch = current_epoch + 1 - # Set active index root - index_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH - state.latest_active_index_roots[index_root_position] = hash_tree_root( - get_active_validator_indices(state.validator_registry, next_epoch + ACTIVATION_EXIT_DELAY) - ) - # Set total slashed balances - state.latest_slashed_balances[next_epoch % LATEST_SLASHED_EXIT_LENGTH] = ( - state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] - ) - # Set randao mix - state.latest_randao_mixes[next_epoch % LATEST_RANDAO_MIXES_LENGTH] = get_randao_mix(state, current_epoch) - # Set historical root accumulator - if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: - historical_batch = HistoricalBatch( - block_roots=state.latest_block_roots, - state_roots=state.latest_state_roots, - ) - state.historical_roots.append(hash_tree_root(historical_batch)) - # Rotate current/previous epoch attestations - state.previous_epoch_attestations = state.current_epoch_attestations - state.current_epoch_attestations = [] -def advance_slot(state: BeaconState) -> None: - state.slot += 1 -def process_block_header(state: BeaconState, block: BeaconBlock) -> None: - # Verify that the slots match - assert block.slot == state.slot - # Verify that the parent matches - assert block.previous_block_root == hash_tree_root(state.latest_block_header) - # Save current block as the new latest block - state.latest_block_header = get_temporary_block_header(block) - # Verify proposer signature - proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] - assert bls_verify( - pubkey=proposer.pubkey, - message_hash=signed_root(block), - signature=block.signature, - domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_BEACON_BLOCK) - ) -def process_randao(state: BeaconState, block: BeaconBlock) -> None: - proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] - # Verify that the provided randao value is valid - assert bls_verify( - pubkey=proposer.pubkey, - message_hash=hash_tree_root(get_current_epoch(state)), - signature=block.body.randao_reveal, - domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO) - ) - # Mix it in - state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = ( - xor(get_randao_mix(state, get_current_epoch(state)), - hash(block.body.randao_reveal)) - ) -def process_eth1_data(state: BeaconState, block: BeaconBlock) -> None: - for eth1_data_vote in state.eth1_data_votes: - # If someone else has already voted for the same hash, add to its counter - if 
eth1_data_vote.eth1_data == block.body.eth1_data: - eth1_data_vote.vote_count += 1 - return - # If we're seeing this hash for the first time, make a new counter - state.eth1_data_votes.append(Eth1DataVote(eth1_data=block.body.eth1_data, vote_count=1)) -def process_proposer_slashing(state: BeaconState, - proposer_slashing: ProposerSlashing) -> None: - """ - Process ``ProposerSlashing`` transaction. - Note that this function mutates ``state``. - """ - proposer = state.validator_registry[proposer_slashing.proposer_index] - # Verify that the epoch is the same - assert slot_to_epoch(proposer_slashing.header_1.slot) == slot_to_epoch(proposer_slashing.header_2.slot) - # But the headers are different - assert proposer_slashing.header_1 != proposer_slashing.header_2 - # Proposer is not yet slashed - assert proposer.slashed is False - # Signatures are valid - for header in (proposer_slashing.header_1, proposer_slashing.header_2): - assert bls_verify( - pubkey=proposer.pubkey, - message_hash=signed_root(header), - signature=header.signature, - domain=get_domain(state.fork, slot_to_epoch(header.slot), DOMAIN_BEACON_BLOCK) - ) - slash_validator(state, proposer_slashing.proposer_index) -def process_attester_slashing(state: BeaconState, - attester_slashing: AttesterSlashing) -> None: - """ - Process ``AttesterSlashing`` transaction. - Note that this function mutates ``state``. - """ - attestation1 = attester_slashing.slashable_attestation_1 - attestation2 = attester_slashing.slashable_attestation_2 - # Check that the attestations are conflicting - assert attestation1.data != attestation2.data - assert ( - is_double_vote(attestation1.data, attestation2.data) or - is_surround_vote(attestation1.data, attestation2.data) - ) - assert verify_slashable_attestation(state, attestation1) - assert verify_slashable_attestation(state, attestation2) - slashable_indices = [ - index for index in attestation1.validator_indices - if ( - index in attestation2.validator_indices and - state.validator_registry[index].slashed is False - ) - ] - assert len(slashable_indices) >= 1 - for index in slashable_indices: - slash_validator(state, index) -def process_attestation(state: BeaconState, attestation: Attestation) -> None: - """ - Process ``Attestation`` transaction. - Note that this function mutates ``state``. 
- """ - # Can't submit attestations that are too far in history (or in prehistory) - assert attestation.data.slot >= GENESIS_SLOT - assert state.slot <= attestation.data.slot + SLOTS_PER_EPOCH - # Can't submit attestations too quickly - assert attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot - # Verify that the justified epoch and root is correct - if slot_to_epoch(attestation.data.slot) >= get_current_epoch(state): - # Case 1: current epoch attestations - assert attestation.data.source_epoch == state.current_justified_epoch - assert attestation.data.source_root == state.current_justified_root - else: - # Case 2: previous epoch attestations - assert attestation.data.source_epoch == state.previous_justified_epoch - assert attestation.data.source_root == state.previous_justified_root - # Check that the crosslink data is valid - acceptable_crosslink_data = { - # Case 1: Latest crosslink matches the one in the state - attestation.data.previous_crosslink, - # Case 2: State has already been updated, state's latest crosslink matches the crosslink - # the attestation is trying to create - Crosslink( - crosslink_data_root=attestation.data.crosslink_data_root, - epoch=slot_to_epoch(attestation.data.slot) - ) - } - assert state.latest_crosslinks[attestation.data.shard] in acceptable_crosslink_data - # Attestation must be nonempty! - assert attestation.aggregation_bitfield != b'\x00' * len(attestation.aggregation_bitfield) - # Custody must be empty (to be removed in phase 1) - assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) - # Get the committee for the specific shard that this attestation is for - crosslink_committee = [ - committee for committee, shard in get_crosslink_committees_at_slot(state, attestation.data.slot) - if shard == attestation.data.shard - ][0] - # Custody bitfield must be a subset of the attestation bitfield - for i in range(len(crosslink_committee)): - if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b0: - assert get_bitfield_bit(attestation.custody_bitfield, i) == 0b0 - # Verify aggregate signature - participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) - custody_bit_1_participants = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) - custody_bit_0_participants = [i for i in participants if i not in custody_bit_1_participants] - - assert bls_verify_multiple( - pubkeys=[ - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_0_participants]), - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_participants]), - ], - message_hashes=[ - hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), - hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b1)), - ], - signature=attestation.aggregate_signature, - domain=get_domain(state.fork, slot_to_epoch(attestation.data.slot), DOMAIN_ATTESTATION), - ) - # Crosslink data root is zero (to be removed in phase 1) - assert attestation.data.crosslink_data_root == ZERO_HASH - # Apply the attestation - pending_attestation = PendingAttestation( - data=attestation.data, - aggregation_bitfield=attestation.aggregation_bitfield, - custody_bitfield=attestation.custody_bitfield, - inclusion_slot=state.slot - ) - if slot_to_epoch(attestation.data.slot) == get_current_epoch(state): - state.current_epoch_attestations.append(pending_attestation) - elif slot_to_epoch(attestation.data.slot) == 
get_previous_epoch(state): - state.previous_epoch_attestations.append(pending_attestation) -def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: - """ - Process ``VoluntaryExit`` transaction. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[exit.validator_index] - # Verify the validator has not yet exited - assert validator.exit_epoch == FAR_FUTURE_EPOCH - # Verify the validator has not initiated an exit - assert validator.initiated_exit is False - # Exits must specify an epoch when they become valid; they are not valid before then - assert get_current_epoch(state) >= exit.epoch - # Must have been in the validator set long enough - assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD - # Verify signature - assert bls_verify( - pubkey=validator.pubkey, - message_hash=signed_root(exit), - signature=exit.signature, - domain=get_domain(state.fork, exit.epoch, DOMAIN_VOLUNTARY_EXIT) - ) - # Run the exit - initiate_validator_exit(state, exit.validator_index) -def process_transfer(state: BeaconState, transfer: Transfer) -> None: - """ - Process ``Transfer`` transaction. - Note that this function mutates ``state``. - """ - # Verify the amount and fee aren't individually too big (for anti-overflow purposes) - assert state.validator_balances[transfer.sender] >= max(transfer.amount, transfer.fee) - # Verify that we have enough ETH to send, and that after the transfer the balance will be either - # exactly zero or at least MIN_DEPOSIT_AMOUNT - assert ( - state.validator_balances[transfer.sender] == transfer.amount + transfer.fee or - state.validator_balances[transfer.sender] >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT - ) - # A transfer is valid in only one slot - assert state.slot == transfer.slot - # Only withdrawn or not-yet-deposited accounts can transfer - assert ( - get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or - state.validator_registry[transfer.sender].activation_epoch == FAR_FUTURE_EPOCH - ) - # Verify that the pubkey is valid - assert ( - state.validator_registry[transfer.sender].withdrawal_credentials == - BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:] - ) - # Verify that the signature is valid - assert bls_verify( - pubkey=transfer.pubkey, - message_hash=signed_root(transfer), - signature=transfer.signature, - domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER) - ) - # Process the transfer - state.validator_balances[transfer.sender] -= transfer.amount + transfer.fee - state.validator_balances[transfer.recipient] += transfer.amount - state.validator_balances[get_beacon_proposer_index(state, state.slot)] += transfer.fee -def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: - assert block.state_root == hash_tree_root(state) - -# Monkey patch validator shuffling cache -_get_shuffling = get_shuffling -shuffling_cache = {} -def get_shuffling(seed: Bytes32, - validators: List[Validator], - epoch: Epoch) -> List[List[ValidatorIndex]]: - - param_hash = (seed, hash_tree_root(validators, [Validator]), epoch) - - if param_hash in shuffling_cache: - # print("Cache hit, epoch={0}".format(epoch)) - return shuffling_cache[param_hash] - else: - # print("Cache miss, epoch={0}".format(epoch)) - ret = _get_shuffling(seed, validators, epoch) - shuffling_cache[param_hash] = ret - return ret - - -# Monkey patch hash cache -_hash = hash -hash_cache = {} -def hash(x): - if x in hash_cache: - return 
hash_cache[x] - else: - ret = _hash(x) - hash_cache[x] = ret - return ret - \ No newline at end of file diff --git a/build/phase0/state_transition.py b/build/phase0/state_transition.py deleted file mode 100644 index 170f647ab..000000000 --- a/build/phase0/state_transition.py +++ /dev/null @@ -1,100 +0,0 @@ -from . import spec - - -from typing import ( - Any, - Callable, - List, - NewType, - Tuple, -) - -from .spec import ( - BeaconState, - BeaconBlock, -) - - -def process_transaction_type(state: BeaconState, - transactions: List[Any], - max_transactions: int, - tx_fn: Callable[[BeaconState, Any], None]) -> None: - assert len(transactions) <= max_transactions - for transaction in transactions: - tx_fn(state, transaction) - - -def process_transactions(state: BeaconState, block: BeaconBlock) -> None: - process_transaction_type( - state, - block.body.proposer_slashings, - spec.MAX_PROPOSER_SLASHINGS, - spec.process_proposer_slashing, - ) - process_transaction_type( - state, - block.body.attester_slashings, - spec.MAX_ATTESTER_SLASHINGS, - spec.process_attester_slashing, - ) - process_transaction_type( - state, - block.body.attestations, - spec.MAX_ATTESTATIONS, - spec.process_attestation, - ) - process_transaction_type( - state, - block.body.deposits, - spec.MAX_DEPOSITS, - spec.process_deposit, - ) - process_transaction_type( - state, - block.body.voluntary_exits, - spec.MAX_VOLUNTARY_EXITS, - spec.process_voluntary_exit, - ) - assert len(block.body.transfers) == len(set(block.body.transfers)) - process_transaction_type( - state, - block.body.transfers, - spec.MAX_TRANSFERS, - spec.process_transfer, - ) - - -def process_block(state: BeaconState, - block: BeaconBlock, - verify_state_root: bool=False) -> None: - spec.process_block_header(state, block) - spec.process_randao(state, block) - spec.process_eth1_data(state, block) - - process_transactions(state, block) - if verify_state_root: - spec.verify_block_state_root(state, block) - - -def process_epoch_transition(state: BeaconState) -> None: - spec.update_justification_and_finalization(state) - spec.process_crosslinks(state) - spec.maybe_reset_eth1_period(state) - spec.apply_rewards(state) - spec.process_ejections(state) - spec.update_registry_and_shuffling_data(state) - spec.process_slashings(state) - spec.process_exit_queue(state) - spec.finish_epoch_update(state) - - -def state_transition(state: BeaconState, - block: BeaconBlock, - verify_state_root: bool=False) -> BeaconState: - while state.slot < block.slot: - spec.cache_state(state) - if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0: - process_epoch_transition(state) - spec.advance_slot(state) - if block.slot == state.slot: - process_block(state, block) From 55c337a35c7cb6caff0d1fe6f3179d2b3161a579 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 18 Mar 2019 16:20:24 -0600 Subject: [PATCH 057/133] seperate tests 'sanity' and 'minimal-config' vs all --- Makefile | 13 ++- tests/phase0/conftest.py | 102 +++++------------ tests/phase0/helpers.py | 139 +++++++++++++++++++++++ tests/phase0/test_sanity.py | 216 +++++------------------------------- 4 files changed, 202 insertions(+), 268 deletions(-) create mode 100644 tests/phase0/helpers.py diff --git a/Makefile b/Makefile index 593ea8bf4..f0f7557e9 100644 --- a/Makefile +++ b/Makefile @@ -3,8 +3,19 @@ SCRIPT_DIR = ./scripts BUILD_DIR = ./build UTILS_DIR = ./utils -.PHONY: clean all $(BUILD_DIR)/phase0 +.PHONY: clean all test + + +all: $(BUILD_DIR)/phase0 + + +clean: + rm -rf $(BUILD_DIR) + + +test: + pytest -m "sanity and minimal_config" 
tests/ $(BUILD_DIR)/phase0: mkdir -p $@ diff --git a/tests/phase0/conftest.py b/tests/phase0/conftest.py index 7d372f164..e92896e92 100644 --- a/tests/phase0/conftest.py +++ b/tests/phase0/conftest.py @@ -1,28 +1,27 @@ import pytest -from py_ecc import bls - from build.phase0 import spec -from build.phase0.utils.merkle_minimal import ( - calc_merkle_tree_from_leaves, - get_merkle_proof, - get_merkle_root, -) -from build.phase0.spec import ( - Deposit, - DepositData, - DepositInput, - Eth1Data, - get_genesis_beacon_state, - verify_merkle_branch, - hash, +from tests.phase0.helpers import ( + privkeys_list, + pubkeys_list, + create_genesis_state, ) -privkeys_list = [i+1 for i in range(1000)] -pubkeys_list = [bls.privtopub(privkey) for privkey in privkeys_list] -pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys_list, pubkeys_list)} +DEFAULT_CONFIG = {} # no change + +MINIMAL_CONFIG = { + "SHARD_COUNT": 8, + "MIN_ATTESTATION_INCLUSION_DELAY": 2, + "TARGET_COMMITTEE_SIZE": 4, + "SLOTS_PER_EPOCH": 8, + "GENESIS_EPOCH": spec.GENESIS_SLOT // 8, + "SLOTS_PER_HISTORICAL_ROOT": 64, + "LATEST_RANDAO_MIXES_LENGTH": 64, + "LATEST_ACTIVE_INDEX_ROOTS_LENGTH": 64, + "LATEST_SLASHED_EXIT_LENGTH": 64, +} @pytest.fixture @@ -53,19 +52,14 @@ def overwrite_spec_config(config): spec.BeaconState.fields['latest_slashed_balances'][1] = config[field] -@pytest.fixture -def config(): - return { - "SHARD_COUNT": 8, - "MIN_ATTESTATION_INCLUSION_DELAY": 2, - "TARGET_COMMITTEE_SIZE": 4, - "SLOTS_PER_EPOCH": 8, - "GENESIS_EPOCH": spec.GENESIS_SLOT // 8, - "SLOTS_PER_HISTORICAL_ROOT": 64, - "LATEST_RANDAO_MIXES_LENGTH": 64, - "LATEST_ACTIVE_INDEX_ROOTS_LENGTH": 64, - "LATEST_SLASHED_EXIT_LENGTH": 64, - } +@pytest.fixture( + params=[ + pytest.param(MINIMAL_CONFIG, marks=pytest.mark.minimal_config), + DEFAULT_CONFIG, + ] +) +def config(request): + return request.param @pytest.fixture(autouse=True) @@ -73,52 +67,6 @@ def overwrite_config(config): overwrite_spec_config(config) -def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves): - deposit_timestamp = 0 - proof_of_possession = b'\x33' * 96 - - deposit_data_list = [] - for i in range(num_validators): - pubkey = pubkeys_list[i] - privkey = pubkey_to_privkey[pubkey] - deposit_data = DepositData( - amount=spec.MAX_DEPOSIT_AMOUNT, - timestamp=deposit_timestamp, - deposit_input=DepositInput( - pubkey=pubkey, - withdrawal_credentials=privkey.to_bytes(32, byteorder='big'), - proof_of_possession=proof_of_possession, - ), - ) - item = hash(deposit_data.serialize()) - deposit_data_leaves.append(item) - tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) - root = get_merkle_root((tuple(deposit_data_leaves))) - proof = list(get_merkle_proof(tree, item_index=i)) - assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, i, root) - deposit_data_list.append(deposit_data) - - genesis_validator_deposits = [] - for i in range(num_validators): - genesis_validator_deposits.append(Deposit( - proof=list(get_merkle_proof(tree, item_index=i)), - index=i, - deposit_data=deposit_data_list[i] - )) - return genesis_validator_deposits, root - - -def create_genesis_state(num_validators, deposit_data_leaves): - initial_deposits, deposit_root = create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves) - return get_genesis_beacon_state( - initial_deposits, - genesis_time=0, - genesis_eth1_data=Eth1Data( - deposit_root=deposit_root, - block_hash=spec.ZERO_HASH, - ), - ) - @pytest.fixture def num_validators(): return 
100 diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py new file mode 100644 index 000000000..fa0ba61b5 --- /dev/null +++ b/tests/phase0/helpers.py @@ -0,0 +1,139 @@ +from copy import deepcopy + +from py_ecc import bls + +import build.phase0.spec as spec +from build.phase0.utils.minimal_ssz import signed_root +from build.phase0.spec import ( + AttestationData, + Deposit, + DepositInput, + DepositData, + Eth1Data, + get_block_root, + get_current_epoch, + get_domain, + get_empty_block, + get_epoch_start_slot, + get_genesis_beacon_state, + verify_merkle_branch, + hash, +) +from build.phase0.utils.merkle_minimal import ( + calc_merkle_tree_from_leaves, + get_merkle_proof, + get_merkle_root, +) + + +privkeys_list = [i+1 for i in range(1000)] +pubkeys_list = [bls.privtopub(privkey) for privkey in privkeys_list] +pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys_list, pubkeys_list)} + + +def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves): + deposit_timestamp = 0 + proof_of_possession = b'\x33' * 96 + + deposit_data_list = [] + for i in range(num_validators): + pubkey = pubkeys_list[i] + privkey = pubkey_to_privkey[pubkey] + deposit_data = DepositData( + amount=spec.MAX_DEPOSIT_AMOUNT, + timestamp=deposit_timestamp, + deposit_input=DepositInput( + pubkey=pubkey, + withdrawal_credentials=privkey.to_bytes(32, byteorder='big'), + proof_of_possession=proof_of_possession, + ), + ) + item = hash(deposit_data.serialize()) + deposit_data_leaves.append(item) + tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) + root = get_merkle_root((tuple(deposit_data_leaves))) + proof = list(get_merkle_proof(tree, item_index=i)) + assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, i, root) + deposit_data_list.append(deposit_data) + + genesis_validator_deposits = [] + for i in range(num_validators): + genesis_validator_deposits.append(Deposit( + proof=list(get_merkle_proof(tree, item_index=i)), + index=i, + deposit_data=deposit_data_list[i] + )) + return genesis_validator_deposits, root + + +def create_genesis_state(num_validators, deposit_data_leaves): + initial_deposits, deposit_root = create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves) + return get_genesis_beacon_state( + initial_deposits, + genesis_time=0, + genesis_eth1_data=Eth1Data( + deposit_root=deposit_root, + block_hash=spec.ZERO_HASH, + ), + ) + +def build_empty_block_for_next_slot(state): + empty_block = get_empty_block() + empty_block.slot = state.slot + 1 + previous_block_header = deepcopy(state.latest_block_header) + if previous_block_header.state_root == spec.ZERO_HASH: + previous_block_header.state_root = state.hash_tree_root() + empty_block.previous_block_root = previous_block_header.hash_tree_root() + return empty_block + + +def build_deposit_data(state, pubkey, privkey, amount): + deposit_input = DepositInput( + pubkey=pubkey, + withdrawal_credentials=privkey.to_bytes(32, byteorder='big'), + proof_of_possession=b'00'*96, + ) + proof_of_possession = bls.sign( + message_hash=signed_root(deposit_input), + privkey=privkey, + domain=get_domain( + state.fork, + get_current_epoch(state), + spec.DOMAIN_DEPOSIT, + ) + ) + deposit_input.proof_of_possession = proof_of_possession + deposit_data = DepositData( + amount=amount, + timestamp=0, + deposit_input=deposit_input, + ) + return deposit_data + + +def build_attestation_data(state, slot, shard): + assert state.slot >= slot + + block_root = 
build_empty_block_for_next_slot(state).previous_block_root + + epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) + if epoch_start_slot == slot: + epoch_boundary_root = block_root + else: + epoch_boundary_root = get_block_root(state, epoch_start_slot) + + if slot < epoch_start_slot: + justified_block_root = state.previous_justified_root + else: + justified_block_root = state.current_justified_root + + return AttestationData( + slot=slot, + shard=shard, + beacon_block_root=block_root, + source_epoch=state.current_justified_epoch, + source_root=justified_block_root, + target_root=epoch_boundary_root, + crosslink_data_root=spec.ZERO_HASH, + previous_crosslink=deepcopy(state.latest_crosslinks[shard]), + ) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 0e04df5dd..bfbb2de94 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -1,8 +1,10 @@ import os import sys import time - from copy import deepcopy + +import pytest + from py_ecc import bls import build.phase0.spec as spec @@ -48,78 +50,15 @@ from build.phase0.utils.merkle_minimal import ( get_merkle_proof, get_merkle_root, ) -# from state_test_gen import ( - # generate_from_test, - # dump_json, - # dump_yaml, -# ) +from tests.phase0.helpers import ( + build_attestation_data, + build_deposit_data, + build_empty_block_for_next_slot, +) -def get_empty_root(): - return get_merkle_root((spec.ZERO_HASH,)) - - -def construct_empty_block_for_next_slot(state): - empty_block = get_empty_block() - empty_block.slot = state.slot + 1 - previous_block_header = deepcopy(state.latest_block_header) - if previous_block_header.state_root == spec.ZERO_HASH: - previous_block_header.state_root = state.hash_tree_root() - empty_block.previous_block_root = previous_block_header.hash_tree_root() - return empty_block - - -def create_deposit_data(state, pubkey, privkey, amount): - deposit_input = DepositInput( - pubkey=pubkey, - withdrawal_credentials=privkey.to_bytes(32, byteorder='big'), - proof_of_possession=b'00'*96, - ) - proof_of_possession = bls.sign( - message_hash=signed_root(deposit_input), - privkey=privkey, - domain=get_domain( - state.fork, - get_current_epoch(state), - spec.DOMAIN_DEPOSIT, - ) - ) - deposit_input.proof_of_possession = proof_of_possession - deposit_data = DepositData( - amount=amount, - timestamp=0, - deposit_input=deposit_input, - ) - return deposit_data - - -def build_attestation_data(state, slot, shard): - assert state.slot >= slot - - block_root = construct_empty_block_for_next_slot(state).previous_block_root - - epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) - if epoch_start_slot == slot: - epoch_boundary_root = block_root - else: - get_block_root(state, epoch_start_slot) - - if slot < epoch_start_slot: - justified_block_root = state.previous_justified_root - else: - justified_block_root = state.current_justified_root - - return AttestationData( - slot=slot, - shard=shard, - beacon_block_root=block_root, - source_epoch=state.current_justified_epoch, - source_root=justified_block_root, - target_root=epoch_boundary_root, - crosslink_data_root=spec.ZERO_HASH, - previous_crosslink=deepcopy(state.latest_crosslinks[shard]), - ) - +# mark entire file as 'sanity' pytestmark = pytest.mark.sanity def test_slot_transition(state): test_state = deepcopy(state) @@ -133,7 +72,7 @@ def test_slot_transition(state): def test_empty_block_transition(state): test_state = deepcopy(state) - block = construct_empty_block_for_next_slot(test_state) + block = 
build_empty_block_for_next_slot(test_state) state_transition(test_state, block) assert len(test_state.eth1_data_votes) == len(state.eth1_data_votes) + 1 @@ -144,7 +83,7 @@ def test_empty_block_transition(state): def test_skipped_slots(state): test_state = deepcopy(state) - block = construct_empty_block_for_next_slot(test_state) + block = build_empty_block_for_next_slot(test_state) block.slot += 3 state_transition(test_state, block) @@ -158,7 +97,7 @@ def test_skipped_slots(state): def test_empty_epoch_transition(state): test_state = deepcopy(state) - block = construct_empty_block_for_next_slot(test_state) + block = build_empty_block_for_next_slot(test_state) block.slot += spec.SLOTS_PER_EPOCH state_transition(test_state, block) @@ -172,7 +111,7 @@ def test_empty_epoch_transition(state): def test_empty_epoch_transition_not_finalizing(state): test_state = deepcopy(state) - block = construct_empty_block_for_next_slot(test_state) + block = build_empty_block_for_next_slot(test_state) block.slot += spec.SLOTS_PER_EPOCH * 5 state_transition(test_state, block) @@ -226,7 +165,7 @@ def test_proposer_slashing(state, pubkeys, privkeys): # # Add to state via block transition # - block = construct_empty_block_for_next_slot(test_state) + block = build_empty_block_for_next_slot(test_state) block.body.proposer_slashings.append(proposer_slashing) state_transition(test_state, block) @@ -251,7 +190,7 @@ def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): index = len(test_deposit_data_leaves) pubkey = pubkeys[index] privkey = privkeys[index] - deposit_data = create_deposit_data(pre_state, pubkey, privkey, spec.MAX_DEPOSIT_AMOUNT) + deposit_data = build_deposit_data(pre_state, pubkey, privkey, spec.MAX_DEPOSIT_AMOUNT) item = hash(deposit_data.serialize()) test_deposit_data_leaves.append(item) @@ -268,7 +207,7 @@ def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): pre_state.latest_eth1_data.deposit_root = root post_state = deepcopy(pre_state) - block = construct_empty_block_for_next_slot(post_state) + block = build_empty_block_for_next_slot(post_state) block.body.deposits.append(deposit) state_transition(post_state, block) @@ -287,7 +226,7 @@ def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): amount = spec.MAX_DEPOSIT_AMOUNT // 4 pubkey = pubkeys[validator_index] privkey = privkeys[validator_index] - deposit_data = create_deposit_data(pre_state, pubkey, privkey, amount) + deposit_data = build_deposit_data(pre_state, pubkey, privkey, amount) merkle_index = len(test_deposit_data_leaves) item = hash(deposit_data.serialize()) @@ -304,7 +243,7 @@ def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): ) pre_state.latest_eth1_data.deposit_root = root - block = construct_empty_block_for_next_slot(pre_state) + block = build_empty_block_for_next_slot(pre_state) block.body.deposits.append(deposit) pre_balance = pre_state.validator_balances[validator_index] @@ -365,7 +304,7 @@ def test_attestation(state, pubkeys, privkeys): # # Add to state via block transition # - attestation_block = construct_empty_block_for_next_slot(test_state) + attestation_block = build_empty_block_for_next_slot(test_state) attestation_block.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY attestation_block.body.attestations.append(attestation) state_transition(test_state, attestation_block) @@ -377,7 +316,7 @@ def test_attestation(state, pubkeys, privkeys): # pre_current_epoch_attestations = deepcopy(test_state.current_epoch_attestations) - epoch_block = 
construct_empty_block_for_next_slot(test_state) + epoch_block = build_empty_block_for_next_slot(test_state) epoch_block.slot += spec.SLOTS_PER_EPOCH state_transition(test_state, epoch_block) @@ -417,7 +356,7 @@ def test_voluntary_exit(state, pubkeys, privkeys): # # Add to state via block transition # - initiate_exit_block = construct_empty_block_for_next_slot(post_state) + initiate_exit_block = build_empty_block_for_next_slot(post_state) initiate_exit_block.body.voluntary_exits.append(voluntary_exit) state_transition(post_state, initiate_exit_block) @@ -428,7 +367,7 @@ def test_voluntary_exit(state, pubkeys, privkeys): # # Process within epoch transition # - exit_block = construct_empty_block_for_next_slot(post_state) + exit_block = build_empty_block_for_next_slot(post_state) exit_block.slot += spec.SLOTS_PER_EPOCH state_transition(post_state, exit_block) @@ -476,7 +415,7 @@ def test_transfer(state, pubkeys, privkeys): # # Add to state via block transition # - block = construct_empty_block_for_next_slot(post_state) + block = build_empty_block_for_next_slot(post_state) block.body.transfers.append(transfer) state_transition(post_state, block) @@ -503,7 +442,7 @@ def test_ejection(state): # # trigger epoch transition # - block = construct_empty_block_for_next_slot(post_state) + block = build_empty_block_for_next_slot(post_state) block.slot += spec.SLOTS_PER_EPOCH state_transition(post_state, block) @@ -518,7 +457,7 @@ def test_historical_batch(state): post_state = deepcopy(pre_state) - block = construct_empty_block_for_next_slot(post_state) + block = build_empty_block_for_next_slot(post_state) state_transition(post_state, block) @@ -527,106 +466,3 @@ def test_historical_batch(state): assert len(post_state.historical_roots) == len(pre_state.historical_roots) + 1 return pre_state, [block], post_state - - -def sanity_tests(num_validators=100, config=None): - print(f"Buidling state with {num_validators} validators...") - if config: - overwrite_spec_config(config) - genesis_state = create_genesis_state(num_validators=num_validators) - print("done!") - print() - - test_cases = [] - - print("Running some sanity check tests...\n") - test_slot_transition(genesis_state) - print("Passed slot transition test\n") - test_cases.append( - generate_from_test(test_empty_block_transition, genesis_state, config=config, fields=['slot']) - ) - print("Passed empty block transition test\n") - test_cases.append( - generate_from_test(test_skipped_slots, genesis_state, config=config, fields=['slot', 'latest_block_roots']) - ) - print("Passed skipped slot test\n") - test_cases.append( - generate_from_test(test_empty_epoch_transition, genesis_state, config=config, fields=['slot', 'latest_block_roots']) - ) - print("Passed empty epoch transition test\n") - test_cases.append( - generate_from_test(test_empty_epoch_transition_not_finalizing, genesis_state, config=config, fields=['slot', 'finalized_epoch']) - ) - print("Passed non-finalizing epoch test\n") - test_cases.append( - generate_from_test(test_proposer_slashing, genesis_state, config=config, fields=['validator_registry', 'validator_balances']) - ) - print("Passed proposer slashing test\n") - test_cases.append( - generate_from_test(test_attestation, genesis_state, config=config, fields=['previous_epoch_attestations', 'current_epoch_attestations']) - ) - print("Passed attestation test\n") - test_cases.append( - generate_from_test(test_deposit_in_block, genesis_state, config=config, fields=['validator_registry', 'validator_balances']) - ) - print("Passed deposit 
test\n") - test_cases.append( - generate_from_test(test_deposit_top_up, genesis_state, config=config, fields=['validator_registry', 'validator_balances']) - ) - print("Passed deposit top up test\n") - test_cases.append( - generate_from_test(test_voluntary_exit, genesis_state, config=config, fields=['validator_registry']) - ) - print("Passed voluntary exit test\n") - test_cases.append( - generate_from_test(test_transfer, genesis_state, config=config, fields=['validator_balances']) - ) - print("Passed transfer test\n") - test_cases.append( - generate_from_test(test_ejection, genesis_state, config=config, fields=['validator_registry']) - ) - print("Passed ejection test\n") - test_cases.append( - generate_from_test(test_historical_batch, genesis_state, config=config, fields=['historical_roots']) - ) - print("Passed historical batch test\n") - print("done!") - - return test_cases - - -if __name__ == "__main__": - config = { - "SHARD_COUNT": 8, - "MIN_ATTESTATION_INCLUSION_DELAY": 2, - "TARGET_COMMITTEE_SIZE": 4, - "SLOTS_PER_EPOCH": 8, - "GENESIS_EPOCH": spec.GENESIS_SLOT // 8, - "SLOTS_PER_HISTORICAL_ROOT": 64, - "LATEST_RANDAO_MIXES_LENGTH": 64, - "LATEST_ACTIVE_INDEX_ROOTS_LENGTH": 64, - "LATEST_SLASHED_EXIT_LENGTH": 64, - } - - test_cases = sanity_tests(32, config) - # uncomment below to run/generate against the default config - # test_cases = sanity_tests(100) - - test = {} - metadata = {} - metadata['title'] = "Sanity tests" - metadata['summary'] = "Basic sanity checks from phase 0 spec pythonization. All tests are run with `verify_signatures` as set to False." - metadata['test_suite'] = "beacon_state" - metadata['fork'] = "tchaikovsky" - metadata['version'] = "v0.5.0" - test['metadata'] = metadata - test['test_cases'] = test_cases - - if '--output-json' in sys.argv: - os.makedirs('output', exist_ok=True) - with open("output/sanity_check_tests.json", "w+") as outfile: - dump_json(test, outfile) - if '--output-yaml' in sys.argv: - os.makedirs('output', exist_ok=True) - with open("output/sanity_check_tests.yaml", "w+") as outfile: - dump_yaml(test, outfile) From 4440be4e1f92154242960eeaa12320c3b0f5e404 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 18 Mar 2019 16:30:16 -0600 Subject: [PATCH 058/133] add comment to Makefile --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index f0f7557e9..b45cec410 100644 --- a/Makefile +++ b/Makefile @@ -14,9 +14,12 @@ clean: rm -rf $(BUILD_DIR) +# runs a limited set of tests against a minimal config +# run pytest with `-m` option to full suite test: pytest -m "sanity and minimal_config" tests/ + $(BUILD_DIR)/phase0: mkdir -p $@ python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@/spec.py From 2dce326310cc99adccf083c4a06b7cc09b68d244 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Mon, 18 Mar 2019 16:02:31 -0700 Subject: [PATCH 059/133] Bring back envelope --- specs/networking/messaging.md | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/specs/networking/messaging.md b/specs/networking/messaging.md index e88116f46..de92fe6d4 100644 --- a/specs/networking/messaging.md +++ b/specs/networking/messaging.md @@ -15,15 +15,17 @@ This specification seeks to define a messaging protocol that is flexible enough ## Message Structure -An ETH 2.0 message consists of a single byte representing the message version followed by the encoded, potentially compressed body. 
We separate the message's version from the version included in the `libp2p` protocol path in order to allow encoding and compression schemes to be updated independently of the `libp2p` protocols themselves. - -It is unlikely that more than 255 message versions will need to be supported, so a single byte should suffice. +An ETH 2.0 message consists of an envelope that defines the message's compression, encoding, and length followed by the body itself. Visually, a message looks like this: ``` +--------------------------+ -| version byte | +| compression nibble | ++--------------------------+ +| encoding nibble | ++--------------------------+ +| body length (uint64) | +--------------------------+ | | | body | @@ -31,11 +33,12 @@ Visually, a message looks like this: +--------------------------+ ``` -Clients MUST ignore messages with mal-formed bodies. The `version` byte MUST be one of the below values: +Clients MUST ignore messages with mal-formed bodies. The compression/encoding nibbles MUST be one of the following values: -## Version Byte Values +## Compression Nibble Values -### `0x01` +- `0x0`: no compression -- **Encoding Scheme:** SSZ -- **Compression Scheme:** Snappy +## Encoding Nibble Values + +- `0x1`: SSZ From a07219c57045a22d13304148dbae8bb9121a0181 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 19 Mar 2019 11:39:19 +0800 Subject: [PATCH 060/133] Fix linter --- scripts/phase0/build_spec.py | 1 - scripts/phase0/function_puller.py | 2 +- tests/phase0/helpers.py | 5 +++-- tests/phase0/test_sanity.py | 34 ++++++++----------------------- utils/phase0/bls_stub.py | 2 +- utils/phase0/hash_function.py | 5 +++-- utils/phase0/merkle_minimal.py | 12 ++++++----- utils/phase0/minimal_ssz.py | 24 ++++++++++++++++++---- utils/phase0/state_transition.py | 2 +- 9 files changed, 45 insertions(+), 42 deletions(-) diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index eb4f580bd..ae5a5a4f2 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -34,7 +34,6 @@ Any = None Store = None """) - code_lines += function_puller.get_lines(sourcefile) code_lines.append(""" diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py index 8d1c1a0cc..7d5796fc7 100644 --- a/scripts/phase0/function_puller.py +++ b/scripts/phase0/function_puller.py @@ -9,7 +9,7 @@ def get_lines(file_name): for linenum, line in enumerate(open(sys.argv[1]).readlines()): line = line.rstrip() if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`': - current_name = line[line[:-1].rfind('`')+1: -1] + current_name = line[line[:-1].rfind('`') + 1: -1] if line[:9] == '```python': assert pulling_from is None pulling_from = linenum + 1 diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index fa0ba61b5..f7c39ffec 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -26,7 +26,7 @@ from build.phase0.utils.merkle_minimal import ( ) -privkeys_list = [i+1 for i in range(1000)] +privkeys_list = [i + 1 for i in range(1000)] pubkeys_list = [bls.privtopub(privkey) for privkey in privkeys_list] pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys_list, pubkeys_list)} @@ -77,6 +77,7 @@ def create_genesis_state(num_validators, deposit_data_leaves): ), ) + def build_empty_block_for_next_slot(state): empty_block = get_empty_block() empty_block.slot = state.slot + 1 @@ -91,7 +92,7 @@ def build_deposit_data(state, pubkey, privkey, amount): deposit_input = DepositInput( pubkey=pubkey, 
withdrawal_credentials=privkey.to_bytes(32, byteorder='big'), - proof_of_possession=b'00'*96, + proof_of_possession=b'\x00' * 96, ) proof_of_possession = bls.sign( message_hash=signed_root(deposit_input), diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index bfbb2de94..8799c1ffb 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -1,6 +1,3 @@ -import os -import sys -import time from copy import deepcopy import pytest @@ -12,32 +9,21 @@ from build.phase0.utils.minimal_ssz import signed_root from build.phase0.spec import ( # SSZ Attestation, - AttestationData, AttestationDataAndCustodyBit, BeaconBlockHeader, Deposit, - DepositData, - DepositInput, - Eth1Data, Transfer, ProposerSlashing, - Validator, VoluntaryExit, # functions - int_to_bytes32, - int_to_bytes48, get_active_validator_indices, get_attestation_participants, get_block_root, get_crosslink_committees_at_slot, get_current_epoch, get_domain, - get_empty_block, - get_epoch_start_slot, - get_genesis_beacon_state, get_state_root, advance_slot, - slot_to_epoch, cache_state, verify_merkle_branch, hash, @@ -60,6 +46,7 @@ from tests.phase0.helpers import ( # mark entire file as 'sanity' pytestmark = pytest.mark.sanity + def test_slot_transition(state): test_state = deepcopy(state) cache_state(test_state) @@ -126,18 +113,17 @@ def test_proposer_slashing(state, pubkeys, privkeys): test_state = deepcopy(state) current_epoch = get_current_epoch(test_state) validator_index = get_active_validator_indices(test_state.validator_registry, current_epoch)[-1] - pubkey = pubkeys[validator_index] privkey = privkeys[validator_index] slot = spec.GENESIS_SLOT header_1 = BeaconBlockHeader( slot=slot, - previous_block_root=b'\x00'*32, - state_root=b'\x00'*32, - block_body_root=b'\x00'*32, - signature=b'\x00'*96 + previous_block_root=b'\x00' * 32, + state_root=b'\x00' * 32, + block_body_root=b'\x00' * 32, + signature=b'\x00' * 96 ) header_2 = deepcopy(header_1) - header_2.previous_block_root = b'\x02'*32 + header_2.previous_block_root = b'\x02' * 32 header_2.slot = slot + 1 domain = get_domain( @@ -273,7 +259,7 @@ def test_attestation(state, pubkeys, privkeys): aggregation_bitfield=aggregation_bitfield, data=attestation_data, custody_bitfield=custody_bitfield, - aggregate_signature=b'\x00'*96, + aggregate_signature=b'\x00' * 96, ) participants = get_attestation_participants( test_state, @@ -283,7 +269,6 @@ def test_attestation(state, pubkeys, privkeys): assert len(participants) == 1 validator_index = participants[0] - pubkey = pubkeys[validator_index] privkey = privkeys[validator_index] message_hash = AttestationDataAndCustodyBit( @@ -329,7 +314,6 @@ def test_attestation(state, pubkeys, privkeys): def test_voluntary_exit(state, pubkeys, privkeys): pre_state = deepcopy(state) validator_index = get_active_validator_indices(pre_state.validator_registry, get_current_epoch(pre_state))[-1] - pubkey = pubkeys[validator_index] # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH @@ -341,7 +325,7 @@ def test_voluntary_exit(state, pubkeys, privkeys): voluntary_exit = VoluntaryExit( epoch=get_current_epoch(pre_state), validator_index=validator_index, - signature=b'\x00'*96, + signature=b'\x00' * 96, ) voluntary_exit.signature = bls.sign( message_hash=signed_root(voluntary_exit), @@ -392,7 +376,7 @@ def test_transfer(state, pubkeys, privkeys): fee=0, slot=pre_state.slot + 1, pubkey=transfer_pubkey, - signature=b'\x00'*96, + 
signature=b'\x00' * 96, ) transfer.signature = bls.sign( message_hash=signed_root(transfer), diff --git a/utils/phase0/bls_stub.py b/utils/phase0/bls_stub.py index 7e3a6a308..108c4ef71 100644 --- a/utils/phase0/bls_stub.py +++ b/utils/phase0/bls_stub.py @@ -9,4 +9,4 @@ def bls_verify_multiple(pubkeys, message_hashes, signature, domain): def bls_aggregate_pubkeys(pubkeys): - return b'\x42'*96 + return b'\x42' * 96 diff --git a/utils/phase0/hash_function.py b/utils/phase0/hash_function.py index da5b4d979..21e6555bf 100644 --- a/utils/phase0/hash_function.py +++ b/utils/phase0/hash_function.py @@ -1,6 +1,7 @@ -from hashlib import sha256 +# from hashlib import sha256 from eth_utils import keccak # def hash(x): return sha256(x).digest() -def hash(x): return keccak(x) +def hash(x): + return keccak(x) diff --git a/utils/phase0/merkle_minimal.py b/utils/phase0/merkle_minimal.py index a811350ce..7c5483de3 100644 --- a/utils/phase0/merkle_minimal.py +++ b/utils/phase0/merkle_minimal.py @@ -2,8 +2,9 @@ from .hash_function import hash zerohashes = [b'\x00' * 32] -for i in range(1, 32): - zerohashes.append(hash(zerohashes[i-1] + zerohashes[i-1])) +for layer in range(1, 32): + zerohashes.append(hash(zerohashes[layer - 1] + zerohashes[layer - 1])) + # Compute a Merkle root of a right-zerobyte-padded 2**32 sized tree def calc_merkle_tree_from_leaves(values): @@ -12,17 +13,18 @@ def calc_merkle_tree_from_leaves(values): for h in range(32): if len(values) % 2 == 1: values.append(zerohashes[h]) - # print(values) - values = [hash(values[i] + values[i+1]) for i in range(0, len(values), 2)] + values = [hash(values[i] + values[i + 1]) for i in range(0, len(values), 2)] tree.append(values[::]) return tree + def get_merkle_root(values): return calc_merkle_tree_from_leaves(values)[-1][0] + def get_merkle_proof(tree, item_index): proof = [] for i in range(32): - subindex = (item_index//2**i)^1 + subindex = (item_index // 2**i) ^ 1 proof.append(tree[i][subindex] if subindex < len(tree[i]) else zerohashes[i]) return proof diff --git a/utils/phase0/minimal_ssz.py b/utils/phase0/minimal_ssz.py index 845de18c3..08bd68357 100644 --- a/utils/phase0/minimal_ssz.py +++ b/utils/phase0/minimal_ssz.py @@ -5,6 +5,7 @@ BYTES_PER_CHUNK = 32 BYTES_PER_LENGTH_PREFIX = 4 ZERO_CHUNK = b'\x00' * BYTES_PER_CHUNK + def SSZType(fields): class SSZObject(): def __init__(self, **kwargs): @@ -37,6 +38,7 @@ def SSZType(fields): SSZObject.fields = fields return SSZObject + class Vector(list): def __init__(self, x): list.__init__(self, x) @@ -47,9 +49,11 @@ class Vector(list): remove = clear = extend = pop = insert = append + def is_basic(typ): return isinstance(typ, str) and (typ[:4] in ('uint', 'bool') or typ == 'byte') + def is_constant_sized(typ): if is_basic(typ): return True @@ -67,6 +71,7 @@ def is_constant_sized(typ): else: raise Exception("Type not recognized") + def coerce_to_bytes(x): if isinstance(x, str): o = x.encode('utf-8') @@ -77,6 +82,7 @@ def coerce_to_bytes(x): else: raise Exception("Expecting bytes") + def serialize_value(value, typ=None): if typ is None: typ = infer_type(value) @@ -110,28 +116,34 @@ def serialize_value(value, typ=None): print(value, typ) raise Exception("Type not recognized") + def chunkify(bytez): bytez += b'\x00' * (-len(bytez) % BYTES_PER_CHUNK) - return [bytez[i:i+32] for i in range(0, len(bytez), 32)] + return [bytez[i:i + 32] for i in range(0, len(bytez), 32)] + def pack(values, subtype): return chunkify(b''.join([serialize_value(value, subtype) for value in values])) + def is_power_of_two(x): - 
return x > 0 and x & (x-1) == 0 + return x > 0 and x & (x - 1) == 0 + def merkleize(chunks): tree = chunks[::] while not is_power_of_two(len(tree)): tree.append(ZERO_CHUNK) tree = [ZERO_CHUNK] * len(tree) + tree - for i in range(len(tree)//2-1, 0, -1): - tree[i] = hash(tree[i*2] + tree[i*2+1]) + for i in range(len(tree) // 2 - 1, 0, -1): + tree[i] = hash(tree[i * 2] + tree[i * 2 + 1]) return tree[1] + def mix_in_length(root, length): return hash(root + length.to_bytes(32, 'little')) + def infer_type(value): if hasattr(value.__class__, 'fields'): return value.__class__ @@ -146,6 +158,7 @@ def infer_type(value): else: raise Exception("Failed to infer type") + def hash_tree_root(value, typ=None): if typ is None: typ = infer_type(value) @@ -170,6 +183,7 @@ def hash_tree_root(value, typ=None): else: raise Exception("Type not recognized") + def truncate(container): field_keys = list(container.fields.keys()) truncated_fields = { @@ -183,8 +197,10 @@ def truncate(container): } return truncated_class(**kwargs) + def signed_root(container): return hash_tree_root(truncate(container)) + def serialize(ssz_object): return getattr(ssz_object, 'serialize')() diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py index 170f647ab..92d67c45a 100644 --- a/utils/phase0/state_transition.py +++ b/utils/phase0/state_transition.py @@ -1,7 +1,7 @@ from . import spec -from typing import ( +from typing import ( # noqa: F401 Any, Callable, List, From 8fc1fe5f2075d290d55c16e3cbe18aa782fff6c2 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 19 Mar 2019 11:46:32 +0800 Subject: [PATCH 061/133] Use `EMPTY_SIGNATURE` and `ZERO_HASH` in the tests --- tests/phase0/helpers.py | 6 +++++- tests/phase0/test_sanity.py | 17 ++++++++++------- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index f7c39ffec..76206b00d 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -5,11 +5,15 @@ from py_ecc import bls import build.phase0.spec as spec from build.phase0.utils.minimal_ssz import signed_root from build.phase0.spec import ( + # constants + EMPTY_SIGNATURE, + # SSZ AttestationData, Deposit, DepositInput, DepositData, Eth1Data, + # functions get_block_root, get_current_epoch, get_domain, @@ -92,7 +96,7 @@ def build_deposit_data(state, pubkey, privkey, amount): deposit_input = DepositInput( pubkey=pubkey, withdrawal_credentials=privkey.to_bytes(32, byteorder='big'), - proof_of_possession=b'\x00' * 96, + proof_of_possession=EMPTY_SIGNATURE, ) proof_of_possession = bls.sign( message_hash=signed_root(deposit_input), diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 8799c1ffb..8f04f316c 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -7,6 +7,9 @@ import build.phase0.spec as spec from build.phase0.utils.minimal_ssz import signed_root from build.phase0.spec import ( + # constants + EMPTY_SIGNATURE, + ZERO_HASH, # SSZ Attestation, AttestationDataAndCustodyBit, @@ -117,10 +120,10 @@ def test_proposer_slashing(state, pubkeys, privkeys): slot = spec.GENESIS_SLOT header_1 = BeaconBlockHeader( slot=slot, - previous_block_root=b'\x00' * 32, - state_root=b'\x00' * 32, - block_body_root=b'\x00' * 32, - signature=b'\x00' * 96 + previous_block_root=ZERO_HASH, + state_root=ZERO_HASH, + block_body_root=ZERO_HASH, + signature=EMPTY_SIGNATURE, ) header_2 = deepcopy(header_1) header_2.previous_block_root = b'\x02' * 32 @@ -259,7 +262,7 @@ def test_attestation(state, pubkeys, privkeys): 
aggregation_bitfield=aggregation_bitfield, data=attestation_data, custody_bitfield=custody_bitfield, - aggregate_signature=b'\x00' * 96, + aggregate_signature=EMPTY_SIGNATURE, ) participants = get_attestation_participants( test_state, @@ -325,7 +328,7 @@ def test_voluntary_exit(state, pubkeys, privkeys): voluntary_exit = VoluntaryExit( epoch=get_current_epoch(pre_state), validator_index=validator_index, - signature=b'\x00' * 96, + signature=EMPTY_SIGNATURE, ) voluntary_exit.signature = bls.sign( message_hash=signed_root(voluntary_exit), @@ -376,7 +379,7 @@ def test_transfer(state, pubkeys, privkeys): fee=0, slot=pre_state.slot + 1, pubkey=transfer_pubkey, - signature=b'\x00' * 96, + signature=EMPTY_SIGNATURE, ) transfer.signature = bls.sign( message_hash=signed_root(transfer), From dc4b652f72339063bfbaae378e850d173168c9f6 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 11:03:42 +0000 Subject: [PATCH 062/133] Only slash active validators This is to prevent a spam/DoS attack where validators with zero balance get "slashed" but no validator loses any balance. --- specs/core/0_beacon-chain.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a631bf2fc..2113472e3 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2315,8 +2315,8 @@ def process_proposer_slashing(state: BeaconState, assert slot_to_epoch(proposer_slashing.header_1.slot) == slot_to_epoch(proposer_slashing.header_2.slot) # But the headers are different assert proposer_slashing.header_1 != proposer_slashing.header_2 - # Proposer is not yet slashed - assert proposer.slashed is False + # Proposer is active and not already slashed + assert is_active_validator(proposer) and proposer.slashed is False # Signatures are valid for header in (proposer_slashing.header_1, proposer_slashing.header_2): assert bls_verify( @@ -2355,6 +2355,7 @@ def process_attester_slashing(state: BeaconState, index for index in attestation1.validator_indices if ( index in attestation2.validator_indices and + is_active_validator(state.validator_registry[index]) and state.validator_registry[index].slashed is False ) ] From 2b454d57f11d8e1bde78dd1aa83116df2b2417ee Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 11:08:17 +0000 Subject: [PATCH 063/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2113472e3..9ed620b83 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -59,6 +59,7 @@ - [`get_current_epoch`](#get_current_epoch) - [`get_epoch_start_slot`](#get_epoch_start_slot) - [`is_active_validator`](#is_active_validator) + - [`is_slashable_validator`](#is_slashable_validator) - [`get_active_validator_indices`](#get_active_validator_indices) - [`get_permuted_index`](#get_permuted_index) - [`split`](#split) @@ -737,6 +738,18 @@ def is_active_validator(validator: Validator, epoch: Epoch) -> bool: return validator.activation_epoch <= epoch < validator.exit_epoch ``` +### `is_slashable_validator` +```python +def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: + """ + Check if ``validator`` is slashable. 
+ """ + return ( + validator.activation_epoch <= epoch < validator.withdrawable_epoch and + validator.slashed is False + ) +``` + ### `get_active_validator_indices` ```python @@ -2315,8 +2328,8 @@ def process_proposer_slashing(state: BeaconState, assert slot_to_epoch(proposer_slashing.header_1.slot) == slot_to_epoch(proposer_slashing.header_2.slot) # But the headers are different assert proposer_slashing.header_1 != proposer_slashing.header_2 - # Proposer is active and not already slashed - assert is_active_validator(proposer) and proposer.slashed is False + # Check proposer is slashable + assert is_slashable_validator(proposer) # Signatures are valid for header in (proposer_slashing.header_1, proposer_slashing.header_2): assert bls_verify( @@ -2355,8 +2368,7 @@ def process_attester_slashing(state: BeaconState, index for index in attestation1.validator_indices if ( index in attestation2.validator_indices and - is_active_validator(state.validator_registry[index]) and - state.validator_registry[index].slashed is False + is_slashable_validator(state.validator_registry[index]) ) ] assert len(slashable_indices) >= 1 From 0c383ce4a1d4770bdb21975023a2ca7a3ef5f522 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 11:11:18 +0000 Subject: [PATCH 064/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 9ed620b83..d377b8d45 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1412,7 +1412,6 @@ def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: Note that this function mutates ``state``. """ validator = state.validator_registry[index] - assert state.slot < get_epoch_start_slot(validator.withdrawable_epoch) # [TO BE REMOVED IN PHASE 2] exit_validator(state, index) state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += get_effective_balance(state, index) From e91036cfc9fbf9d05b03da0180ed5be95cc916ca Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 11:12:50 +0000 Subject: [PATCH 065/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d377b8d45..4a6170418 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2328,7 +2328,7 @@ def process_proposer_slashing(state: BeaconState, # But the headers are different assert proposer_slashing.header_1 != proposer_slashing.header_2 # Check proposer is slashable - assert is_slashable_validator(proposer) + assert is_slashable_validator(proposer, get_current_epoch(state)) # Signatures are valid for header in (proposer_slashing.header_1, proposer_slashing.header_2): assert bls_verify( @@ -2367,7 +2367,7 @@ def process_attester_slashing(state: BeaconState, index for index in attestation1.validator_indices if ( index in attestation2.validator_indices and - is_slashable_validator(state.validator_registry[index]) + is_slashable_validator(state.validator_registry[index], get_current_epoch(state)) ) ] assert len(slashable_indices) >= 1 From 78f47f2069ec753c08dd7b278f7fc073b086cc34 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 12:23:17 +0000 Subject: [PATCH 066/133] Avoid underflow in voluntary exits --- specs/core/0_beacon-chain.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a631bf2fc..be6a52e68 
100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2472,6 +2472,7 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= exit.epoch # Must have been in the validator set long enough + assert validator.activation_epoch != FAR_FUTURE_EPOCH assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD # Verify signature assert bls_verify( From dd39d25c86d812e7d5ac24e6bc5f043426e3617d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 19 Mar 2019 09:32:06 -0500 Subject: [PATCH 067/133] Replace committee exponential backoff with max progress Removes the mechanism that only rotates committees if blocks have been finalized and every shard has been crosslinked or at exponentially decreasing intervals, and replaces it with a rule that shard committees can only progress a maximum of 64 epochs at a time to preserve the invariant that maximum possible work required per epoch for a validator is O(1). --- specs/core/0_beacon-chain.md | 95 +++++++++++------------------------- 1 file changed, 28 insertions(+), 67 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a631bf2fc..6877b9358 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -185,8 +185,10 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `MAX_INDICES_PER_SLASHABLE_VOTE` | `2**12` (= 4,096) | | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | +| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) 
+* `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // EPOCH_LENGTH` ### Deposit contract @@ -598,12 +600,7 @@ The types are defined topologically to aid in facilitating an executable version # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], - 'previous_shuffling_start_shard': 'uint64', 'current_shuffling_start_shard': 'uint64', - 'previous_shuffling_epoch': 'uint64', - 'current_shuffling_epoch': 'uint64', - 'previous_shuffling_seed': 'bytes32', - 'current_shuffling_seed': 'bytes32', # Finality 'previous_epoch_attestations': [PendingAttestation], @@ -849,7 +846,7 @@ def get_current_epoch_committee_count(state: BeaconState) -> int: """ current_active_validators = get_active_validator_indices( state.validator_registry, - state.current_shuffling_epoch, + get_current_epoch(state), ) return get_epoch_committee_count(len(current_active_validators)) ``` @@ -886,40 +883,30 @@ def get_crosslink_committees_at_slot(state: BeaconState, next_epoch = current_epoch + 1 assert previous_epoch <= epoch <= next_epoch + committees_per_epoch = get_epoch_committee_count(get_active_validator_indices( + state.validator_registry, + epoch, + )) if epoch == current_epoch: - committees_per_epoch = get_current_epoch_committee_count(state) - seed = state.current_shuffling_seed - shuffling_epoch = state.current_shuffling_epoch shuffling_start_shard = state.current_shuffling_start_shard elif epoch == previous_epoch: - committees_per_epoch = get_previous_epoch_committee_count(state) - seed = state.previous_shuffling_seed - shuffling_epoch = state.previous_shuffling_epoch - shuffling_start_shard = state.previous_shuffling_start_shard + shuffling_start_shard = ( + state.current_shuffling_start_shard - EPOCH_LENGTH * committees_per_epoch + ) % SHARD_COUNT elif epoch == next_epoch: - epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch - if registry_change: - committees_per_epoch = get_next_epoch_committee_count(state) - seed = generate_seed(state, next_epoch) - shuffling_epoch = next_epoch - current_committees_per_epoch = get_current_epoch_committee_count(state) - shuffling_start_shard = (state.current_shuffling_start_shard + current_committees_per_epoch) % SHARD_COUNT - elif epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update): - committees_per_epoch = get_next_epoch_committee_count(state) - seed = generate_seed(state, next_epoch) - shuffling_epoch = next_epoch - shuffling_start_shard = state.current_shuffling_start_shard - else: - committees_per_epoch = get_current_epoch_committee_count(state) - seed = state.current_shuffling_seed - shuffling_epoch = state.current_shuffling_epoch - shuffling_start_shard = state.current_shuffling_start_shard + current_epoch_committees = get_epoch_committee_count(get_active_validator_indices( + state.validator_registry, + current_epoch, + )) + shuffling_start_shard = ( + state.current_shuffling_start_shard + EPOCH_LENGTH * current_epoch_committees + ) % SHARD_COUNT shuffling = get_shuffling( - seed, + generate_seed(state, epoch), state.validator_registry, - shuffling_epoch, + epoch, ) offset = slot % SLOTS_PER_EPOCH committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH @@ -1529,12 +1516,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Randomness and committees latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), - previous_shuffling_start_shard=GENESIS_START_SHARD, 
current_shuffling_start_shard=GENESIS_START_SHARD, - previous_shuffling_epoch=GENESIS_EPOCH, - current_shuffling_epoch=GENESIS_EPOCH, - previous_shuffling_seed=ZERO_HASH, - current_shuffling_seed=ZERO_HASH, # Finality previous_epoch_attestations=[], @@ -1574,7 +1556,6 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], genesis_active_index_root = hash_tree_root(get_active_validator_indices(state.validator_registry, GENESIS_EPOCH)) for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): state.latest_active_index_roots[index] = genesis_active_index_root - state.current_shuffling_seed = generate_seed(state, GENESIS_EPOCH) return state ``` @@ -1855,7 +1836,7 @@ def process_crosslinks(state: BeaconState) -> None: total_balance = get_total_balance(state, crosslink_committee) if 3 * participating_balance >= 2 * total_balance: state.latest_crosslinks[shard] = Crosslink( - epoch=slot_to_epoch(slot), + epoch=min(slot_to_epoch(slot), state.latest_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), crosslink_data_root=winning_root ) ``` @@ -2060,14 +2041,6 @@ def should_update_validator_registry(state: BeaconState) -> bool: # Must have finalized a new block if state.finalized_epoch <= state.validator_registry_update_epoch: return False - # Must have processed new crosslinks on all shards of the current epoch - shards_to_check = [ - (state.current_shuffling_start_shard + i) % SHARD_COUNT - for i in range(get_current_epoch_committee_count(state)) - ] - for shard in shards_to_check: - if state.latest_crosslinks[shard].epoch <= state.validator_registry_update_epoch: - return False return True ``` @@ -2119,30 +2092,17 @@ Run the following function: ```python -def update_registry_and_shuffling_data(state: BeaconState) -> None: - # First set previous shuffling data to current shuffling data - state.previous_shuffling_epoch = state.current_shuffling_epoch - state.previous_shuffling_start_shard = state.current_shuffling_start_shard - state.previous_shuffling_seed = state.current_shuffling_seed +def update_registry(state: BeaconState) -> None: current_epoch = get_current_epoch(state) next_epoch = current_epoch + 1 # Check if we should update, and if so, update if should_update_validator_registry(state): update_validator_registry(state) - # If we update the registry, update the shuffling data and shards as well - state.current_shuffling_epoch = next_epoch - state.current_shuffling_start_shard = ( - state.current_shuffling_start_shard + - get_current_epoch_committee_count(state) % SHARD_COUNT - ) - state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch) - else: - # If processing at least one crosslink keeps failing, then reshuffle every power of two, - # but don't update the current_shuffling_start_shard - epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch - if epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update): - state.current_shuffling_epoch = next_epoch - state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch) + # If we update the registry, update the shuffling data and shards as well + state.current_shuffling_start_shard = ( + state.current_shuffling_start_shard + + get_current_epoch_committee_count(state) % SHARD_COUNT + ) ``` **Invariant**: the active index root that is hashed into the shuffling seed actually is the `hash_tree_root` of the validator set that is used for that epoch. 
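The sketch below is illustrative only and not part of the patch: the constants are toy values, and the helper names (`capped_crosslink_epoch`, `epoch_committee_count`, `rotate_start_shard`) are stand-ins rather than spec functions, with `epoch_committee_count` merely mirroring the shape of the spec's committee-count rule. It shows the two mechanics this commit introduces, capping a new crosslink at `MAX_CROSSLINK_EPOCHS` past the shard's previous crosslink, and advancing the start shard by one epoch's committee count per registry update.

```python
# Toy constants; the real values and helpers live in specs/core/0_beacon-chain.md.
SHARD_COUNT = 1024
SLOTS_PER_EPOCH = 64
TARGET_COMMITTEE_SIZE = 128
MAX_CROSSLINK_EPOCHS = 2**6  # 64


def capped_crosslink_epoch(attestation_epoch: int, previous_crosslink_epoch: int) -> int:
    # A new crosslink may claim at most MAX_CROSSLINK_EPOCHS of progress past the
    # shard's previous crosslink, keeping per-epoch verification work bounded.
    return min(attestation_epoch, previous_crosslink_epoch + MAX_CROSSLINK_EPOCHS)


def epoch_committee_count(active_validator_count: int) -> int:
    # Same shape as the spec's committee-count rule, using the toy constants above.
    return max(
        1,
        min(
            SHARD_COUNT // SLOTS_PER_EPOCH,
            active_validator_count // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE,
        ),
    ) * SLOTS_PER_EPOCH


def rotate_start_shard(start_shard: int, active_validator_count: int) -> int:
    # Each registry update advances the start shard by one epoch's worth of
    # committees, wrapping around the shard space.
    return (start_shard + epoch_committee_count(active_validator_count)) % SHARD_COUNT


assert capped_crosslink_epoch(10, 8) == 10    # small gap: full progress
assert capped_crosslink_epoch(500, 8) == 72   # long gap: capped at 8 + 64
assert rotate_start_shard(0, 100000) == 768   # 12 committees per slot * 64 slots, mod 1024
```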
@@ -2397,7 +2357,8 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: # the attestation is trying to create Crosslink( crosslink_data_root=attestation.data.crosslink_data_root, - epoch=slot_to_epoch(attestation.data.slot) + epoch=min(slot_to_epoch(attestation.data.slot), + attestation.data.previous_crosslink.epoch + MAX_CROSSLINK_EPOCHS) ) } assert state.latest_crosslinks[attestation.data.shard] in acceptable_crosslink_data From db92235d9ed3eeffa846e50eef82895567cf77c7 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 19 Mar 2019 09:34:37 -0500 Subject: [PATCH 068/133] Removed some no-longer-necessary functions --- specs/core/0_beacon-chain.md | 35 +---------------------------------- 1 file changed, 1 insertion(+), 34 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 6877b9358..9e52148a0 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -64,9 +64,7 @@ - [`split`](#split) - [`get_epoch_committee_count`](#get_epoch_committee_count) - [`get_shuffling`](#get_shuffling) - - [`get_previous_epoch_committee_count`](#get_previous_epoch_committee_count) - [`get_current_epoch_committee_count`](#get_current_epoch_committee_count) - - [`get_next_epoch_committee_count`](#get_next_epoch_committee_count) - [`get_crosslink_committees_at_slot`](#get_crosslink_committees_at_slot) - [`get_block_root`](#get_block_root) - [`get_state_root`](#get_state_root) @@ -823,20 +821,6 @@ def get_shuffling(seed: Bytes32, **Note**: this definition and the next few definitions make heavy use of repetitive computing. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. -### `get_previous_epoch_committee_count` - -```python -def get_previous_epoch_committee_count(state: BeaconState) -> int: - """ - Return the number of committees in the previous epoch of the given ``state``. - """ - previous_active_validators = get_active_validator_indices( - state.validator_registry, - state.previous_shuffling_epoch, - ) - return get_epoch_committee_count(len(previous_active_validators)) -``` - ### `get_current_epoch_committee_count` ```python @@ -851,20 +835,6 @@ def get_current_epoch_committee_count(state: BeaconState) -> int: return get_epoch_committee_count(len(current_active_validators)) ``` -### `get_next_epoch_committee_count` - -```python -def get_next_epoch_committee_count(state: BeaconState) -> int: - """ - Return the number of committees in the next epoch of the given ``state``. 
- """ - next_active_validators = get_active_validator_indices( - state.validator_registry, - get_current_epoch(state) + 1, - ) - return get_epoch_committee_count(len(next_active_validators)) -``` - ### `get_crosslink_committees_at_slot` ```python @@ -895,10 +865,7 @@ def get_crosslink_committees_at_slot(state: BeaconState, state.current_shuffling_start_shard - EPOCH_LENGTH * committees_per_epoch ) % SHARD_COUNT elif epoch == next_epoch: - current_epoch_committees = get_epoch_committee_count(get_active_validator_indices( - state.validator_registry, - current_epoch, - )) + current_epoch_committees = get_current_epoch_committee_count(state) shuffling_start_shard = ( state.current_shuffling_start_shard + EPOCH_LENGTH * current_epoch_committees ) % SHARD_COUNT From d645a9a0389ea571f39e901ce74274ee9d3e704f Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 19 Mar 2019 09:49:32 -0600 Subject: [PATCH 069/133] ensure run verify-state root with block Co-Authored-By: djrtwo --- utils/phase0/state_transition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py index 92d67c45a..eefc3d409 100644 --- a/utils/phase0/state_transition.py +++ b/utils/phase0/state_transition.py @@ -97,4 +97,4 @@ def state_transition(state: BeaconState, process_epoch_transition(state) spec.advance_slot(state) if block.slot == state.slot: - process_block(state, block) + process_block(state, block, verify_state_root) From fbd0bb9226102ef4dbde606c0853d8dd74e0b60b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 09:56:04 -0600 Subject: [PATCH 070/133] withdrwaal_key uses pubkey --- tests/phase0/helpers.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 76206b00d..510361e9c 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -42,13 +42,13 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves): deposit_data_list = [] for i in range(num_validators): pubkey = pubkeys_list[i] - privkey = pubkey_to_privkey[pubkey] deposit_data = DepositData( amount=spec.MAX_DEPOSIT_AMOUNT, timestamp=deposit_timestamp, deposit_input=DepositInput( pubkey=pubkey, - withdrawal_credentials=privkey.to_bytes(32, byteorder='big'), + # insecurely use pubkey as withdrawal key as well + withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], proof_of_possession=proof_of_possession, ), ) @@ -95,7 +95,8 @@ def build_empty_block_for_next_slot(state): def build_deposit_data(state, pubkey, privkey, amount): deposit_input = DepositInput( pubkey=pubkey, - withdrawal_credentials=privkey.to_bytes(32, byteorder='big'), + # insecurely use pubkey as withdrawal key as well + withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], proof_of_possession=EMPTY_SIGNATURE, ) proof_of_possession = bls.sign( From c5ee74d5e03376ec5c3bef1d294aaa9a3da831f6 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 19 Mar 2019 11:21:17 -0500 Subject: [PATCH 071/133] Justin fixes --- specs/core/0_beacon-chain.md | 37 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 9e52148a0..c14ff9736 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -183,10 +183,8 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| `MAX_INDICES_PER_SLASHABLE_VOTE` | `2**12` (= 4,096) | | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | -| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) -* `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // EPOCH_LENGTH` ### Deposit contract @@ -232,6 +230,10 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours | | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | +| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | + +* `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` + ### State list lengths @@ -598,7 +600,7 @@ The types are defined topologically to aid in facilitating an executable version # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], - 'current_shuffling_start_shard': 'uint64', + 'latest_start_shard': 'uint64', # Finality 'previous_epoch_attestations': [PendingAttestation], @@ -859,15 +861,15 @@ def get_crosslink_committees_at_slot(state: BeaconState, )) if epoch == current_epoch: - shuffling_start_shard = state.current_shuffling_start_shard + shuffling_start_shard = state.latest_start_shard elif epoch == previous_epoch: shuffling_start_shard = ( - state.current_shuffling_start_shard - EPOCH_LENGTH * committees_per_epoch + state.latest_start_shard - SLOTS_PER_EPOCH * committees_per_epoch ) % SHARD_COUNT elif epoch == next_epoch: current_epoch_committees = get_current_epoch_committee_count(state) shuffling_start_shard = ( - state.current_shuffling_start_shard + EPOCH_LENGTH * current_epoch_committees + state.latest_start_shard + EPOCH_LENGTH * current_epoch_committees ) % SHARD_COUNT shuffling = get_shuffling( @@ -1483,7 +1485,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Randomness and committees latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), - current_shuffling_start_shard=GENESIS_START_SHARD, + latest_start_shard=GENESIS_START_SHARD, # Finality previous_epoch_attestations=[], @@ -2003,14 +2005,6 @@ def process_ejections(state: BeaconState) -> None: #### Validator registry and shuffling seed data -```python -def should_update_validator_registry(state: BeaconState) -> bool: - # Must have finalized a new block - if state.finalized_epoch <= state.validator_registry_update_epoch: - return False - return True -``` - ```python def update_validator_registry(state: BeaconState) -> None: """ @@ -2060,16 +2054,13 @@ Run the following function: ```python def update_registry(state: BeaconState) -> None: - current_epoch = get_current_epoch(state) - next_epoch = current_epoch + 1 # Check if we should update, and if so, update - if should_update_validator_registry(state): + if state.finalized_epoch > state.validator_registry_update_epoch: update_validator_registry(state) - # If we update the registry, update the shuffling data 2/3 or and shards 
as well - state.current_shuffling_start_shard = ( - state.current_shuffling_start_shard + - get_current_epoch_committee_count(state) % SHARD_COUNT - ) + state.latest_start_shard = ( + state.latest_start_shard + + get_current_epoch_committee_count(state) + ) % SHARD_COUNT ``` **Invariant**: the active index root that is hashed into the shuffling seed actually is the `hash_tree_root` of the validator set that is used for that epoch. From ff165c197bc608eb897031231467917114f6fbdb Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 10:57:40 -0600 Subject: [PATCH 072/133] fix vector to be usable in deepcopy --- utils/phase0/minimal_ssz.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/utils/phase0/minimal_ssz.py b/utils/phase0/minimal_ssz.py index 08bd68357..c4828d08f 100644 --- a/utils/phase0/minimal_ssz.py +++ b/utils/phase0/minimal_ssz.py @@ -39,15 +39,22 @@ def SSZType(fields): return SSZObject -class Vector(list): - def __init__(self, x): - list.__init__(self, x) - self.length = len(x) +class Vector(): + def __init__(self, items): + self.items = items + self.length = len(items) - def append(*args): - raise Exception("Cannot change the length of a vector") + def __getitem__(self, key): + return self.items[key] - remove = clear = extend = pop = insert = append + def __setitem__(self, key, value): + self.items[key] = value + + def __iter__(self): + return iter(self.items) + + def __len__(self): + return self.length def is_basic(typ): From b50e148642d4a19d5517ab1ab689708b33ed7b53 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 17:13:25 +0000 Subject: [PATCH 073/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c14ff9736..07179aa0a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -74,7 +74,6 @@ - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`verify_merkle_branch`](#verify_merkle_branch) - [`get_attestation_participants`](#get_attestation_participants) - - [`is_power_of_two`](#is_power_of_two) - [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-) - [`bytes_to_int`](#bytes_to_int) - [`get_effective_balance`](#get_effective_balance) @@ -861,16 +860,12 @@ def get_crosslink_committees_at_slot(state: BeaconState, )) if epoch == current_epoch: - shuffling_start_shard = state.latest_start_shard + start_shard = state.latest_start_shard elif epoch == previous_epoch: - shuffling_start_shard = ( - state.latest_start_shard - SLOTS_PER_EPOCH * committees_per_epoch - ) % SHARD_COUNT + start_shard = (state.latest_start_shard - SLOTS_PER_EPOCH * committees_per_epoch) % SHARD_COUNT elif epoch == next_epoch: current_epoch_committees = get_current_epoch_committee_count(state) - shuffling_start_shard = ( - state.latest_start_shard + EPOCH_LENGTH * current_epoch_committees - ) % SHARD_COUNT + start_shard = (state.latest_start_shard + EPOCH_LENGTH * current_epoch_committees) % SHARD_COUNT shuffling = get_shuffling( generate_seed(state, epoch), @@ -879,7 +874,7 @@ def get_crosslink_committees_at_slot(state: BeaconState, ) offset = slot % SLOTS_PER_EPOCH committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH - slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % SHARD_COUNT + slot_start_shard = (start_shard + committees_per_slot * offset) % SHARD_COUNT return [ ( @@ -1017,16 +1012,6 @@ def 
get_attestation_participants(state: BeaconState, return participants ``` -### `is_power_of_two` - -```python -def is_power_of_two(value: int) -> bool: - """ - Check if ``value`` is a power of two integer. - """ - return (value > 0) and (value & (value - 1) == 0) -``` - ### `int_to_bytes1`, `int_to_bytes2`, ... `int_to_bytes1(x): return x.to_bytes(1, 'little')`, `int_to_bytes2(x): return x.to_bytes(2, 'little')`, and so on for all integers, particularly 1, 2, 3, 4, 8, 32, 48, 96. From 009563b2c35c9c9fd352e8026d4b1ff4ff9d2e69 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 11:15:51 -0600 Subject: [PATCH 074/133] fix a few bugs in testing compute_committee --- scripts/phase0/build_spec.py | 23 ++++++++++++----------- specs/core/0_beacon-chain.md | 3 ++- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index ae5a5a4f2..6116f1ffe 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -37,22 +37,23 @@ Store = None code_lines += function_puller.get_lines(sourcefile) code_lines.append(""" -# Monkey patch validator shuffling cache -_get_shuffling = get_shuffling -shuffling_cache = {} -def get_shuffling(seed: Bytes32, - validators: List[Validator], - epoch: Epoch) -> List[List[ValidatorIndex]]: +# Monkey patch validator get committee code +_compute_committee = compute_committee +committee_cache = {} +def compute_committee(validator_indices: List[ValidatorIndex], + seed: Bytes32, + index: int, + total_committees: int) -> List[ValidatorIndex]: - param_hash = (seed, hash_tree_root(validators, [Validator]), epoch) + param_hash = (hash_tree_root(validator_indices), seed, index, total_committees) - if param_hash in shuffling_cache: + if param_hash in committee_cache: # print("Cache hit, epoch={0}".format(epoch)) - return shuffling_cache[param_hash] + return committee_cache[param_hash] else: # print("Cache miss, epoch={0}".format(epoch)) - ret = _get_shuffling(seed, validators, epoch) - shuffling_cache[param_hash] = ret + ret = _compute_committee(validator_indices, seed, index, total_committees) + committee_cache[param_hash] = ret return ret diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 9708ee591..27ae71c00 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -916,7 +916,8 @@ def get_crosslink_committees_at_slot(state: BeaconState, indices = get_active_validator_indices(state.validator_registry, shuffling_epoch) committee_count = get_epoch_committee_count(len(indices)) - committees_per_slot = committee_count // EPOCH_LENGTH + committees_per_slot = committee_count // SLOTS_PER_EPOCH + offset = slot % SLOTS_PER_EPOCH return [ ( compute_committee(indices, seed, committees_per_slot * offset + i, committee_count) From c8e9073414114cad7b276eb6623457ed8fb1bf86 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 11:24:36 -0600 Subject: [PATCH 075/133] define get_split_offset and squash a couple of bugs --- specs/core/0_beacon-chain.md | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 27ae71c00..b56a87ad5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -61,7 +61,7 @@ - [`is_active_validator`](#is_active_validator) - [`get_active_validator_indices`](#get_active_validator_indices) - [`get_permuted_index`](#get_permuted_index) - - [`split`](#split) + - [`get_split_offset`](#get_split_offset) - 
[`get_epoch_committee_count`](#get_epoch_committee_count) - [`compute_committee`](#compute_committee) - [`get_previous_epoch_committee_count`](#get_previous_epoch_committee_count) @@ -773,18 +773,11 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: return index ``` -### `split` +### `get_split_offset` ```python -def split(values: List[Any], split_count: int) -> List[List[Any]]: - """ - Splits ``values`` into ``split_count`` pieces. - """ - list_length = len(values) - return [ - values[(list_length * i // split_count): (list_length * (i + 1) // split_count)] - for i in range(split_count) - ] +def get_split_offset(list_length: int, split_count: int, index: int) -> int: + return (list_length * index) // split_count ``` ### `get_epoch_committee_count` @@ -918,9 +911,11 @@ def get_crosslink_committees_at_slot(state: BeaconState, committee_count = get_epoch_committee_count(len(indices)) committees_per_slot = committee_count // SLOTS_PER_EPOCH offset = slot % SLOTS_PER_EPOCH + slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % SHARD_COUNT + return [ ( - compute_committee(indices, seed, committees_per_slot * offset + i, committee_count) + compute_committee(indices, seed, committees_per_slot * offset + i, committee_count), (slot_start_shard + i) % SHARD_COUNT, ) for i in range(committees_per_slot) From f5826e7f1ce5e46358873514d3a1c9d173fe55aa Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 11:34:49 -0600 Subject: [PATCH 076/133] small lint --- README.md | 7 ++++++- specs/core/0_beacon-chain.md | 1 - 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c5c88daf9..8f561a9ab 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ -# Ethereum 2.0 Specifications +Ethereum 2.0 Specifications +------------ [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) @@ -25,3 +26,7 @@ The following are the broad design goals for Ethereum 2.0: * to select all components such that they are either quantum secure or can be easily swapped out for quantum secure counterparts when available * to utilize crypto and design techniques that allow for a large participation of validators in total and per unit time * to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain) + +# Executable spec + +The aim is to have the entirety of the Ethereum 2.0Current just the phase 0 spec is executable. 
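Note: the committee refactor in the patches above reduces to simple integer slicing. Below is a minimal standalone sketch of that arithmetic (plain Python, outside the executable spec build): `get_split_offset` is copied from the definition above, while `toy_compute_committee` is an illustrative stand-in that omits the seed-based shuffling the real `compute_committee` performs.

```python
# Standalone sketch of the split-offset slicing used by the committee helpers
# above. `get_split_offset` matches the spec definition; `toy_compute_committee`
# is a simplified stand-in (no seed, no shuffling) showing only the arithmetic.

def get_split_offset(list_length: int, split_count: int, index: int) -> int:
    return (list_length * index) // split_count


def toy_compute_committee(validator_indices, index, total_committees):
    start = get_split_offset(len(validator_indices), total_committees, index)
    end = get_split_offset(len(validator_indices), total_committees, index + 1)
    return validator_indices[start:end]


indices = list(range(10))  # e.g. ten active validator indices
committees = [toy_compute_committee(indices, i, 3) for i in range(3)]
assert committees == [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
# Every index lands in exactly one committee and the ordering is preserved.
assert sum(committees, []) == indices
```

Consecutive offsets tile the index list with no gaps or overlaps, which is the property the per-slot committee assignment relies on.
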
diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b56a87ad5..e21607010 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -920,7 +920,6 @@ def get_crosslink_committees_at_slot(state: BeaconState, ) for i in range(committees_per_slot) ] - ``` ### `get_block_root` From f7fab30772b6d70c4a2f84acc171a0432575394e Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 11:40:24 -0600 Subject: [PATCH 077/133] minor adjustment to not repeat committe count calc --- specs/core/0_beacon-chain.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e21607010..9563e22bb 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -908,14 +908,13 @@ def get_crosslink_committees_at_slot(state: BeaconState, shuffling_start_shard = state.current_shuffling_start_shard indices = get_active_validator_indices(state.validator_registry, shuffling_epoch) - committee_count = get_epoch_committee_count(len(indices)) - committees_per_slot = committee_count // SLOTS_PER_EPOCH + committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH offset = slot % SLOTS_PER_EPOCH slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % SHARD_COUNT return [ ( - compute_committee(indices, seed, committees_per_slot * offset + i, committee_count), + compute_committee(indices, seed, committees_per_slot * offset + i, committees_per_epoch), (slot_start_shard + i) % SHARD_COUNT, ) for i in range(committees_per_slot) From ba57d91e7a31b0dab328c7582dd1159b85fdf5d7 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 11:41:27 -0600 Subject: [PATCH 078/133] undo readme commit --- README.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/README.md b/README.md index 8f561a9ab..c5c88daf9 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ -Ethereum 2.0 Specifications ------------- +# Ethereum 2.0 Specifications [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) @@ -26,7 +25,3 @@ The following are the broad design goals for Ethereum 2.0: * to select all components such that they are either quantum secure or can be easily swapped out for quantum secure counterparts when available * to utilize crypto and design techniques that allow for a large participation of validators in total and per unit time * to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain) - -# Executable spec - -The aim is to have the entirety of the Ethereum 2.0Current just the phase 0 spec is executable. 
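Note: the `Vector` rework from the earlier SSZ patch in this series can be sanity-checked in isolation. A minimal sketch follows, with the class body copied from that patch; the surrounding checks are purely illustrative and are not part of the spec test suite.

```python
# Sketch of the deepcopy behaviour the Vector rework enables. The class body is
# copied from the minimal_ssz patch above; the checks below are illustrative.
from copy import deepcopy


class Vector():
    def __init__(self, items):
        self.items = items
        self.length = len(items)

    def __getitem__(self, key):
        return self.items[key]

    def __setitem__(self, key, value):
        self.items[key] = value

    def __iter__(self):
        return iter(self.items)

    def __len__(self):
        return self.length


original = Vector([b'\x00' * 32 for _ in range(4)])
copied = deepcopy(original)
copied[0] = b'\x11' * 32

assert original[0] == b'\x00' * 32   # the copy is fully independent
assert len(copied) == 4              # fixed length survives the copy
```

Wrapping a plain list rather than subclassing `list` is what makes this work; the earlier `list` subclass disabled `append`/`extend`, which is presumably why it did not survive `deepcopy`.
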
From 3f46010fa8836b68c7d03aca330ec238865583ad Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 12:29:06 -0600 Subject: [PATCH 079/133] modify validator ejecion test to fit PR --- tests/phase0/test_sanity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 8f04f316c..d1811cd00 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -433,7 +433,7 @@ def test_ejection(state): block.slot += spec.SLOTS_PER_EPOCH state_transition(post_state, block) - assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH + assert post_state.validator_registry[validator_index].initiated_exit == True return pre_state, [block], post_state From 23ef802da5426e0c573417f79f37ecac8500b0b3 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 13:25:34 -0600 Subject: [PATCH 080/133] fix small bug in sytax --- specs/core/0_beacon-chain.md | 2 +- tests/phase0/test_sanity.py | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index aa9fc1e7f..a834a1cde 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2089,7 +2089,7 @@ def update_validator_registry(state: BeaconState) -> None: activate_validator(state, index, is_genesis=False) # Exit validators within the allowable balance churn - if state.current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: + if current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: balance_churn = ( state.latest_slashed_balances[state.validator_registry_update_epoch % LATEST_SLASHED_EXIT_LENGTH] - state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index d1811cd00..56c1c1a64 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -316,12 +316,18 @@ def test_attestation(state, pubkeys, privkeys): def test_voluntary_exit(state, pubkeys, privkeys): pre_state = deepcopy(state) - validator_index = get_active_validator_indices(pre_state.validator_registry, get_current_epoch(pre_state))[-1] + validator_index = get_active_validator_indices( + pre_state.validator_registry, + get_current_epoch(pre_state) + )[-1] # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH # artificially trigger registry update at next epoch transition - pre_state.validator_registry_update_epoch -= 1 + pre_state.finalized_epoch = get_current_epoch(pre_state) - 1 + for crosslink in pre_state.latest_crosslinks: + crosslink.epoch = pre_state.finalized_epoch + pre_state.validator_registry_update_epoch = pre_state.finalized_epoch - 1 post_state = deepcopy(pre_state) From 24f1139d0938d2360f724cc4de7c1c23160b0157 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 13:39:45 -0600 Subject: [PATCH 081/133] add explicit test that ensures exists are blocked when too long since registry change --- tests/phase0/test_sanity.py | 38 +++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 56c1c1a64..8c7e7d28b 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -369,6 +369,44 @@ def test_voluntary_exit(state, pubkeys, privkeys): return pre_state, [initiate_exit_block, exit_block], 
post_state +def test_no_exit_too_long_since_change(state): + pre_state = deepcopy(state) + validator_index = get_active_validator_indices( + pre_state.validator_registry, + get_current_epoch(pre_state) + )[-1] + + # + # setup pre_state + # + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit + pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + # artificially trigger registry update at next epoch transition + pre_state.finalized_epoch = get_current_epoch(pre_state) - 1 + for crosslink in pre_state.latest_crosslinks: + crosslink.epoch = pre_state.finalized_epoch + # make epochs since registry update greater than LATEST_SLASHED_EXIT_LENGTH + pre_state.validator_registry_update_epoch = ( + get_current_epoch(pre_state) - spec.LATEST_SLASHED_EXIT_LENGTH + ) + # set validator to have previously initiated exit + pre_state.validator_registry[validator_index].initiated_exit = True + + post_state = deepcopy(pre_state) + + # + # Process registry change but ensure no exit + # + block = build_empty_block_for_next_slot(post_state) + block.slot += spec.SLOTS_PER_EPOCH + state_transition(post_state, block) + + assert post_state.validator_registry_update_epoch == get_current_epoch(post_state) - 1 + assert post_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + + return pre_state, [block], post_state + + def test_transfer(state, pubkeys, privkeys): pre_state = deepcopy(state) current_epoch = get_current_epoch(pre_state) From b664453a342d88b20a351a90e4aac7ce5a901fa5 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 20:43:05 +0000 Subject: [PATCH 082/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 940343e51..910646487 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2435,8 +2435,9 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: assert validator.initiated_exit is False # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= exit.epoch - # Must have been in the validator set long enough - assert validator.activation_epoch != FAR_FUTURE_EPOCH + # Verify the validator is active + assert is_active_validator(validator, state) + # Verify the validator has been active long enough assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD # Verify signature assert bls_verify( @@ -2445,7 +2446,7 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: signature=exit.signature, domain=get_domain(state.fork, exit.epoch, DOMAIN_VOLUNTARY_EXIT) ) - # Run the exit + # Initiate exit initiate_validator_exit(state, exit.validator_index) ``` From ad636a8252f83a5e1be2714c3ad3fc5a299da4ed Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 15:00:29 -0600 Subject: [PATCH 083/133] add no salshed proposer test --- tests/phase0/test_process_block_header.py | 26 +++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 tests/phase0/test_process_block_header.py diff --git a/tests/phase0/test_process_block_header.py b/tests/phase0/test_process_block_header.py new file mode 100644 index 000000000..83d99e574 --- /dev/null +++ b/tests/phase0/test_process_block_header.py @@ -0,0 +1,26 @@ +from copy import deepcopy +import pytest + + +from build.phase0.spec import ( + get_beacon_proposer_index, 
+ process_block_header, +) +from tests.phase0.helpers import ( + build_empty_block_for_next_slot, +) + +# mark entire file as 'sanity' and 'header' +pytestmark = [pytest.mark.sanity, pytest.mark.header] + + +def test_proposer_slashed(state): + pre_state = deepcopy(state) + + block = build_empty_block_for_next_slot(pre_state) + proposer_index = get_beacon_proposer_index(pre_state, block.slot) + pre_state.validator_registry[proposer_index].slashed = True + with pytest.raises(AssertionError): + process_block_header(pre_state, block) + + return state, [block], None From acd7fdd762b19a3758e4fadd481f672b7843d32b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 15:49:01 -0600 Subject: [PATCH 084/133] add a few voluntary exit tests --- Makefile | 2 +- specs/core/0_beacon-chain.md | 4 +- .../test_process_block_header.py | 4 +- .../block_processing/test_voluntary_exit.py | 170 ++++++++++++++++++ tests/phase0/conftest.py | 6 + tests/phase0/helpers.py | 28 +++ tests/phase0/test_sanity.py | 12 +- 7 files changed, 213 insertions(+), 13 deletions(-) rename tests/phase0/{ => block_processing}/test_process_block_header.py (85%) create mode 100644 tests/phase0/block_processing/test_voluntary_exit.py diff --git a/Makefile b/Makefile index b45cec410..88f17dcf9 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ clean: # runs a limited set of tests against a minimal config # run pytest with `-m` option to full suite test: - pytest -m "sanity and minimal_config" tests/ + pytest -m minimal_config tests/ $(BUILD_DIR)/phase0: diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 85e35d595..212cedb95 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2431,14 +2431,14 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: Note that this function mutates ``state``. 
""" validator = state.validator_registry[exit.validator_index] + # Verify the validator is active + assert is_active_validator(validator, get_current_epoch(state)) # Verify the validator has not yet exited assert validator.exit_epoch == FAR_FUTURE_EPOCH # Verify the validator has not initiated an exit assert validator.initiated_exit is False # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= exit.epoch - # Verify the validator is active - assert is_active_validator(validator, state) # Verify the validator has been active long enough assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD # Verify signature diff --git a/tests/phase0/test_process_block_header.py b/tests/phase0/block_processing/test_process_block_header.py similarity index 85% rename from tests/phase0/test_process_block_header.py rename to tests/phase0/block_processing/test_process_block_header.py index 83d99e574..4ec7e336f 100644 --- a/tests/phase0/test_process_block_header.py +++ b/tests/phase0/block_processing/test_process_block_header.py @@ -10,8 +10,8 @@ from tests.phase0.helpers import ( build_empty_block_for_next_slot, ) -# mark entire file as 'sanity' and 'header' -pytestmark = [pytest.mark.sanity, pytest.mark.header] +# mark entire file as 'header' +pytestmark = pytest.mark.header def test_proposer_slashed(state): diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py new file mode 100644 index 000000000..80fad86a1 --- /dev/null +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -0,0 +1,170 @@ +from copy import deepcopy +import pytest + +import build.phase0.spec as spec + +from build.phase0.spec import ( + get_active_validator_indices, + get_current_epoch, + process_voluntary_exit, +) +from tests.phase0.helpers import ( + build_voluntary_exit, +) + + +def test_success(state, pub_to_priv): + pre_state = deepcopy(state) + # + # setup pre_state + # + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit + pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + # + # build voluntary exit + # + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + + voluntary_exit = build_voluntary_exit( + pre_state, + current_epoch, + validator_index, + privkey, + ) + + post_state = deepcopy(pre_state) + + # + # test valid exit + # + process_voluntary_exit(post_state, voluntary_exit) + + assert not pre_state.validator_registry[validator_index].initiated_exit + assert post_state.validator_registry[validator_index].initiated_exit + + return pre_state, voluntary_exit, post_state + + +def test_validator_not_active(state, pub_to_priv): + pre_state = deepcopy(state) + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + + # + # setup pre_state + # + pre_state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH + + # + # build and test voluntary exit + # + voluntary_exit = build_voluntary_exit( + pre_state, + current_epoch, + validator_index, + privkey, + ) + + with pytest.raises(AssertionError): + process_voluntary_exit(pre_state, voluntary_exit) + + return pre_state, 
voluntary_exit, None + + +def test_validator_already_exited(state, pub_to_priv): + pre_state = deepcopy(state) + # + # setup pre_state + # + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit + pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + + # but validator already has exited + pre_state.validator_registry[validator_index].exit_epoch = current_epoch + 2 + + # + # build voluntary exit + # + voluntary_exit = build_voluntary_exit( + pre_state, + current_epoch, + validator_index, + privkey, + ) + + with pytest.raises(AssertionError): + process_voluntary_exit(pre_state, voluntary_exit) + + return pre_state, voluntary_exit, None + + +def test_validator_already_initiated_exit(state, pub_to_priv): + pre_state = deepcopy(state) + # + # setup pre_state + # + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit + pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + + # but validator already has initiated exit + pre_state.validator_registry[validator_index].initiated_exit = True + + # + # build voluntary exit + # + voluntary_exit = build_voluntary_exit( + pre_state, + current_epoch, + validator_index, + privkey, + ) + + with pytest.raises(AssertionError): + process_voluntary_exit(pre_state, voluntary_exit) + + return pre_state, voluntary_exit, None + + +def test_validator_not_active_long_enough(state, pub_to_priv): + pre_state = deepcopy(state) + # + # setup pre_state + # + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + + # but validator already has initiated exit + pre_state.validator_registry[validator_index].initiated_exit = True + + # + # build voluntary exit + # + voluntary_exit = build_voluntary_exit( + pre_state, + current_epoch, + validator_index, + privkey, + ) + + assert ( + current_epoch - pre_state.validator_registry[validator_index].activation_epoch < + spec.PERSISTENT_COMMITTEE_PERIOD + ) + + with pytest.raises(AssertionError): + process_voluntary_exit(pre_state, voluntary_exit) + + return pre_state, voluntary_exit, None diff --git a/tests/phase0/conftest.py b/tests/phase0/conftest.py index e92896e92..395929028 100644 --- a/tests/phase0/conftest.py +++ b/tests/phase0/conftest.py @@ -5,6 +5,7 @@ from build.phase0 import spec from tests.phase0.helpers import ( privkeys_list, pubkeys_list, + pubkey_to_privkey, create_genesis_state, ) @@ -34,6 +35,11 @@ def pubkeys(): return pubkeys_list +@pytest.fixture +def pub_to_priv(): + return pubkey_to_privkey + + def overwrite_spec_config(config): for field in config: setattr(spec, field, config[field]) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 510361e9c..2c7994079 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -13,6 +13,7 @@ from build.phase0.spec import ( DepositInput, DepositData, Eth1Data, + VoluntaryExit, # functions get_block_root, get_current_epoch, @@ -82,6 
+83,14 @@ def create_genesis_state(num_validators, deposit_data_leaves): ) +def force_registry_change_at_next_epoch(state): + # artificially trigger registry update at next epoch transition + state.finalized_epoch = get_current_epoch(state) - 1 + for crosslink in state.latest_crosslinks: + crosslink.epoch = state.finalized_epoch + state.validator_registry_update_epoch = state.finalized_epoch - 1 + + def build_empty_block_for_next_slot(state): empty_block = get_empty_block() empty_block.slot = state.slot + 1 @@ -143,3 +152,22 @@ def build_attestation_data(state, slot, shard): crosslink_data_root=spec.ZERO_HASH, previous_crosslink=deepcopy(state.latest_crosslinks[shard]), ) + + +def build_voluntary_exit(state, epoch, validator_index, privkey): + voluntary_exit = VoluntaryExit( + epoch=epoch, + validator_index=validator_index, + signature=EMPTY_SIGNATURE, + ) + voluntary_exit.signature = bls.sign( + message_hash=signed_root(voluntary_exit), + privkey=privkey, + domain=get_domain( + fork=state.fork, + epoch=epoch, + domain_type=spec.DOMAIN_VOLUNTARY_EXIT, + ) + ) + + return voluntary_exit diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 8c7e7d28b..b9d44a72c 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -43,6 +43,7 @@ from tests.phase0.helpers import ( build_attestation_data, build_deposit_data, build_empty_block_for_next_slot, + force_registry_change_at_next_epoch, ) @@ -324,10 +325,7 @@ def test_voluntary_exit(state, pubkeys, privkeys): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH # artificially trigger registry update at next epoch transition - pre_state.finalized_epoch = get_current_epoch(pre_state) - 1 - for crosslink in pre_state.latest_crosslinks: - crosslink.epoch = pre_state.finalized_epoch - pre_state.validator_registry_update_epoch = pre_state.finalized_epoch - 1 + force_registry_change_at_next_epoch(pre_state) post_state = deepcopy(pre_state) @@ -369,7 +367,7 @@ def test_voluntary_exit(state, pubkeys, privkeys): return pre_state, [initiate_exit_block, exit_block], post_state -def test_no_exit_too_long_since_change(state): +def test_no_exit_churn_too_long_since_change(state): pre_state = deepcopy(state) validator_index = get_active_validator_indices( pre_state.validator_registry, @@ -382,9 +380,7 @@ def test_no_exit_too_long_since_change(state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH # artificially trigger registry update at next epoch transition - pre_state.finalized_epoch = get_current_epoch(pre_state) - 1 - for crosslink in pre_state.latest_crosslinks: - crosslink.epoch = pre_state.finalized_epoch + force_registry_change_at_next_epoch(pre_state) # make epochs since registry update greater than LATEST_SLASHED_EXIT_LENGTH pre_state.validator_registry_update_epoch = ( get_current_epoch(pre_state) - spec.LATEST_SLASHED_EXIT_LENGTH From 472d9c5c20a93c0b1608013c03f5ca92a0a9a1d8 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Tue, 19 Mar 2019 15:32:38 -0700 Subject: [PATCH 085/133] Updates from review --- specs/networking/messaging.md | 2 ++ specs/networking/rpc-interface.md | 24 +++++++++++++++++------- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/specs/networking/messaging.md b/specs/networking/messaging.md index de92fe6d4..b64e1d5d8 100644 --- a/specs/networking/messaging.md +++ 
b/specs/networking/messaging.md @@ -11,6 +11,8 @@ The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL This specification seeks to define a messaging protocol that is flexible enough to be changed easily as the ETH 2.0 specification evolves. +Note that while `libp2p` is the chosen networking stack for Ethereum 2.0, as of this writing some clients do not have workable `libp2p` implementations. To allow those clients to communicate, we define a message envelope that includes the body's compression, encoding, and body length. Once `libp2p` is available across all implementations, this message envelope will be removed because `libp2p` will negotiate the values defined in the envelope upfront. + # Specification ## Message Structure diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index f505a4663..ef85f32d5 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -34,7 +34,9 @@ A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp ## RPC-Over-`libp2p` -To facilitate RPC-over-`libp2p`, a single protocol path is used: `/eth/serenity/beacon/rpc/1.0.0`. Remote method calls are wrapped in a "request" structure: +To facilitate RPC-over-`libp2p`, a single protocol name is used: `/eth/serenity/beacon/rpc/1`. The version number in the protocol name is neither backwards or forwards compatible, and will be incremented whenever changes to the below structures are required. + +Remote method calls are wrapped in a "request" structure: ``` ( @@ -88,6 +90,10 @@ The first 1,000 values in `error.code` are reserved for system use. The followin 3. `20`: Method not found. 4. `30`: Server error. +### Alternative for Non-`libp2p` Clients + +Since some clients are waiting for `libp2p` implementations in their respective languages. As such, they MAY listen for raw TCP messages on port `9000`. To distinguish RPC messages from other messages on that port, a byte prefix of `ETH` (`0x455448`) MUST be prepended to all messages. This option will be removed once `libp2p` is ready in all supported languages. + ## Messages ### Hello @@ -154,12 +160,13 @@ Once the handshake completes, the client with the higher `latest_finalized_epoch ) ``` -Client MAY send `goodbye` messages upon disconnection. The reason field MUST be one of the following values: +Client MAY send `goodbye` messages upon disconnection. The reason field MAY be one of the following values: - `1`: Client shut down. - `2`: Irrelevant network. -- `3`: Too many peers. -- `4`: Fault/error. +- `3`: Fault/error. + +Clients MAY define custom goodbye reasons as long as the value is larger than `1000`. ### Request Beacon Block Roots @@ -168,7 +175,10 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MUST be **Request Body** ``` -() +( + start_slot: uint64 + count: uint64 +) ``` **Response Body:** @@ -185,7 +195,7 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MUST be ) ``` -Send a list of block roots and slots to the requesting peer. +Requests a list of block roots and slots from the peer. The `count` parameter MUST be less than or equal to `32768`. ### Beacon Block Headers @@ -210,7 +220,7 @@ Send a list of block roots and slots to the requesting peer. ) ``` -Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. 
For example, requesting blocks starting at slots `2` a `skip_slots` value of `2` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. +Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `1` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. A `skip_slots` value of `0` returns all blocks. The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. From cf4f3463a9aecb7052a0c6d196421f0483e35c75 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 17:10:36 -0600 Subject: [PATCH 086/133] add deposit size check in state transiton. add deposit tests --- .../block_processing/test_process_deposit.py | 132 ++++++++++++++++++ .../block_processing/test_voluntary_exit.py | 4 + tests/phase0/helpers.py | 30 +++- tests/phase0/test_sanity.py | 2 + utils/phase0/state_transition.py | 13 ++ 5 files changed, 180 insertions(+), 1 deletion(-) create mode 100644 tests/phase0/block_processing/test_process_deposit.py diff --git a/tests/phase0/block_processing/test_process_deposit.py b/tests/phase0/block_processing/test_process_deposit.py new file mode 100644 index 000000000..297ad37f1 --- /dev/null +++ b/tests/phase0/block_processing/test_process_deposit.py @@ -0,0 +1,132 @@ +from copy import deepcopy +import pytest + +import build.phase0.spec as spec + +from build.phase0.spec import ( + Deposit, + process_deposit, +) +from tests.phase0.helpers import ( + build_deposit, +) + + +# mark entire file as 'voluntary_exits' +pytestmark = pytest.mark.voluntary_exits + + +def test_success(state, deposit_data_leaves, pubkeys, privkeys): + pre_state = deepcopy(state) + + index = len(deposit_data_leaves) + pubkey = pubkeys[index] + privkey = privkeys[index] + deposit, root, deposit_data_leaves = build_deposit( + pre_state, + deposit_data_leaves, + pubkey, + privkey, + spec.MAX_DEPOSIT_AMOUNT, + ) + + pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + + post_state = deepcopy(pre_state) + + process_deposit(post_state, deposit) + + assert len(post_state.validator_registry) == len(state.validator_registry) + 1 + assert len(post_state.validator_balances) == len(state.validator_balances) + 1 + assert post_state.validator_registry[index].pubkey == pubkeys[index] + assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count + + return 
pre_state, deposit, post_state + + +def test_success_top_up(state, deposit_data_leaves, pubkeys, privkeys): + pre_state = deepcopy(state) + + validator_index = 0 + amount = spec.MAX_DEPOSIT_AMOUNT // 4 + pubkey = pubkeys[validator_index] + privkey = privkeys[validator_index] + deposit, root, deposit_data_leaves = build_deposit( + pre_state, + deposit_data_leaves, + pubkey, + privkey, + amount, + ) + + pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + pre_balance = pre_state.validator_balances[validator_index] + + post_state = deepcopy(pre_state) + + process_deposit(post_state, deposit) + + assert len(post_state.validator_registry) == len(state.validator_registry) + assert len(post_state.validator_balances) == len(state.validator_balances) + assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count + assert post_state.validator_balances[validator_index] == pre_balance + amount + + return pre_state, deposit, post_state + + +def test_wrong_index(state, deposit_data_leaves, pubkeys, privkeys): + pre_state = deepcopy(state) + + index = len(deposit_data_leaves) + pubkey = pubkeys[index] + privkey = privkeys[index] + deposit, root, deposit_data_leaves = build_deposit( + pre_state, + deposit_data_leaves, + pubkey, + privkey, + spec.MAX_DEPOSIT_AMOUNT, + ) + + # mess up deposit_index + deposit.index = pre_state.deposit_index + 1 + + pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + + post_state = deepcopy(pre_state) + + with pytest.raises(AssertionError): + process_deposit(post_state, deposit) + + return pre_state, deposit, None + + +def test_bad_merkle_proof(state, deposit_data_leaves, pubkeys, privkeys): + pre_state = deepcopy(state) + + index = len(deposit_data_leaves) + pubkey = pubkeys[index] + privkey = privkeys[index] + deposit, root, deposit_data_leaves = build_deposit( + pre_state, + deposit_data_leaves, + pubkey, + privkey, + spec.MAX_DEPOSIT_AMOUNT, + ) + + # mess up merkle branch + deposit.proof[-1] = spec.ZERO_HASH + + pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + + post_state = deepcopy(pre_state) + + with pytest.raises(AssertionError): + process_deposit(post_state, deposit) + + return pre_state, deposit, None diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py index 80fad86a1..0801e4292 100644 --- a/tests/phase0/block_processing/test_voluntary_exit.py +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -13,6 +13,10 @@ from tests.phase0.helpers import ( ) +# mark entire file as 'voluntary_exits' +pytestmark = pytest.mark.voluntary_exits + + def test_success(state, pub_to_priv): pre_state = deepcopy(state) # diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 2c7994079..5c61685a6 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -72,12 +72,16 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves): def create_genesis_state(num_validators, deposit_data_leaves): - initial_deposits, deposit_root = create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves) + initial_deposits, deposit_root = create_mock_genesis_validator_deposits( + num_validators, + deposit_data_leaves, + ) return get_genesis_beacon_state( initial_deposits, genesis_time=0, genesis_eth1_data=Eth1Data( deposit_root=deposit_root, + 
deposit_count=len(initial_deposits), block_hash=spec.ZERO_HASH, ), ) @@ -171,3 +175,27 @@ def build_voluntary_exit(state, epoch, validator_index, privkey): ) return voluntary_exit + + +def build_deposit(state, + deposit_data_leaves, + pubkey, + privkey, + amount): + deposit_data = build_deposit_data(state, pubkey, privkey, amount) + + item = hash(deposit_data.serialize()) + index = len(deposit_data_leaves) + deposit_data_leaves.append(item) + tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) + root = get_merkle_root((tuple(deposit_data_leaves))) + proof = list(get_merkle_proof(tree, item_index=index)) + assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root) + + deposit = Deposit( + proof=list(proof), + index=index, + deposit_data=deposit_data, + ) + + return deposit, root, deposit_data_leaves diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index b9d44a72c..91bd9fe7a 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -196,6 +196,7 @@ def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): ) pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(test_deposit_data_leaves) post_state = deepcopy(pre_state) block = build_empty_block_for_next_slot(post_state) block.body.deposits.append(deposit) @@ -233,6 +234,7 @@ def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): ) pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(test_deposit_data_leaves) block = build_empty_block_for_next_slot(pre_state) block.body.deposits.append(deposit) diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py index eefc3d409..88c4f934a 100644 --- a/utils/phase0/state_transition.py +++ b/utils/phase0/state_transition.py @@ -15,6 +15,13 @@ from .spec import ( ) +def expected_deposit_count(state: BeaconState) -> int: + return min( + spec.MAX_DEPOSITS, + state.latest_eth1_data.deposit_count - state.deposit_index + ) + + def process_transaction_type(state: BeaconState, transactions: List[Any], max_transactions: int, @@ -31,30 +38,36 @@ def process_transactions(state: BeaconState, block: BeaconBlock) -> None: spec.MAX_PROPOSER_SLASHINGS, spec.process_proposer_slashing, ) + process_transaction_type( state, block.body.attester_slashings, spec.MAX_ATTESTER_SLASHINGS, spec.process_attester_slashing, ) + process_transaction_type( state, block.body.attestations, spec.MAX_ATTESTATIONS, spec.process_attestation, ) + + assert len(block.body.deposits) == expected_deposit_count(state) process_transaction_type( state, block.body.deposits, spec.MAX_DEPOSITS, spec.process_deposit, ) + process_transaction_type( state, block.body.voluntary_exits, spec.MAX_VOLUNTARY_EXITS, spec.process_voluntary_exit, ) + assert len(block.body.transfers) == len(set(block.body.transfers)) process_transaction_type( state, From 1083de0c616aacb78e66363e53eeabac4a1f8f5e Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 17:38:09 -0600 Subject: [PATCH 087/133] add notes about mandatory deposits in validator guide --- specs/validator/0_beacon-chain-validator.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 1a4bddf9e..62a7011b4 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -212,25 +212,25 @@ block_signature = bls_sign( ##### 
Proposer slashings -Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposer-slashings-1). The validator receives a small "whistleblower" reward for each proposer slashing found and included. +Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included. ##### Attester slashings -Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [Attester slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attester-slashings-1). The validator receives a small "whistleblower" reward for each attester slashing found and included. +Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [Attester slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included. ##### Attestations -Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations-1). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. +Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. ##### Deposits -Up to `MAX_DEPOSITS` [`Deposit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) objects can be included in the `block`. These deposits are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) and must be processed in sequential order. 
The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposits-1). +If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposits). The `proof` for each deposit must be constructed against the deposit root contained in `state.latest_eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `latest_eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation. ##### Voluntary exits -Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#exits-1). +Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntary-exits). ### Attestations From 833691b8afe9ca68c75588e0f528780f200de0ee Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 20 Mar 2019 08:16:39 +0000 Subject: [PATCH 088/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4935ab7d7..099d12b95 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -256,7 +256,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `MIN_PENALTY_QUOTIENT` | `2**5` (= 32) | * The `BASE_REWARD_QUOTIENT` parameter dictates the per-epoch reward. It corresponds to ~2.54% annual interest assuming 10 million participating ETH in every epoch. -* The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. 
Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1-1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1-1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. +* The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. ### Max transactions per block @@ -789,7 +789,7 @@ def decrease_balance(state: BeaconState, index: int, delta: int) -> None: ```python def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: """ - Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. + Return `p(index)` in a pseudorandom permutation `p` of `0...list_size - 1` with ``seed`` as entropy. Utilizes 'swap or not' shuffling found in https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf @@ -1376,7 +1376,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. state.validator_registry.append(validator) state.balances.append(0) - set_balance(state, len(state.validator_registry)-1, amount) + set_balance(state, len(state.validator_registry) - 1, amount) else: # Increase balance by deposit amount index = validator_pubkeys.index(pubkey) From dde49cbedafa4301f33dea56ad0830d866ae5a57 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 20 Mar 2019 08:47:41 -0600 Subject: [PATCH 089/133] fix and extend header tests --- .../test_process_block_header.py | 90 ++++++++++++++++++- 1 file changed, 86 insertions(+), 4 deletions(-) diff --git a/tests/phase0/block_processing/test_process_block_header.py b/tests/phase0/block_processing/test_process_block_header.py index 4ec7e336f..650bc387c 100644 --- a/tests/phase0/block_processing/test_process_block_header.py +++ b/tests/phase0/block_processing/test_process_block_header.py @@ -4,6 +4,8 @@ import pytest from build.phase0.spec import ( get_beacon_proposer_index, + cache_state, + advance_slot, process_block_header, ) from tests.phase0.helpers import ( @@ -14,13 +16,93 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.header -def test_proposer_slashed(state): +def test_sucess(state): + pre_state = deepcopy(state) + block = build_empty_block_for_next_slot(pre_state) + + # + # setup pre_state to be ready for block transition + # + cache_state(pre_state) + advance_slot(pre_state) + + post_state = deepcopy(pre_state) + + # + # test block header + # + process_block_header(post_state, block) + + return state, [block], post_state + + +def test_invalid_slot(state): pre_state = deepcopy(state) + # mess up previous block root block = build_empty_block_for_next_slot(pre_state) - proposer_index = get_beacon_proposer_index(pre_state, block.slot) - pre_state.validator_registry[proposer_index].slashed = True + block.previous_block_root = b'\12'*32 + + # + # setup pre_state advancing two slots to induce error + # + cache_state(pre_state) + advance_slot(pre_state) + 
advance_slot(pre_state) + + post_state = deepcopy(pre_state) + + # + # test block header + # with pytest.raises(AssertionError): - process_block_header(pre_state, block) + process_block_header(post_state, block) + + return state, [block], None + + +def test_invalid_previous_block_root(state): + pre_state = deepcopy(state) + + # mess up previous block root + block = build_empty_block_for_next_slot(pre_state) + block.previous_block_root = b'\12'*32 + + # + # setup pre_state to be ready for block transition + # + cache_state(pre_state) + advance_slot(pre_state) + + post_state = deepcopy(pre_state) + + # + # test block header + # + with pytest.raises(AssertionError): + process_block_header(post_state, block) + + return state, [block], None + + +def test_proposer_slashed(state): + pre_state = deepcopy(state) + proposer_index = get_beacon_proposer_index(pre_state, pre_state.slot + 1) + pre_state.validator_registry[proposer_index].slashed = True + block = build_empty_block_for_next_slot(pre_state) + + # + # setup pre_state to be ready for block transition + # + cache_state(pre_state) + advance_slot(pre_state) + + post_state = deepcopy(pre_state) + + # + # test block header + # + with pytest.raises(AssertionError): + process_block_header(post_state, block) return state, [block], None From f4012ee309dfa5b238bd55e05acd0c041e8c9280 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 20 Mar 2019 09:59:29 -0600 Subject: [PATCH 090/133] make process block header tests more modular --- .../test_process_block_header.py | 107 ++++++------------ 1 file changed, 35 insertions(+), 72 deletions(-) diff --git a/tests/phase0/block_processing/test_process_block_header.py b/tests/phase0/block_processing/test_process_block_header.py index 650bc387c..4981b656c 100644 --- a/tests/phase0/block_processing/test_process_block_header.py +++ b/tests/phase0/block_processing/test_process_block_header.py @@ -16,93 +16,56 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.header -def test_sucess(state): - pre_state = deepcopy(state) - block = build_empty_block_for_next_slot(pre_state) +def prepare_state_for_header_processing(state): + cache_state(state) + advance_slot(state) - # - # setup pre_state to be ready for block transition - # - cache_state(pre_state) - advance_slot(pre_state) - post_state = deepcopy(pre_state) +def run_block_header_processing(state, block, valid=True): + """ + Run ``process_block_header`` returning the pre and post state. 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + prepare_state_for_header_processing(state) + post_state = deepcopy(state) + + if not valid: + with pytest.raises(AssertionError): + process_block_header(post_state, block) + return state, None - # - # test block header - # process_block_header(post_state, block) + return state, post_state - return state, [block], post_state + +def test_success(state): + block = build_empty_block_for_next_slot(state) + pre_state, post_state = run_block_header_processing(state, block) + return state, block, post_state def test_invalid_slot(state): - pre_state = deepcopy(state) + block = build_empty_block_for_next_slot(state) + block.slot = state.slot + 2 # invalid slot - # mess up previous block root - block = build_empty_block_for_next_slot(pre_state) - block.previous_block_root = b'\12'*32 - - # - # setup pre_state advancing two slots to induce error - # - cache_state(pre_state) - advance_slot(pre_state) - advance_slot(pre_state) - - post_state = deepcopy(pre_state) - - # - # test block header - # - with pytest.raises(AssertionError): - process_block_header(post_state, block) - - return state, [block], None + pre_state, post_state = run_block_header_processing(state, block, valid=False) + return pre_state, block, None def test_invalid_previous_block_root(state): - pre_state = deepcopy(state) + block = build_empty_block_for_next_slot(state) + block.previous_block_root = b'\12'*32 # invalid prev root - # mess up previous block root - block = build_empty_block_for_next_slot(pre_state) - block.previous_block_root = b'\12'*32 - - # - # setup pre_state to be ready for block transition - # - cache_state(pre_state) - advance_slot(pre_state) - - post_state = deepcopy(pre_state) - - # - # test block header - # - with pytest.raises(AssertionError): - process_block_header(post_state, block) - - return state, [block], None + pre_state, post_state = run_block_header_processing(state, block, valid=False) + return pre_state, block, None def test_proposer_slashed(state): - pre_state = deepcopy(state) - proposer_index = get_beacon_proposer_index(pre_state, pre_state.slot + 1) - pre_state.validator_registry[proposer_index].slashed = True - block = build_empty_block_for_next_slot(pre_state) + # set proposer to slashed + proposer_index = get_beacon_proposer_index(state, state.slot + 1) + state.validator_registry[proposer_index].slashed = True - # - # setup pre_state to be ready for block transition - # - cache_state(pre_state) - advance_slot(pre_state) + block = build_empty_block_for_next_slot(state) - post_state = deepcopy(pre_state) - - # - # test block header - # - with pytest.raises(AssertionError): - process_block_header(post_state, block) - - return state, [block], None + pre_state, post_state = run_block_header_processing(state, block, valid=False) + return pre_state, block, None From 8794d03517ea2b6160f032d6619fe01594f2a645 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 20 Mar 2019 19:04:04 -0700 Subject: [PATCH 091/133] Updates with Whiteblock --- specs/networking/rpc-interface.md | 59 ++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index ef85f32d5..51dc3a900 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -51,8 +51,8 @@ and their corresponding responses are wrapped in a "response" structure: ``` ( id: uint64 - is_error: boolean - result: Response + response_code: uint16 + result: bytes 
)
 ```
 
@@ -61,11 +61,8 @@ If an error occurs, a variant of the response structure is returned:
 ```
 (
     id: uint64
-    is_error: boolean
-    result: (
-        code: uint16
-        data: bytes
-    )
+    response_code: uint16
+    result: bytes
 )
 ```
 
@@ -75,20 +72,21 @@ The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](http
 2. The `id` member in the response MUST be the same as the value of the `id` in the request.
 3. The `id` member MUST be unique within the context of a single connection. Monotonically increasing `id`s are RECOMMENDED.
 4. The `method_id` member is REQUIRED.
-5. The `result` member is required on success, and MUST NOT exist if there was an error.
-6. The `error` member is REQUIRED on errors, and MUST NOT exist if there wasn't an error.
-7. `is_error` MUST be `true` on errors, or `false` otherwise.
+5. The `result` member is REQUIRED on success.
+6. The `result` member is OPTIONAL on errors, and MAY contain additional information about the error.
+7. `response_code` MUST be `0` on success.
 
 Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. Note that this implies that responses MAY arrive in a different order than requests.
 
 The "method ID" fields in the below messages refer to the `method` field in the request structure above.
 
-The first 1,000 values in `error.code` are reserved for system use. The following error codes are predefined:
+The first 1,000 values in `response_code` are reserved for system use. The following response codes are predefined:
 
-1. `0`: Parse error.
-2. `10`: Invalid request.
-3. `20`: Method not found.
-4. `30`: Server error.
+1. `0`: No error.
+2. `10`: Parse error.
+3. `20`: Invalid request.
+4. `30`: Method not found.
+5. `40`: Server error.
 
 ### Alternative for Non-`libp2p` Clients
 
@@ -105,6 +103,7 @@ Since some clients are waiting for `libp2p` implementations in their respective
 ```
 (
     network_id: uint8
+    chain_id: uint8
     latest_finalized_root: bytes32
     latest_finalized_epoch: uint64
     best_root: bytes32
@@ -168,6 +167,32 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MAY be o
 Clients MAY define custom goodbye reasons as long as the value is larger than `1000`.
 
+### Get Status
+
+**Method ID:** `2`
+
+**Request Body:**
+
+```
+(
+    sha: bytes32
+    user_agent: bytes
+    timestamp: uint64
+)
+```
+
+**Response Body:**
+
+```
+(
+    sha: bytes32
+    user_agent: bytes
+    timestamp: uint64
+)
+```
+
+Returns metadata about the remote node.
+
 ### Request Beacon Block Roots
 
 **Method ID:** `10`
 
@@ -195,7 +220,7 @@
 )
 ```
 
-Requests a list of block roots and slots from the peer. The `count` parameter MUST be less than or equal to `32768`.
+Requests a list of block roots and slots from the peer. 
The `count` parameter MU ``` ( - headers: []BlockHeader + headers: []BeaconBlockHeader ) ``` From fdcfc910080f283f62926954150a47ffb681224e Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 21 Mar 2019 07:38:25 -0500 Subject: [PATCH 092/133] Add docstring into get_split_offset --- specs/core/0_beacon-chain.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d32aa1a0e..e198f5c35 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -779,6 +779,10 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: ```python def get_split_offset(list_length: int, split_count: int, index: int) -> int: + """ + Returns a value such that for a list L, chunk count k and index i, + split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i + 1)] + """ return (list_length * index) // split_count ``` From fd6d80fcb648a397fc43644199bf78d267d8a988 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 21 Mar 2019 08:24:26 -0600 Subject: [PATCH 093/133] remove get_split_offset from phase 1 doc --- specs/core/0_beacon-chain.md | 12 ++++++------ specs/core/1_shard-data-chains.md | 12 ------------ 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e198f5c35..1067c3dc0 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -778,12 +778,12 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: ### `get_split_offset` ```python -def get_split_offset(list_length: int, split_count: int, index: int) -> int: - """ - Returns a value such that for a list L, chunk count k and index i, - split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i + 1)] - """ - return (list_length * index) // split_count +def get_split_offset(list_size: int, chunks: int, index: int) -> int: + """ + Returns a value such that for a list L, chunk count k and index i, + split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] + """ + return (list_size * index) // chunks ``` ### `get_epoch_committee_count` diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index c76f9ba08..92cee4d19 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -19,7 +19,6 @@ At the current stage, Phase 1, while fundamentally feature-complete, is still su - [Signature domains](#signature-domains) - [Shard chains and crosslink data](#shard-chains-and-crosslink-data) - [Helper functions](#helper-functions) - - [`get_split_offset`](#get_split_offset) - [`get_shuffled_committee`](#get_shuffled_committee) - [`get_persistent_committee`](#get_persistent_committee) - [`get_shard_proposer_index`](#get_shard_proposer_index) @@ -122,17 +121,6 @@ Phase 1 depends upon all of the constants defined in [Phase 0](0_beacon-chain.md ## Helper functions -#### `get_split_offset` - -````python -def get_split_offset(list_size: int, chunks: int, index: int) -> int: - """ - Returns a value such that for a list L, chunk count k and index i, - split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] - """ - return (list_size * index) // chunks -```` - #### `get_shuffled_committee` ```python From 47477b8e55dba85f7e4e12c3b0cf99bc594ac81d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 21 Mar 2019 09:37:06 -0600 Subject: [PATCH 094/133] cleanup tests to use get_balance and set_balance --- 
.../block_processing/test_process_deposit.py | 10 ++++---- tests/phase0/test_sanity.py | 23 +++++++++++-------- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/tests/phase0/block_processing/test_process_deposit.py b/tests/phase0/block_processing/test_process_deposit.py index 297ad37f1..9f1b6add6 100644 --- a/tests/phase0/block_processing/test_process_deposit.py +++ b/tests/phase0/block_processing/test_process_deposit.py @@ -5,6 +5,7 @@ import build.phase0.spec as spec from build.phase0.spec import ( Deposit, + get_balance, process_deposit, ) from tests.phase0.helpers import ( @@ -38,8 +39,9 @@ def test_success(state, deposit_data_leaves, pubkeys, privkeys): process_deposit(post_state, deposit) assert len(post_state.validator_registry) == len(state.validator_registry) + 1 - assert len(post_state.validator_balances) == len(state.validator_balances) + 1 + assert len(post_state.balances) == len(state.balances) + 1 assert post_state.validator_registry[index].pubkey == pubkeys[index] + assert get_balance(post_state, index) == spec.MAX_DEPOSIT_AMOUNT assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count return pre_state, deposit, post_state @@ -62,16 +64,16 @@ def test_success_top_up(state, deposit_data_leaves, pubkeys, privkeys): pre_state.latest_eth1_data.deposit_root = root pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) - pre_balance = pre_state.validator_balances[validator_index] + pre_balance = get_balance(pre_state, validator_index) post_state = deepcopy(pre_state) process_deposit(post_state, deposit) assert len(post_state.validator_registry) == len(state.validator_registry) - assert len(post_state.validator_balances) == len(state.validator_balances) + assert len(post_state.balances) == len(state.balances) assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count - assert post_state.validator_balances[validator_index] == pre_balance + amount + assert get_balance(post_state, validator_index) == pre_balance + amount return pre_state, deposit, post_state diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 91bd9fe7a..ec03fb355 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -21,6 +21,7 @@ from build.phase0.spec import ( # functions get_active_validator_indices, get_attestation_participants, + get_balance, get_block_root, get_crosslink_committees_at_slot, get_current_epoch, @@ -28,6 +29,7 @@ from build.phase0.spec import ( get_state_root, advance_slot, cache_state, + set_balance, verify_merkle_branch, hash, ) @@ -168,7 +170,7 @@ def test_proposer_slashing(state, pubkeys, privkeys): assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH # lost whistleblower reward - assert test_state.validator_balances[validator_index] < state.validator_balances[validator_index] + assert get_balance(test_state, validator_index) < get_balance(state, validator_index) return state, [block], test_state @@ -203,7 +205,8 @@ def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): state_transition(post_state, block) assert len(post_state.validator_registry) == len(state.validator_registry) + 1 - assert len(post_state.validator_balances) == len(state.validator_balances) + 1 + assert len(post_state.balances) == len(state.balances) + 1 + assert get_balance(post_state, index) == spec.MAX_DEPOSIT_AMOUNT assert post_state.validator_registry[index].pubkey == pubkeys[index] return pre_state, [block], 
post_state @@ -238,12 +241,12 @@ def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): block = build_empty_block_for_next_slot(pre_state) block.body.deposits.append(deposit) - pre_balance = pre_state.validator_balances[validator_index] + pre_balance = get_balance(pre_state, validator_index) post_state = deepcopy(pre_state) state_transition(post_state, block) assert len(post_state.validator_registry) == len(pre_state.validator_registry) - assert len(post_state.validator_balances) == len(pre_state.validator_balances) - assert post_state.validator_balances[validator_index] == pre_balance + amount + assert len(post_state.balances) == len(pre_state.balances) + assert get_balance(post_state, validator_index) == pre_balance + amount return pre_state, [block], post_state @@ -412,8 +415,8 @@ def test_transfer(state, pubkeys, privkeys): recipient_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] transfer_pubkey = pubkeys[-1] transfer_privkey = privkeys[-1] - amount = pre_state.validator_balances[sender_index] - pre_transfer_recipient_balance = pre_state.validator_balances[recipient_index] + amount = get_balance(pre_state, sender_index) + pre_transfer_recipient_balance = get_balance(pre_state, recipient_index) transfer = Transfer( sender=sender_index, recipient=recipient_index, @@ -448,8 +451,8 @@ def test_transfer(state, pubkeys, privkeys): block.body.transfers.append(transfer) state_transition(post_state, block) - sender_balance = post_state.validator_balances[sender_index] - recipient_balance = post_state.validator_balances[recipient_index] + sender_balance = get_balance(post_state, sender_index) + recipient_balance = get_balance(post_state, recipient_index) assert sender_balance == 0 assert recipient_balance == pre_transfer_recipient_balance + amount @@ -465,7 +468,7 @@ def test_ejection(state): assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH # set validator balance to below ejection threshold - pre_state.validator_balances[validator_index] = spec.EJECTION_BALANCE - 1 + set_balance(pre_state, validator_index, spec.EJECTION_BALANCE - 1) post_state = deepcopy(pre_state) # From f6da42ffb32fed8e22769dbf77f906889b1e02a2 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 21 Mar 2019 10:04:20 -0600 Subject: [PATCH 095/133] fix markdown issues --- specs/core/0_beacon-chain.md | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 3bc95f717..4eee3dcb5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -61,9 +61,9 @@ - [`is_active_validator`](#is_active_validator) - [`get_active_validator_indices`](#get_active_validator_indices) - [`get_balance`](#get_balance) - - [`set_balance`](#set_balance) - - [`increase_balance`](#increase_balance) - - [`decrease_balance`](#decrease_balance) + - [`set_balance`](#set_balance) + - [`increase_balance`](#increase_balance) + - [`decrease_balance`](#decrease_balance) - [`get_permuted_index`](#get_permuted_index) - [`get_split_offset`](#get_split_offset) - [`get_epoch_committee_count`](#get_epoch_committee_count) @@ -760,31 +760,32 @@ def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> L def get_balance(state: BeaconState, index: int) -> int: return state.balances[index] ``` -#### `set_balance` -````python +### `set_balance` + +```python def set_balance(state: BeaconState, index: int, balance: int) -> None: validator = 
state.validator_registry[index] HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: validator.high_balance = balance - balance % HIGH_BALANCE_INCREMENT state.balances[index] = balance -```` +``` -#### `increase_balance` +### `increase_balance` -````python +```python def increase_balance(state: BeaconState, index: int, delta: int) -> None: set_balance(state, index, get_balance(state, index) + delta) -```` +``` -#### `decrease_balance` +### `decrease_balance` -````python +```python def decrease_balance(state: BeaconState, index: int, delta: int) -> None: cur_balance = get_balance(state, index) set_balance(state, index, cur_balance - delta if cur_balance >= delta else 0) -```` +``` ### `get_permuted_index` From d3f175d7289befde23d9810a29ee1ca40f02216a Mon Sep 17 00:00:00 2001 From: terence tsao Date: Thu, 21 Mar 2019 11:33:36 -0700 Subject: [PATCH 096/133] Update sync_protocol.md --- specs/light_client/sync_protocol.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 8878545bb..575df60fe 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -1,6 +1,18 @@ -# Beacon chain light client syncing +# Beacon Chain Light Client Syncing + +__NOTICE__: This document is a work-in-progress for researchers and implementers. One of the design goals of the eth2 beacon chain is light-client friendlines, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains. + +## Table of Contents + + +- [Beacon Chain Light Client Syncing](#beacon-chain-light-client-syncing) + - [Table of Contents](#table-of-contents) + - [Light client state](#light-client-state) + - [Updating the shuffled committee](#updating-the-shuffled-committee) + - [Computing the current committee](#computing-the-current-committee) + - [Verifying blocks](#verifying-blocks) + -One of the design goals of the eth2 beacon chain is light-client friendlines, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains. ### Preliminaries From d1d1b73fb1783b564556a48ea86d69d8dd1003e7 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 21 Mar 2019 15:11:05 -0500 Subject: [PATCH 097/133] Simplify justification and finalization accounting logic Much of the simplification is cosmetic. 
The following changes are substantive: * Inactivity leak penalty specifically on missing the target, not both the target and the source * Even outside of quadratic leak scenarios, slashing victims suffer offline penalties --- specs/core/0_beacon-chain.md | 94 ++++++++---------------------------- 1 file changed, 21 insertions(+), 73 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1067c3dc0..e2cd8b162 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1883,10 +1883,11 @@ def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: ```python def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_since_finality: int) -> Gwei: - return ( - get_base_reward(state, index) + - get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 - ) + if epochs_since_finality <= 4: + extra_penalty = 0 + else: + extra_penalty = get_effective_balance(state, index) * min(epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 + return get_base_reward(state, index) + extra_penalty ``` Note: When applying penalties in the following balance recalculations implementers should make sure the `uint64` does not underflow. @@ -1896,22 +1897,8 @@ Note: When applying penalties in the following balance recalculations implemente ```python def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: epochs_since_finality = get_current_epoch(state) + 1 - state.finalized_epoch - if epochs_since_finality <= 4: - return compute_normal_justification_and_finalization_deltas(state) - else: - return compute_inactivity_leak_deltas(state) -``` - -When blocks are finalizing normally... - -```python -def compute_normal_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - # deltas[0] for rewards - # deltas[1] for penalties - deltas = [ - [0 for index in range(len(state.validator_registry))], - [0 for index in range(len(state.validator_registry))] - ] + rewards = [0 for index in range(len(state.validator_registry))] + penalties = [0 for index in range(len(state.validator_registry))] # Some helper variables boundary_attestations = get_previous_epoch_boundary_attestations(state) boundary_attesting_balance = get_attesting_balance(state, boundary_attestations) @@ -1919,76 +1906,37 @@ def compute_normal_justification_and_finalization_deltas(state: BeaconState) -> total_attesting_balance = get_attesting_balance(state, state.previous_epoch_attestations) matching_head_attestations = get_previous_epoch_matching_head_attestations(state) matching_head_balance = get_attesting_balance(state, matching_head_attestations) + eligible_validators = [ + i for i,v in enumerate(state.validator_registry) if is_active_validator(v, get_current_epoch(state)) or + (v.slashed and get_current_epoch(state) < v.withdrawable_epoch) + ] # Process rewards or penalties for all validators - for index in get_active_validator_indices(state.validator_registry, get_previous_epoch(state)): + for index in eligible_validators: # Expected FFG source if index in get_attesting_indices(state, state.previous_epoch_attestations): - deltas[0][index] += get_base_reward(state, index) * total_attesting_balance // total_balance + rewards[index] += get_base_reward(state, index) * total_attesting_balance // total_balance # Inclusion speed bonus - deltas[0][index] += ( + rewards[index] += ( get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // 
inclusion_distance(state, index) ) else: - deltas[1][index] += get_base_reward(state, index) + penalties[index] += get_base_reward(state, index) # Expected FFG target if index in get_attesting_indices(state, boundary_attestations): - deltas[0][index] += get_base_reward(state, index) * boundary_attesting_balance // total_balance + rewards[index] += get_base_reward(state, index) * boundary_attesting_balance // total_balance else: - deltas[1][index] += get_base_reward(state, index) + penalties[index] += get_inactivity_penalty(state, index, epochs_since_finality) # Expected head if index in get_attesting_indices(state, matching_head_attestations): - deltas[0][index] += get_base_reward(state, index) * matching_head_balance // total_balance + rewards[index] += get_base_reward(state, index) * matching_head_balance // total_balance else: - deltas[1][index] += get_base_reward(state, index) + penalties[index] += get_base_reward(state, index) # Proposer bonus if index in get_attesting_indices(state, state.previous_epoch_attestations): proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) - deltas[0][proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT - return deltas -``` - -When blocks are not finalizing normally... - -```python -def compute_inactivity_leak_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - # deltas[0] for rewards - # deltas[1] for penalties - deltas = [ - [0 for index in range(len(state.validator_registry))], - [0 for index in range(len(state.validator_registry))] - ] - boundary_attestations = get_previous_epoch_boundary_attestations(state) - matching_head_attestations = get_previous_epoch_matching_head_attestations(state) - active_validator_indices = get_active_validator_indices(state.validator_registry, get_previous_epoch(state)) - epochs_since_finality = get_current_epoch(state) + 1 - state.finalized_epoch - for index in active_validator_indices: - if index not in get_attesting_indices(state, state.previous_epoch_attestations): - deltas[1][index] += get_inactivity_penalty(state, index, epochs_since_finality) - else: - # If a validator did attest, apply a small penalty for getting attestations included late - deltas[0][index] += ( - get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // - inclusion_distance(state, index) - ) - deltas[1][index] += get_base_reward(state, index) - if index not in get_attesting_indices(state, boundary_attestations): - deltas[1][index] += get_inactivity_penalty(state, index, epochs_since_finality) - if index not in get_attesting_indices(state, matching_head_attestations): - deltas[1][index] += get_base_reward(state, index) - # Penalize slashed-but-inactive validators as though they were active but offline - for index in range(len(state.validator_registry)): - eligible = ( - index not in active_validator_indices and - state.validator_registry[index].slashed and - get_current_epoch(state) < state.validator_registry[index].withdrawable_epoch - ) - if eligible: - deltas[1][index] += ( - 2 * get_inactivity_penalty(state, index, epochs_since_finality) + - get_base_reward(state, index) - ) - return deltas + rewards[proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT + return [rewards, penalties] ``` ##### Crosslinks From 38a5c3640b30581a4e807ae6aba13e7266bd1a76 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 21 Mar 2019 15:13:13 -0500 Subject: [PATCH 098/133] Re-added some penalization in case of failure to finalize --- 
specs/core/0_beacon-chain.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e2cd8b162..2a7b0c776 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1936,6 +1936,9 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ if index in get_attesting_indices(state, state.previous_epoch_attestations): proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) rewards[proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT + # Take away max rewards if we're not finalizing + if epochs_since_finality > 4: + penalties[index] += get_base_reward(state, index) * 4 return [rewards, penalties] ``` From 3b403909e8d1571bc6a30ac9487d2ba49a9386cd Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 21 Mar 2019 16:29:14 -0500 Subject: [PATCH 099/133] Cosmetic improvement to reward/penalty functions --- specs/core/0_beacon-chain.md | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1067c3dc0..05dda9fe7 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1995,12 +1995,8 @@ def compute_inactivity_leak_deltas(state: BeaconState) -> Tuple[List[Gwei], List ```python def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - # deltas[0] for rewards - # deltas[1] for penalties - deltas = [ - [0 for index in range(len(state.validator_registry))], - [0 for index in range(len(state.validator_registry))] - ] + rewards = [0 for index in range(len(state.validator_registry))] + penalties = [0 for index in range(len(state.validator_registry))] previous_epoch_start_slot = get_epoch_start_slot(get_previous_epoch(state)) current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) for slot in range(previous_epoch_start_slot, current_epoch_start_slot): @@ -2010,10 +2006,10 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: total_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: if index in participants: - deltas[0][index] += get_base_reward(state, index) * participating_balance // total_balance + rewards[index] += get_base_reward(state, index) * participating_balance // total_balance else: - deltas[1][index] += get_base_reward(state, index) - return deltas + penalties[index] += get_base_reward(state, index) + return [rewards, penalties] ``` #### Apply rewards @@ -2022,12 +2018,12 @@ Run the following: ```python def apply_rewards(state: BeaconState) -> None: - deltas1 = get_justification_and_finalization_deltas(state) - deltas2 = get_crosslink_deltas(state) + rewards1, penalties1 = get_justification_and_finalization_deltas(state) + rewards2, penalties2 = get_crosslink_deltas(state) for i in range(len(state.validator_registry)): state.validator_balances[i] = max( 0, - state.validator_balances[i] + deltas1[0][i] + deltas2[0][i] - deltas1[1][i] - deltas2[1][i] + state.validator_balances[i] + rewards1[i] + rewards2[i] - penalties1[i] - penalties2[i] ) ``` From 3ece05ccc1a5e126e934c57aa091386a4afeb8ef Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 21 Mar 2019 16:36:31 -0500 Subject: [PATCH 100/133] Small cosmetic change to slashable attestations --- specs/core/0_beacon-chain.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 
1067c3dc0..c7c74279f 100644
--- a/specs/core/0_beacon-chain.md
+++ b/specs/core/0_beacon-chain.md
@@ -182,7 +182,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code.
 | `SHARD_COUNT` | `2**10` (= 1,024) |
 | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) |
 | `MAX_BALANCE_CHURN_QUOTIENT` | `2**5` (= 32) |
-| `MAX_INDICES_PER_SLASHABLE_VOTE` | `2**12` (= 4,096) |
+| `MAX_SLASHABLE_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) |
 | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) |
 | `SHUFFLE_ROUND_COUNT` | 90 |
 
@@ -1159,7 +1159,7 @@ def verify_slashable_attestation(state: BeaconState, slashable_attestation: Slas
     if slashable_attestation.custody_bitfield != b'\x00' * len(slashable_attestation.custody_bitfield):  # [TO BE REMOVED IN PHASE 1]
         return False
 
-    if len(slashable_attestation.validator_indices) == 0:
+    if not (1 <= len(slashable_attestation.validator_indices) <= MAX_SLASHABLE_ATTESTATION_PARTICIPANTS):
         return False
 
     for i in range(len(slashable_attestation.validator_indices) - 1):
@@ -1169,9 +1169,6 @@ def verify_slashable_attestation(state: BeaconState, slashable_attestation: Slas
     if not verify_bitfield(slashable_attestation.custody_bitfield, len(slashable_attestation.validator_indices)):
         return False
 
-    if len(slashable_attestation.validator_indices) > MAX_INDICES_PER_SLASHABLE_VOTE:
-        return False
-
     custody_bit_0_indices = []
     custody_bit_1_indices = []
     for i, validator_index in enumerate(slashable_attestation.validator_indices):

From e313c5ba5abd949e2af87919a36f84937ee6b68c Mon Sep 17 00:00:00 2001
From: Danny Ryan
Date: Thu, 21 Mar 2019 17:08:54 -0600
Subject: [PATCH 101/133] add tests for proposer slashing

---
 .../test_process_proposer_slashing.py | 97 +++++++++++++++++++
 tests/phase0/helpers.py | 44 +++++++++
 tests/phase0/test_sanity.py | 39 +-------
 3 files changed, 144 insertions(+), 36 deletions(-)
 create mode 100644 tests/phase0/block_processing/test_process_proposer_slashing.py

diff --git a/tests/phase0/block_processing/test_process_proposer_slashing.py b/tests/phase0/block_processing/test_process_proposer_slashing.py
new file mode 100644
index 000000000..467d2164b
--- /dev/null
+++ b/tests/phase0/block_processing/test_process_proposer_slashing.py
@@ -0,0 +1,97 @@
+from copy import deepcopy
+import pytest
+
+import build.phase0.spec as spec
+from build.phase0.spec import (
+    get_balance,
+    get_current_epoch,
+    process_proposer_slashing,
+)
+from tests.phase0.helpers import (
+    get_valid_proposer_slashing,
+)
+
+# mark entire file as 'proposer_slashings'
+pytestmark = pytest.mark.proposer_slashings
+
+
+def run_proposer_slashing_processing(state, proposer_slashing, valid=True):
+    """
+    Run ``process_proposer_slashing`` returning the pre and post state. 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + post_state = deepcopy(state) + + if not valid: + with pytest.raises(AssertionError): + process_proposer_slashing(post_state, proposer_slashing) + return state, None + + process_proposer_slashing(post_state, proposer_slashing) + + slashed_validator = post_state.validator_registry[proposer_slashing.proposer_index] + assert not slashed_validator.initiated_exit + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + # lost whistleblower reward + assert ( + get_balance(post_state, proposer_slashing.proposer_index) < + get_balance(state, proposer_slashing.proposer_index) + ) + + return state, post_state + + +def test_success(state): + proposer_slashing = get_valid_proposer_slashing(state) + + pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing) + + return pre_state, proposer_slashing, post_state + + +def test_epochs_are_different(state): + proposer_slashing = get_valid_proposer_slashing(state) + + # set slots to be in different epochs + proposer_slashing.header_2.slot += spec.SLOTS_PER_EPOCH + + pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False) + + return pre_state, proposer_slashing, post_state + + +def test_headers_are_same(state): + proposer_slashing = get_valid_proposer_slashing(state) + + # set headers to be the same + proposer_slashing.header_2 = proposer_slashing.header_1 + + pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False) + + return pre_state, proposer_slashing, post_state + + +def test_proposer_is_slashed(state): + proposer_slashing = get_valid_proposer_slashing(state) + + # set proposer to slashed + state.validator_registry[proposer_slashing.proposer_index].slashed = True + + pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False) + + return pre_state, proposer_slashing, post_state + + +def test_proposer_is_withdrawn(state): + proposer_slashing = get_valid_proposer_slashing(state) + + # set proposer withdrawable_epoch in past + current_epoch = get_current_epoch(state) + proposer_index = proposer_slashing.proposer_index + state.validator_registry[proposer_index].withdrawable_epoch = current_epoch - 1 + + pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False) + + return pre_state, proposer_slashing, post_state diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 5c61685a6..3987289bf 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -7,14 +7,18 @@ from build.phase0.utils.minimal_ssz import signed_root from build.phase0.spec import ( # constants EMPTY_SIGNATURE, + ZERO_HASH, # SSZ AttestationData, + BeaconBlockHeader, Deposit, DepositInput, DepositData, Eth1Data, + ProposerSlashing, VoluntaryExit, # functions + get_active_validator_indices, get_block_root, get_current_epoch, get_domain, @@ -199,3 +203,43 @@ def build_deposit(state, ) return deposit, root, deposit_data_leaves + + +def get_valid_proposer_slashing(state): + current_epoch = get_current_epoch(state) + validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[-1] + privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] + slot = state.slot + + header_1 = BeaconBlockHeader( + slot=slot, + previous_block_root=ZERO_HASH, + state_root=ZERO_HASH, + block_body_root=ZERO_HASH, + 
signature=EMPTY_SIGNATURE, + ) + header_2 = deepcopy(header_1) + header_2.previous_block_root = b'\x02' * 32 + header_2.slot = slot + 1 + + domain = get_domain( + fork=state.fork, + epoch=get_current_epoch(state), + domain_type=spec.DOMAIN_BEACON_BLOCK, + ) + header_1.signature = bls.sign( + message_hash=signed_root(header_1), + privkey=privkey, + domain=domain, + ) + header_2.signature = bls.sign( + message_hash=signed_root(header_2), + privkey=privkey, + domain=domain, + ) + + return ProposerSlashing( + proposer_index=validator_index, + header_1=header_1, + header_2=header_2, + ) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index ec03fb355..444075a13 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -46,6 +46,7 @@ from tests.phase0.helpers import ( build_deposit_data, build_empty_block_for_next_slot, force_registry_change_at_next_epoch, + get_valid_proposer_slashing, ) @@ -117,42 +118,8 @@ def test_empty_epoch_transition_not_finalizing(state): def test_proposer_slashing(state, pubkeys, privkeys): test_state = deepcopy(state) - current_epoch = get_current_epoch(test_state) - validator_index = get_active_validator_indices(test_state.validator_registry, current_epoch)[-1] - privkey = privkeys[validator_index] - slot = spec.GENESIS_SLOT - header_1 = BeaconBlockHeader( - slot=slot, - previous_block_root=ZERO_HASH, - state_root=ZERO_HASH, - block_body_root=ZERO_HASH, - signature=EMPTY_SIGNATURE, - ) - header_2 = deepcopy(header_1) - header_2.previous_block_root = b'\x02' * 32 - header_2.slot = slot + 1 - - domain = get_domain( - fork=test_state.fork, - epoch=get_current_epoch(test_state), - domain_type=spec.DOMAIN_BEACON_BLOCK, - ) - header_1.signature = bls.sign( - message_hash=signed_root(header_1), - privkey=privkey, - domain=domain, - ) - header_2.signature = bls.sign( - message_hash=signed_root(header_2), - privkey=privkey, - domain=domain, - ) - - proposer_slashing = ProposerSlashing( - proposer_index=validator_index, - header_1=header_1, - header_2=header_2, - ) + proposer_slashing = get_valid_proposer_slashing(state) + validator_index = proposer_slashing.proposer_index # # Add to state via block transition From 11c3291817f3d590723298877c41d0bf244b789b Mon Sep 17 00:00:00 2001 From: terence tsao Date: Thu, 21 Mar 2019 16:30:45 -0700 Subject: [PATCH 102/133] Update sync_protocol.md --- specs/light_client/sync_protocol.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 8878545bb..143f82a39 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -40,8 +40,8 @@ def get_later_start_epoch(slot: Slot) -> int: return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD def get_earlier_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData: - period_start = get_earlier_start_epoch(header.slot) - validator_count = len(get_active_validator_indices(state, period_start)) + period_start = get_earlier_start_epoch(block.slot) + validator_count = len(get_active_validator_indices(block.state, period_start)) committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count) return PeriodData( @@ -51,8 +51,8 @@ def get_earlier_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> Peri ) def get_later_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData: 
- period_start = get_later_start_epoch(header.slot) - validator_count = len(get_active_validator_indices(state, period_start)) + period_start = get_later_start_epoch(block.slot) + validator_count = len(get_active_validator_indices(block.state, period_start)) committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count) return PeriodData( From ae67e9513b46e045b87bdc302f6b20c0fc341e2f Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 22 Mar 2019 12:56:54 +0800 Subject: [PATCH 103/133] Fix type hinting and add docstrings --- specs/core/0_beacon-chain.md | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a67e6291c..c29aa113d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -770,14 +770,21 @@ def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> L ### `get_balance` ```python -def get_balance(state: BeaconState, index: int) -> int: +def get_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: + """ + Return the balance for a validator with the given ``index``. + """ return state.balances[index] ``` ### `set_balance` ```python -def set_balance(state: BeaconState, index: int, balance: int) -> None: +def set_balance(state: BeaconState, index: ValidatorIndex, balance: Gwei) -> None: + """ + Set the balance for a validator with the given ``index`` in both ``BeaconState`` + and validator's rounded balance ``high_balance``. + """ validator = state.validator_registry[index] HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: @@ -788,16 +795,23 @@ def set_balance(state: BeaconState, index: int, balance: int) -> None: ### `increase_balance` ```python -def increase_balance(state: BeaconState, index: int, delta: int) -> None: +def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: + """ + Increase the balance for a validator with the given ``index`` by ``delta``. + """ set_balance(state, index, get_balance(state, index) + delta) ``` ### `decrease_balance` ```python -def decrease_balance(state: BeaconState, index: int, delta: int) -> None: - cur_balance = get_balance(state, index) - set_balance(state, index, cur_balance - delta if cur_balance >= delta else 0) +def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: + """ + Decrease the balance for a validator with the given ``index`` by ``delta``. + Set to ``0`` when underflow. 
+ """ + current_balance = get_balance(state, index) + set_balance(state, index, current_balance - delta if current_balance >= delta else 0) ``` ### `get_permuted_index` From b34858c67b6c0df1bbaaf9c9d44dd68000ebb273 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 22 Mar 2019 14:21:33 +0800 Subject: [PATCH 104/133] Refactor `get_justification_and_finalization_deltas` --- specs/core/0_beacon-chain.md | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2a7b0c776..b374b094f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -115,7 +115,7 @@ - [Helper functions](#helper-functions-1) - [Justification](#justification) - [Crosslinks](#crosslinks) - - [Eth1 data](#eth1-data-1) + - [Eth1 data](#eth1-data) - [Rewards and penalties](#rewards-and-penalties) - [Justification and finalization](#justification-and-finalization) - [Crosslinks](#crosslinks-1) @@ -128,7 +128,7 @@ - [Per-block processing](#per-block-processing) - [Block header](#block-header) - [RANDAO](#randao) - - [Eth1 data](#eth1-data) + - [Eth1 data](#eth1-data-1) - [Transactions](#transactions) - [Proposer slashings](#proposer-slashings) - [Attester slashings](#attester-slashings) @@ -1896,7 +1896,8 @@ Note: When applying penalties in the following balance recalculations implemente ```python def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - epochs_since_finality = get_current_epoch(state) + 1 - state.finalized_epoch + current_epoch = get_current_epoch(state) + epochs_since_finality = current_epoch + 1 - state.finalized_epoch rewards = [0 for index in range(len(state.validator_registry))] penalties = [0 for index in range(len(state.validator_registry))] # Some helper variables @@ -1907,38 +1908,42 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ matching_head_attestations = get_previous_epoch_matching_head_attestations(state) matching_head_balance = get_attesting_balance(state, matching_head_attestations) eligible_validators = [ - i for i,v in enumerate(state.validator_registry) if is_active_validator(v, get_current_epoch(state)) or - (v.slashed and get_current_epoch(state) < v.withdrawable_epoch) + index for index, validator in enumerate(state.validator_registry) + if ( + is_active_validator(validator, current_epoch) or + (validator.slashed and current_epoch < validator.withdrawable_epoch) + ) ] # Process rewards or penalties for all validators for index in eligible_validators: + base_reward = get_base_reward(state, index) # Expected FFG source if index in get_attesting_indices(state, state.previous_epoch_attestations): - rewards[index] += get_base_reward(state, index) * total_attesting_balance // total_balance + rewards[index] += base_reward * total_attesting_balance // total_balance # Inclusion speed bonus rewards[index] += ( - get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // + base_reward * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_distance(state, index) ) else: - penalties[index] += get_base_reward(state, index) + penalties[index] += base_reward # Expected FFG target if index in get_attesting_indices(state, boundary_attestations): - rewards[index] += get_base_reward(state, index) * boundary_attesting_balance // total_balance + rewards[index] += base_reward * boundary_attesting_balance // total_balance else: penalties[index] += get_inactivity_penalty(state, index, 
epochs_since_finality) # Expected head if index in get_attesting_indices(state, matching_head_attestations): - rewards[index] += get_base_reward(state, index) * matching_head_balance // total_balance + rewards[index] += base_reward * matching_head_balance // total_balance else: - penalties[index] += get_base_reward(state, index) + penalties[index] += base_reward # Proposer bonus if index in get_attesting_indices(state, state.previous_epoch_attestations): proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) - rewards[proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT + rewards[proposer_index] += base_reward // ATTESTATION_INCLUSION_REWARD_QUOTIENT # Take away max rewards if we're not finalizing if epochs_since_finality > 4: - penalties[index] += get_base_reward(state, index) * 4 + penalties[index] += base_reward * 4 return [rewards, penalties] ``` From e8257db32062a2a674bef8f1c4689d93ba5e0e26 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 22 Mar 2019 05:40:41 -0500 Subject: [PATCH 105/133] Removed hanging min --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b374b094f..a4719c702 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1886,7 +1886,7 @@ def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_sin if epochs_since_finality <= 4: extra_penalty = 0 else: - extra_penalty = get_effective_balance(state, index) * min(epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 + extra_penalty = get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 return get_base_reward(state, index) + extra_penalty ``` From 3ee9fc0cc775a05042f7acbfc46e03ec24d14104 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 22 Mar 2019 06:10:44 -0500 Subject: [PATCH 106/133] Merge attestation verification logic Also rename slashable attestation to standalone attestation to reflect its broader functionality in phase 1. --- specs/core/0_beacon-chain.md | 84 +++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 39 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c29aa113d..a4d5f5ec6 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -28,7 +28,7 @@ - [`Eth1DataVote`](#eth1datavote) - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - - [`SlashableAttestation`](#slashableattestation) + - [`StandaloneAttestation`](#standaloneattestation) - [`DepositInput`](#depositinput) - [`DepositData`](#depositdata) - [`BeaconBlockHeader`](#beaconblockheader) @@ -90,7 +90,8 @@ - [`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) - [`verify_bitfield`](#verify_bitfield) - - [`verify_slashable_attestation`](#verify_slashable_attestation) + - [`convert_to_standalone`](#convert_to_standalone) + - [`verify_standalone_attestation`](#verify_standalone_attestation) - [`is_double_vote`](#is_double_vote) - [`is_surround_vote`](#is_surround_vote) - [`integer_squareroot`](#integer_squareroot) @@ -187,7 +188,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| `SHARD_COUNT` | `2**10` (= 1,024) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | | `MAX_BALANCE_CHURN_QUOTIENT` | `2**5` (= 32) | -| `MAX_SLASHABLE_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | +| `MAX_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | @@ -369,7 +370,7 @@ The types are defined topologically to aid in facilitating an executable version } ``` -#### `SlashableAttestation` +#### `StandaloneAttestation` ```python { @@ -489,10 +490,10 @@ The types are defined topologically to aid in facilitating an executable version ```python { - # First slashable attestation - 'slashable_attestation_1': SlashableAttestation, - # Second slashable attestation - 'slashable_attestation_2': SlashableAttestation, + # First attestation + 'attestation_1': StandaloneAttestation, + # Second attestation + 'attestation_2': StandaloneAttestation, } ``` @@ -1116,7 +1117,7 @@ def get_attestation_participants(state: BeaconState, aggregation_bit = get_bitfield_bit(bitfield, i) if aggregation_bit == 0b1: participants.append(validator_index) - return participants + return sorted(participants) ``` ### `is_power_of_two` @@ -1214,30 +1215,45 @@ def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: return True ``` -### `verify_slashable_attestation` +### `convert_to_standalone` ```python -def verify_slashable_attestation(state: BeaconState, slashable_attestation: SlashableAttestation) -> bool: +def convert_to_standalone(state: BeaconState, attestation: Attestation): """ - Verify validity of ``slashable_attestation`` fields. + Converts an attestation to (almost) standalone-verifiable form """ - if slashable_attestation.custody_bitfield != b'\x00' * len(slashable_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] + return StandaloneAttestation( + validator_indices=get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield), + data=attestation.data, + custody_bitfield=attestation.custody_bitfield, + aggregate_signature=attestation.aggregate_signature + ) +``` + +### `verify_standalone_attestation` + +```python +def verify_standalone_attestation(state: BeaconState, standalone_attestation: StandaloneAttestation) -> bool: + """ + Verify validity of ``standalone_attestation`` fields. 
+ """ + if standalone_attestation.custody_bitfield != b'\x00' * len(standalone_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] return False - if not (1 <= len(slashable_attestation.validator_indices) <= MAX_SLASHABLE_ATTESTATION_PARTICIPANTS): + if not (1 <= len(standalone_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): return False - for i in range(len(slashable_attestation.validator_indices) - 1): - if slashable_attestation.validator_indices[i] >= slashable_attestation.validator_indices[i + 1]: + for i in range(len(standalone_attestation.validator_indices) - 1): + if standalone_attestation.validator_indices[i] >= standalone_attestation.validator_indices[i + 1]: return False - if not verify_bitfield(slashable_attestation.custody_bitfield, len(slashable_attestation.validator_indices)): + if not verify_bitfield(standalone_attestation.custody_bitfield, len(standalone_attestation.validator_indices)): return False custody_bit_0_indices = [] custody_bit_1_indices = [] - for i, validator_index in enumerate(slashable_attestation.validator_indices): - if get_bitfield_bit(slashable_attestation.custody_bitfield, i) == 0b0: + for i, validator_index in enumerate(standalone_attestation.validator_indices): + if get_bitfield_bit(standalone_attestation.custody_bitfield, i) == 0b0: custody_bit_0_indices.append(validator_index) else: custody_bit_1_indices.append(validator_index) @@ -1248,11 +1264,11 @@ def verify_slashable_attestation(state: BeaconState, slashable_attestation: Slas bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_indices]), ], message_hashes=[ - hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b0)), - hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b1)), + hash_tree_root(AttestationDataAndCustodyBit(data=standalone_attestation.data, custody_bit=0b0)), + hash_tree_root(AttestationDataAndCustodyBit(data=standalone_attestation.data, custody_bit=0b1)), ], - signature=slashable_attestation.aggregate_signature, - domain=get_domain(state.fork, slot_to_epoch(slashable_attestation.data.slot), DOMAIN_ATTESTATION), + signature=standalone_attestation.aggregate_signature, + domain=get_domain(state.fork, slot_to_epoch(standalone_attestation.data.slot), DOMAIN_ATTESTATION), ) ``` @@ -2408,16 +2424,16 @@ def process_attester_slashing(state: BeaconState, Process ``AttesterSlashing`` transaction. Note that this function mutates ``state``. 
""" - attestation1 = attester_slashing.slashable_attestation_1 - attestation2 = attester_slashing.slashable_attestation_2 + attestation1 = attester_slashing.attestation_1 + attestation2 = attester_slashing.attestation_2 # Check that the attestations are conflicting assert attestation1.data != attestation2.data assert ( is_double_vote(attestation1.data, attestation2.data) or is_surround_vote(attestation1.data, attestation2.data) ) - assert verify_slashable_attestation(state, attestation1) - assert verify_slashable_attestation(state, attestation2) + assert verify_standalone_attestation(state, attestation1) + assert verify_standalone_attestation(state, attestation2) slashable_indices = [ index for index in attestation1.validator_indices if ( @@ -2462,18 +2478,8 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ), } - # Check custody bits [to be generalised in phase 1] - assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) - - # Check aggregate signature [to be generalised in phase 1] - participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) - assert len(participants) != 0 - assert bls_verify( - pubkey=bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in participants]), - message_hash=hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), - signature=attestation.aggregate_signature, - domain=get_domain(state.fork, target_epoch, DOMAIN_ATTESTATION), - ) + # Check signature and bitfields + assert verify_standalone_attestation(state, convert_to_standalone(state, attestation)) # Cache pending attestation pending_attestation = PendingAttestation( From ce18bde5c9cb81a85105bbd6f93980f29dbe714b Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 22 Mar 2019 06:20:38 -0500 Subject: [PATCH 107/133] Simplified sorted index check --- specs/core/0_beacon-chain.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a4d5f5ec6..94784e625 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1243,10 +1243,9 @@ def verify_standalone_attestation(state: BeaconState, standalone_attestation: St if not (1 <= len(standalone_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): return False - for i in range(len(standalone_attestation.validator_indices) - 1): - if standalone_attestation.validator_indices[i] >= standalone_attestation.validator_indices[i + 1]: - return False - + if standalone_attestation.validator_indices != sorted(standalone_attestation.validator_indices): + return False + if not verify_bitfield(standalone_attestation.custody_bitfield, len(standalone_attestation.validator_indices)): return False From 80e2553afd675f508a42b42a44a224b97fe2b6f1 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 22 Mar 2019 09:32:21 -0400 Subject: [PATCH 108/133] Update specs/core/0_beacon-chain.md Co-Authored-By: vbuterin --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 94784e625..3ae2c7e13 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1220,7 +1220,7 @@ def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: ```python def convert_to_standalone(state: BeaconState, attestation: Attestation): """ - Converts an attestation to (almost) standalone-verifiable form + Convert an attestation to 
(almost) standalone-verifiable form """ return StandaloneAttestation( validator_indices=get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield), From 5b40baa69eaac7151a6c90b9ce292cef827339b5 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 23 Mar 2019 11:58:20 +0800 Subject: [PATCH 109/133] Adjust the sanity test for attestation verification integration --- tests/phase0/test_sanity.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 444075a13..f7670c126 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -227,22 +227,18 @@ def test_attestation(state, pubkeys, privkeys): crosslink_committees = get_crosslink_committees_at_slot(state, slot) crosslink_committee = [committee for committee, _shard in crosslink_committees if _shard == attestation_data.shard][0] - committee_size = len(crosslink_committee) - bitfield_length = (committee_size + 7) // 8 - aggregation_bitfield = b'\x01' + b'\x00' * (bitfield_length - 1) - custody_bitfield = b'\x00' * bitfield_length + # Select the first validator to be the attester + participants = [crosslink_committee[0]] + aggregation_bitfield_length = (len(crosslink_committee) + 7) // 8 + custody_bitfield_length = (len(participants) + 7) // 8 + aggregation_bitfield = b'\x01' + b'\x00' * (aggregation_bitfield_length - 1) + custody_bitfield = b'\x00' * custody_bitfield_length attestation = Attestation( aggregation_bitfield=aggregation_bitfield, data=attestation_data, custody_bitfield=custody_bitfield, aggregate_signature=EMPTY_SIGNATURE, ) - participants = get_attestation_participants( - test_state, - attestation.data, - attestation.aggregation_bitfield, - ) - assert len(participants) == 1 validator_index = participants[0] privkey = privkeys[validator_index] From b7441e8ab78560b3b48b3e1bd10de0aef6172080 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 25 Mar 2019 14:30:59 +0000 Subject: [PATCH 110/133] Generalise `slash_validator` for phase 1 Make `slash_validator` friendly to phase 1. This is a cosmetic change in the context of phase 0. --- specs/core/0_beacon-chain.md | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c29aa113d..2eeee7802 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -251,7 +251,8 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | Name | Value | | - | - | | `BASE_REWARD_QUOTIENT` | `2**5` (= 32) | -| `WHISTLEBLOWER_REWARD_QUOTIENT` | `2**9` (= 512) | +| `WHISTLEBLOWING_REWARD_QUOTIENT` | `2**9` (= 512) | +| `PROPOSER_REWARD_QUOTIENT` | `2**4` (= 16) | | `ATTESTATION_INCLUSION_REWARD_QUOTIENT` | `2**3` (= 8) | | `INACTIVITY_PENALTY_QUOTIENT` | `2**24` (= 16,777,216) | | `MIN_PENALTY_QUOTIENT` | `2**5` (= 32) | @@ -1448,21 +1449,25 @@ def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: #### `slash_validator` ```python -def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: +def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whitleblower_index: ValidatorIndex=None) -> None: """ - Slash the validator with index ``index``. + Slash the validator with index ``slashed_index``. Note that this function mutates ``state``. 
""" - validator = state.validator_registry[index] - exit_validator(state, index) - state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += get_effective_balance(state, index) + exit_validator(state, slashed_index) + state.validator_registry[slashed_index].slashed = True + state.validator_registry[slashed_index].withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH + slashed_balance = get_effective_balance(state, slashed_index) + state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance - whistleblower_index = get_beacon_proposer_index(state, state.slot) - whistleblower_reward = get_effective_balance(state, index) // WHISTLEBLOWER_REWARD_QUOTIENT - increase_balance(state, whistleblower_index, whistleblower_reward) - decrease_balance(state, index, whistleblower_reward) - validator.slashed = True - validator.withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH + proposer_index = get_beacon_proposer_index(state, state.slot) + if whileblower_index is None: + whileblower_index = proposer_index + whistleblowing_reward = slashed_balance // WHISTLEBLOWING_REWARD_QUOTIENT + proposer_reward = whistleblowing_reward // PROPOSER_REWARD_QUOTIENT + increase_balance(state, proposer_index, proposer_reward) + increase_balance(state, whitleblower_index, whistleblowing_reward - proposer_reward) + decrease_balance(state, slashed_index, whistleblower_reward) ``` #### `prepare_validator_for_withdrawal` From fb837400b2b2f5c14fd25d7e875fb1a236b83f64 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 25 Mar 2019 14:49:35 +0000 Subject: [PATCH 111/133] Can't spell (thanks continuous integration!) --- specs/core/0_beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2eeee7802..ff142e048 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1449,7 +1449,7 @@ def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: #### `slash_validator` ```python -def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whitleblower_index: ValidatorIndex=None) -> None: +def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex=None) -> None: """ Slash the validator with index ``slashed_index``. Note that this function mutates ``state``. 
@@ -1461,12 +1461,12 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whitleblo state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance proposer_index = get_beacon_proposer_index(state, state.slot) - if whileblower_index is None: - whileblower_index = proposer_index + if whistleblower_index is None: + whistleblower_index = proposer_index whistleblowing_reward = slashed_balance // WHISTLEBLOWING_REWARD_QUOTIENT proposer_reward = whistleblowing_reward // PROPOSER_REWARD_QUOTIENT increase_balance(state, proposer_index, proposer_reward) - increase_balance(state, whitleblower_index, whistleblowing_reward - proposer_reward) + increase_balance(state, whistleblower_index, whistleblowing_reward - proposer_reward) decrease_balance(state, slashed_index, whistleblower_reward) ``` From acc5f314ac601b3887722a9e6a9783dfd075ebfb Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 25 Mar 2019 14:54:43 +0000 Subject: [PATCH 112/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ff142e048..00c229036 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1467,7 +1467,7 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl proposer_reward = whistleblowing_reward // PROPOSER_REWARD_QUOTIENT increase_balance(state, proposer_index, proposer_reward) increase_balance(state, whistleblower_index, whistleblowing_reward - proposer_reward) - decrease_balance(state, slashed_index, whistleblower_reward) + decrease_balance(state, slashed_index, whistleblowing_reward) ``` #### `prepare_validator_for_withdrawal` From fceebeec4e518886b8c2986baee019d2585d8132 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 25 Mar 2019 11:25:33 -0600 Subject: [PATCH 113/133] backport 839 into dev --- .gitignore | 1 + specs/core/0_beacon-chain.md | 11 ++-- tests/phase0/__init__.py | 0 .../block_processing/test_process_deposit.py | 17 ++++-- .../block_processing/test_voluntary_exit.py | 21 ++++---- tests/phase0/conftest.py | 18 ------- tests/phase0/helpers.py | 16 +++--- tests/phase0/test_sanity.py | 18 ++++--- utils/phase0/jsonize.py | 52 +++++++++++++++++++ 9 files changed, 101 insertions(+), 53 deletions(-) create mode 100644 tests/phase0/__init__.py create mode 100644 utils/phase0/jsonize.py diff --git a/.gitignore b/.gitignore index dfb38d170..f33dd5256 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ /.pytest_cache build/ +output/ diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c29aa113d..e7e540a26 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -692,7 +692,8 @@ def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader: previous_block_root=block.previous_block_root, state_root=ZERO_HASH, block_body_root=hash_tree_root(block.body), - signature=block.signature, + # signed_root(block) is used for block id purposes so signature is a stub + signature=EMPTY_SIGNATURE, ) ``` @@ -1749,7 +1750,7 @@ def cache_state(state: BeaconState) -> None: state.latest_block_header.state_root = previous_slot_state_root # store latest known block for previous slot - state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = hash_tree_root(state.latest_block_header) + state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = signed_root(state.latest_block_header) ``` ### Per-epoch processing @@ 
-2198,8 +2199,8 @@ def update_registry_and_shuffling_data(state: BeaconState) -> None: state.current_shuffling_epoch = next_epoch state.current_shuffling_start_shard = ( state.current_shuffling_start_shard + - get_current_epoch_committee_count(state) % SHARD_COUNT - ) + get_current_epoch_committee_count(state) + ) % SHARD_COUNT state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch) else: # If processing at least one crosslink keeps failing, then reshuffle every power of two, @@ -2315,7 +2316,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: # Verify that the slots match assert block.slot == state.slot # Verify that the parent matches - assert block.previous_block_root == hash_tree_root(state.latest_block_header) + assert block.previous_block_root == signed_root(state.latest_block_header) # Save current block as the new latest block state.latest_block_header = get_temporary_block_header(block) # Verify proposer is not slashed diff --git a/tests/phase0/__init__.py b/tests/phase0/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/phase0/block_processing/test_process_deposit.py b/tests/phase0/block_processing/test_process_deposit.py index 9f1b6add6..0726dddef 100644 --- a/tests/phase0/block_processing/test_process_deposit.py +++ b/tests/phase0/block_processing/test_process_deposit.py @@ -4,12 +4,14 @@ import pytest import build.phase0.spec as spec from build.phase0.spec import ( - Deposit, get_balance, + ZERO_HASH, process_deposit, ) from tests.phase0.helpers import ( build_deposit, + privkeys, + pubkeys, ) @@ -17,8 +19,10 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.voluntary_exits -def test_success(state, deposit_data_leaves, pubkeys, privkeys): +def test_success(state): pre_state = deepcopy(state) + # fill previous deposits with zero-hash + deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) index = len(deposit_data_leaves) pubkey = pubkeys[index] @@ -47,8 +51,9 @@ def test_success(state, deposit_data_leaves, pubkeys, privkeys): return pre_state, deposit, post_state -def test_success_top_up(state, deposit_data_leaves, pubkeys, privkeys): +def test_success_top_up(state): pre_state = deepcopy(state) + deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) validator_index = 0 amount = spec.MAX_DEPOSIT_AMOUNT // 4 @@ -78,8 +83,9 @@ def test_success_top_up(state, deposit_data_leaves, pubkeys, privkeys): return pre_state, deposit, post_state -def test_wrong_index(state, deposit_data_leaves, pubkeys, privkeys): +def test_wrong_index(state): pre_state = deepcopy(state) + deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) index = len(deposit_data_leaves) pubkey = pubkeys[index] @@ -106,8 +112,9 @@ def test_wrong_index(state, deposit_data_leaves, pubkeys, privkeys): return pre_state, deposit, None -def test_bad_merkle_proof(state, deposit_data_leaves, pubkeys, privkeys): +def test_bad_merkle_proof(state): pre_state = deepcopy(state) + deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) index = len(deposit_data_leaves) pubkey = pubkeys[index] diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py index 0801e4292..6adc81464 100644 --- a/tests/phase0/block_processing/test_voluntary_exit.py +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -10,6 +10,7 @@ from build.phase0.spec import ( ) from tests.phase0.helpers import ( build_voluntary_exit, + 
pubkey_to_privkey, ) @@ -17,7 +18,7 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.voluntary_exits -def test_success(state, pub_to_priv): +def test_success(state): pre_state = deepcopy(state) # # setup pre_state @@ -30,7 +31,7 @@ def test_success(state, pub_to_priv): # current_epoch = get_current_epoch(pre_state) validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( pre_state, @@ -52,11 +53,11 @@ def test_success(state, pub_to_priv): return pre_state, voluntary_exit, post_state -def test_validator_not_active(state, pub_to_priv): +def test_validator_not_active(state): pre_state = deepcopy(state) current_epoch = get_current_epoch(pre_state) validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] # # setup pre_state @@ -79,7 +80,7 @@ def test_validator_not_active(state, pub_to_priv): return pre_state, voluntary_exit, None -def test_validator_already_exited(state, pub_to_priv): +def test_validator_already_exited(state): pre_state = deepcopy(state) # # setup pre_state @@ -89,7 +90,7 @@ def test_validator_already_exited(state, pub_to_priv): current_epoch = get_current_epoch(pre_state) validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] # but validator already has exited pre_state.validator_registry[validator_index].exit_epoch = current_epoch + 2 @@ -110,7 +111,7 @@ def test_validator_already_exited(state, pub_to_priv): return pre_state, voluntary_exit, None -def test_validator_already_initiated_exit(state, pub_to_priv): +def test_validator_already_initiated_exit(state): pre_state = deepcopy(state) # # setup pre_state @@ -120,7 +121,7 @@ def test_validator_already_initiated_exit(state, pub_to_priv): current_epoch = get_current_epoch(pre_state) validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] # but validator already has initiated exit pre_state.validator_registry[validator_index].initiated_exit = True @@ -141,14 +142,14 @@ def test_validator_already_initiated_exit(state, pub_to_priv): return pre_state, voluntary_exit, None -def test_validator_not_active_long_enough(state, pub_to_priv): +def test_validator_not_active_long_enough(state): pre_state = deepcopy(state) # # setup pre_state # current_epoch = get_current_epoch(pre_state) validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] # but validator already has initiated exit pre_state.validator_registry[validator_index].initiated_exit = True diff --git a/tests/phase0/conftest.py b/tests/phase0/conftest.py index 395929028..36a087941 100644 --- a/tests/phase0/conftest.py +++ 
b/tests/phase0/conftest.py @@ -3,9 +3,6 @@ import pytest from build.phase0 import spec from tests.phase0.helpers import ( - privkeys_list, - pubkeys_list, - pubkey_to_privkey, create_genesis_state, ) @@ -25,21 +22,6 @@ MINIMAL_CONFIG = { } -@pytest.fixture -def privkeys(): - return privkeys_list - - -@pytest.fixture -def pubkeys(): - return pubkeys_list - - -@pytest.fixture -def pub_to_priv(): - return pubkey_to_privkey - - def overwrite_spec_config(config): for field in config: setattr(spec, field, config[field]) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 3987289bf..a0ede04e5 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -35,18 +35,20 @@ from build.phase0.utils.merkle_minimal import ( ) -privkeys_list = [i + 1 for i in range(1000)] -pubkeys_list = [bls.privtopub(privkey) for privkey in privkeys_list] -pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys_list, pubkeys_list)} +privkeys = [i + 1 for i in range(1000)] +pubkeys = [bls.privtopub(privkey) for privkey in privkeys] +pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} -def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves): +def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None): + if not deposit_data_leaves: + deposit_data_leaves = [] deposit_timestamp = 0 proof_of_possession = b'\x33' * 96 deposit_data_list = [] for i in range(num_validators): - pubkey = pubkeys_list[i] + pubkey = pubkeys[i] deposit_data = DepositData( amount=spec.MAX_DEPOSIT_AMOUNT, timestamp=deposit_timestamp, @@ -75,7 +77,7 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves): return genesis_validator_deposits, root -def create_genesis_state(num_validators, deposit_data_leaves): +def create_genesis_state(num_validators, deposit_data_leaves=None): initial_deposits, deposit_root = create_mock_genesis_validator_deposits( num_validators, deposit_data_leaves, @@ -105,7 +107,7 @@ def build_empty_block_for_next_slot(state): previous_block_header = deepcopy(state.latest_block_header) if previous_block_header.state_root == spec.ZERO_HASH: previous_block_header.state_root = state.hash_tree_root() - empty_block.previous_block_root = previous_block_header.hash_tree_root() + empty_block.previous_block_root = signed_root(previous_block_header) return empty_block diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 444075a13..0b195fe96 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -47,6 +47,8 @@ from tests.phase0.helpers import ( build_empty_block_for_next_slot, force_registry_change_at_next_epoch, get_valid_proposer_slashing, + privkeys, + pubkeys, ) @@ -116,7 +118,7 @@ def test_empty_epoch_transition_not_finalizing(state): return state, [block], test_state -def test_proposer_slashing(state, pubkeys, privkeys): +def test_proposer_slashing(state): test_state = deepcopy(state) proposer_slashing = get_valid_proposer_slashing(state) validator_index = proposer_slashing.proposer_index @@ -142,9 +144,9 @@ def test_proposer_slashing(state, pubkeys, privkeys): return state, [block], test_state -def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): +def test_deposit_in_block(state): pre_state = deepcopy(state) - test_deposit_data_leaves = deepcopy(deposit_data_leaves) + test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) index = len(test_deposit_data_leaves) pubkey = pubkeys[index] @@ -179,9 +181,9 @@ 
def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): return pre_state, [block], post_state -def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): +def test_deposit_top_up(state): pre_state = deepcopy(state) - test_deposit_data_leaves = deepcopy(deposit_data_leaves) + test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) validator_index = 0 amount = spec.MAX_DEPOSIT_AMOUNT // 4 @@ -218,7 +220,7 @@ def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): return pre_state, [block], post_state -def test_attestation(state, pubkeys, privkeys): +def test_attestation(state): test_state = deepcopy(state) slot = state.slot shard = state.current_shuffling_start_shard @@ -287,7 +289,7 @@ def test_attestation(state, pubkeys, privkeys): return state, [attestation_block, epoch_block], test_state -def test_voluntary_exit(state, pubkeys, privkeys): +def test_voluntary_exit(state): pre_state = deepcopy(state) validator_index = get_active_validator_indices( pre_state.validator_registry, @@ -375,7 +377,7 @@ def test_no_exit_churn_too_long_since_change(state): return pre_state, [block], post_state -def test_transfer(state, pubkeys, privkeys): +def test_transfer(state): pre_state = deepcopy(state) current_epoch = get_current_epoch(pre_state) sender_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[-1] diff --git a/utils/phase0/jsonize.py b/utils/phase0/jsonize.py new file mode 100644 index 000000000..816192ec6 --- /dev/null +++ b/utils/phase0/jsonize.py @@ -0,0 +1,52 @@ +from .minimal_ssz import hash_tree_root + + +def jsonize(value, typ, include_hash_tree_roots=False): + if isinstance(typ, str) and typ[:4] == 'uint': + return value + elif typ == 'bool': + assert value in (True, False) + return value + elif isinstance(typ, list): + return [jsonize(element, typ[0], include_hash_tree_roots) for element in value] + elif isinstance(typ, str) and typ[:4] == 'byte': + return '0x' + value.hex() + elif hasattr(typ, 'fields'): + ret = {} + for field, subtype in typ.fields.items(): + ret[field] = jsonize(getattr(value, field), subtype, include_hash_tree_roots) + if include_hash_tree_roots: + ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex() + if include_hash_tree_roots: + ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex() + return ret + else: + print(value, typ) + raise Exception("Type not recognized") + + +def dejsonize(json, typ): + if isinstance(typ, str) and typ[:4] == 'uint': + return json + elif typ == 'bool': + assert json in (True, False) + return json + elif isinstance(typ, list): + return [dejsonize(element, typ[0]) for element in json] + elif isinstance(typ, str) and typ[:4] == 'byte': + return bytes.fromhex(json[2:]) + elif hasattr(typ, 'fields'): + temp = {} + for field, subtype in typ.fields.items(): + temp[field] = dejsonize(json[field], subtype) + if field + "_hash_tree_root" in json: + assert(json[field + "_hash_tree_root"][2:] == + hash_tree_root(temp[field], subtype).hex()) + ret = typ(**temp) + if "hash_tree_root" in json: + assert(json["hash_tree_root"][2:] == + hash_tree_root(ret, typ).hex()) + return ret + else: + print(json, typ) + raise Exception("Type not recognized") From 6cc82278b4a1208bc2da94a37f398eb12c96e4e1 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Mon, 25 Mar 2019 13:27:18 -0700 Subject: [PATCH 114/133] Update rpc-interface.md --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index 51dc3a900..fa49bcd75 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -103,7 +103,7 @@ Since some clients are waiting for `libp2p` implementations in their respective ``` ( network_id: uint8 - chain_id: uint8 + chain_id: uint64 latest_finalized_root: bytes32 latest_finalized_epoch: uint64 best_root: bytes32 From 0121adea3831ac527fcb4c8a63a04bd63ac8684f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 07:09:48 -0600 Subject: [PATCH 115/133] fix beacon proposer function and mod v-guide to not have lookahead for proposing --- specs/core/0_beacon-chain.md | 20 ++++++++-------- specs/validator/0_beacon-chain-validator.md | 26 +++++++++++++++------ 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 485233746..c96c28888 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1061,23 +1061,23 @@ def generate_seed(state: BeaconState, ```python def get_beacon_proposer_index(state: BeaconState, - slot: Slot, - registry_change: bool=False) -> ValidatorIndex: + slot: Slot) -> ValidatorIndex: """ Return the beacon proposer index for the ``slot``. + Due to proposer selection being based upon the validator balances during + the epoch in question, this can only be run for the current epoch. """ - epoch = slot_to_epoch(slot) current_epoch = get_current_epoch(state) - previous_epoch = get_previous_epoch(state) - next_epoch = current_epoch + 1 + assert slot_to_epoch(slot) == current_epoch - assert previous_epoch <= epoch <= next_epoch - - first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] + first_committee, _ = get_crosslink_committees_at_slot(state, slot)[0] i = 0 while True: - rand_byte = hash(generate_seed(get_current_epoch(state)) + int_to_bytes8(i // 32))[i % 32] - candidate = first_committee[(epoch + i) % len(first_committee)] + rand_byte = hash( + generate_seed(state, current_epoch) + + int_to_bytes8(i // 32) + )[i % 32] + candidate = first_committee[(current_epoch + i) % len(first_committee)] if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: return candidate i += 1 diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 62a7011b4..f1a10a048 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -338,15 +338,13 @@ def get_committee_assignment( state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex, - registry_change: bool=False) -> Tuple[List[ValidatorIndex], Shard, Slot, bool]: + registry_change: bool=False) -> Tuple[List[ValidatorIndex], Shard, Slot]: """ Return the committee assignment in the ``epoch`` for ``validator_index`` and ``registry_change``. ``assignment`` returned is a tuple of the following form: * ``assignment[0]`` is the list of validators in the committee * ``assignment[1]`` is the shard to which the committee is assigned * ``assignment[2]`` is the slot at which the committee is assigned - * ``assignment[3]`` is a bool signalling if the validator is expected to propose - a beacon block at the assigned slot. 
""" previous_epoch = get_previous_epoch(state) next_epoch = get_current_epoch(state) + 1 @@ -367,15 +365,29 @@ def get_committee_assignment( if len(selected_committees) > 0: validators = selected_committees[0][0] shard = selected_committees[0][1] - is_proposer = validator_index == get_beacon_proposer_index(state, slot, registry_change=registry_change) - assignment = (validators, shard, slot, is_proposer) + assignment = (validators, shard, slot) return assignment ``` +A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the epoch of the slot in question and can not reliably be used to predict an epoch in advance. + +```python +def is_proposer_at_slot(state: BeaconState, + slot: Slot, + validator_index: ValidatorIndex) -> bool: + current_epoch = get_current_epoch(state) + assert slot_to_epoch(slot) == current_epoch + + return get_beacon_proposer_index(state, slot) == validator_index +``` + +_Note_: If a validator is assigned to the 0th slot of an epoch, the validator must run an empty slot transition from the previous epoch into the 0th slot of the epoch to be able to check if they are a proposer at that slot. + + ### Lookahead -The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming assignments of proposing and attesting dictated by the shuffling and slot. +The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing which must checked during the epoch in question. There are three possibilities for the shuffling at the next epoch: 1. The shuffling changes due to a "validator registry change". @@ -386,7 +398,7 @@ Either (2) or (3) occurs if (1) fails. The choice between (2) and (3) is determi When querying for assignments in the next epoch there are two options -- with and without a `registry_change` -- which is the optional fourth parameter of the `get_committee_assignment`. -`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should always plan for assignments from both values of `registry_change` unless the validator can concretely eliminate one of the options. Planning for future assignments involves noting at which future slot one might have to attest and propose and also which shard one should begin syncing (in phase 1+). +`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should always plan for assignments from both values of `registry_change` unless the validator can concretely eliminate one of the options. Planning for future assignments involves noting at which future slot one might have to attest and also which shard one should begin syncing (in phase 1+). Specifically, a validator should call both `get_committee_assignment(state, next_epoch, validator_index, registry_change=True)` and `get_committee_assignment(state, next_epoch, validator_index, registry_change=False)` when checking for next epoch assignments. 
From be57cafbfbd70368a133a4bd1bc274b306f2af0e Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 07:17:08 -0600 Subject: [PATCH 116/133] switch utils hash-function to sha256 --- utils/phase0/hash_function.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/utils/phase0/hash_function.py b/utils/phase0/hash_function.py index 21e6555bf..3fee63d82 100644 --- a/utils/phase0/hash_function.py +++ b/utils/phase0/hash_function.py @@ -1,7 +1,6 @@ -# from hashlib import sha256 -from eth_utils import keccak +from hashlib import sha256 +# from eth_utils import keccak -# def hash(x): return sha256(x).digest() -def hash(x): - return keccak(x) +def hash(x): return sha256(x).digest() +# def hash(x): return keccak(x) From 87d2618a495ad382d6810a4fe4b96d4d91f9355f Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 26 Mar 2019 13:21:49 +0000 Subject: [PATCH 117/133] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 00c229036..61ebe5e83 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -252,8 +252,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | - | - | | `BASE_REWARD_QUOTIENT` | `2**5` (= 32) | | `WHISTLEBLOWING_REWARD_QUOTIENT` | `2**9` (= 512) | -| `PROPOSER_REWARD_QUOTIENT` | `2**4` (= 16) | -| `ATTESTATION_INCLUSION_REWARD_QUOTIENT` | `2**3` (= 8) | +| `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) | | `INACTIVITY_PENALTY_QUOTIENT` | `2**24` (= 16,777,216) | | `MIN_PENALTY_QUOTIENT` | `2**5` (= 32) | @@ -2016,7 +2015,7 @@ def compute_normal_justification_and_finalization_deltas(state: BeaconState) -> # Proposer bonus if index in get_attesting_indices(state, state.previous_epoch_attestations): proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) - deltas[0][proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT + deltas[0][proposer_index] += get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT return deltas ``` From 5a708bae348221bfe1926775b31a4de05b23a090 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 07:38:51 -0600 Subject: [PATCH 118/133] fix tests --- tests/phase0/helpers.py | 29 ++++++++++------------------- tests/phase0/test_sanity.py | 4 ++-- 2 files changed, 12 insertions(+), 21 deletions(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index a0ede04e5..3c68c2c8c 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -12,7 +12,6 @@ from build.phase0.spec import ( AttestationData, BeaconBlockHeader, Deposit, - DepositInput, DepositData, Eth1Data, ProposerSlashing, @@ -43,21 +42,17 @@ pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkey def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None): if not deposit_data_leaves: deposit_data_leaves = [] - deposit_timestamp = 0 proof_of_possession = b'\x33' * 96 deposit_data_list = [] for i in range(num_validators): pubkey = pubkeys[i] deposit_data = DepositData( + pubkey=pubkey, + # insecurely use pubkey as withdrawal key as well + withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], amount=spec.MAX_DEPOSIT_AMOUNT, - timestamp=deposit_timestamp, - deposit_input=DepositInput( - pubkey=pubkey, - # insecurely use pubkey as withdrawal key as well - withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], - 
proof_of_possession=proof_of_possession, - ), + proof_of_possession=proof_of_possession, ) item = hash(deposit_data.serialize()) deposit_data_leaves.append(item) @@ -72,7 +67,7 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=N genesis_validator_deposits.append(Deposit( proof=list(get_merkle_proof(tree, item_index=i)), index=i, - deposit_data=deposit_data_list[i] + data=deposit_data_list[i] )) return genesis_validator_deposits, root @@ -112,14 +107,15 @@ def build_empty_block_for_next_slot(state): def build_deposit_data(state, pubkey, privkey, amount): - deposit_input = DepositInput( + deposit_data = DepositData( pubkey=pubkey, # insecurely use pubkey as withdrawal key as well withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], + amount=amount, proof_of_possession=EMPTY_SIGNATURE, ) proof_of_possession = bls.sign( - message_hash=signed_root(deposit_input), + message_hash=signed_root(deposit_data), privkey=privkey, domain=get_domain( state.fork, @@ -127,12 +123,7 @@ def build_deposit_data(state, pubkey, privkey, amount): spec.DOMAIN_DEPOSIT, ) ) - deposit_input.proof_of_possession = proof_of_possession - deposit_data = DepositData( - amount=amount, - timestamp=0, - deposit_input=deposit_input, - ) + deposit_data.proof_of_possession = proof_of_possession return deposit_data @@ -201,7 +192,7 @@ def build_deposit(state, deposit = Deposit( proof=list(proof), index=index, - deposit_data=deposit_data, + data=deposit_data, ) return deposit, root, deposit_data_leaves diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 0b195fe96..19e75f672 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -163,7 +163,7 @@ def test_deposit_in_block(state): deposit = Deposit( proof=list(proof), index=index, - deposit_data=deposit_data, + data=deposit_data, ) pre_state.latest_eth1_data.deposit_root = root @@ -202,7 +202,7 @@ def test_deposit_top_up(state): deposit = Deposit( proof=list(proof), index=merkle_index, - deposit_data=deposit_data, + data=deposit_data, ) pre_state.latest_eth1_data.deposit_root = root From a8410b8b843608bbbcec9c4cad76898f7435ae07 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 11:27:07 -0600 Subject: [PATCH 119/133] add some attestation tests. 
fix genesi crosslink bug --- specs/core/0_beacon-chain.md | 11 ++- .../test_process_attestation.py | 67 +++++++++++++++++++ tests/phase0/helpers.py | 50 ++++++++++++++ tests/phase0/test_sanity.py | 50 +------------- 4 files changed, 124 insertions(+), 54 deletions(-) create mode 100644 tests/phase0/block_processing/test_process_attestation.py diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e628c7057..4b52bd2fa 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -905,21 +905,20 @@ def get_crosslink_committees_at_slot(state: BeaconState, next_epoch = current_epoch + 1 assert previous_epoch <= epoch <= next_epoch - active_validator_indices = get_active_validator_indices( + indices = get_active_validator_indices( state.validator_registry, epoch, ) - committees_per_epoch = get_epoch_committee_count(len(active_validator_indices)) + committees_per_epoch = get_epoch_committee_count(len(indices)) if epoch == current_epoch: start_shard = state.latest_start_shard elif epoch == previous_epoch: - start_shard = (state.latest_start_shard - SLOTS_PER_EPOCH * committees_per_epoch) % SHARD_COUNT + start_shard = (state.latest_start_shard - committees_per_epoch) % SHARD_COUNT elif epoch == next_epoch: current_epoch_committees = get_current_epoch_committee_count(state) - start_shard = (state.latest_start_shard + EPOCH_LENGTH * current_epoch_committees) % SHARD_COUNT + start_shard = (state.latest_start_shard + current_epoch_committees) % SHARD_COUNT - indices = get_active_validator_indices(state.validator_registry, epoch) committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH offset = slot % SLOTS_PER_EPOCH slot_start_shard = (start_shard + committees_per_slot * offset) % SHARD_COUNT @@ -1830,7 +1829,7 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: current_epoch = get_current_epoch(state) - previous_epoch = current_epoch - 1 + previous_epoch = max(current_epoch - 1, GENESIS_EPOCH) next_epoch = current_epoch + 1 for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): diff --git a/tests/phase0/block_processing/test_process_attestation.py b/tests/phase0/block_processing/test_process_attestation.py new file mode 100644 index 000000000..80770fdf9 --- /dev/null +++ b/tests/phase0/block_processing/test_process_attestation.py @@ -0,0 +1,67 @@ +from copy import deepcopy +import pytest + +import build.phase0.spec as spec + +from build.phase0.state_transition import ( + state_transition, +) +from build.phase0.spec import ( + ZERO_HASH, + get_current_epoch, + process_attestation, + slot_to_epoch, +) +from tests.phase0.helpers import ( + build_empty_block_for_next_slot, + get_valid_attestation, +) + + +# mark entire file as 'attestations' +pytestmark = pytest.mark.attestations + + +def run_attestation_processing(state, attestation, valid=True): + """ + Run ``process_attestation`` returning the pre and post state. 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + post_state = deepcopy(state) + + if not valid: + with pytest.raises(AssertionError): + process_attestation(post_state, attestation) + return state, None + + process_attestation(post_state, attestation) + + current_epoch = get_current_epoch(state) + target_epoch = slot_to_epoch(attestation.data.slot) + if target_epoch == current_epoch: + assert len(post_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1 + else: + assert len(post_state.previous_epoch_attestations) == len(state.previous_epoch_attestations) + 1 + + + return state, post_state + + +def test_success(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + pre_state, post_state = run_attestation_processing(state, attestation) + + return pre_state, attestation, post_state + + +def test_success_prevous_epoch(state): + attestation = get_valid_attestation(state) + block = build_empty_block_for_next_slot(state) + block.slot = state.slot + spec.SLOTS_PER_EPOCH + state_transition(state, block) + + pre_state, post_state = run_attestation_processing(state, attestation) + + return pre_state, attestation, post_state diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 3c68c2c8c..d7f4ae6e8 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -9,7 +9,9 @@ from build.phase0.spec import ( EMPTY_SIGNATURE, ZERO_HASH, # SSZ + Attestation, AttestationData, + AttestationDataAndCustodyBit, BeaconBlockHeader, Deposit, DepositData, @@ -18,7 +20,9 @@ from build.phase0.spec import ( VoluntaryExit, # functions get_active_validator_indices, + get_attestation_participants, get_block_root, + get_crosslink_committees_at_slot, get_current_epoch, get_domain, get_empty_block, @@ -236,3 +240,49 @@ def get_valid_proposer_slashing(state): header_1=header_1, header_2=header_2, ) + + +def get_valid_attestation(state, slot=None): + if slot is None: + slot = state.slot + shard = state.latest_start_shard + attestation_data = build_attestation_data(state, slot, shard) + + crosslink_committees = get_crosslink_committees_at_slot(state, slot) + crosslink_committee = [committee for committee, _shard in crosslink_committees if _shard == attestation_data.shard][0] + + committee_size = len(crosslink_committee) + bitfield_length = (committee_size + 7) // 8 + aggregation_bitfield = b'\x01' + b'\x00' * (bitfield_length - 1) + custody_bitfield = b'\x00' * bitfield_length + attestation = Attestation( + aggregation_bitfield=aggregation_bitfield, + data=attestation_data, + custody_bitfield=custody_bitfield, + aggregate_signature=EMPTY_SIGNATURE, + ) + participants = get_attestation_participants( + state, + attestation.data, + attestation.aggregation_bitfield, + ) + assert len(participants) == 1 + + validator_index = participants[0] + privkey = privkeys[validator_index] + + message_hash = AttestationDataAndCustodyBit( + data=attestation.data, + custody_bit=0b0, + ).hash_tree_root() + + attestation.aggregation_signature = bls.sign( + message_hash=message_hash, + privkey=privkey, + domain=get_domain( + fork=state.fork, + epoch=get_current_epoch(state), + domain_type=spec.DOMAIN_ATTESTATION, + ) + ) + return attestation diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index a2cbadd9a..b287bde51 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -11,19 +11,13 @@ from build.phase0.spec import ( EMPTY_SIGNATURE, ZERO_HASH, # SSZ - Attestation, - 
AttestationDataAndCustodyBit, - BeaconBlockHeader, Deposit, Transfer, - ProposerSlashing, VoluntaryExit, # functions get_active_validator_indices, - get_attestation_participants, get_balance, get_block_root, - get_crosslink_committees_at_slot, get_current_epoch, get_domain, get_state_root, @@ -42,10 +36,10 @@ from build.phase0.utils.merkle_minimal import ( get_merkle_root, ) from tests.phase0.helpers import ( - build_attestation_data, build_deposit_data, build_empty_block_for_next_slot, force_registry_change_at_next_epoch, + get_valid_attestation, get_valid_proposer_slashing, privkeys, pubkeys, @@ -222,47 +216,7 @@ def test_deposit_top_up(state): def test_attestation(state): test_state = deepcopy(state) - slot = state.slot - shard = state.latest_start_shard - attestation_data = build_attestation_data(state, slot, shard) - - crosslink_committees = get_crosslink_committees_at_slot(state, slot) - crosslink_committee = [committee for committee, _shard in crosslink_committees if _shard == attestation_data.shard][0] - - committee_size = len(crosslink_committee) - bitfield_length = (committee_size + 7) // 8 - aggregation_bitfield = b'\x01' + b'\x00' * (bitfield_length - 1) - custody_bitfield = b'\x00' * bitfield_length - attestation = Attestation( - aggregation_bitfield=aggregation_bitfield, - data=attestation_data, - custody_bitfield=custody_bitfield, - aggregate_signature=EMPTY_SIGNATURE, - ) - participants = get_attestation_participants( - test_state, - attestation.data, - attestation.aggregation_bitfield, - ) - assert len(participants) == 1 - - validator_index = participants[0] - privkey = privkeys[validator_index] - - message_hash = AttestationDataAndCustodyBit( - data=attestation.data, - custody_bit=0b0, - ).hash_tree_root() - - attestation.aggregation_signature = bls.sign( - message_hash=message_hash, - privkey=privkey, - domain=get_domain( - fork=test_state.fork, - epoch=get_current_epoch(test_state), - domain_type=spec.DOMAIN_ATTESTATION, - ) - ) + attestation = get_valid_attestation(state) # # Add to state via block transition From 9fa6055a8a5b2c73774f143d3abdbe23323e93b4 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 11:41:15 -0600 Subject: [PATCH 120/133] add more attestation tests --- .../test_process_attestation.py | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/tests/phase0/block_processing/test_process_attestation.py b/tests/phase0/block_processing/test_process_attestation.py index 80770fdf9..b34c64d95 100644 --- a/tests/phase0/block_processing/test_process_attestation.py +++ b/tests/phase0/block_processing/test_process_attestation.py @@ -65,3 +65,90 @@ def test_success_prevous_epoch(state): pre_state, post_state = run_attestation_processing(state, attestation) return pre_state, attestation, post_state + + +def test_before_inclusion_delay(state): + attestation = get_valid_attestation(state) + # do not increment slot to allow for inclusion delay + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_after_epoch_slots(state): + attestation = get_valid_attestation(state) + block = build_empty_block_for_next_slot(state) + # increment past latest inclusion slot + block.slot = state.slot + spec.SLOTS_PER_EPOCH + 1 + state_transition(state, block) + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_bad_source_epoch(state): + attestation = get_valid_attestation(state) + 
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.source_epoch += 10 + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_bad_source_root(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.source_root = b'\x42'*32 + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_non_zero_crosslink_data_root(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.crosslink_data_root = b'\x42'*32 + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_bad_previous_crosslink(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + state.latest_crosslinks[attestation.data.shard].epoch += 10 + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_non_empty_custody_bitfield(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.custody_bitfield = b'\x01' + attestation.custody_bitfield[1:] + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_empty_aggregation_bitfield(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield) + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state From 2c5a68b5b5d4348ede49f07e50b943eb22c03414 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 12:32:24 -0600 Subject: [PATCH 121/133] remove registry_change options from shuffling functions --- specs/core/0_beacon-chain.md | 8 ++----- specs/validator/0_beacon-chain-validator.md | 21 +++++-------------- .../test_process_attestation.py | 1 - 3 files changed, 7 insertions(+), 23 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4b52bd2fa..38f5f56c5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -891,13 +891,9 @@ def get_current_epoch_committee_count(state: BeaconState) -> int: ```python def get_crosslink_committees_at_slot(state: BeaconState, - slot: Slot, - registry_change: bool=False) -> List[Tuple[List[ValidatorIndex], Shard]]: + slot: Slot) -> List[Tuple[List[ValidatorIndex], Shard]]: """ Return the list of ``(committee, shard)`` tuples for the ``slot``. 
- - Note: There are two possible shufflings for crosslink committees for a - ``slot`` in the next epoch -- with and without a `registry_change` """ epoch = slot_to_epoch(slot) current_epoch = get_current_epoch(state) @@ -2339,7 +2335,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: # Check target epoch, source epoch, and source root target_epoch = slot_to_epoch(attestation.data.slot) assert (target_epoch, attestation.data.source_epoch, attestation.data.source_root) in { - (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), + (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), } diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index f1a10a048..4a4c63836 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -331,16 +331,15 @@ signed_attestation_data = bls_sign( ## Validator assignments -A validator can get the current and previous epoch committee assignments using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `previous_epoch <= epoch <= current_epoch`. +A validator can get the current, previous, and next epoch committee assignments using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `previous_epoch <= epoch <= next_epoch`. ```python def get_committee_assignment( state: BeaconState, epoch: Epoch, - validator_index: ValidatorIndex, - registry_change: bool=False) -> Tuple[List[ValidatorIndex], Shard, Slot]: + validator_index: ValidatorIndex) -> Tuple[List[ValidatorIndex], Shard, Slot]: """ - Return the committee assignment in the ``epoch`` for ``validator_index`` and ``registry_change``. + Return the committee assignment in the ``epoch`` for ``validator_index``. ``assignment`` returned is a tuple of the following form: * ``assignment[0]`` is the list of validators in the committee * ``assignment[1]`` is the shard to which the committee is assigned @@ -355,7 +354,6 @@ def get_committee_assignment( crosslink_committees = get_crosslink_committees_at_slot( state, slot, - registry_change=registry_change, ) selected_committees = [ committee # Tuple[List[ValidatorIndex], Shard] @@ -389,18 +387,9 @@ _Note_: If a validator is assigned to the 0th slot of an epoch, the validator mu The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing which must checked during the epoch in question. -There are three possibilities for the shuffling at the next epoch: -1. The shuffling changes due to a "validator registry change". -2. The shuffling changes due to `epochs_since_last_registry_update` being an exact power of 2 greater than 1. -3. The shuffling remains the same (i.e. the validator is in the same shard committee). +`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments which involves noting at which future slot one will have to attest and also which shard one should begin syncing (in phase 1+). -Either (2) or (3) occurs if (1) fails. 
The choice between (2) and (3) is deterministic based upon `epochs_since_last_registry_update`. - -When querying for assignments in the next epoch there are two options -- with and without a `registry_change` -- which is the optional fourth parameter of the `get_committee_assignment`. - -`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should always plan for assignments from both values of `registry_change` unless the validator can concretely eliminate one of the options. Planning for future assignments involves noting at which future slot one might have to attest and also which shard one should begin syncing (in phase 1+). - -Specifically, a validator should call both `get_committee_assignment(state, next_epoch, validator_index, registry_change=True)` and `get_committee_assignment(state, next_epoch, validator_index, registry_change=False)` when checking for next epoch assignments. +Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments. ## How to avoid slashing diff --git a/tests/phase0/block_processing/test_process_attestation.py b/tests/phase0/block_processing/test_process_attestation.py index b34c64d95..08cab11ff 100644 --- a/tests/phase0/block_processing/test_process_attestation.py +++ b/tests/phase0/block_processing/test_process_attestation.py @@ -43,7 +43,6 @@ def run_attestation_processing(state, attestation, valid=True): else: assert len(post_state.previous_epoch_attestations) == len(state.previous_epoch_attestations) + 1 - return state, post_state From a38e3525cd27559cca9599c7c9cf7199b81b558b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 13:18:18 -0600 Subject: [PATCH 122/133] ensure validator balances are losing when no finality --- tests/phase0/test_sanity.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index b287bde51..3b4497ca5 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -108,6 +108,8 @@ def test_empty_epoch_transition_not_finalizing(state): assert test_state.slot == block.slot assert test_state.finalized_epoch < get_current_epoch(test_state) - 4 + for index in range(len(test_state.validator_registry)): + assert get_balance(test_state, index) < get_balance(state, index) return state, [block], test_state From 63e7346cfbc4b000c28b981710f43b9ec48a284a Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 13:40:19 -0600 Subject: [PATCH 123/133] standaline -> indexed --- specs/core/0_beacon-chain.md | 52 ++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2acf7ddbe..2e2c3ad59 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -28,7 +28,7 @@ - [`Eth1DataVote`](#eth1datavote) - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - - [`StandaloneAttestation`](#standaloneattestation) + - [`IndexedAttestation`](#indexedattestation) - [`DepositData`](#depositdata) - [`BeaconBlockHeader`](#beaconblockheader) - [`Validator`](#validator) @@ -86,8 +86,8 @@ - [`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) - [`verify_bitfield`](#verify_bitfield) - - [`convert_to_standalone`](#convert_to_standalone) - - [`verify_standalone_attestation`](#verify_standalone_attestation) + - 
[`convert_to_indexed`](#convert_to_indexed) + - [`verify_indexed_attestation`](#verify_indexed_attestation) - [`is_double_vote`](#is_double_vote) - [`is_surround_vote`](#is_surround_vote) - [`integer_squareroot`](#integer_squareroot) @@ -370,7 +370,7 @@ The types are defined topologically to aid in facilitating an executable version } ``` -#### `StandaloneAttestation` +#### `IndexedAttestation` ```python { @@ -480,9 +480,9 @@ The types are defined topologically to aid in facilitating an executable version ```python { # First attestation - 'attestation_1': StandaloneAttestation, + 'attestation_1': IndexedAttestation, # Second attestation - 'attestation_2': StandaloneAttestation, + 'attestation_2': IndexedAttestation, } ``` @@ -1148,14 +1148,14 @@ def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: return True ``` -### `convert_to_standalone` +### `convert_to_indexed` ```python -def convert_to_standalone(state: BeaconState, attestation: Attestation): +def convert_to_indexed(state: BeaconState, attestation: Attestation): """ - Convert an attestation to (almost) standalone-verifiable form + Convert an attestation to (almost) indexed-verifiable form """ - return StandaloneAttestation( + return IndexedAttestation( validator_indices=get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield), data=attestation.data, custody_bitfield=attestation.custody_bitfield, @@ -1163,29 +1163,29 @@ def convert_to_standalone(state: BeaconState, attestation: Attestation): ) ``` -### `verify_standalone_attestation` +### `verify_indexed_attestation` ```python -def verify_standalone_attestation(state: BeaconState, standalone_attestation: StandaloneAttestation) -> bool: +def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool: """ - Verify validity of ``standalone_attestation`` fields. + Verify validity of ``indexed_attestation`` fields. 
""" - if standalone_attestation.custody_bitfield != b'\x00' * len(standalone_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] + if indexed_attestation.custody_bitfield != b'\x00' * len(indexed_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] return False - if not (1 <= len(standalone_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): + if not (1 <= len(indexed_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): return False - if standalone_attestation.validator_indices != sorted(standalone_attestation.validator_indices): + if indexed_attestation.validator_indices != sorted(indexed_attestation.validator_indices): return False - if not verify_bitfield(standalone_attestation.custody_bitfield, len(standalone_attestation.validator_indices)): + if not verify_bitfield(indexed_attestation.custody_bitfield, len(indexed_attestation.validator_indices)): return False custody_bit_0_indices = [] custody_bit_1_indices = [] - for i, validator_index in enumerate(standalone_attestation.validator_indices): - if get_bitfield_bit(standalone_attestation.custody_bitfield, i) == 0b0: + for i, validator_index in enumerate(indexed_attestation.validator_indices): + if get_bitfield_bit(indexed_attestation.custody_bitfield, i) == 0b0: custody_bit_0_indices.append(validator_index) else: custody_bit_1_indices.append(validator_index) @@ -1196,11 +1196,11 @@ def verify_standalone_attestation(state: BeaconState, standalone_attestation: St bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_indices]), ], message_hashes=[ - hash_tree_root(AttestationDataAndCustodyBit(data=standalone_attestation.data, custody_bit=0b0)), - hash_tree_root(AttestationDataAndCustodyBit(data=standalone_attestation.data, custody_bit=0b1)), + hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b0)), + hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b1)), ], - signature=standalone_attestation.aggregate_signature, - domain=get_domain(state.fork, slot_to_epoch(standalone_attestation.data.slot), DOMAIN_ATTESTATION), + signature=indexed_attestation.aggregate_signature, + domain=get_domain(state.fork, slot_to_epoch(indexed_attestation.data.slot), DOMAIN_ATTESTATION), ) ``` @@ -2318,8 +2318,8 @@ def process_attester_slashing(state: BeaconState, is_double_vote(attestation1.data, attestation2.data) or is_surround_vote(attestation1.data, attestation2.data) ) - assert verify_standalone_attestation(state, attestation1) - assert verify_standalone_attestation(state, attestation2) + assert verify_indexed_attestation(state, attestation1) + assert verify_indexed_attestation(state, attestation2) slashable_indices = [ index for index in attestation1.validator_indices if ( @@ -2366,7 +2366,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: } # Check signature and bitfields - assert verify_standalone_attestation(state, convert_to_standalone(state, attestation)) + assert verify_indexed_attestation(state, convert_to_indexed(state, attestation)) # Cache pending attestation pending_attestation = PendingAttestation( From 1b975d2ceb669f860b7d7c73f71ad68f939618dc Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 27 Mar 2019 19:23:23 +0600 Subject: [PATCH 124/133] Use signed_root as block id in Honest V guide --- specs/validator/0_beacon-chain-validator.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md 
b/specs/validator/0_beacon-chain-validator.md index 4a4c63836..0d6033acd 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -152,7 +152,7 @@ _Note:_ there might be "skipped" slots between the `parent` and `block`. These s ##### Parent root -Set `block.previous_block_root = hash_tree_root(parent)`. +Set `block.previous_block_root = signed_root(parent)`. ##### State root @@ -255,11 +255,11 @@ Set `attestation_data.shard = shard` where `shard` is the shard associated with ##### Beacon block root -Set `attestation_data.beacon_block_root = hash_tree_root(head_block)`. +Set `attestation_data.beacon_block_root = signed_root(head_block)`. ##### Target root -Set `attestation_data.target_root = hash_tree_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. +Set `attestation_data.target_root = signed_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. _Note:_ This can be looked up in the state using: * Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`. From fbb09795ed3dca6e98eb9ef97c572f4e590293cf Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 27 Mar 2019 08:31:56 -0600 Subject: [PATCH 125/133] fix convert_to_indexed custody bitfield bug --- specs/core/0_beacon-chain.md | 63 +++++++++++++++---- .../test_process_attestation.py | 2 +- tests/phase0/helpers.py | 36 +++++++---- 3 files changed, 78 insertions(+), 23 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2e2c3ad59..0bdfafb79 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -77,6 +77,7 @@ - [`generate_seed`](#generate_seed) - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`verify_merkle_branch`](#verify_merkle_branch) + - [`get_crosslink_committee_for_attestation`](#get_crosslink_committee_for_attestation) - [`get_attestation_participants`](#get_attestation_participants) - [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-) - [`bytes_to_int`](#bytes_to_int) @@ -85,6 +86,7 @@ - [`get_fork_version`](#get_fork_version) - [`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) + - [`set_bitfield_bit`](#set_bitfield_bit) - [`verify_bitfield`](#verify_bitfield) - [`convert_to_indexed`](#convert_to_indexed) - [`verify_indexed_attestation`](#verify_indexed_attestation) @@ -1037,6 +1039,20 @@ def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: return value == root ``` +### `get_crosslink_committee_for_attestation` + +```python +def get_crosslink_committee_for_attestation(state: BeaconState, + attestation_data: AttestationData) -> List[ValidatorIndex]: + # Find the committee in the list with the desired shard + crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) + + assert attestation_data.shard in [shard for _, shard in crosslink_committees] + crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] + + return crosslink_committee +``` + ### `get_attestation_participants` ```python @@ -1046,11 +1062,7 @@ def get_attestation_participants(state: BeaconState, """ Return the participant indices corresponding to ``attestation_data`` and ``bitfield``. 
""" - # Find the committee in the list with the desired shard - crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) - - assert attestation_data.shard in [shard for _, shard in crosslink_committees] - crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] + crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) assert verify_bitfield(bitfield, len(crosslink_committee)) @@ -1060,7 +1072,7 @@ def get_attestation_participants(state: BeaconState, aggregation_bit = get_bitfield_bit(bitfield, i) if aggregation_bit == 0b1: participants.append(validator_index) - return sorted(participants) + return participants ``` ### `int_to_bytes1`, `int_to_bytes2`, ... @@ -1130,6 +1142,22 @@ def get_bitfield_bit(bitfield: bytes, i: int) -> int: return (bitfield[i // 8] >> (i % 8)) % 2 ``` +### `set_bitfield_bit` + +```python +def set_bitfield_bit(bitfield: bytes, i: int) -> int: + """ + Set the bit in ``bitfield`` at position ``i`` to ``1``. + """ + byte_index = i // 8 + bit_index = i % 8 + return ( + bitfield[:byte_index] + + bytes([bitfield[byte_index] | (1 << bit_index)]) + + bitfield[byte_index+1:] + ) +``` + ### `verify_bitfield` ```python @@ -1155,10 +1183,21 @@ def convert_to_indexed(state: BeaconState, attestation: Attestation): """ Convert an attestation to (almost) indexed-verifiable form """ + attesting_indices = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + + # reconstruct custody bitfield for the truncated attesting_indices + custody_bit_1_indices = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) + custody_bitfield = b'\x00' * ((len(attesting_indices) + 7) // 8) + + crosslink_committee = get_crosslink_committee_for_attestation(state, attestation.data) + for i, validator_index in enumerate(crosslink_committee): + if get_bitfield_bit(attestation.custody_bitfield, i): + custody_bitfield = set_bitfield_bit(custody_bitfield, attesting_indices.index(validator_index)) + return IndexedAttestation( - validator_indices=get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield), + validator_indices=attesting_indices, data=attestation.data, - custody_bitfield=attestation.custody_bitfield, + custody_bitfield=custody_bitfield, aggregate_signature=attestation.aggregate_signature ) ``` @@ -1176,9 +1215,6 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA if not (1 <= len(indexed_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): return False - if indexed_attestation.validator_indices != sorted(indexed_attestation.validator_indices): - return False - if not verify_bitfield(indexed_attestation.custody_bitfield, len(indexed_attestation.validator_indices)): return False @@ -2318,6 +2354,11 @@ def process_attester_slashing(state: BeaconState, is_double_vote(attestation1.data, attestation2.data) or is_surround_vote(attestation1.data, attestation2.data) ) + + # check that indices are sorted + assert attestation1.validator_indices == sorted(attestation1.validator_indices) + assert attestation2.validator_indices == sorted(attestation2.validator_indices) + assert verify_indexed_attestation(state, attestation1) assert verify_indexed_attestation(state, attestation2) slashable_indices = [ diff --git a/tests/phase0/block_processing/test_process_attestation.py b/tests/phase0/block_processing/test_process_attestation.py index 08cab11ff..ca6933ce7 
100644 --- a/tests/phase0/block_processing/test_process_attestation.py +++ b/tests/phase0/block_processing/test_process_attestation.py @@ -135,7 +135,7 @@ def test_non_empty_custody_bitfield(state): attestation = get_valid_attestation(state) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - attestation.custody_bitfield = b'\x01' + attestation.custody_bitfield[1:] + attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) pre_state, post_state = run_attestation_processing(state, attestation, False) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index d7f4ae6e8..08ea6ca04 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -22,12 +22,14 @@ from build.phase0.spec import ( get_active_validator_indices, get_attestation_participants, get_block_root, + get_crosslink_committee_for_attestation, get_crosslink_committees_at_slot, get_current_epoch, get_domain, get_empty_block, get_epoch_start_slot, get_genesis_beacon_state, + slot_to_epoch, verify_merkle_branch, hash, ) @@ -248,12 +250,11 @@ def get_valid_attestation(state, slot=None): shard = state.latest_start_shard attestation_data = build_attestation_data(state, slot, shard) - crosslink_committees = get_crosslink_committees_at_slot(state, slot) - crosslink_committee = [committee for committee, _shard in crosslink_committees if _shard == attestation_data.shard][0] + crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) committee_size = len(crosslink_committee) bitfield_length = (committee_size + 7) // 8 - aggregation_bitfield = b'\x01' + b'\x00' * (bitfield_length - 1) + aggregation_bitfield = b'\xC0' + b'\x00' * (bitfield_length - 1) custody_bitfield = b'\x00' * bitfield_length attestation = Attestation( aggregation_bitfield=aggregation_bitfield, @@ -266,23 +267,36 @@ def get_valid_attestation(state, slot=None): attestation.data, attestation.aggregation_bitfield, ) - assert len(participants) == 1 + assert len(participants) == 2 - validator_index = participants[0] - privkey = privkeys[validator_index] + signatures = [] + for validator_index in participants: + privkey = privkeys[validator_index] + signatures.append( + get_attestation_signature( + state, + attestation.data, + privkey + ) + ) + + attestation.aggregation_signature = bls.aggregate_signatures(signatures) + return attestation + + +def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0): message_hash = AttestationDataAndCustodyBit( - data=attestation.data, - custody_bit=0b0, + data=attestation_data, + custody_bit=custody_bit, ).hash_tree_root() - attestation.aggregation_signature = bls.sign( + return bls.sign( message_hash=message_hash, privkey=privkey, domain=get_domain( fork=state.fork, - epoch=get_current_epoch(state), + epoch=slot_to_epoch(attestation_data.slot), domain_type=spec.DOMAIN_ATTESTATION, ) ) - return attestation From b5bf56376bdd2e5e0034965b45c192169575440f Mon Sep 17 00:00:00 2001 From: William M Peaster Date: Wed, 27 Mar 2019 14:00:28 -0500 Subject: [PATCH 126/133] Minor copyediting corrections to 0_beacon-chain.md A handful of minor editing changes made to non-code text for the purposes of improved clarity, consistency, and accuracy. 
--- specs/core/0_beacon-chain.md | 40 ++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2c0bc2554..92acdd70d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1,6 +1,6 @@ # Ethereum 2.0 Phase 0 -- The Beacon Chain -**NOTICE**: This document is a work-in-progress for researchers and implementers. It reflects recent spec changes and takes precedence over the Python proof-of-concept implementation [[python-poc]](#ref-python-poc). +**NOTICE**: This document is a work in progress for researchers and implementers. It reflects recent spec changes and takes precedence over the Python proof-of-concept implementation [[python-poc]](#ref-python-poc). ## Table of contents @@ -149,9 +149,9 @@ This document represents the specification for Phase 0 of Ethereum 2.0 -- The Beacon Chain. -At the core of Ethereum 2.0 is a system chain called the "beacon chain". The beacon chain stores and manages the registry of [validators](#dfn-validator). In the initial deployment phases of Ethereum 2.0 the only mechanism to become a [validator](#dfn-validator) is to make a one-way ETH transaction to a deposit contract on Ethereum 1.0. Activation as a [validator](#dfn-validator) happens when Ethereum 1.0 deposit receipts are processed by the beacon chain, the activation balance is reached, and after a queuing process. Exit is either voluntary or done forcibly as a penalty for misbehavior. +At the core of Ethereum 2.0 is a system chain called the "beacon chain". The beacon chain stores and manages the registry of [validators](#dfn-validator). In the initial deployment phases of Ethereum 2.0, the only mechanism to become a [validator](#dfn-validator) is to make a one-way ETH transaction to a deposit contract on Ethereum 1.0. Activation as a [validator](#dfn-validator) happens when Ethereum 1.0 deposit receipts are processed by the beacon chain, the activation balance is reached, and a queuing process is completed. Exit is either voluntary or done forcibly as a penalty for misbehavior. -The primary source of load on the beacon chain is "attestations". Attestations are availability votes for a shard block, and simultaneously proof of stake votes for a beacon block. A sufficient number of attestations for the same shard block create a "crosslink", confirming the shard segment up to that shard block into the beacon chain. Crosslinks also serve as infrastructure for asynchronous cross-shard communication. +The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block and proof-of-stake votes for a beacon block. A sufficient number of attestations for the same shard block create a "crosslink", confirming the shard segment up to that shard block into the beacon chain. Crosslinks also serve as infrastructure for asynchronous cross-shard communication. ## Notation @@ -159,20 +159,20 @@ Code snippets appearing in `this style` are to be interpreted as Python code. ## Terminology -* **Validator** - a registered participant in the beacon chain. You can become one by sending Ether into the Ethereum 1.0 deposit contract. +* **Validator** - a registered participant in the beacon chain. You can become one by sending ether into the Ethereum 1.0 deposit contract. * **Active validator** - an active participant in the Ethereum 2.0 consensus invited to, among other things, propose and attest to blocks and vote for crosslinks. 
* **Committee** - a (pseudo-) randomly sampled subset of [active validators](#dfn-active-validator). When a committee is referred to collectively, as in "this committee attests to X", this is assumed to mean "some subset of that committee that contains enough [validators](#dfn-validator) that the protocol recognizes it as representing the committee". -* **Proposer** - the [validator](#dfn-validator) that creates a beacon chain block +* **Proposer** - the [validator](#dfn-validator) that creates a beacon chain block. * **Attester** - a [validator](#dfn-validator) that is part of a committee that needs to sign off on a beacon chain block while simultaneously creating a link (crosslink) to a recent shard block on a particular shard chain. * **Beacon chain** - the central PoS chain that is the base of the sharding system. * **Shard chain** - one of the chains on which user transactions take place and account data is stored. * **Block root** - a 32-byte Merkle root of a beacon chain block or shard chain block. Previously called "block hash". -* **Crosslink** - a set of signatures from a committee attesting to a block in a shard chain, which can be included into the beacon chain. Crosslinks are the main means by which the beacon chain "learns about" the updated state of shard chains. -* **Slot** - a period during which one proposer has the ability to create a beacon chain block and some attesters have the ability to make attestations -* **Epoch** - an aligned span of slots during which all [validators](#dfn-validator) get exactly one chance to make an attestation -* **Finalized**, **justified** - see Casper FFG finalization [[casper-ffg]](#ref-casper-ffg) -* **Withdrawal period** - the number of slots between a [validator](#dfn-validator) exit and the [validator](#dfn-validator) balance being withdrawable -* **Genesis time** - the Unix time of the genesis beacon chain block at slot 0 +* **Crosslink** - a set of signatures from a committee attesting to a block in a shard chain that can be included into the beacon chain. Crosslinks are the main means by which the beacon chain "learns about" the updated state of shard chains. +* **Slot** - a period during which one proposer has the ability to create a beacon chain block and some attesters have the ability to make attestations. +* **Epoch** - an aligned span of slots during which all [validators](#dfn-validator) get exactly one chance to make an attestation. +* **Finalized**, **justified** - see Casper FFG finalization [[casper-ffg]](#ref-casper-ffg). +* **Withdrawal period** - the number of slots between a [validator](#dfn-validator) exit and the [validator](#dfn-validator) balance being withdrawable. +* **Genesis time** - the Unix time of the genesis beacon chain block at slot 0. ## Constants @@ -871,7 +871,7 @@ def compute_committee(validator_indices: List[ValidatorIndex], ] ``` -**Note**: this definition and the next few definitions are highly inefficient as algorithms as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. +**Note**: this definition and the next few definitions are highly inefficient as algorithms, as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. 
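As a rough illustration of the memoization the note above recommends (not part of the spec; the cache key layout and the `compute_fn` callback are placeholders), a client can reuse shuffling results because they are fully determined by their inputs:

```python
# Non-normative sketch: committee shuffles depend only on (seed, epoch),
# so an implementation can compute them once and serve repeated lookups
# from a cache instead of re-evaluating the helper definitions each time.
from typing import Callable, Dict, List, Tuple

CommitteeList = List[List[int]]

class CommitteeCache:
    def __init__(self) -> None:
        self._cache: Dict[Tuple[bytes, int], CommitteeList] = {}

    def get(self, seed: bytes, epoch: int,
            compute_fn: Callable[[bytes, int], CommitteeList]) -> CommitteeList:
        key = (seed, epoch)
        if key not in self._cache:
            # compute_fn stands in for the expensive spec-level shuffle
            self._cache[key] = compute_fn(seed, epoch)
        return self._cache[key]

# Usage: committees = cache.get(seed, epoch, expensive_shuffle)
```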
### `get_current_epoch_committee_count` @@ -1426,7 +1426,7 @@ Every Ethereum 1.0 deposit, of size between `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSI ### `Eth2Genesis` log -When sufficiently many full deposits have been made the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined below) where: +When a sufficient amount of full deposits have been made, the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined below) where: * `genesis_time` equals `time` in the `Eth2Genesis` log * `latest_eth1_data.deposit_root` equals `deposit_root` in the `Eth2Genesis` log @@ -1557,13 +1557,13 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], ## Beacon chain processing -The beacon chain is the system chain for Ethereum 2.0. The main responsibilities of the beacon chain are: +The beacon chain is the system chain for Ethereum 2.0. The main responsibilities of the beacon chain are as follows: * Store and maintain the registry of [validators](#dfn-validator) * Process crosslinks (see above) * Process its per-block consensus, as well as the finality gadget -Processing the beacon chain is similar to processing the Ethereum 1.0 chain. Clients download and process blocks, and maintain a view of what is the current "canonical chain", terminating at the current "head". However, because of the beacon chain's relationship with Ethereum 1.0, and because it is a proof-of-stake chain, there are differences. +Processing the beacon chain is similar to processing the Ethereum 1.0 chain. Clients download and process blocks and maintain a view of what is the current "canonical chain", terminating at the current "head". However, because of the beacon chain's relationship with Ethereum 1.0, and because it is a proof-of-stake chain, there are differences. For a beacon chain block, `block`, to be processed by a node, the following conditions must be met: @@ -1573,7 +1573,7 @@ For a beacon chain block, `block`, to be processed by a node, the following cond If these conditions are not met, the client should delay processing the beacon block until the conditions are all satisfied. -Beacon block production is significantly different because of the proof of stake mechanism. A client simply checks what it thinks is the canonical chain when it should create a block, and looks up what its slot number is; when the slot arrives, it either proposes or attests to a block as required. Note that this requires each node to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. +Beacon block production is significantly different because of the proof-of-stake mechanism. A client simply checks what it thinks is the canonical chain when it should create a block and looks up what its slot number is; when the slot arrives, it either proposes or attests to a block as required. Note that this dynamic requires each node to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. ### Beacon chain fork choice rule @@ -1635,7 +1635,7 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) ## Beacon chain state transition function -We now define the state transition function. At a high level the state transition is made up of four parts: +We now define the state transition function. 
At a high level, the state transition is made up of four parts: 1. State caching, which happens at the start of every slot. 2. The per-epoch transitions, which happens at the start of the first slot of every epoch. @@ -1643,7 +1643,7 @@ We now define the state transition function. At a high level the state transitio 4. The per-block transitions, which happens at every block. Transition section notes: -* The state caching, caches the state root of the previous slot. +* The state caching caches the state root of the previous slot. * The per-epoch transitions focus on the [validator](#dfn-validator) registry, including adjusting balances and activating and exiting [validators](#dfn-validator), as well as processing crosslinks and managing block justification/finalization. * The per-slot transitions focus on the slot counter and block roots records updates. * The per-block transitions generally focus on verifying aggregate signatures and saving temporary records relating to the per-block activity in the `BeaconState`. @@ -1876,7 +1876,7 @@ def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_sin return get_base_reward(state, index) + extra_penalty ``` -Note: When applying penalties in the following balance recalculations implementers should make sure the `uint64` does not underflow. +Note: When applying penalties in the following balance recalculations, implementers should make sure the `uint64` does not underflow. ##### Justification and finalization @@ -2430,7 +2430,7 @@ def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: # References -This section is divided into Normative and Informative references. Normative references are those that must be read in order to implement this specification, while Informative references are merely that, information. An example of the former might be the details of a required consensus algorithm, and an example of the latter might be a pointer to research that demonstrates why a particular consensus algorithm might be better suited for inclusion in the standard than another. +This section is divided into Normative and Informative references. Normative references are those that must be read in order to implement this specification, while Informative references are merely that, informative. An example of the former might be the details of a required consensus algorithm, and an example of the latter might be a pointer to research that demonstrates why a particular consensus algorithm might be better suited for inclusion in the standard than another. ## Normative From 458eb9913364792fef95d78609ffb83fe01cd83e Mon Sep 17 00:00:00 2001 From: William M Peaster Date: Wed, 27 Mar 2019 14:15:50 -0500 Subject: [PATCH 127/133] Minor copyedits to 0_beacon-chain.md Approximately a dozen minor copyediting fixes of non-code text for improved clarity, consistency, and accuracy. --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 92acdd70d..cf6527ad1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2430,7 +2430,7 @@ def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: # References -This section is divided into Normative and Informative references. Normative references are those that must be read in order to implement this specification, while Informative references are merely that, informative. 
An example of the former might be the details of a required consensus algorithm, and an example of the latter might be a pointer to research that demonstrates why a particular consensus algorithm might be better suited for inclusion in the standard than another. +This section is divided into Normative and Informative references. Normative references are those that must be read in order to implement this specification, while Informative references are merely helpful information. An example of the former might be the details of a required consensus algorithm, and an example of the latter might be a pointer to research that demonstrates why a particular consensus algorithm might be better suited for inclusion in the standard than another. ## Normative From 1f657cfec50b1c41e53a9183193047fc420d3d8d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 28 Mar 2019 11:26:04 -0600 Subject: [PATCH 128/133] remove custody_bitfield from indexedattestation. add two separate arrays for 0 and 1 bit --- specs/core/0_beacon-chain.md | 45 +++++++++++++----------------------- 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 0bdfafb79..057772293 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -377,11 +377,10 @@ The types are defined topologically to aid in facilitating an executable version ```python { # Validator indices - 'validator_indices': ['uint64'], + 'custody_bit_0_indices': ['uint64'], + 'custody_bit_1_indices': ['uint64'], # Attestation data 'data': AttestationData, - # Custody bitfield - 'custody_bitfield': 'bytes', # Aggregate signature 'aggregate_signature': 'bytes96', } @@ -1060,7 +1059,7 @@ def get_attestation_participants(state: BeaconState, attestation_data: AttestationData, bitfield: bytes) -> List[ValidatorIndex]: """ - Return the participant indices corresponding to ``attestation_data`` and ``bitfield``. + Return the sorted participant indices corresponding to ``attestation_data`` and ``bitfield``. """ crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) @@ -1072,7 +1071,7 @@ def get_attestation_participants(state: BeaconState, aggregation_bit = get_bitfield_bit(bitfield, i) if aggregation_bit == 0b1: participants.append(validator_index) - return participants + return sorted(participants) ``` ### `int_to_bytes1`, `int_to_bytes2`, ... 
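The hunk above changes `get_attestation_participants` to return its participants sorted. As a standalone illustration with made-up committee values (not spec code), bit `i` of the bitfield (read least-significant-bit first within each byte, as in `get_bitfield_bit`) selects the `i`-th member of the crosslink committee:

```python
# Illustration only: extracting sorted participants from an aggregation
# bitfield, mirroring get_bitfield_bit / get_attestation_participants.
def bitfield_bit(bitfield: bytes, i: int) -> int:
    # Same indexing as the spec's get_bitfield_bit: byte i // 8, bit i % 8
    return (bitfield[i // 8] >> (i % 8)) % 2

committee = [14, 3, 27, 9]        # hypothetical crosslink committee
bitfield = bytes([0b00000101])    # bits 0 and 2 set -> members 14 and 27

participants = sorted(
    validator_index
    for i, validator_index in enumerate(committee)
    if bitfield_bit(bitfield, i) == 0b1
)
assert participants == [14, 27]
```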
@@ -1184,20 +1183,13 @@ def convert_to_indexed(state: BeaconState, attestation: Attestation): Convert an attestation to (almost) indexed-verifiable form """ attesting_indices = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) - - # reconstruct custody bitfield for the truncated attesting_indices custody_bit_1_indices = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) - custody_bitfield = b'\x00' * ((len(attesting_indices) + 7) // 8) - - crosslink_committee = get_crosslink_committee_for_attestation(state, attestation.data) - for i, validator_index in enumerate(crosslink_committee): - if get_bitfield_bit(attestation.custody_bitfield, i): - custody_bitfield = set_bitfield_bit(custody_bitfield, attesting_indices.index(validator_index)) + custody_bit_0_indices = [index for index in attesting_indices if index not in custody_bit_1_indices] return IndexedAttestation( - validator_indices=attesting_indices, + custody_bit_0_indices=custody_bit_0_indices, + custody_bit_1_indices=custody_bit_1_indices, data=attestation.data, - custody_bitfield=custody_bitfield, aggregate_signature=attestation.aggregate_signature ) ``` @@ -1209,22 +1201,21 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA """ Verify validity of ``indexed_attestation`` fields. """ - if indexed_attestation.custody_bitfield != b'\x00' * len(indexed_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] + custody_bit_0_indices = indexed_attestation.custody_bit_0_indices + custody_bit_1_indices = indexed_attestation.custody_bit_1_indices + + if len(custody_bit_1_indices) > 0: # [TO BE REMOVED IN PHASE 1] return False - if not (1 <= len(indexed_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): + total_attesting_indices = len(custody_bit_0_indices + custody_bit_1_indices) + if not (1 <= total_attesting_indices <= MAX_ATTESTATION_PARTICIPANTS): return False - if not verify_bitfield(indexed_attestation.custody_bitfield, len(indexed_attestation.validator_indices)): + if custody_bit_0_indices != sorted(custody_bit_0_indices): return False - custody_bit_0_indices = [] - custody_bit_1_indices = [] - for i, validator_index in enumerate(indexed_attestation.validator_indices): - if get_bitfield_bit(indexed_attestation.custody_bitfield, i) == 0b0: - custody_bit_0_indices.append(validator_index) - else: - custody_bit_1_indices.append(validator_index) + if custody_bit_1_indices != sorted(custody_bit_1_indices): + return False return bls_verify_multiple( pubkeys=[ @@ -2355,10 +2346,6 @@ def process_attester_slashing(state: BeaconState, is_surround_vote(attestation1.data, attestation2.data) ) - # check that indices are sorted - assert attestation1.validator_indices == sorted(attestation1.validator_indices) - assert attestation2.validator_indices == sorted(attestation2.validator_indices) - assert verify_indexed_attestation(state, attestation1) assert verify_indexed_attestation(state, attestation2) slashable_indices = [ From ba47a8f4c44adebf613f5507ca48d022141a389c Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 28 Mar 2019 11:28:38 -0600 Subject: [PATCH 129/133] remove unused set_bitfield_bit hlper --- specs/core/0_beacon-chain.md | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 057772293..8363d9b22 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -86,7 +86,6 @@ - [`get_fork_version`](#get_fork_version) - 
[`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) - - [`set_bitfield_bit`](#set_bitfield_bit) - [`verify_bitfield`](#verify_bitfield) - [`convert_to_indexed`](#convert_to_indexed) - [`verify_indexed_attestation`](#verify_indexed_attestation) @@ -1141,22 +1140,6 @@ def get_bitfield_bit(bitfield: bytes, i: int) -> int: return (bitfield[i // 8] >> (i % 8)) % 2 ``` -### `set_bitfield_bit` - -```python -def set_bitfield_bit(bitfield: bytes, i: int) -> int: - """ - Set the bit in ``bitfield`` at position ``i`` to ``1``. - """ - byte_index = i // 8 - bit_index = i % 8 - return ( - bitfield[:byte_index] + - bytes([bitfield[byte_index] | (1 << bit_index)]) + - bitfield[byte_index+1:] - ) -``` - ### `verify_bitfield` ```python From eb229089c842cac0445ab5393fe04b28c552b0ce Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 28 Mar 2019 11:31:12 -0600 Subject: [PATCH 130/133] lint --- tests/phase0/helpers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 08ea6ca04..e5e335d80 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -280,7 +280,6 @@ def get_valid_attestation(state, slot=None): ) ) - attestation.aggregation_signature = bls.aggregate_signatures(signatures) return attestation From 66d5026ffe53e3473ee5f1bd3b3d81a5a8f316e8 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 28 Mar 2019 13:15:38 -0600 Subject: [PATCH 131/133] minor copy edit --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index cf6527ad1..3a4de1973 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1573,7 +1573,7 @@ For a beacon chain block, `block`, to be processed by a node, the following cond If these conditions are not met, the client should delay processing the beacon block until the conditions are all satisfied. -Beacon block production is significantly different because of the proof-of-stake mechanism. A client simply checks what it thinks is the canonical chain when it should create a block and looks up what its slot number is; when the slot arrives, it either proposes or attests to a block as required. Note that this dynamic requires each node to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. +Beacon block production is significantly different because of the proof-of-stake mechanism. A client simply checks what it thinks is the canonical chain when it should create a block and looks up what its slot number is; when the slot arrives, it either proposes or attests to a block as required. Note that this requires each node to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. ### Beacon chain fork choice rule From 1082c68fef660fc66f078ad5442a0065e03e3e71 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 28 Mar 2019 22:54:39 +0000 Subject: [PATCH 132/133] Separate document for phase 1 custody game (#818) The 1-round custody game has been implemented. Many bugs squashed, and a bunch of polishing done. Miscellaneous known issues (~8 of them) to be resolved in separate, smaller, PRs. 
--- specs/core/1_custody-game.md | 499 +++++++++++++++++++++++++++++++++++ 1 file changed, 499 insertions(+) create mode 100644 specs/core/1_custody-game.md diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md new file mode 100644 index 000000000..fd754634e --- /dev/null +++ b/specs/core/1_custody-game.md @@ -0,0 +1,499 @@ +# Ethereum 2.0 Phase 1 -- Custody Game + +**NOTICE**: This spec is a work-in-progress for researchers and implementers. + +## Table of contents + + + +- [Ethereum 2.0 Phase 1 -- Custody Game](#ethereum-20-phase-1----custody-game) + - [Table of contents](#table-of-contents) + - [Introduction](#introduction) + - [Terminology](#terminology) + - [Constants](#constants) + - [Misc](#misc) + - [Time parameters](#time-parameters) + - [Max transactions per block](#max-transactions-per-block) + - [Signature domains](#signature-domains) + - [Data structures](#data-structures) + - [Custody objects](#custody-objects) + - [`CustodyChunkChallenge`](#custodychunkchallenge) + - [`CustodyBitChallenge`](#custodybitchallenge) + - [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord) + - [`CustodyBitChallengeRecord`](#custodybitchallengerecord) + - [`CustodyResponse`](#custodyresponse) + - [`CustodyKeyReveal`](#custodykeyreveal) + - [Phase 0 container updates](#phase-0-container-updates) + - [`Validator`](#validator) + - [`BeaconState`](#beaconstate) + - [`BeaconBlockBody`](#beaconblockbody) + - [Helpers](#helpers) + - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count) + - [`get_custody_chunk_bit`](#get_custody_chunk_bit) + - [`epoch_to_custody_period`](#epoch_to_custody_period) + - [`verify_custody_key`](#verify_custody_key) + - [Per-block processing](#per-block-processing) + - [Transactions](#transactions) + - [Custody reveals](#custody-reveals) + - [Chunk challenges](#chunk-challenges) + - [Bit challenges](#bit-challenges) + - [Custody responses](#custody-responses) + - [Per-epoch processing](#per-epoch-processing) + + + +## Introduction + +This document details the beacon chain additions and changes in Phase 1 of Ethereum 2.0 to support the shard data custody game, building upon the [phase 0](0_beacon-chain.md) specification. 
+ +## Terminology + +* **Custody game**: +* **Custody period**: +* **Custody chunk**: +* **Custody chunk bit**: +* **Custody chunk challenge**: +* **Custody bit**: +* **Custody bit challenge**: +* **Custody key**: +* **Custody key reveal**: +* **Custody key mask**: +* **Custody response**: +* **Custody response deadline**: + +## Constants + +### Misc + +| Name | Value | +| - | - | +| `BYTES_PER_SHARD_BLOCK` | `2**14` (= 16,384) | +| `BYTES_PER_CUSTODY_CHUNK` | `2**9` (= 512) | +| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) | + +### Time parameters + +| Name | Value | Unit | Duration | +| - | - | :-: | :-: | +| `MAX_CHUNK_CHALLENGE_DELAY` | `2**11` (= 2,048) | epochs | ~9 days | +| `EPOCHS_PER_CUSTODY_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days | +| `CUSTODY_RESPONSE_DEADLINE` | `2**14` (= 16,384) | epochs | ~73 days | + +### Max transactions per block + +| Name | Value | +| - | - | +| `MAX_CUSTODY_KEY_REVEALS` | `2**4` (= 16) | +| `MAX_CUSTODY_CHUNK_CHALLENGES` | `2**2` (= 4) | +| `MAX_CUSTODY_BIT_CHALLENGES` | `2**2` (= 4) | +| `MAX_CUSTODY_RESPONSES` | `2**5` (= 32) | + +### Signature domains + +| Name | Value | +| - | - | +| `DOMAIN_CUSTODY_KEY_REVEAL` | `6` | +| `DOMAIN_CUSTODY_BIT_CHALLENGE` | `7` | + +## Data structures + +### Custody objects + +#### `CustodyChunkChallenge` + +```python +{ + 'responder_index': ValidatorIndex, + 'attestation': Attestation, + 'chunk_index': 'uint64', +} +``` + +#### `CustodyBitChallenge` + +```python +{ + 'responder_index': ValidatorIndex, + 'attestation': Attestation, + 'challenger_index': ValidatorIndex, + 'responder_key': BLSSignature, + 'chunk_bits': Bitfield, + 'signature': BLSSignature, +} +``` + +#### `CustodyChunkChallengeRecord` + +```python +{ + 'challenge_index': 'uint64', + 'challenger_index': ValidatorIndex, + 'responder_index': ValidatorIndex, + 'deadline': Epoch, + 'crosslink_data_root': Hash, + 'depth': 'uint64', + 'chunk_index': 'uint64', +} +``` + +#### `CustodyBitChallengeRecord` + +```python +{ + 'challenge_index': 'uint64', + 'challenger_index': ValidatorIndex, + 'responder_index': ValidatorIndex, + 'deadline': Epoch, + 'crosslink_data_root': Hash, + 'chunk_bits': Bitfield, + 'responder_key': BLSSignature, +} +``` + +#### `CustodyResponse` + +```python +{ + 'challenge_index': 'uint64', + 'chunk_index': 'uint64', + 'chunk': ['byte', BYTES_PER_CUSTODY_CHUNK], + 'branch': [Hash], +} +``` + +#### `CustodyKeyReveal` + +```python +{ + 'revealer_index': ValidatorIndex, + 'period': 'uint64', + 'key': BLSSignature, + 'masker_index': ValidatorIndex, + 'mask': Hash, +} +``` + +### Phase 0 container updates + +Add the following fields to the end of the specified container objects. Fields with underlying type `uint64` are initialized to `0` and list fields are initialized to `[]`. 
+ +#### `Validator` + +```python + 'custody_reveal_index': 'uint64', + 'max_reveal_lateness': 'uint64', +``` + +#### `BeaconState` + +```python + 'custody_chunk_challenge_records': [CustodyChunkChallengeRecord], + 'custody_bit_challenge_records': [CustodyBitChallengeRecord], + 'custody_challenge_index': 'uint64', +``` + +#### `BeaconBlockBody` + +```python + 'custody_key_reveals': [CustodyKeyReveal], + 'custody_chunk_challenges': [CustodyChunkChallenge], + 'custody_bit_challenges': [CustodyBitChallenge], + 'custody_responses': [CustodyResponse], +``` + +## Helpers + +### `get_crosslink_chunk_count` + +```python +def get_custody_chunk_count(attestation: Attestation) -> int: + crosslink_start_epoch = attestation.data.latest_crosslink.epoch + crosslink_end_epoch = slot_to_epoch(attestation.data.slot) + crosslink_crosslink_length = min(MAX_CROSSLINK_EPOCHS, end_epoch - start_epoch) + chunks_per_epoch = 2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK + return crosslink_crosslink_length * chunks_per_epoch +``` + +### `get_custody_chunk_bit` + +```python +def get_custody_chunk_bit(key: BLSSignature, chunk: bytes) -> bool: + # TODO: Replace with something MPC-friendly, e.g. the Legendre symbol + return get_bitfield_bit(hash(challenge.responder_key + chunk), 0) +``` + +### `epoch_to_custody_period` + +```python +def epoch_to_custody_period(epoch: Epoch) -> int: + return epoch // EPOCHS_PER_CUSTODY_PERIOD +``` + +### `verify_custody_key` + +```python +def verify_custody_key(state: BeaconState, reveal: CustodyKeyReveal) -> bool: + # Case 1: non-masked non-punitive non-early reveal + pubkeys = [state.validator_registry[reveal.revealer_index].pubkey] + message_hashes = [hash_tree_root(reveal.period)] + + # Case 2: masked punitive early reveal + # Masking prevents proposer stealing the whistleblower reward + # Secure under the aggregate extraction infeasibility assumption + # See pages 11-12 of https://crypto.stanford.edu/~dabo/pubs/papers/aggreg.pdf + if reveal.mask != ZERO_HASH: + pubkeys.append(state.validator_registry[reveal.masker_index].pubkey) + message_hashes.append(reveal.mask) + + return bls_verify_multiple( + pubkeys=pubkeys, + message_hashes=message_hashes, + signature=reveal.key, + domain=get_domain( + fork=state.fork, + epoch=reveal.period * EPOCHS_PER_CUSTODY_PERIOD, + domain_type=DOMAIN_CUSTODY_KEY_REVEAL, + ), + ) +``` + +## Per-block processing + +### Transactions + +Add the following transactions to the per-block processing, in order the given below and after all other transactions in phase 0. + +#### Custody reveals + +Verify that `len(block.body.custody_key_reveals) <= MAX_CUSTODY_KEY_REVEALS`. 
+ +For each `reveal` in `block.body.custody_key_reveals`, run the following function: + +```python +def process_custody_reveal(state: BeaconState, + reveal: CustodyKeyReveal) -> None: + assert verify_custody_key(state, reveal) + revealer = state.validator_registry[reveal.revealer_index] + current_custody_period = epoch_to_custody_period(get_current_epoch(state)) + + # Case 1: non-masked non-punitive non-early reveal + if reveal.mask == ZERO_HASH: + assert reveal.period == epoch_to_custody_period(revealer.activation_epoch) + revealer.custody_reveal_index + # Revealer is active or exited + assert is_active_validator(revealer, get_current_epoch(state)) or revealer.exit_epoch > get_current_epoch(state) + revealer.custody_reveal_index += 1 + revealer.max_reveal_lateness = max(revealer.max_reveal_lateness, current_custody_period - reveal.period) + proposer_index = get_beacon_proposer_index(state, state.slot) + increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) + + # Case 2: masked punitive early reveal + else: + assert reveal.period > current_custody_period + assert revealer.slashed is False + slash_validator(state, reveal.revealer_index, reveal.masker_index) +``` + +#### Chunk challenges + +Verify that `len(block.body.custody_chunk_challenges) <= MAX_CUSTODY_CHUNK_CHALLENGES`. + +For each `challenge` in `block.body.custody_chunk_challenges`, run the following function: + +```python +def process_chunk_challenge(state: BeaconState, + challenge: CustodyChunkChallenge) -> None: + # Verify the attestation + assert verify_standalone_attestation(state, convert_to_standalone(state, challenge.attestation)) + # Verify it is not too late to challenge + assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY + responder = state.validator_registry[challenge.responder_index] + assert responder.exit_epoch >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY + # Verify the responder participated in the attestation + attesters = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + assert challenge.responder_index in attesters + # Verify the challenge is not a duplicate + for record in state.custody_chunk_challenge_records: + assert ( + record.crosslink_data_root != challenge.attestation.data.crosslink_data_root or + record.chunk_index != challenge.chunk_index + ) + # Verify depth + depth = math.log2(next_power_of_two(get_custody_chunk_count(challenge.attestation))) + assert challenge.chunk_index < 2**depth + # Add new chunk challenge record + state.custody_chunk_challenge_records.append(CustodyChunkChallengeRecord( + challenge_index=state.custody_challenge_index, + challenger_index=get_beacon_proposer_index(state, state.slot), + responder_index=challenge.responder_index + deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE, + crosslink_data_root=challenge.attestation.data.crosslink_data_root, + depth=depth, + chunk_index=challenge.chunk_index, + )) + state.custody_challenge_index += 1 + # Postpone responder withdrawability + responder.withdrawable_epoch = FAR_FUTURE_EPOCH +``` + +#### Bit challenges + +Verify that `len(block.body.custody_bit_challenges) <= MAX_CUSTODY_BIT_CHALLENGES`. 
+ +For each `challenge` in `block.body.custody_bit_challenges`, run the following function: + +```python +def process_bit_challenge(state: BeaconState, + challenge: CustodyBitChallenge) -> None: + # Verify challenge signature + challenger = state.validator_registry[challenge.challenger_index] + assert bls_verify( + pubkey=challenger.pubkey, + message_hash=signed_root(challenge), + signature=challenge.signature, + domain=get_domain(state, get_current_epoch(state), DOMAIN_CUSTODY_BIT_CHALLENGE), + ) + # Verify the challenger is not slashed + assert challenger.slashed is False + # Verify the attestation + assert verify_standalone_attestation(state, convert_to_standalone(state, challenge.attestation)) + # Verify the attestation is eligible for challenging + responder = state.validator_registry[challenge.responder_index] + min_challengeable_epoch = responder.exit_epoch - EPOCHS_PER_CUSTODY_PERIOD * (1 + responder.max_reveal_lateness) + assert min_challengeable_epoch <= slot_to_epoch(challenge.attestation.data.slot) + # Verify the responder participated in the attestation + attesters = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + assert challenge.responder_index in attesters + # A validator can be the challenger or responder for at most one challenge at a time + for record in state.custody_bit_challenge_records: + assert record.challenger_index != challenge.challenger_index + assert record.responder_index != challenge.responder_index + # Verify the responder key + assert verify_custody_key(state, CustodyKeyReveal( + revealer_index=challenge.responder_index, + period=epoch_to_custody_period(slot_to_epoch(attestation.data.slot)), + key=challenge.responder_key, + masker_index=0, + mask=ZERO_HASH, + )) + # Verify the chunk count + chunk_count = get_custody_chunk_count(challenge.attestation) + assert verify_bitfield(challenge.chunk_bits, chunk_count) + # Verify the xor of the chunk bits does not equal the custody bit + chunk_bits_xor = 0b0 + for i in range(chunk_count): + chunk_bits_xor ^ get_bitfield_bit(challenge.chunk_bits, i) + custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(responder_index)) + assert custody_bit != chunk_bits_xor + # Add new bit challenge record + state.custody_bit_challenge_records.append(CustodyBitChallengeRecord( + challenge_index=state.custody_challenge_index, + challenger_index=challenge.challenger_index, + responder_index=challenge.responder_index, + deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE + crosslink_data_root=challenge.attestation.crosslink_data_root, + chunk_bits=challenge.chunk_bits, + responder_key=challenge.responder_key, + )) + state.custody_challenge_index += 1 + # Postpone responder withdrawability + responder.withdrawable_epoch = FAR_FUTURE_EPOCH +``` + +#### Custody responses + +Verify that `len(block.body.custody_responses) <= MAX_CUSTODY_RESPONSES`. 
+ +For each `response` in `block.body.custody_responses`, run the following function: + +```python +def process_custody_response(state: BeaconState, + response: CustodyResponse) -> None: + chunk_challenge = next(record for record in state.custody_chunk_challenge_records if record.challenge_index == response.challenge_index, None) + if chunk_challenge is not None: + return process_chunk_challenge_response(state, response, chunk_challenge) + + bit_challenge = next(record for record in state.custody_bit_challenge_records if record.challenge_index == response.challenge_index, None) + if bit_challenge is not None: + return process_bit_challenge_response(state, response, bit_challenge) + + assert False +``` + +```python +def process_chunk_challenge_response(state: BeaconState, + response: CustodyResponse, + challenge: CustodyChunkChallengeRecord) -> None: + # Verify chunk index + assert response.chunk_index == challenge.chunk_index + # Verify the chunk matches the crosslink data root + assert verify_merkle_branch( + leaf=hash_tree_root(response.chunk), + branch=response.branch, + depth=challenge.depth, + index=response.chunk_index, + root=challenge.crosslink_data_root, + ) + # Clear the challenge + state.custody_chunk_challenge_records.remove(challenge) + # Reward the proposer + proposer_index = get_beacon_proposer_index(state, state.slot) + increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) +``` + +```python +def process_bit_challenge_response(state: BeaconState, + response: CustodyResponse, + challenge: CustodyBitChallengeRecord) -> None: + # Verify chunk index + assert response.chunk_index < len(challenge.chunk_bits) + # Verify the chunk matches the crosslink data root + assert verify_merkle_branch( + leaf=hash_tree_root(response.chunk), + branch=response.branch, + depth=math.log2(next_power_of_two(len(challenge.chunk_bits))), + index=response.chunk_index, + root=challenge.crosslink_data_root, + ) + # Verify the chunk bit does not match the challenge chunk bit + assert get_custody_chunk_bit(challenge.responder_key, response.chunk) != get_bitfield_bit(challenge.chunk_bits, response.chunk_index) + # Clear the challenge + state.custody_bit_challenge_records.remove(challenge) + # Slash challenger + slash_validator(state, challenge.challenger_index, challenge.responder_index) +``` + +## Per-epoch processing + +Run `process_challenge_deadlines(state)` immediately after `process_ejections(state)`: + +```python +def process_challenge_deadlines(state: BeaconState) -> None: + for challenge in state.custody_chunk_challenge_records: + if get_current_epoch(state) > challenge.deadline: + slash_validator(state, challenge.responder_index, challenge.challenger_index) + state.custody_chunk_challenge_records.remove(challenge) + + for challenge in state.custody_bit_challenge_records: + if get_current_epoch(state) > challenge.deadline: + slash_validator(state, challenge.responder_index, challenge.challenger_index) + state.custody_bit_challenge_records.remove(challenge) +``` + +In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope): + +```python +def eligible(index): + validator = state.validator_registry[index] + # Cannot exit if there are still open chunk challenges + if len([record for record in state.custody_chunk_challenge_records if record.responder_index == index]) > 0: + return False + # Cannot exit if you have not revealed all of your custody 
keys + elif epoch_to_custody_period(revealer.activation_epoch) + validator.custody_reveal_index <= epoch_to_custody_period(validator.exit_epoch): + return False + # Cannot exit if you already have + elif validator.withdrawable_epoch < FAR_FUTURE_EPOCH: + return False + # Return minimum time + else: + return current_epoch >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWAL_EPOCHS +``` From f5c5c166af0caa7d451e5e74d8c565dca3ea4cee Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 28 Mar 2019 17:56:43 -0500 Subject: [PATCH 133/133] Replace custody challenge game with JABS (#812) See also #818. === * Replace custody challenge game with JABS Replace the existing proof of custody game with a new game ("Justin's Awesome Bit Sum" or JABS) that works as follows: * The data `D` is split up into 512-byte chunks `D[0] .... D[n-1]`, and use a mix function `mix(subkey, data) -> {0,1}` (currently the first bit of the hash of `subkey+data`). We calculate `M[i] = (mix(D[0]) + ... + mix(D[i-1])) % 2`, and set the custody bit to `M[n-1]` * Anyone can challenge by providing the full `M` where `M[n-1]` is not equal to the custody bit * Anyone can respond to a challenge by providing a specific position in `M` along with a branch of the data where `M[i-1] ^ mix(D[i]) != M[i]` The maximum size of data is now `2**6` epochs * `2**6` blocks * `2**14` bytes = `2**26` bytes, so assuming 512-byte mix chunks the maximum mix size is `2**17` bits or `2**14` bytes. The average mix size is `2**8` bytes. --- specs/core/1_shard-data-chains.md | 1085 ++++++++--------------------- 1 file changed, 288 insertions(+), 797 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 92cee4d19..8f2d12a91 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -1,157 +1,143 @@ # Ethereum 2.0 Phase 1 -- Shard Data Chains -**NOTICE**: This document is a work-in-progress for researchers and implementers. It reflects recent spec changes and takes precedence over the [Python proof-of-concept implementation](https://github.com/ethereum/beacon_chain). +**NOTICE**: This document is a work-in-progress for researchers and implementers. -At the current stage, Phase 1, while fundamentally feature-complete, is still subject to change. Development teams with spare resources may consider starting on the "Shard chains and crosslink data" section; at least basic properties, such as the fact that a shard block can get created every slot and is dependent on both a parent block in the same shard and a beacon chain block at or before that same slot, are unlikely to change, though details are likely to undergo similar kinds of changes to what Phase 0 has undergone since the start of the year. 
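As a rough, non-normative sketch of the bit-sum ("JABS") construction described in the commit message of this patch: the mix vector `M` is the running parity of `mix(key, D[i])` over the 512-byte chunks, and the custody bit is its last entry. The hash function and the `M[i] = M[i-1] XOR mix(key, D[i])` convention (chosen to match the response check `M[i-1] ^ mix(D[i]) == M[i]`) are assumptions made for illustration only.

```python
# Non-normative sketch of the bit-sum custody construction.
from hashlib import sha256

BYTES_PER_CUSTODY_CHUNK = 512

def mix(key: bytes, chunk: bytes) -> int:
    # First bit of hash(key + chunk), as suggested in the commit message;
    # sha256 stands in for the spec's hash function.
    return sha256(key + chunk).digest()[0] & 1

def custody_mix_vector(key: bytes, data: bytes) -> list:
    chunks = [
        data[i:i + BYTES_PER_CUSTODY_CHUNK]
        for i in range(0, len(data), BYTES_PER_CUSTODY_CHUNK)
    ]
    M, acc = [], 0
    for chunk in chunks:
        acc ^= mix(key, chunk)   # M[i] = M[i-1] XOR mix(key, D[i])
        M.append(acc)
    return M                     # the custody bit is M[-1]

# Toy example: custody bit over 3 KiB of zero bytes under a throwaway key
custody_bit = custody_mix_vector(b"toy-key", b"\x00" * 3072)[-1]
```

Under this reading, a challenger publishes the full `M` when its last entry disagrees with the attested custody bit, and a responder refutes the challenge by exhibiting a single index at which the running parity relation fails.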
- -## Table of contents +## Table of Contents -- [Ethereum 2.0 Phase 1 -- Shard Data Chains](#ethereum-20-phase-1----shard-data-chains) - - [Table of contents](#table-of-contents) - - [Introduction](#introduction) - - [Terminology](#terminology) - - [Constants](#constants) - - [Misc](#misc) - - [Time parameters](#time-parameters) - - [Max operations per block](#max-operations-per-block) - - [Signature domains](#signature-domains) -- [Shard chains and crosslink data](#shard-chains-and-crosslink-data) - - [Helper functions](#helper-functions) - - [`get_shuffled_committee`](#get_shuffled_committee) - - [`get_persistent_committee`](#get_persistent_committee) - - [`get_shard_proposer_index`](#get_shard_proposer_index) - - [Data Structures](#data-structures) - - [Shard chain blocks](#shard-chain-blocks) - - [Shard block processing](#shard-block-processing) - - [Verifying shard block data](#verifying-shard-block-data) - - [Verifying a crosslink](#verifying-a-crosslink) - - [Shard block fork choice rule](#shard-block-fork-choice-rule) -- [Updates to the beacon chain](#updates-to-the-beacon-chain) +- [Ethereum 2.0 Phase 1 -- Shards Data Chains](#ethereum-20-phase-1----shard-data-chains) + - [Table of Contents](#table-of-contents) + - [Introduction](#introduction) + - [Constants](#constants) + - [Misc](#misc) + - [Time parameters](#time-parameters) + - [Signature domains](#signature-domains) - [Data structures](#data-structures) - - [`Validator`](#validator) - - [`BeaconBlockBody`](#beaconblockbody) - - [`BeaconState`](#beaconstate) - - [`BranchChallenge`](#branchchallenge) - - [`BranchResponse`](#branchresponse) - - [`BranchChallengeRecord`](#branchchallengerecord) - - [`InteractiveCustodyChallengeRecord`](#interactivecustodychallengerecord) - - [`InteractiveCustodyChallengeInitiation`](#interactivecustodychallengeinitiation) - - [`InteractiveCustodyChallengeResponse`](#interactivecustodychallengeresponse) - - [`InteractiveCustodyChallengeContinuation`](#interactivecustodychallengecontinuation) - - [`SubkeyReveal`](#subkeyreveal) - - [Helpers](#helpers) - - [`get_branch_challenge_record_by_id`](#get_branch_challenge_record_by_id) - - [`get_custody_challenge_record_by_id`](#get_custody_challenge_record_by_id) - - [`get_attestation_merkle_depth`](#get_attestation_merkle_depth) - - [`epoch_to_custody_period`](#epoch_to_custody_period) - - [`slot_to_custody_period`](#slot_to_custody_period) - - [`get_current_custody_period`](#get_current_custody_period) - - [`verify_custody_subkey_reveal`](#verify_custody_subkey_reveal) - - [`verify_signed_challenge_message`](#verify_signed_challenge_message) - - [`penalize_validator`](#penalize_validator) - - [Per-slot processing](#per-slot-processing) - - [Operations](#operations) - - [Branch challenges](#branch-challenges) - - [Branch responses](#branch-responses) - - [Subkey reveals](#subkey-reveals) - - [Interactive custody challenge initiations](#interactive-custody-challenge-initiations) - - [Interactive custody challenge responses](#interactive-custody-challenge-responses) - - [Interactive custody challenge continuations](#interactive-custody-challenge-continuations) - - [Per-epoch processing](#per-epoch-processing) - - [One-time phase 1 initiation transition](#one-time-phase-1-initiation-transition) + - [`ShardBlockBody`](#shardblockbody) + - [`ShardBlock`](#shardblock) + - [`ShardBlockHeader`](#shardblockheader) + - [`ShardAttestation`](#shardattestation) + - [Helper functions](#helper-functions) + - [`get_period_committee`](#get_period_committee) + - 
[`get_persistent_committee`](#get_persistent_committee) + - [`get_shard_proposer_index`](#get_shard_proposer_index) + - [`get_shard_header`](#get_shard_header) + - [`verify_shard_attestation_signature`](#verify_shard_attestation_signature) + - [`compute_crosslink_data_root`](#compute_crosslink_data_root) + - [Object validity](#object-validity) + - [Shard blocks](#shard-blocks) + - [Shard attestations](#shard-attestations) + - [Beacon attestations](#beacon-attestations) + - [Shard fork choice rule](#shard-fork-choice-rule) -### Introduction +## Introduction -This document represents the specification for Phase 1 of Ethereum 2.0 -- Shard Data Chains. Phase 1 depends on the implementation of [Phase 0 -- The Beacon Chain](0_beacon-chain.md). +This document describes the shard data layer and the shard fork choice rule in Phase 1 of Ethereum 2.0. -Ethereum 2.0 consists of a central beacon chain along with `SHARD_COUNT` shard chains. Phase 1 is primarily concerned with the construction, validity, and consensus on the _data_ of these shard chains. Phase 1 does not specify shard chain state execution or account balances. This is left for future phases. +## Constants -### Terminology +### Misc -### Constants +| Name | Value | +| - | - | +| `BYTES_PER_SHARD_BLOCK_BODY` | `2**14` (= 16,384) | +| `MAX_SHARD_ATTESTIONS` | `2**4` (= 16) | +| `PHASE_1_GENESIS_EPOCH` | **TBD** | +| `PHASE_1_GENESIS_SLOT` | get_epoch_start_slot(PHASE_1_GENESIS_EPOCH) | -Phase 1 depends upon all of the constants defined in [Phase 0](0_beacon-chain.md#constants) in addition to the following: - -#### Misc - -| Name | Value | Unit | -|-------------------------------|------------------|--------| -| `SHARD_CHUNK_SIZE` | 2**5 (= 32) | bytes | -| `SHARD_BLOCK_SIZE` | 2**14 (= 16,384) | bytes | -| `MINOR_REWARD_QUOTIENT` | 2**8 (= 256) | | -| `MAX_POC_RESPONSE_DEPTH` | 5 | | -| `ZERO_PUBKEY` | int_to_bytes48(0)| | -| `VALIDATOR_NULL` | 2**64 - 1 | | - -#### Time parameters +### Time parameters | Name | Value | Unit | Duration | | - | - | :-: | :-: | -| `CROSSLINK_LOOKBACK` | 2**5 (= 32) | slots | 3.2 minutes | -| `MAX_BRANCH_CHALLENGE_DELAY` | 2**11 (= 2,048) | epochs | 9 days | -| `CUSTODY_PERIOD_LENGTH` | 2**11 (= 2,048) | epochs | 9 days | -| `PERSISTENT_COMMITTEE_PERIOD` | 2**11 (= 2,048) | epochs | 9 days | -| `CHALLENGE_RESPONSE_DEADLINE` | 2**14 (= 16,384) | epochs | 73 days | +| `CROSSLINK_LOOKBACK` | 2**0 (= 1) | epochs | 6.2 minutes | +| `PERSISTENT_COMMITTEE_PERIOD` | 2**11 (= 2,048) | epochs | ~9 days | -#### Max operations per block +### Signature domains -| Name | Value | -|----------------------------------------------------|---------------| -| `MAX_BRANCH_CHALLENGES` | 2**2 (= 4) | -| `MAX_BRANCH_RESPONSES` | 2**4 (= 16) | -| `MAX_EARLY_SUBKEY_REVEALS` | 2**4 (= 16) | -| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_INITIATIONS` | 2 | -| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_RESPONSES` | 16 | -| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_CONTINUTATIONS` | 16 | +| Name | Value | +| - | - | +| `DOMAIN_SHARD_PROPOSER` | `128` | +| `DOMAIN_SHARD_ATTESTER` | `129` | -#### Signature domains +## Data structures -| Name | Value | -|------------------------------|-----------------| -| `DOMAIN_SHARD_PROPOSER` | 129 | -| `DOMAIN_SHARD_ATTESTER` | 130 | -| `DOMAIN_CUSTODY_SUBKEY` | 131 | -| `DOMAIN_CUSTODY_INTERACTIVE` | 132 | +### `ShardBlockBody` -# Shard chains and crosslink data +```python +['byte', BYTES_PER_SHARD_BLOCK_BODY] +``` + +### `ShardBlock` + +```python +{ + 'slot': Slot, + 'shard': Shard, + 'beacon_chain_root': Hash, + 
'previous_block_root': Hash, + 'data': ShardBlockBody, + 'state_root': Hash, + 'attestations': [ShardAttestation], + 'signature': BLSSignature, +} +``` + +### `ShardBlockHeader` + +```python +{ + 'slot': Slot, + 'shard': Shard, + 'beacon_chain_root': Hash, + 'previous_block_root': Hash, + 'body_root': Hash, + 'state_root': Hash, + 'attestations': [ShardAttestation], + 'signature': BLSSignature, +} +``` + +### `ShardAttestation` + +```python +{ + 'data': { + 'slot': Slot, + 'shard': Shard, + 'shard_block_root': Hash, + }, + 'aggregation_bitfield': Bitfield, + 'aggregate_signature': BLSSignature, +} +``` ## Helper functions -#### `get_shuffled_committee` +### `get_period_committee` ```python -def get_shuffled_committee(state: BeaconState, - shard: Shard, - committee_start_epoch: Epoch, - index: int, - committee_count: int) -> List[ValidatorIndex]: +def get_period_committee(state: BeaconState, + shard: Shard, + committee_start_epoch: Epoch, + index: int, + committee_count: int) -> List[ValidatorIndex]: """ - Return shuffled committee. + Return committee for a period. Used to construct persistent committees. """ active_validator_indices = get_active_validator_indices(state.validator_registry, committee_start_epoch) - length = len(active_validator_indices) seed = generate_seed(state, committee_start_epoch) - start_offset = get_split_offset( - length, - SHARD_COUNT * committee_count, - shard * committee_count + index, + return compute_committee( + validator_indices=active_validator_indices, + seed=seed, + index=shard * committee_count + index, + total_committees=SHARD_COUNT * committee_count, ) - end_offset = get_split_offset( - length, - SHARD_COUNT * committee_count, - shard * committee_count + index + 1, - ) - return [ - active_validator_indices[get_permuted_index(i, length, seed)] - for i in range(start_offset, end_offset) - ] ``` -#### `get_persistent_committee` +### `get_persistent_committee` ```python def get_persistent_committee(state: BeaconState, @@ -160,7 +146,6 @@ def get_persistent_committee(state: BeaconState, """ Return the persistent committee for the given ``shard`` at the given ``slot``. 
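    Validators are drawn from the two most recent period committees and switch from the earlier to the later committee at a per-validator switchover epoch.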
""" - earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 later_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD @@ -172,14 +157,11 @@ def get_persistent_committee(state: BeaconState, ) + 1 index = slot % committee_count - earlier_committee = get_shuffled_committee(state, shard, earlier_start_epoch, index, committee_count) - later_committee = get_shuffled_committee(state, shard, later_start_epoch, index, committee_count) + earlier_committee = get_period_committee(state, shard, earlier_start_epoch, index, committee_count) + later_committee = get_period_committee(state, shard, later_start_epoch, index, committee_count) def get_switchover_epoch(index): - return ( - bytes_to_int(hash(earlier_seed + bytes3(index))[0:8]) % - PERSISTENT_COMMITTEE_PERIOD - ) + return bytes_to_int(hash(earlier_seed + bytes3(index))[0:8]) % PERSISTENT_COMMITTEE_PERIOD # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from # later committee; return a sorted list of the union of the two, deduplicated @@ -189,723 +171,232 @@ def get_persistent_committee(state: BeaconState, ))) ``` -#### `get_shard_proposer_index` +### `get_shard_proposer_index` ```python def get_shard_proposer_index(state: BeaconState, shard: Shard, slot: Slot) -> ValidatorIndex: - seed = hash( - state.current_shuffling_seed + - int_to_bytes8(shard) + - int_to_bytes8(slot) - ) + # Randomly shift persistent committee persistent_committee = get_persistent_committee(state, shard, slot) - # Default proposer - index = bytes_to_int(seed[0:8]) % len(persistent_committee) - # If default proposer exits, try the other proposers in order; if all are exited - # return None (ie. no block can be proposed) - validators_to_try = persistent_committee[index:] + persistent_committee[:index] - for index in validators_to_try: + seed = hash(state.current_shuffling_seed + int_to_bytes8(shard) + int_to_bytes8(slot)) + random_index = bytes_to_int(seed[0:8]) % len(persistent_committee) + persistent_committee = persistent_committee[random_index:] + persistent_committee[:random_index] + + # Search for an active proposer + for index in persistent_committee: if is_active_validator(state.validator_registry[index], get_current_epoch(state)): return index + + # No block can be proposed if no validator is active return None ``` -## Data Structures - -### Shard chain blocks - -A `ShardBlock` object has the following fields: +### `get_shard_header` ```python -{ - # Slot number - 'slot': 'uint64', - # What shard is it on - 'shard_id': 'uint64', - # Parent block's root - 'parent_root': 'bytes32', - # Beacon chain block - 'beacon_chain_ref': 'bytes32', - # Merkle root of data - 'data_root': 'bytes32' - # State root (placeholder for now) - 'state_root': 'bytes32', - # Block signature - 'signature': 'bytes96', - # Attestation - 'participation_bitfield': 'bytes', - 'aggregate_signature': 'bytes96', -} +def get_shard_header(block: ShardBlock) -> ShardBlockHeader: + return ShardBlockHeader( + slot: block.slot, + shard: block.shard, + beacon_chain_root: block.beacon_chain_root, + previous_block_root: block.previous_block_root, + body_root: hash_tree_root(block.body), + state_root: block.state_root, + attestations: block.attestations, + signature: block.signature, + ) ``` -## Shard block processing - -For a `shard_block` on a shard to be processed by a node, the following conditions must be met: - -* The `ShardBlock` pointed to by `shard_block.parent_root` has 
already been processed and accepted -* The signature for the block from the _proposer_ (see below for definition) of that block is included along with the block in the network message object - -To validate a block header on shard `shard_block.shard_id`, compute as follows: - -* Verify that `shard_block.beacon_chain_ref` is the hash of a block in the (canonical) beacon chain with slot less than or equal to `slot`. -* Verify that `shard_block.beacon_chain_ref` is equal to or a descendant of the `shard_block.beacon_chain_ref` specified in the `ShardBlock` pointed to by `shard_block.parent_root`. -* Let `state` be the state of the beacon chain block referred to by `shard_block.beacon_chain_ref`. -* Let `persistent_committee = get_persistent_committee(state, shard_block.shard_id, shard_block.slot)`. -* Assert `verify_bitfield(shard_block.participation_bitfield, len(persistent_committee))` -* For every `i in range(len(persistent_committee))` where `is_active_validator(state.validator_registry[persistent_committee[i]], get_current_epoch(state))` returns `False`, verify that `get_bitfield_bit(shard_block.participation_bitfield, i) == 0` -* Let `proposer_index = get_shard_proposer_index(state, shard_block.shard_id, shard_block.slot)`. -* Verify that `proposer_index` is not `None`. -* Let `msg` be the `shard_block` but with `shard_block.signature` set to `[0, 0]`. -* Verify that `bls_verify(pubkey=validators[proposer_index].pubkey, message_hash=hash(msg), signature=shard_block.signature, domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_PROPOSER))` passes. -* Let `group_public_key = bls_aggregate_pubkeys([state.validator_registry[index].pubkey for i, index in enumerate(persistent_committee) if get_bitfield_bit(shard_block.participation_bitfield, i) is True])`. -* Verify that `bls_verify(pubkey=group_public_key, message_hash=shard_block.parent_root, sig=shard_block.aggregate_signature, domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_ATTESTER))` passes. - -### Verifying shard block data - -At network layer, we expect a shard block header to be broadcast along with its `block_body`. - -* Verify that `len(block_body) == SHARD_BLOCK_SIZE` -* Verify that `merkle_root(block_body)` equals the `data_root` in the header. - -### Verifying a crosslink - -A node should sign a crosslink only if the following conditions hold. **If a node has the capability to perform the required level of verification, it should NOT follow chains on which a crosslink for which these conditions do NOT hold has been included, or a sufficient number of signatures have been included that during the next state recalculation, a crosslink will be registered.** - -First, the conditions must recursively apply to the crosslink referenced in `last_crosslink_root` for the same shard (unless `last_crosslink_root` equals zero, in which case we are at the genesis). - -Second, we verify the `shard_chain_commitment`. -* Let `start_slot = state.latest_crosslinks[shard].epoch * SLOTS_PER_EPOCH + SLOTS_PER_EPOCH - CROSSLINK_LOOKBACK`. -* Let `end_slot = attestation.data.slot - attestation.data.slot % SLOTS_PER_EPOCH - CROSSLINK_LOOKBACK`. -* Let `length = end_slot - start_slot`, `headers[0] .... headers[length-1]` be the serialized block headers in the canonical shard chain from the verifer's point of view (note that this implies that `headers` and `bodies` have been checked for validity). -* Let `bodies[0] ... bodies[length-1]` be the bodies of the blocks. 
-* Note: If there is a missing slot, then the header and body are the same as that of the block at the most recent slot that has a block. - -We define two helpers: +### `verify_shard_attestation_signature` ```python -def pad_to_power_of_2(values: List[bytes]) -> List[bytes]: - zero_shard_block = b'\x00' * SHARD_BLOCK_SIZE - while not is_power_of_two(len(values)): - values = values + [zero_shard_block] - return values +def verify_shard_attestation_signature(state: BeaconState, + attestation: ShardAttestation) -> None: + data = attestation.data + persistent_committee = get_persistent_committee(state, data.shard, data.slot) + assert verify_bitfield(attestation.aggregation_bitfield, len(persistent_committee)) + pubkeys = [] + for i, index in enumerate(persistent_committee): + if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b1 + validator = state.validator_registry[index] + assert is_active_validator(validator, get_current_epoch(state)) + pubkeys.append(validator.pubkey) + assert bls_verify( + pubkey=bls_aggregate_pubkeys(pubkeys), + message_hash=data.shard_block_root, + signature=attestation.aggregate_signature, + domain=get_domain(state, slot_to_epoch(data.slot), DOMAIN_SHARD_ATTESTER) + ) ``` -```python -def merkle_root_of_bytes(data: bytes) -> bytes: - return merkle_root([data[i:i + 32] for i in range(0, len(data), 32)]) -``` - -We define the function for computing the commitment as follows: +### `compute_crosslink_data_root` ```python -def compute_commitment(headers: List[ShardBlock], bodies: List[bytes]) -> Bytes32: +def compute_crosslink_data_root(blocks: List[ShardBlock]) -> Hash: + def is_power_of_two(value: int) -> bool: + return (value > 0) and (value & (value - 1) == 0) + + def pad_to_power_of_2(values: List[bytes]) -> List[bytes]: + while not is_power_of_two(len(values)): + values += [b'\x00' * BYTES_PER_SHARD_BLOCK_BODY] + return values + + def merkle_root_of_bytes(data: bytes) -> bytes: + return merkle_root([data[i:i + 32] for i in range(0, len(data), 32)]) + return hash( - merkle_root( - pad_to_power_of_2([ - merkle_root_of_bytes(zpad(serialize(h), SHARD_BLOCK_SIZE)) for h in headers - ]) - ) + - merkle_root( - pad_to_power_of_2([ - merkle_root_of_bytes(h) for h in bodies - ]) - ) + merkle_root(pad_to_power_of_2([ + merkle_root_of_bytes(zpad(serialize(get_shard_header(block)), BYTES_PER_SHARD_BLOCK_BODY)) for block in blocks + ])) + + merkle_root(pad_to_power_of_2([ + merkle_root_of_bytes(block.body) for block in blocks + ])) ) ``` -The `shard_chain_commitment` is only valid if it equals `compute_commitment(headers, bodies)`. +## Object validity +### Shard blocks -### Shard block fork choice rule +Let: -The fork choice rule for any shard is LMD GHOST using the shard chain attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (ie. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_chain_ref` is the block in the main beacon chain at the specified `slot` should be considered (if the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than a slot). 
- -# Updates to the beacon chain - -## Data structures - -### `Validator` - -Add member values to the end of the `Validator` object: +* `beacon_blocks` be the `BeaconBlock` list such that `beacon_blocks[slot]` is the canonical `BeaconBlock` at slot `slot` +* `beacon_state` be the canonical `BeaconState` after processing `beacon_blocks[-1]` +* `valid_shard_blocks` be the list of valid `ShardBlock`, recursively defined +* `unix_time` be the current unix time +* `candidate` be a candidate `ShardBlock` for which validity is to be determined by running `is_valid_shard_block` ```python - 'next_subkey_to_reveal': 'uint64', - 'reveal_max_periods_late': 'uint64', -``` +def is_valid_shard_block(beacon_blocks: List[BeaconBlock], + beacon_state: BeaconState, + valid_shard_blocks: List[ShardBlock], + unix_time: uint64, + candidate: ShardBlock) -> bool + # Check if block is already determined valid + for _, block in enumerate(valid_shard_blocks): + if candidate == block: + return True -And the initializers: + # Check slot number + assert block.slot >= PHASE_1_GENESIS_SLOT + assert unix_time >= beacon_state.genesis_time + (block.slot - GENESIS_SLOT) * SECONDS_PER_SLOT -```python - 'next_subkey_to_reveal': get_current_custody_period(state), - 'reveal_max_periods_late': 0, -``` + # Check shard number + assert block.shard <= SHARD_COUNT -### `BeaconBlockBody` + # Check beacon block + beacon_block = beacon_blocks[block.slot] + assert block.beacon_block_root == signed_root(beacon_block) + assert beacon_block.slot <= block.slot: -Add member values to the `BeaconBlockBody` structure: + # Check state root + assert block.state_root == ZERO_HASH # [to be removed in phase 2] -```python - 'branch_challenges': [BranchChallenge], - 'branch_responses': [BranchResponse], - 'subkey_reveals': [SubkeyReveal], - 'interactive_custody_challenge_initiations': [InteractiveCustodyChallengeInitiation], - 'interactive_custody_challenge_responses': [InteractiveCustodyChallengeResponse], - 'interactive_custody_challenge_continuations': [InteractiveCustodyChallengeContinuation], - -``` - -And initialize to the following: - -```python - 'branch_challenges': [], - 'branch_responses': [], - 'subkey_reveals': [], -``` - -### `BeaconState` - -Add member values to the `BeaconState` structure: - -```python - 'branch_challenge_records': [BranchChallengeRecord], - 'next_branch_challenge_id': 'uint64', - 'custody_challenge_records': [InteractiveCustodyChallengeRecord], - 'next_custody_challenge_id': 'uint64', -``` - -### `BranchChallenge` - -Define a `BranchChallenge` as follows: - -```python -{ - 'responder_index': 'uint64', - 'data_index': 'uint64', - 'attestation': SlashableAttestation, -} -``` - -### `BranchResponse` - -Define a `BranchResponse` as follows: - -```python -{ - 'challenge_id': 'uint64', - 'responding_to_custody_challenge': 'bool', - 'data': 'bytes32', - 'branch': ['bytes32'], -} -``` - -### `BranchChallengeRecord` - -Define a `BranchChallengeRecord` as follows: - -```python -{ - 'challenge_id': 'uint64', - 'challenger_index': 'uint64', - 'responder_index': 'uint64', - 'root': 'bytes32', - 'depth': 'uint64', - 'deadline': 'uint64', - 'data_index': 'uint64', -} -``` - -### `InteractiveCustodyChallengeRecord` - -```python -{ - 'challenge_id': 'uint64', - 'challenger_index': 'uint64', - 'responder_index': 'uint64', - # Initial data root - 'data_root': 'bytes32', - # Initial custody bit - 'custody_bit': 'bool', - # Responder subkey - 'responder_subkey': 'bytes96', - # The hash in the PoC tree in the position that we are currently 
at - 'current_custody_tree_node': 'bytes32', - # The position in the tree, in terms of depth and position offset - 'depth': 'uint64', - 'offset': 'uint64', - # Max depth of the branch - 'max_depth': 'uint64', - # Deadline to respond (as an epoch) - 'deadline': 'uint64', -} -``` - -### `InteractiveCustodyChallengeInitiation` - -```python -{ - 'attestation': SlashableAttestation, - 'responder_index': 'uint64', - 'challenger_index': 'uint64', - 'responder_subkey': 'bytes96', - 'signature': 'bytes96', -} -``` - -### `InteractiveCustodyChallengeResponse` - -```python -{ - 'challenge_id': 'uint64', - 'hashes': ['bytes32'], - 'signature': 'bytes96', -} -``` - -### `InteractiveCustodyChallengeContinuation` - -```python -{ - 'challenge_id': 'uint64', - 'sub_index': 'uint64', - 'new_custody_tree_node': 'bytes32', - 'proof': ['bytes32'], - 'signature': 'bytes96', -} -``` - -### `SubkeyReveal` - -Define a `SubkeyReveal` as follows: - -```python -{ - 'validator_index': 'uint64', - 'period': 'uint64', - 'subkey': 'bytes96', - 'mask': 'bytes32', - 'revealer_index': 'uint64' -} -``` - -## Helpers - -### `get_branch_challenge_record_by_id` - -```python -def get_branch_challenge_record_by_id(state: BeaconState, id: int) -> BranchChallengeRecord: - return [c for c in state.branch_challenges if c.challenge_id == id][0] -``` - -### `get_custody_challenge_record_by_id` - -```python -def get_custody_challenge_record_by_id(state: BeaconState, id: int) -> BranchChallengeRecord: - return [c for c in state.branch_challenges if c.challenge_id == id][0] -``` - -### `get_attestation_merkle_depth` - -```python -def get_attestation_merkle_depth(attestation: Attestation) -> int: - start_epoch = attestation.data.latest_crosslink.epoch - end_epoch = slot_to_epoch(attestation.data.slot) - chunks_per_slot = SHARD_BLOCK_SIZE // 32 - chunks = (end_epoch - start_epoch) * EPOCH_LENGTH * chunks_per_slot - return log2(next_power_of_two(chunks)) -``` - -### `epoch_to_custody_period` - -```python -def epoch_to_custody_period(epoch: Epoch) -> int: - return epoch // CUSTODY_PERIOD_LENGTH -``` - -### `slot_to_custody_period` - -```python -def slot_to_custody_period(slot: Slot) -> int: - return epoch_to_custody_period(slot_to_epoch(slot)) -``` - -### `get_current_custody_period` - -```python -def get_current_custody_period(state: BeaconState) -> int: - return epoch_to_custody_period(get_current_epoch(state)) -``` - -### `verify_custody_subkey_reveal` - -```python -def verify_custody_subkey_reveal(pubkey: bytes48, - subkey: bytes96, - mask: bytes32, - mask_pubkey: bytes48, - period: int) -> bool: - # Legitimate reveal: checking that the provided value actually is the subkey - if mask == ZERO_HASH: - pubkeys=[pubkey] - message_hashes=[hash(int_to_bytes8(period))] - - # Punitive early reveal: checking that the provided value is a valid masked subkey - # (masking done to prevent "stealing the reward" from a whistleblower by block proposers) - # Secure under the aggregate extraction infeasibility assumption described on page 11-12 - # of https://crypto.stanford.edu/~dabo/pubs/papers/aggreg.pdf + # Check parent block + if block.slot == PHASE_1_GENESIS_SLOT: + assert candidate.previous_block_root == ZERO_HASH else: - pubkeys=[pubkey, mask_pubkey] - message_hashes=[hash(int_to_bytes8(period)), mask] - - return bls_multi_verify( - pubkeys=pubkeys, - message_hashes=message_hashes, - signature=subkey, - domain=get_domain( - fork=state.fork, - epoch=period * CUSTODY_PERIOD_LENGTH, - domain_type=DOMAIN_CUSTODY_SUBKEY, - ) - ) -``` + parent_block = 
next( + block for block in valid_shard_blocks if + signed_root(block) == candidate.previous_block_root + , None) + assert parent_block != None + assert parent_block.shard == block.shard + assert parent_block.slot < block.slot + assert signed_root(beacon_blocks[parent_block.slot]) == parent_block.beacon_chain_root -### `verify_signed_challenge_message` + # Check attestations + assert len(block.attestations) <= MAX_SHARD_ATTESTIONS + for _, attestation in enumerate(block.attestations): + assert max(GENESIS_SHARD_SLOT, block.slot - SLOTS_PER_EPOCH) <= attestation.data.slot + assert attesation.data.slot <= block.slot - MIN_ATTESTATION_INCLUSION_DELAY + assert attetation.data.shart == block.shard + verify_shard_attestation_signature(beacon_state, attestation) -```python -def verify_signed_challenge_message(message: Any, pubkey: bytes48) -> bool: - return bls_verify( - message_hash=signed_root(message), - pubkey=pubkey, - signature=message.signature, - domain=get_domain(state, get_current_epoch(state), DOMAIN_CUSTODY_INTERACTIVE) + # Check signature + proposer_index = get_shard_proposer_index(beacon_state, block.shard, block.slot) + assert proposer_index is not None + assert bls_verify( + pubkey=validators[proposer_index].pubkey, + message_hash=signed_root(block), + signature=block.signature, + domain=get_domain(beacon_state, slot_to_epoch(block.slot), DOMAIN_SHARD_PROPOSER) ) + return True ``` -### `penalize_validator` +### Shard attestations -Change the definition of `penalize_validator` as follows: +Let: + +* `valid_shard_blocks` be the list of valid `ShardBlock` +* `beacon_state` be the canonical `BeaconState` +* `candidate` be a candidate `ShardAttestation` for which validity is to be determined by running `is_valid_shard_attestation` ```python -def penalize_validator(state: BeaconState, index: ValidatorIndex, whistleblower_index=None:ValidatorIndex) -> None: - """ - Penalize the validator of the given ``index``. - Note that this function mutates ``state``. 
- """ - exit_validator(state, index) - validator = state.validator_registry[index] - state.latest_penalized_balances[get_current_epoch(state) % LATEST_PENALIZED_EXIT_LENGTH] += get_effective_balance(state, index) - - block_proposer_index = get_beacon_proposer_index(state, state.slot) - whistleblower_reward = get_effective_balance(state, index) // WHISTLEBLOWER_REWARD_QUOTIENT - if whistleblower_index is None: - state.validator_balances[block_proposer_index] += whistleblower_reward +def is_valid_shard_attestation(valid_shard_blocks: List[ShardBlock], + beacon_state: BeaconState, + candidate: Attestation) -> bool: + # Check shard block + shard_block = next( + block for block in valid_shard_blocks if + signed_root(block) == candidate.attestation.data.shard_block_root + , None) + assert shard_block != None + assert shard_block.slot == attestation.data.slot + assert shard_block.shard == attestation.data.shard + + # Check signature + verify_shard_attestation_signature(beacon_state, attestation) + + return True +``` + +### Beacon attestations + +Let: + +* `shard` be a valid `Shard` +* `shard_blocks` be the `ShardBlock` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `shard` at slot `slot` +* `beacon_state` be the canonical `BeaconState` +* `valid_attestations` be the list of valid `Attestation`, recursively defined +* `candidate` be a candidate `Attestation` which is valid under phase 0 rules, and for which validity is to be determined under phase 1 rules by running `is_valid_beacon_attestation` + +```python +def is_valid_beacon_attestation(shard: Shard, + shard_blocks: List[ShardBlock], + beacon_state: BeaconState, + valid_attestations: List[Attestation], + candidate: Attestation) -> bool: + # Check if attestation is already determined valid + for _, attestation in enumerate(valid_attestations): + if candidate == attestation: + return True + + # Check previous attestation + if candidate.data.previous_crosslink.epoch <= PHASE_1_GENESIS_EPOCH: + assert candidate.data.previous_crosslink.crosslink_data_root == ZERO_HASH else: - state.validator_balances[whistleblower_index] += ( - whistleblower_reward * INCLUDER_REWARD_QUOTIENT / (INCLUDER_REWARD_QUOTIENT + 1) - ) - state.validator_balances[block_proposer_index] += whistleblower_reward / (INCLUDER_REWARD_QUOTIENT + 1) - state.validator_balances[index] -= whistleblower_reward - validator.penalized_epoch = get_current_epoch(state) - validator.withdrawable_epoch = get_current_epoch(state) + LATEST_PENALIZED_EXIT_LENGTH + previous_attestation = next( + attestation for attestation in valid_attestations if + attestation.data.crosslink_data_root == candidate.data.previous_crosslink.crosslink_data_root + , None) + assert previous_attestation != None + assert candidate.data.previous_attestation.epoch < slot_to_epoch(candidate.data.slot) + + # Check crosslink data root + start_epoch = state.latest_crosslinks[shard].epoch + end_epoch = min(slot_to_epoch(candidate.data.slot) - CROSSLINK_LOOKBACK, start_epoch + MAX_CROSSLINK_EPOCHS) + blocks = [] + for slot in range(start_epoch * SLOTS_PER_EPOCH, end_epoch * SLOTS_PER_EPOCH): + blocks.append(shard_blocks[slot]) + assert candidate.data.crosslink_data_root == compute_crosslink_data_root(blocks) + + return True ``` -The only change is that this introduces the possibility of a penalization where the "whistleblower" that takes credit is NOT the block proposer. 
+## Shard fork choice rule -## Per-slot processing - -### Operations - -Add the following operations to the per-slot processing, in order the given below and _after_ all other operations (specifically, right after exits). - -#### Branch challenges - -Verify that `len(block.body.branch_challenges) <= MAX_BRANCH_CHALLENGES`. - -For each `challenge` in `block.body.branch_challenges`, run: - -```python -def process_branch_challenge(state: BeaconState, - challenge: BranchChallenge) -> None: - # Check that it's not too late to challenge - assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY - assert state.validator_registry[responder_index].exit_epoch >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY - # Check the attestation is valid - assert verify_slashable_attestation(state, challenge.attestation) - # Check that the responder participated - assert challenger.responder_index in challenge.attestation.validator_indices - # Check the challenge is not a duplicate - assert [ - c for c in state.branch_challenge_records if c.root == challenge.attestation.data.crosslink_data_root and - c.data_index == challenge.data_index - ] == [] - # Check validity of depth - depth = get_attestation_merkle_depth(challenge.attestation) - assert c.data_index < 2**depth - # Add new challenge - state.branch_challenge_records.append(BranchChallengeRecord( - challenge_id=state.next_branch_challenge_id, - challenger_index=get_beacon_proposer_index(state, state.slot), - root=challenge.attestation.data.shard_chain_commitment, - depth=depth, - deadline=get_current_epoch(state) + CHALLENGE_RESPONSE_DEADLINE, - data_index=challenge.data_index - )) - state.next_branch_challenge_id += 1 -``` - -#### Branch responses - -Verify that `len(block.body.branch_responses) <= MAX_BRANCH_RESPONSES`. 
- -For each `response` in `block.body.branch_responses`, if `response.responding_to_custody_challenge == False`, run: - -```python -def process_branch_exploration_response(state: BeaconState, - response: BranchResponse) -> None: - challenge = get_branch_challenge_record_by_id(response.challenge_id) - assert verify_merkle_branch( - leaf=response.data, - branch=response.branch, - depth=challenge.depth, - index=challenge.data_index, - root=challenge.root - ) - # Must wait at least ENTRY_EXIT_DELAY before responding to a branch challenge - assert get_current_epoch(state) >= challenge.inclusion_epoch + ENTRY_EXIT_DELAY - state.branch_challenge_records.pop(challenge) - # Reward the proposer - proposer_index = get_beacon_proposer_index(state, state.slot) - state.validator_balances[proposer_index] += base_reward(state, index) // MINOR_REWARD_QUOTIENT -``` - -If `response.responding_to_custody_challenge == True`, run: - -```python -def process_branch_custody_response(state: BeaconState, - response: BranchResponse) -> None: - challenge = get_custody_challenge_record_by_id(response.challenge_id) - responder = state.validator_registry[challenge.responder_index] - # Verify we're not too late - assert get_current_epoch(state) < responder.withdrawable_epoch - # Verify the Merkle branch *of the data tree* - assert verify_merkle_branch( - leaf=response.data, - branch=response.branch, - depth=challenge.max_depth, - index=challenge.offset, - root=challenge.data_root - ) - # Responder wins - if hash(challenge.responder_subkey + response.data) == challenge.current_custody_tree_node: - penalize_validator(state, challenge.challenger_index, challenge.responder_index) - # Challenger wins - else: - penalize_validator(state, challenge.responder_index, challenge.challenger_index) - state.custody_challenge_records.pop(challenge) -``` - -#### Subkey reveals - -Verify that `len(block.body.early_subkey_reveals) <= MAX_EARLY_SUBKEY_REVEALS`. - -For each `reveal` in `block.body.early_subkey_reveals`: - -* Verify that `verify_custody_subkey_reveal(state.validator_registry[reveal.validator_index].pubkey, reveal.subkey, reveal.period, reveal.mask, state.validator_registry[reveal.revealer_index].pubkey)` returns `True`. -* Let `is_early_reveal = reveal.period > get_current_custody_period(state) or (reveal.period == get_current_custody_period(state) and state.validator_registry[reveal.validator_index].exit_epoch > get_current_epoch(state))` (ie. either the reveal is of a future period, or it's of the current period and the validator is still active) -* Verify that one of the following is true: - * (i) `is_early_reveal` is `True` - * (ii) `is_early_reveal` is `False` and `reveal.period == state.validator_registry[reveal.validator_index].next_subkey_to_reveal` (revealing a past subkey, or a current subkey for a validator that has exited) and `reveal.mask == ZERO_HASH` - -In case (i): - -* Verify that `state.validator_registry[reveal.validator_index].penalized_epoch > get_current_epoch(state). -* Run `penalize_validator(state, reveal.validator_index, reveal.revealer_index)`. -* Set `state.validator_balances[reveal.revealer_index] += base_reward(state, index) // MINOR_REWARD_QUOTIENT` - -In case (ii): - -* Determine the proposer `proposer_index = get_beacon_proposer_index(state, state.slot)` and set `state.validator_balances[proposer_index] += base_reward(state, index) // MINOR_REWARD_QUOTIENT`. 
-* Set `state.validator_registry[reveal.validator_index].next_subkey_to_reveal += 1` -* Set `state.validator_registry[reveal.validator_index].reveal_max_periods_late = max(state.validator_registry[reveal.validator_index].reveal_max_periods_late, get_current_period(state) - reveal.period)`. - -#### Interactive custody challenge initiations - -Verify that `len(block.body.interactive_custody_challenge_initiations) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_INITIATIONS`. - -For each `initiation` in `block.body.interactive_custody_challenge_initiations`, use the following function to process it: - -```python -def process_initiation(state: BeaconState, - initiation: InteractiveCustodyChallengeInitiation) -> None: - challenger = state.validator_registry[initiation.challenger_index] - responder = state.validator_registry[initiation.responder_index] - # Verify the signature - assert verify_signed_challenge_message(initiation, challenger.pubkey) - # Verify the attestation - assert verify_slashable_attestation(initiation.attestation, state) - # Check that the responder actually participated in the attestation - assert initiation.responder_index in attestation.validator_indices - # Any validator can be a challenger or responder of max 1 challenge at a time - for c in state.custody_challenge_records: - assert c.challenger_index != initiation.challenger_index - assert c.responder_index != initiation.responder_index - # Can't challenge if you've been penalized - assert challenger.penalized_epoch == FAR_FUTURE_EPOCH - # Make sure the revealed subkey is valid - assert verify_custody_subkey_reveal( - pubkey=state.validator_registry[responder_index].pubkey, - subkey=initiation.responder_subkey, - period=slot_to_custody_period(attestation.data.slot) - ) - # Verify that the attestation is still eligible for challenging - min_challengeable_epoch = responder.exit_epoch - CUSTODY_PERIOD_LENGTH * (1 + responder.reveal_max_periods_late) - assert min_challengeable_epoch <= slot_to_epoch(initiation.attestation.data.slot) - # Create a new challenge object - state.branch_challenge_records.append(InteractiveCustodyChallengeRecord( - challenge_id=state.next_branch_challenge_id, - challenger_index=initiation.challenger_index, - responder_index=initiation.responder_index, - data_root=attestation.custody_commitment, - custody_bit=get_bitfield_bit(attestation.custody_bitfield, attestation.validator_indices.index(responder_index)), - responder_subkey=responder_subkey, - current_custody_tree_node=ZERO_HASH, - depth=0, - offset=0, - max_depth=get_attestation_data_merkle_depth(initiation.attestation.data), - deadline=get_current_epoch(state) + CHALLENGE_RESPONSE_DEADLINE - )) - state.next_branch_challenge_id += 1 - # Responder can't withdraw yet! - state.validator_registry[responder_index].withdrawable_epoch = FAR_FUTURE_EPOCH -``` - -#### Interactive custody challenge responses - -A response provides 32 hashes that are under current known proof of custody tree node. Note that at the beginning the tree node is just one bit of the custody root, so we ask the responder to sign to commit to the top 5 levels of the tree and therefore the root hash; at all other stages in the game responses are self-verifying. - -Verify that `len(block.body.interactive_custody_challenge_responses) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_RESPONSES`. 
- -For each `response` in `block.body.interactive_custody_challenge_responses`, use the following function to process it: - -```python -def process_response(state: BeaconState, - response: InteractiveCustodyChallengeResponse) -> None: - challenge = get_custody_challenge_record_by_id(state, response.challenge_id) - responder = state.validator_registry[challenge.responder_index] - # Check that the right number of hashes was provided - expected_depth = min(challenge.max_depth - challenge.depth, MAX_POC_RESPONSE_DEPTH) - assert 2**expected_depth == len(response.hashes) - # Must make some progress! - assert expected_depth > 0 - # Check the hashes match the previously provided root - root = merkle_root(response.hashes) - # If this is the first response check the bit and the signature and set the root - if challenge.depth == 0: - assert get_bitfield_bit(root, 0) == challenge.custody_bit - assert verify_signed_challenge_message(response, responder.pubkey) - challenge.current_custody_tree_node = root - # Otherwise just check the response against the root - else: - assert root == challenge_data.current_custody_tree_node - # Update challenge data - challenge.deadline=FAR_FUTURE_EPOCH - responder.withdrawable_epoch = get_current_epoch(state) + MAX_POC_RESPONSE_DEPTH -``` - -#### Interactive custody challenge continuations - -Once a response provides 32 hashes, the challenger has the right to choose any one of them that they feel is constructed incorrectly to continue the game. Note that eventually, the game will get to the point where the `new_custody_tree_node` is a leaf node. - -Verify that `len(block.body.interactive_custody_challenge_continuations) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_CONTINUATIONS`. - -For each `continuation` in `block.body.interactive_custody_challenge_continuations`, use the following function to process it: - -```python -def process_continuation(state: BeaconState, - continuation: InteractiveCustodyChallengeContinuation) -> None: - challenge = get_custody_challenge_record_by_id(state, continuation.challenge_id) - challenger = state.validator_registry[challenge.challenger_index] - responder = state.validator_registry[challenge.responder_index] - expected_depth = min(challenge_data.max_depth - challenge_data.depth, MAX_POC_RESPONSE_DEPTH) - # Verify we're not too late - assert get_current_epoch(state) < responder.withdrawable_epoch - # Verify the Merkle branch (the previous custody response provided the next level of hashes so the - # challenger has the info to make any Merkle branch) - assert verify_merkle_branch( - leaf=new_custody_tree_node, - branch=continuation.proof, - depth=expected_depth, - index=sub_index, - root=challenge_data.current_custody_tree_node - ) - # Verify signature - assert verify_signed_challenge_message(continuation, challenger.pubkey) - # Update the challenge data - challenge.current_custody_tree_node = continuation.new_custody_tree_node - challenge.depth += expected_depth - challenge.deadline = get_current_epoch(state) + MAX_POC_RESPONSE_DEPTH - responder.withdrawable_epoch = FAR_FUTURE_EPOCH - challenge.offset = challenge_data.offset * 2**expected_depth + sub_index -``` - -## Per-epoch processing - -Add the following loop immediately below the `process_ejections` loop: - -```python -def process_challenge_absences(state: BeaconState) -> None: - """ - Iterate through the challenge list - and penalize validators with balance that did not answer challenges. 
- """ - for c in state.branch_challenge_records: - if get_current_epoch(state) > c.deadline: - penalize_validator(state, c.responder_index, c.challenger_index) - - for c in state.custody_challenge_records: - if get_current_epoch(state) > c.deadline: - penalize_validator(state, c.responder_index, c.challenger_index) - if get_current_epoch(state) > state.validator_registry[c.responder_index].withdrawable_epoch: - penalize_validator(state, c.challenger_index, c.responder_index) -``` - -In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope): - -```python -def eligible(index): - validator = state.validator_registry[index] - # Cannot exit if there are still open branch challenges - if [c for c in state.branch_challenge_records if c.responder_index == index] != []: - return False - # Cannot exit if you have not revealed all of your subkeys - elif validator.next_subkey_to_reveal <= epoch_to_custody_period(validator.exit_epoch): - return False - # Cannot exit if you already have - elif validator.withdrawable_epoch < FAR_FUTURE_EPOCH: - return False - # Return minimum time - else: - return current_epoch >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWAL_EPOCHS -``` - -## One-time phase 1 initiation transition - -Run the following on the fork block after per-slot processing and before per-block and per-epoch processing. - -For all `validator` in `ValidatorRegistry`, update it to the new format and fill the new member values with: - -```python - 'next_subkey_to_reveal': get_current_custody_period(state), - 'reveal_max_periods_late': 0, -``` - -Update the `BeaconState` to the new format and fill the new member values with: - -```python - 'branch_challenge_records': [], - 'next_branch_challenge_id': 0, - 'custody_challenge_records': [], - 'next_custody_challenge_id': 0, -``` +The fork choice rule for any shard is LMD GHOST using the shard attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_chain_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than a slot.)