From b67aeee855bd4d632514719c7fc743db91b7ab86 Mon Sep 17 00:00:00 2001
From: Ben Edgington
Date: Sat, 25 Jan 2020 21:16:32 +0000
Subject: [PATCH 1/5] Fix fast_aggregate_verify BLS tests

---
 tests/generators/bls/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/generators/bls/main.py b/tests/generators/bls/main.py
index cb14ec9ec..19468f041 100644
--- a/tests/generators/bls/main.py
+++ b/tests/generators/bls/main.py
@@ -134,7 +134,7 @@ def case04_fast_aggregate_verify():
 
         # Invalid signature -- extra pubkey
         pubkeys_extra = pubkeys + [bls.G2ProofOfPossession.PrivToPub(PRIVKEYS[-1])]
-        pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys]
+        pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra]
         full_name = f'{pubkeys_extra_serial}_{encode_hex(message)}_extra_pubkey'
         yield f'fast_aggregate_verify_{(hash(bytes(full_name, "utf-8"))[:8]).hex()}', {
             'input': {

From 2a91b43eaf6d883750216d82e921227b03be3223 Mon Sep 17 00:00:00 2001
From: vbuterin
Date: Wed, 8 Jan 2020 18:19:18 +0800
Subject: [PATCH 2/5] Remove shard block chunking

Only store a 32 byte root for every shard block

Rationale: originally, I added shard block chunking (store 4 chunks for
every shard block instead of one root) to facilitate construction of data
availability roots. However, it turns out that there is an easier
technique. Set the width of the data availability rectangle's rows to be
1/4 the max size of a shard block, so each block would fill multiple rows.
Then, non-full blocks will generally create lots of zero rows. For
example, if the block bodies are `31415926535` and `897932` with a max
size of 32 bytes, the rows might look like this:

```
31415926
53500000
00000000
00000000
89793200
00000000
00000000
00000000
```

Zero rows extend rightward into complete zero rows, and when extending
downward we can count the zero rows and reduce the number of extra rows
we make, so that we only make a new row for every nonzero row in the
original data. This way we get only a close-to-optimal ~4-5x blowup in
the data even if the data has zero rows in the middle.
---
 scripts/build_spec.py        |  2 +-
 specs/phase1/beacon-chain.md | 30 +++++++-----------------------
 specs/phase1/custody-game.md |  5 ++---
 specs/phase1/fraud-proofs.md |  2 +-
 4 files changed, 11 insertions(+), 28 deletions(-)

diff --git a/scripts/build_spec.py b/scripts/build_spec.py
index fa351db2f..6306a13c9 100644
--- a/scripts/build_spec.py
+++ b/scripts/build_spec.py
@@ -48,7 +48,7 @@ from dataclasses import (
 from eth2spec.utils.ssz.ssz_impl import hash_tree_root
 from eth2spec.utils.ssz.ssz_typing import (
     View, boolean, Container, List, Vector, uint64, uint8, bit,
-    ByteVector, ByteList, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
+    ByteList, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
 )
 from eth2spec.utils import bls
 
diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md
index 858e840b3..d1b7d52a0 100644
--- a/specs/phase1/beacon-chain.md
+++ b/specs/phase1/beacon-chain.md
@@ -97,9 +97,8 @@ Configuration is not namespaced. Instead it is strictly an extension;
 | `LIGHT_CLIENT_COMMITTEE_SIZE` | `2**7` (= 128) | |
 | `LIGHT_CLIENT_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
 | `SHARD_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
-| `SHARD_BLOCK_CHUNK_SIZE` | `2**18` (= 262,144) | |
-| `MAX_SHARD_BLOCK_CHUNKS` | `2**2` (= 4) | |
-| `TARGET_SHARD_BLOCK_SIZE` | `3 * 2**16` (= 196,608) | |
+| `MAX_SHARD_BLOCK_SIZE` | `2**20` (= 1,048,576) | |
+| `TARGET_SHARD_BLOCK_SIZE` | `2**18` (= 262,144) | |
 | `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | |
 | `MAX_SHARD_BLOCKS_PER_ATTESTATION` | `len(SHARD_BLOCK_OFFSETS)` | |
 | `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei | |
@@ -297,7 +296,7 @@ class ShardBlockWrapper(Container):
     shard_parent_root: Root
     beacon_parent_root: Root
     slot: Slot
-    body: ByteList[MAX_SHARD_BLOCK_CHUNKS * SHARD_BLOCK_CHUNK_SIZE]
+    body: ByteList[MAX_SHARD_BLOCK_SIZE]
     signature: BLSSignature
 ```
 
@@ -330,7 +329,7 @@ class ShardTransition(Container):
     # Shard block lengths
     shard_block_lengths: List[uint64, MAX_SHARD_BLOCKS_PER_ATTESTATION]
     # Shard data roots
-    shard_data_roots: List[List[Bytes32, MAX_SHARD_BLOCK_CHUNKS], MAX_SHARD_BLOCKS_PER_ATTESTATION]
+    shard_data_roots: List[Bytes32, MAX_SHARD_BLOCKS_PER_ATTESTATION]
     # Intermediate shard states
     shard_states: List[ShardState, MAX_SHARD_BLOCKS_PER_ATTESTATION]
     # Proposer signature aggregate
@@ -396,16 +395,6 @@ def committee_to_compact_committee(state: BeaconState, committee: Sequence[Valid
     return CompactCommittee(pubkeys=pubkeys, compact_validators=compact_validators)
 ```
 
-#### `chunks_to_body_root`
-
-```python
-def chunks_to_body_root(chunks: List[Bytes32, MAX_SHARD_BLOCK_CHUNKS]) -> Root:
-    empty_chunk_root = hash_tree_root(ByteList[SHARD_BLOCK_CHUNK_SIZE]())
-    return hash_tree_root(Vector[Bytes32, MAX_SHARD_BLOCK_CHUNKS](
-        chunks + [empty_chunk_root] * (MAX_SHARD_BLOCK_CHUNKS - len(chunks))
-    ))
-```
-
 #### `compute_shard_from_committee_index`
 
 ```python
@@ -666,20 +655,18 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
             shard_parent_root=shard_parent_root,
             parent_hash=get_block_root_at_slot(state, get_previous_slot(state.slot)),
             slot=offset_slots[i],
-            body_root=chunks_to_body_root(transition.shard_data_roots[i])
+            body_root=transition.shard_data_roots[i]
         ))
         proposers.append(get_shard_proposer_index(state, shard, offset_slots[i]))
         shard_parent_root = hash_tree_root(headers[-1])
 
-    # Verify correct calculation of gas prices and slots and chunk roots
+    # Verify correct calculation of gas prices and slots
     prev_gasprice = state.shard_states[shard].gasprice
     for i in range(len(offset_slots)):
         shard_state = transition.shard_states[i]
         block_length = transition.shard_block_lengths[i]
-        chunks = transition.shard_data_roots[i]
         assert shard_state.gasprice == get_updated_gasprice(prev_gasprice, block_length)
         assert shard_state.slot == offset_slots[i]
-        assert len(chunks) == block_length // SHARD_BLOCK_CHUNK_SIZE
         prev_gasprice = shard_state.gasprice
 
     pubkeys = [state.validators[proposer].pubkey for proposer in proposers]
@@ -724,10 +711,7 @@ def process_crosslink_for_shard(state: BeaconState,
 
         # Attestation <-> shard transition consistency
        assert shard_transition_root == hash_tree_root(shard_transition)
-        assert (
-            attestation.data.head_shard_root
-            == chunks_to_body_root(shard_transition.shard_data_roots[-1])
-        )
+        assert attestation.data.head_shard_root == shard_transition.shard_data_roots[-1]
 
         # Apply transition
         apply_shard_transition(state, shard, shard_transition)
diff --git a/specs/phase1/custody-game.md b/specs/phase1/custody-game.md
index fd35e6515..121f91f97 100644
--- a/specs/phase1/custody-game.md
+++ b/specs/phase1/custody-game.md
@@ -103,7 +103,7 @@ class CustodySlashing(Container):
     whistleblower_index: ValidatorIndex
     shard_transition: ShardTransition
     attestation: Attestation
-    data: ByteList[MAX_SHARD_BLOCK_CHUNKS * SHARD_BLOCK_CHUNK_SIZE]
+    data: ByteList[MAX_SHARD_BLOCK_SIZE]
 ```
 
 #### `SignedCustodySlashing`
@@ -366,8 +366,7 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
     shard_transition = custody_slashing.shard_transition
     assert hash_tree_root(shard_transition) == attestation.shard_transition_root
     # Verify that the provided data matches the shard-transition
-    shard_chunk_roots = shard_transition.shard_data_roots[custody_slashing.data_index]
-    assert hash_tree_root(custody_slashing.data) == chunks_to_body_root(shard_chunk_roots)
+    assert hash_tree_root(custody_slashing.data) == shard_transition.shard_data_roots[custody_slashing.data_index]
 
     # Verify existence and participation of claimed malefactor
     attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
diff --git a/specs/phase1/fraud-proofs.md b/specs/phase1/fraud-proofs.md
index c9368ad2c..0688f5f47 100644
--- a/specs/phase1/fraud-proofs.md
+++ b/specs/phase1/fraud-proofs.md
@@ -50,7 +50,7 @@ def shard_state_transition(shard: Shard,
                            pre_state: Root,
                            previous_beacon_root: Root,
                            proposer_pubkey: BLSPubkey,
-                           block_data: ByteVector[MAX_SHARD_BLOCK_CHUNKS * SHARD_BLOCK_CHUNK_SIZE]) -> Root:
+                           block_data: ByteList[MAX_SHARD_BLOCK_SIZE]) -> Root:
     # We will add something more substantive in phase 2
     return hash(pre_state + hash_tree_root(previous_beacon_root) + hash_tree_root(block_data))
 ```

From 52fb929978685b10ef3b807eba6b461fc1744d08 Mon Sep 17 00:00:00 2001
From: vbuterin
Date: Sat, 11 Jan 2020 12:42:34 +0800
Subject: [PATCH 3/5] Update specs/core/1_beacon-chain.md

---
 specs/phase1/beacon-chain.md | 2 --
 1 file changed, 2 deletions(-)

diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md
index d1b7d52a0..78b3b3d25 100644
--- a/specs/phase1/beacon-chain.md
+++ b/specs/phase1/beacon-chain.md
@@ -35,7 +35,6 @@
     - [`get_previous_slot`](#get_previous_slot)
     - [`pack_compact_validator`](#pack_compact_validator)
     - [`committee_to_compact_committee`](#committee_to_compact_committee)
-    - [`chunks_to_body_root`](#chunks_to_body_root)
     - [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
   - [Beacon state accessors](#beacon-state-accessors)
     - [`get_active_shard_count`](#get_active_shard_count)
@@ -905,4 +904,3 @@ def process_light_client_committee_updates(state: BeaconState) -> None:
         new_committee = get_light_client_committee(state, get_current_epoch(state) + LIGHT_CLIENT_COMMITTEE_PERIOD)
         state.next_light_committee = committee_to_compact_committee(state, new_committee)
 ```
-

From 51f2974678bd0711439ec0f8a7a066f9d9f18bb3 Mon Sep 17 00:00:00 2001
From: Herman Junge
Date: Wed, 29 Jan 2020 13:51:38 +0000
Subject: [PATCH 4/5] Update validator.md

Minor edit
---
 specs/phase0/validator.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md
index 494035bda..5816e0a74 100644
--- a/specs/phase0/validator.md
+++ b/specs/phase0/validator.md
@@ -189,7 +189,7 @@ def is_proposer(state: BeaconState,
 
 *Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot.
 
-*Note*: `BeaconBlock` proposal is distinct from beacon committee assignment, and in a given epoch each responsibility might occur at different a different slot.
+*Note*: `BeaconBlock` proposal is distinct from beacon committee assignment, and in a given epoch each responsibility might occur at a different slot.
 
 ### Lookahead
 

From fe58c78da8ecef9f9bdeabacf87c90bbdb94e0e0 Mon Sep 17 00:00:00 2001
From: terence tsao
Date: Wed, 29 Jan 2020 14:08:48 -0800
Subject: [PATCH 5/5] Fix table

---
 specs/phase1/phase1-fork.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/specs/phase1/phase1-fork.md b/specs/phase1/phase1-fork.md
index 56eee410b..adb0cd236 100644
--- a/specs/phase1/phase1-fork.md
+++ b/specs/phase1/phase1-fork.md
@@ -33,7 +33,7 @@ This document describes the process of moving from Phase 0 to Phase 1 of Ethereu
 Warning: this configuration is not definitive.
 
 | Name | Value |
-| - | - | - |
+| - | - |
 | `PHASE_1_FORK_VERSION` | `Version('0x01000000')` |
 | `INITIAL_ACTIVE_SHARDS` | `2**6` (= 64) |
 | `INITIAL_GASPRICE` | `Gwei(10)` |
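
Reviewer note (not part of any patch above): the row-packing idea described in the [PATCH 2/5] commit message can be made concrete with a minimal sketch. The names `block_to_rows` and `count_nonzero_rows`, the toy sizes, and the `ROW_WIDTH = MAX_BLOCK_SIZE // 4` relationship are illustrative assumptions matching the commit-message example, not spec definitions.

```python
# Illustrative sketch only: toy sizes from the commit-message example,
# not the real constants (the spec uses MAX_SHARD_BLOCK_SIZE = 2**20).
MAX_BLOCK_SIZE = 32
ROW_WIDTH = MAX_BLOCK_SIZE // 4  # width of the data availability rectangle's rows


def block_to_rows(body: bytes) -> list:
    """Pad a block body to MAX_BLOCK_SIZE and split it into fixed-width rows."""
    padded = body + b"\x00" * (MAX_BLOCK_SIZE - len(body))
    return [padded[i:i + ROW_WIDTH] for i in range(0, MAX_BLOCK_SIZE, ROW_WIDTH)]


def count_nonzero_rows(bodies: list) -> int:
    """Only nonzero rows need fresh rows when extending the rectangle downward."""
    rows = [row for body in bodies for row in block_to_rows(body)]
    return sum(1 for row in rows if any(row))


# The two toy bodies from the commit message: 3 of the 8 rows are nonzero,
# so the downward extension can skip most of the rectangle.
print(count_nonzero_rows([b"31415926535", b"897932"]))  # 3
```

This only illustrates the zero-row argument in the rationale; the actual data availability construction is not specified by these patches.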