Remove shard block chunking

Only store a 32-byte root for every shard block

Rationale: originally, I added shard block chunking (storing 4 chunks for every shard block instead of one root) to facilitate construction of data availability roots. However, it turns out that there is an easier technique: set the width of the data availability rectangle's rows to 1/4 the max size of a shard block, so each block fills multiple rows. Non-full blocks will then generally create lots of zero rows. For example, if the block bodies are `31415926535` and `897932` with a max size of 32 bytes, the rows might look like this:

```
31415926
53500000
00000000
00000000
89793200
00000000
00000000
00000000
```

When the rows are extended rightward, zero rows simply extend into complete zero rows, and when extending downward we can count the number of zero rows and reduce the number of extra rows we create, so we only make a new row for every nonzero row in the original data. This way we get only a close-to-optimal ~4-5x blowup in the data even if the data has zero rows in the middle (see the sketch below).
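
To make the row packing and zero-row accounting concrete, here is a minimal sketch. It is not spec code: the constants and helpers (`MAX_BLOCK_SIZE`, `ROW_SIZE`, `pack_bodies_into_rows`, `count_nonzero_rows`) are made up for illustration and use the toy 32-byte max block size from the example above.

```python
# Toy parameters for illustration only; the real values come from the
# phase 1 configuration (e.g. MAX_SHARD_BLOCK_SIZE).
MAX_BLOCK_SIZE = 32
ROW_SIZE = MAX_BLOCK_SIZE // 4  # rows are 1/4 of the max block size


def pack_bodies_into_rows(bodies):
    """Zero-pad each body to MAX_BLOCK_SIZE and split it into fixed-width rows."""
    rows = []
    for body in bodies:
        assert len(body) <= MAX_BLOCK_SIZE
        padded = body + b'\x00' * (MAX_BLOCK_SIZE - len(body))
        rows.extend(padded[i:i + ROW_SIZE] for i in range(0, MAX_BLOCK_SIZE, ROW_SIZE))
    return rows


def count_nonzero_rows(rows):
    """Only nonzero rows need a corresponding extra row when extending downward."""
    return sum(1 for row in rows if any(row))


bodies = [b'31415926535', b'897932']
rows = pack_bodies_into_rows(bodies)
print(len(rows), count_nonzero_rows(rows))  # 8 rows in total, only 3 of them nonzero
```

Skipping the zero rows when extending downward means the number of extra rows scales with the 3 nonzero rows rather than the 8 total rows, which is where the close-to-optimal ~4-5x blowup figure comes from.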
vbuterin 2020-01-08 18:19:18 +08:00 committed by Danny Ryan
parent 7b76808a1c
commit 2a91b43eaf
4 changed files with 11 additions and 28 deletions


@@ -48,7 +48,7 @@ from dataclasses import (
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.utils.ssz.ssz_typing import (
View, boolean, Container, List, Vector, uint64, uint8, bit,
-ByteVector, ByteList, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
+ByteList, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
)
from eth2spec.utils import bls


@@ -97,9 +97,8 @@ Configuration is not namespaced. Instead it is strictly an extension;
| `LIGHT_CLIENT_COMMITTEE_SIZE` | `2**7` (= 128) |
| `LIGHT_CLIENT_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
| `SHARD_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
-| `SHARD_BLOCK_CHUNK_SIZE` | `2**18` (= 262,144) | |
-| `MAX_SHARD_BLOCK_CHUNKS` | `2**2` (= 4) | |
-| `TARGET_SHARD_BLOCK_SIZE` | `3 * 2**16` (= 196,608) | |
+| `MAX_SHARD_BLOCK_SIZE` | `2**20` (= 1,048,576) | |
+| `TARGET_SHARD_BLOCK_SIZE` | `2**18` (= 262,144) | |
| `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | |
| `MAX_SHARD_BLOCKS_PER_ATTESTATION` | `len(SHARD_BLOCK_OFFSETS)` | |
| `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei | |
@@ -297,7 +296,7 @@ class ShardBlockWrapper(Container):
shard_parent_root: Root
beacon_parent_root: Root
slot: Slot
-body: ByteList[MAX_SHARD_BLOCK_CHUNKS * SHARD_BLOCK_CHUNK_SIZE]
+body: ByteList[MAX_SHARD_BLOCK_SIZE]
signature: BLSSignature
```
@@ -330,7 +329,7 @@ class ShardTransition(Container):
# Shard block lengths
shard_block_lengths: List[uint64, MAX_SHARD_BLOCKS_PER_ATTESTATION]
# Shard data roots
-shard_data_roots: List[List[Bytes32, MAX_SHARD_BLOCK_CHUNKS], MAX_SHARD_BLOCKS_PER_ATTESTATION]
+shard_data_roots: List[Bytes32, MAX_SHARD_BLOCKS_PER_ATTESTATION]
# Intermediate shard states
shard_states: List[ShardState, MAX_SHARD_BLOCKS_PER_ATTESTATION]
# Proposer signature aggregate
@@ -396,16 +395,6 @@ def committee_to_compact_committee(state: BeaconState, committee: Sequence[Valid
return CompactCommittee(pubkeys=pubkeys, compact_validators=compact_validators)
```
-#### `chunks_to_body_root`
-```python
-def chunks_to_body_root(chunks: List[Bytes32, MAX_SHARD_BLOCK_CHUNKS]) -> Root:
-empty_chunk_root = hash_tree_root(ByteList[SHARD_BLOCK_CHUNK_SIZE]())
-return hash_tree_root(Vector[Bytes32, MAX_SHARD_BLOCK_CHUNKS](
-chunks + [empty_chunk_root] * (MAX_SHARD_BLOCK_CHUNKS - len(chunks))
-))
-```
#### `compute_shard_from_committee_index`
```python
@@ -666,20 +655,18 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
shard_parent_root=shard_parent_root,
parent_hash=get_block_root_at_slot(state, get_previous_slot(state.slot)),
slot=offset_slots[i],
-body_root=chunks_to_body_root(transition.shard_data_roots[i])
+body_root=transition.shard_data_roots[i]
))
proposers.append(get_shard_proposer_index(state, shard, offset_slots[i]))
shard_parent_root = hash_tree_root(headers[-1])
-# Verify correct calculation of gas prices and slots and chunk roots
+# Verify correct calculation of gas prices and slots
prev_gasprice = state.shard_states[shard].gasprice
for i in range(len(offset_slots)):
shard_state = transition.shard_states[i]
block_length = transition.shard_block_lengths[i]
-chunks = transition.shard_data_roots[i]
assert shard_state.gasprice == get_updated_gasprice(prev_gasprice, block_length)
assert shard_state.slot == offset_slots[i]
-assert len(chunks) == block_length // SHARD_BLOCK_CHUNK_SIZE
prev_gasprice = shard_state.gasprice
pubkeys = [state.validators[proposer].pubkey for proposer in proposers]
@@ -724,10 +711,7 @@ def process_crosslink_for_shard(state: BeaconState,
# Attestation <-> shard transition consistency
assert shard_transition_root == hash_tree_root(shard_transition)
-assert (
-attestation.data.head_shard_root
-== chunks_to_body_root(shard_transition.shard_data_roots[-1])
-)
+assert attestation.data.head_shard_root == shard_transition.shard_data_roots[-1]
# Apply transition
apply_shard_transition(state, shard, shard_transition)


@@ -103,7 +103,7 @@ class CustodySlashing(Container):
whistleblower_index: ValidatorIndex
shard_transition: ShardTransition
attestation: Attestation
-data: ByteList[MAX_SHARD_BLOCK_CHUNKS * SHARD_BLOCK_CHUNK_SIZE]
+data: ByteList[MAX_SHARD_BLOCK_SIZE]
```
#### `SignedCustodySlashing`
@@ -366,8 +366,7 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
shard_transition = custody_slashing.shard_transition
assert hash_tree_root(shard_transition) == attestation.shard_transition_root
# Verify that the provided data matches the shard-transition
-shard_chunk_roots = shard_transition.shard_data_roots[custody_slashing.data_index]
-assert hash_tree_root(custody_slashing.data) == chunks_to_body_root(shard_chunk_roots)
+assert hash_tree_root(custody_slashing.data) == shard_transition.shard_data_roots[custody_slashing.data_index]
# Verify existence and participation of claimed malefactor
attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)


@@ -50,7 +50,7 @@ def shard_state_transition(shard: Shard,
pre_state: Root,
previous_beacon_root: Root,
proposer_pubkey: BLSPubkey,
-block_data: ByteVector[MAX_SHARD_BLOCK_CHUNKS * SHARD_BLOCK_CHUNK_SIZE]) -> Root:
+block_data: ByteList[MAX_SHARD_BLOCK_SIZE]) -> Root:
# We will add something more substantive in phase 2
return hash(pre_state + hash_tree_root(previous_beacon_root) + hash_tree_root(block_data))
```