Optimised updates as suggested by @vbuterin

Justin 2020-11-18 10:33:42 +00:00 committed by GitHub
parent d16900a753
commit 09ec58131d
1 changed file with 54 additions and 47 deletions


@@ -20,6 +20,7 @@
- [`LightClientStore`](#lightclientstore)
- [Light client state updates](#light-client-state-updates)
- [`is_valid_light_client_update`](#is_valid_light_client_update)
- [`apply_light_client_update`](#apply_light_client_update)
- [`process_light_client_update`](#process_light_client_update)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@@ -35,8 +36,8 @@ This document suggests a minimal light client design for the beacon chain that u
| Name | Value |
| - | - |
| `NEXT_SYNC_COMMITTEE_INDEX` | `Index(BeaconState, 'next_sync_committee')` |
| `FINALIZED_ROOT_INDEX` | `Index(BeaconState, 'finalized_checkpoint', 'root')` |
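The two constants above are generalized Merkle indices into `BeaconState`. As a rough illustration (not part of the spec, and using a hypothetical index value), a generalized index encodes both the depth of a proof and the position of the leaf at that depth, which is exactly how the `is_valid_merkle_branch` calls below split it apart:

```python
from math import log2

# Hypothetical generalized index for illustration only; the real values come
# from Index(BeaconState, ...) and depend on the BeaconState SSZ layout.
FINALIZED_ROOT_INDEX = 105

depth = int(log2(FINALIZED_ROOT_INDEX))     # number of sibling hashes in the branch
position = FINALIZED_ROOT_INDEX % 2**depth  # leaf position within the subtree at that depth

assert depth == 6 and position == 41  # holds for the hypothetical value above
```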
## Configuration
@@ -70,14 +71,14 @@ class LightClientSnapshot(Container):
```python
class LightClientUpdate(Container):
    # Update beacon block header
    header: BeaconBlockHeader
    # Next sync committee corresponding to the header
    next_sync_committee: SyncCommittee
    next_sync_committee_branch: Vector[Bytes32, log2(NEXT_SYNC_COMMITTEE_INDEX)]
    # Finality proof for the update header
    finality_header: BeaconBlockHeader
    finality_branch: Vector[Bytes32, log2(FINALIZED_ROOT_INDEX)]
    # Sync committee aggregate signature
    sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE]
    sync_committee_signature: BLSSignature
```
@@ -100,75 +101,81 @@ A light client maintains its state in a `store` object of type `LightClientStore
#### `is_valid_light_client_update`

```python
def is_valid_light_client_update(snapshot: LightClientSnapshot, update: LightClientUpdate) -> bool:
    # Verify update slot is larger than snapshot slot
    assert update.header.slot > snapshot.header.slot

    # Verify update does not skip a sync committee period
    snapshot_period = compute_epoch_at_slot(snapshot.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = compute_epoch_at_slot(update.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    assert update_period in (snapshot_period, snapshot_period + 1)

    # Verify update header root is the finalized root of the finality header, if specified
    if update.finality_header == BeaconBlockHeader():
        signed_header = update.header
        assert update.finality_branch == [ZERO_HASH for _ in range(log2(FINALIZED_ROOT_INDEX))]
    else:
        signed_header = update.finality_header
        assert is_valid_merkle_branch(
            leaf=hash_tree_root(update.header),
            branch=update.finality_branch,
            depth=log2(FINALIZED_ROOT_INDEX),
            index=FINALIZED_ROOT_INDEX % 2**log2(FINALIZED_ROOT_INDEX),
            root=update.finality_header.state_root,
        )

    # Verify update next sync committee if the update period incremented
    if update_period == snapshot_period:
        sync_committee = snapshot.current_sync_committee
        assert update.next_sync_committee_branch == [ZERO_HASH for _ in range(log2(NEXT_SYNC_COMMITTEE_INDEX))]
    else:
        sync_committee = snapshot.next_sync_committee
        assert is_valid_merkle_branch(
            leaf=hash_tree_root(update.next_sync_committee),
            branch=update.next_sync_committee_branch,
            depth=log2(NEXT_SYNC_COMMITTEE_INDEX),
            index=NEXT_SYNC_COMMITTEE_INDEX % 2**log2(NEXT_SYNC_COMMITTEE_INDEX),
            root=update.header.state_root,
        )

    # Verify sync committee has sufficient participants
    assert sum(update.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS

    # Verify sync committee aggregate signature
    participant_pubkeys = [pubkey for (bit, pubkey) in zip(update.sync_committee_bits, sync_committee.pubkeys) if bit]
    domain = compute_domain(DOMAIN_SYNC_COMMITTEE, update.fork_version)
    signing_root = compute_signing_root(signed_header, domain)
    assert bls.FastAggregateVerify(participant_pubkeys, signing_root, update.sync_committee_signature)

    return True
```
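The branch on `finality_header` above can be read as selecting which header the sync committee actually signed. A minimal sketch of that selection, as a hypothetical helper that is not part of the spec:

```python
def signed_header_for(update: LightClientUpdate) -> BeaconBlockHeader:
    # Hypothetical helper mirroring the finality branch in is_valid_light_client_update.
    if update.finality_header == BeaconBlockHeader():
        # No finality proof: the sync committee signed update.header directly.
        return update.header
    # With a finality proof: the committee signed finality_header, which finalizes update.header.
    return update.finality_header
```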
#### `apply_light_client_update`
```python
def apply_light_client_update(snapshot: LightClientSnapshot, update: LightClientUpdate) -> None:
    snapshot_period = compute_epoch_at_slot(snapshot.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = compute_epoch_at_slot(update.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    if update_period == snapshot_period + 1:
        snapshot.current_sync_committee = snapshot.next_sync_committee
        snapshot.next_sync_committee = update.next_sync_committee
    snapshot.header = update.header
```
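As a worked example of the period arithmetic (with assumed preset values `SLOTS_PER_EPOCH = 32` and `EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256`, which are not defined in this document), the committees rotate only when the update header crosses into the next 8192-slot period:

```python
# Assumed preset values for illustration only.
SLOTS_PER_EPOCH = 32
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256

def sync_committee_period(slot: int) -> int:
    # Mirrors compute_epoch_at_slot(slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD above.
    return (slot // SLOTS_PER_EPOCH) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD

assert sync_committee_period(8191) == 0  # still in the snapshot's period: no rotation
assert sync_committee_period(8192) == 1  # one period later: committees rotate forward
```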
#### `process_light_client_update`
```python
def process_light_client_update(store: LightClientStore, update: LightClientUpdate, current_slot: Slot) -> None:
    # Validate update
    assert is_valid_light_client_update(store.snapshot, update)
    store.valid_updates.append(update)

    if sum(update.sync_committee_bits) * 3 > len(update.sync_committee_bits) * 2 and update.finality_header != BeaconBlockHeader():
        # Apply update if 2/3 quorum is reached and we have a finality proof
        apply_light_client_update(store.snapshot, update)
        store.valid_updates = []
    elif current_slot > store.snapshot.header.slot + LIGHT_CLIENT_UPDATE_TIMEOUT:
        # Forced best update when the update timeout has elapsed
        apply_light_client_update(store.snapshot, max(store.valid_updates, key=lambda update: sum(update.sync_committee_bits)))
        store.valid_updates = []
```
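A minimal sketch of how a client might drive `process_light_client_update`, assuming a hypothetical transport `fetch_updates()` and clock `get_current_slot()` that are not part of this document:

```python
def light_client_sync_loop(store: LightClientStore) -> None:
    # Hypothetical driver loop: feed network updates into the store, skipping
    # any update that fails validation.
    for update in fetch_updates():  # assumed network helper
        try:
            process_light_client_update(store, update, get_current_slot())  # assumed clock helper
        except AssertionError:
            continue  # invalid update: discard and keep syncing
```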