From 42eae81013074ee191eaf0429e5575a61c714f60 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 15 Jun 2021 21:52:25 +0800 Subject: [PATCH 01/75] WIP. Rework on_block tests --- .../eth2spec/test/helpers/fork_choice.py | 7 +- .../test/phase0/fork_choice/test_on_block.py | 339 ++++++++++++++++++ tests/formats/fork_choice/README.md | 20 +- tests/generators/fork_choice/main.py | 3 +- 4 files changed, 363 insertions(+), 6 deletions(-) create mode 100644 tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py index f6b007894..48248089b 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py +++ b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py @@ -74,17 +74,20 @@ def on_tick_and_append_step(spec, store, time, test_steps): def run_on_block(spec, store, signed_block, test_steps, valid=True): + yield get_block_file_name(signed_block), signed_block if not valid: try: spec.on_block(store, signed_block) - except AssertionError: + test_steps.append({ + 'block': get_block_file_name(signed_block), + 'valid': True, + }) return else: assert False spec.on_block(store, signed_block) - yield get_block_file_name(signed_block), signed_block test_steps.append({'block': get_block_file_name(signed_block)}) # An on_block step implies receiving block's attestations diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py new file mode 100644 index 000000000..e33c32e58 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -0,0 +1,339 @@ +from eth2spec.utils.ssz.ssz_impl import hash_tree_root + +from eth2spec.test.context import MINIMAL, spec_state_test, with_all_phases, with_presets +from eth2spec.test.helpers.attestations import next_epoch_with_attestations +from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block +from eth2spec.test.helpers.fork_choice import ( + get_genesis_forkchoice_store_and_block, + on_tick_and_append_step, + run_on_block, + tick_and_run_on_block, +) +from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block + + +def apply_next_epoch_with_attestations(spec, state, store, test_steps=None): + if test_steps is None: + test_steps = [] + + _, new_signed_blocks, post_state = next_epoch_with_attestations(spec, state, True, False) + for signed_block in new_signed_blocks: + block = signed_block.message + block_root = hash_tree_root(block) + store.blocks[block_root] = block + store.block_states[block_root] = post_state + yield from tick_and_run_on_block(spec, store, signed_block, test_steps) + last_signed_block = signed_block + + return post_state, store, last_signed_block + + +@with_all_phases +@spec_state_test +def test_basic(spec, state): + # Initialization + test_steps = [] + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # On receiving a block of `GENESIS_SLOT + 1` slot + block = build_empty_block_for_next_slot(spec, state) + signed_block = state_transition_and_sign_block(spec, state, block) + yield from tick_and_run_on_block(spec, store, signed_block, test_steps) + assert 
spec.get_head(store) == signed_block.message.hash_tree_root() + + # On receiving a block of next epoch + store.time = current_time + spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH + block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) + signed_block = state_transition_and_sign_block(spec, state, block) + yield from tick_and_run_on_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + + yield 'steps', test_steps + + # TODO: add tests for justified_root and finalized_root + + +@with_all_phases +@with_presets([MINIMAL], reason="too slow") +@spec_state_test +def test_on_block_checkpoints(spec, state): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # Run for 1 epoch with full attestations + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store, test_steps) + last_block_root = hash_tree_root(last_signed_block.message) + assert spec.get_head(store) == last_block_root + + # Forward 1 epoch + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Mock the finalized_checkpoint and build a block on it + fin_state = store.block_states[last_block_root] + fin_state.finalized_checkpoint = ( + store.block_states[last_block_root].current_justified_checkpoint + ) + + block = build_empty_block_for_next_slot(spec, fin_state) + signed_block = state_transition_and_sign_block(spec, fin_state.copy(), block) + yield from tick_and_run_on_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +def test_on_block_future_block(spec, state): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # do not tick time + + # Fail receiving block of `GENESIS_SLOT + 1` slot + block = build_empty_block_for_next_slot(spec, state) + signed_block = state_transition_and_sign_block(spec, state, block) + run_on_block(spec, store, signed_block, test_steps, valid=False) + + yield 'steps', test_steps + + +# @with_all_phases +# @spec_state_test +# def test_on_block_bad_parent_root(spec, state): +# test_steps = [] +# # Initialization +# store = get_genesis_forkchoice_store(spec, state) +# time = 100 +# on_tick_and_append_step(spec, store, time, test_steps) + +# # Fail receiving block of `GENESIS_SLOT + 1` slot +# block = build_empty_block_for_next_slot(spec, state) +# transition_unsigned_block(spec, state, block) +# block.state_root = state.hash_tree_root() + +# block.parent_root = b'\x45' * 32 + +# signed_block = sign_block(spec, state, block) + +# run_on_block(spec, store, signed_block, test_steps, valid=False) + + +# @with_all_phases +# 
@spec_state_test +# def test_on_block_before_finalized(spec, state): +# test_steps = [] +# # Initialization +# store = get_genesis_forkchoice_store(spec, state) +# time = 100 +# on_tick_and_append_step(spec, store, time, test_steps) + +# store.finalized_checkpoint = spec.Checkpoint( +# epoch=store.finalized_checkpoint.epoch + 2, +# root=store.finalized_checkpoint.root +# ) + +# # Fail receiving block of `GENESIS_SLOT + 1` slot +# block = build_empty_block_for_next_slot(spec, state) +# signed_block = state_transition_and_sign_block(spec, state, block) +# run_on_block(spec, store, signed_block, test_steps, valid=False) + + +# @with_all_phases +# @spec_state_test +# def test_on_block_finalized_skip_slots(spec, state): +# test_steps = [] +# # Initialization +# store = get_genesis_forkchoice_store(spec, state) +# time = 100 +# on_tick_and_append_step(spec, store, time, test_steps) + +# store.finalized_checkpoint = spec.Checkpoint( +# epoch=store.finalized_checkpoint.epoch + 2, +# root=store.finalized_checkpoint.root +# ) + +# # Build block that includes the skipped slots up to finality in chain +# block = build_empty_block(spec, state, spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) +# signed_block = state_transition_and_sign_block(spec, state, block) +# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) +# run_on_block(spec, store, signed_block, test_steps) + + +# @with_all_phases +# @spec_state_test +# def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): +# test_steps = [] +# # Initialization +# transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1) +# block = build_empty_block_for_next_slot(spec, state) +# transition_unsigned_block(spec, state, block) +# block.state_root = state.hash_tree_root() +# store = spec.get_forkchoice_store(state, block) +# store.finalized_checkpoint = spec.Checkpoint( +# epoch=store.finalized_checkpoint.epoch + 2, +# root=store.finalized_checkpoint.root +# ) + +# # First transition through the epoch to ensure no skipped slots +# state, store, _ = apply_next_epoch_with_attestations(spec, state, store) + +# # Now build a block at later slot than finalized epoch +# # Includes finalized block in chain, but not at appropriate skip slot +# block = build_empty_block(spec, state, spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) +# signed_block = state_transition_and_sign_block(spec, state, block) +# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) +# run_on_block(spec, store, signed_block, test_steps, valid=False) + + +# @with_all_phases +# @spec_state_test +# def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): +# test_steps = [] +# # Initialization +# store = get_genesis_forkchoice_store(spec, state) +# time = 0 +# on_tick_and_append_step(spec, store, time, test_steps) + +# next_epoch(spec, state) +# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) +# state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store) +# next_epoch(spec, state) +# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) +# last_block_root = hash_tree_root(last_signed_block.message) + +# # Mock the justified checkpoint +# just_state = store.block_states[last_block_root] +# new_justified = spec.Checkpoint( +# epoch=just_state.current_justified_checkpoint.epoch 
+ 1, +# root=b'\x77' * 32, +# ) +# just_state.current_justified_checkpoint = new_justified + +# block = build_empty_block_for_next_slot(spec, just_state) +# signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block) +# assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH < spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED +# run_on_block(spec, store, signed_block, test_steps) + +# assert store.justified_checkpoint == new_justified + + +# @with_all_phases +# @spec_state_test +# def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): +# test_steps = [] +# # Initialization +# store = get_genesis_forkchoice_store(spec, state) +# time = 0 +# on_tick_and_append_step(spec, store, time, test_steps) + +# next_epoch(spec, state) +# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) +# state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store) +# next_epoch(spec, state) +# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) +# last_block_root = hash_tree_root(last_signed_block.message) + +# # Mock justified block in store +# just_block = build_empty_block_for_next_slot(spec, state) +# # Slot is same as justified checkpoint so does not trigger an override in the store +# just_block.slot = spec.compute_start_slot_at_epoch(store.justified_checkpoint.epoch) +# store.blocks[just_block.hash_tree_root()] = just_block + +# # Step time past safe slots +# time = store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT +# on_tick_and_append_step(spec, store, time, test_steps) +# assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + +# previously_justified = store.justified_checkpoint + +# # Add a series of new blocks with "better" justifications +# best_justified_checkpoint = spec.Checkpoint(epoch=0) +# for i in range(3, 0, -1): +# just_state = store.block_states[last_block_root] +# new_justified = spec.Checkpoint( +# epoch=previously_justified.epoch + i, +# root=just_block.hash_tree_root(), +# ) +# if new_justified.epoch > best_justified_checkpoint.epoch: +# best_justified_checkpoint = new_justified + +# just_state.current_justified_checkpoint = new_justified + +# block = build_empty_block_for_next_slot(spec, just_state) +# signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block) + +# run_on_block(spec, store, signed_block, test_steps) + +# assert store.justified_checkpoint == previously_justified +# # ensure the best from the series was stored +# assert store.best_justified_checkpoint == best_justified_checkpoint + + +# @with_all_phases +# @spec_state_test +# def test_on_block_outside_safe_slots_but_finality(spec, state): +# test_steps = [] +# # Initialization +# store = get_genesis_forkchoice_store(spec, state) +# time = 100 +# on_tick_and_append_step(spec, store, time, test_steps) + +# next_epoch(spec, state) +# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) +# state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store) +# next_epoch(spec, state) +# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) +# last_block_root = hash_tree_root(last_signed_block.message) + +# # Mock justified block in store +# just_block = build_empty_block_for_next_slot(spec, state) +# # Slot is same as justified checkpoint so 
does not trigger an override in the store
+# just_block.slot = spec.compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
+# store.blocks[just_block.hash_tree_root()] = just_block
+
+# # Step time past safe slots
+# time = store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT
+# on_tick_and_append_step(spec, store, time, test_steps)
+# assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
+
+# # Mock justified and finalized update in state
+# just_fin_state = store.block_states[last_block_root]
+# new_justified = spec.Checkpoint(
+#     epoch=store.justified_checkpoint.epoch + 1,
+#     root=just_block.hash_tree_root(),
+# )
+# new_finalized = spec.Checkpoint(
+#     epoch=store.finalized_checkpoint.epoch + 1,
+#     root=just_block.parent_root,
+# )
+# just_fin_state.current_justified_checkpoint = new_justified
+# just_fin_state.finalized_checkpoint = new_finalized
+
+# # Build and add block that includes the new justified/finalized info
+# block = build_empty_block_for_next_slot(spec, just_fin_state)
+# signed_block = state_transition_and_sign_block(spec, deepcopy(just_fin_state), block)
+
+# run_on_block(spec, store, signed_block, test_steps)
+
+# assert store.finalized_checkpoint == new_finalized
+# assert store.justified_checkpoint == new_justified
diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md
index 832ce9dd1..199b93784 100644
--- a/tests/formats/fork_choice/README.md
+++ b/tests/formats/fork_choice/README.md
@@ -28,7 +28,11 @@ The steps to execute in sequence. There may be multiple items of the following t
 The parameter that is required for executing `on_tick(store, time)`.
 
 ```yaml
-{ tick: int } -- to execute `on_tick(store, time)`
+{
+    tick: int -- to execute `on_tick(store, time)`.
+    valid: bool -- optional, default to `True`.
+           If it's `False`, this execution step is expected to be invalid.
+}
 ```
 
 After this step, the `store` object may have been updated.
@@ -38,7 +42,12 @@
 The parameter that is required for executing `on_attestation(store, attestation)`.
 
 ```yaml
-{ attestation: string } -- the name of the `attestation_<32-byte-root>.ssz_snappy` file. To execute `on_attestation(store, attestation)` with the given attestation.
+{
+    attestation: string -- the name of the `attestation_<32-byte-root>.ssz_snappy` file.
+                 To execute `on_attestation(store, attestation)` with the given attestation.
+    valid: bool -- optional, default to `True`.
+           If it's `False`, this execution step is expected to be invalid.
+}
 ```
 The file is located in the same folder (see below).
@@ -49,7 +58,12 @@
 The parameter that is required for executing `on_block(store, block)`.
 
 ```yaml
-{ block: string } -- the name of the `block_<32-byte-root>.ssz_snappy` file. To execute `on_block(store, block)` with the given attestation.
+{
+    block: string -- the name of the `block_<32-byte-root>.ssz_snappy` file.
+           To execute `on_block(store, block)` with the given block.
+    valid: bool -- optional, default to `True`.
+           If it's `False`, this execution step is expected to be invalid.
+}
 ```
 The file is located in the same folder (see below).
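Note: the following is an illustrative, non-normative sketch (not part of this patch series) of how a client-side runner might consume the step objects documented above. It assumes a `spec` object exposing `on_tick`/`on_attestation`/`on_block`/`get_head`, a `store` initialized from the `anchor_state`/`anchor_block` outputs yielded by the tests, and a hypothetical `load_ssz` callback that resolves the named `.ssz_snappy` files:

```python
from eth_utils import encode_hex


def run_fork_choice_steps(spec, store, steps, load_ssz):
    # Hypothetical consumer of the test format above; `spec`, `store`, and
    # `load_ssz` are assumed to come from the client's test harness.
    for step in steps:
        expected_valid = step.get('valid', True)
        if 'checks' in step:
            # Assertions against the current store; keys mirror the generator.
            checks = step['checks']
            if 'time' in checks:
                assert int(store.time) == int(checks['time'])
            if 'head' in checks:
                assert encode_hex(spec.get_head(store)) == checks['head']['root']
            continue
        try:
            if 'tick' in step:
                spec.on_tick(store, step['tick'])
            elif 'attestation' in step:
                spec.on_attestation(store, load_ssz(step['attestation']))
            elif 'block' in step:
                signed_block = load_ssz(step['block'])
                spec.on_block(store, signed_block)
                # Per the helpers above, an on_block step implies receiving
                # the block's attestations as well.
                for attestation in signed_block.message.body.attestations:
                    spec.on_attestation(store, attestation)
        except AssertionError:
            # A step tagged `valid: false` is expected to fail exactly here.
            assert not expected_valid
            continue
        assert expected_valid
```

The `try`/`except AssertionError` mirrors how the helpers above classify invalid steps: a step tagged `valid: false` must fail, while a failure on any other step is a real test failure.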
diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index f162d9564..9b6325ccf 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -4,7 +4,8 @@ from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE if __name__ == "__main__": phase_0_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [ - 'get_head', + # 'get_head', + 'on_block', ]} # No additional Altair specific finality tests, yet. altair_mods = phase_0_mods From fb2465db45161d468aa5ffd6d8cbee5f664fc36b Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 18 Jun 2021 17:39:46 +0800 Subject: [PATCH 02/75] Rework `on_block` unit tests --- .../eth2spec/test/helpers/fork_choice.py | 55 +++- .../test/phase0/fork_choice/test_get_head.py | 20 +- .../test/phase0/fork_choice/test_on_block.py | 282 +++--------------- .../unittests/fork_choice/test_on_block.py | 205 +++++-------- tests/generators/fork_choice/main.py | 2 +- 5 files changed, 164 insertions(+), 400 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py index 48248089b..bac3d1ff5 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py +++ b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py @@ -1,4 +1,5 @@ from eth_utils import encode_hex +from eth2spec.test.helpers.attestations import next_epoch_with_attestations def get_anchor_root(spec, state): @@ -18,7 +19,7 @@ def add_block_to_store(spec, store, signed_block): spec.on_block(store, signed_block) -def tick_and_run_on_block(spec, store, signed_block, test_steps=None): +def tick_and_add_block(spec, store, signed_block, test_steps=None, valid=True): if test_steps is None: test_steps = [] @@ -28,7 +29,7 @@ def tick_and_run_on_block(spec, store, signed_block, test_steps=None): if store.time < block_time: on_tick_and_append_step(spec, store, block_time, test_steps) - yield from run_on_block(spec, store, signed_block, test_steps) + yield from add_block(spec, store, signed_block, test_steps, valid=valid) def tick_and_run_on_attestation(spec, store, attestation, test_steps=None): @@ -73,28 +74,47 @@ def on_tick_and_append_step(spec, store, time, test_steps): test_steps.append({'tick': int(time)}) -def run_on_block(spec, store, signed_block, test_steps, valid=True): - yield get_block_file_name(signed_block), signed_block +def run_on_block(spec, store, signed_block, valid=True): if not valid: try: spec.on_block(store, signed_block) except AssertionError: - test_steps.append({ - 'block': get_block_file_name(signed_block), - 'valid': True, - }) return else: assert False spec.on_block(store, signed_block) + assert store.blocks[signed_block.message.hash_tree_root()] == signed_block.message + + +def add_block(spec, store, signed_block, test_steps=None, valid=True): + if test_steps is None: + test_steps = [] + + yield get_block_file_name(signed_block), signed_block + + if not valid: + try: + run_on_block(spec, store, signed_block, valid=True) + except AssertionError: + test_steps.append({ + 'block': get_block_file_name(signed_block), + 'valid': False, + }) + return + else: + assert False + + run_on_block(spec, store, signed_block, valid=True) test_steps.append({'block': get_block_file_name(signed_block)}) # An on_block step implies receiving block's attestations for attestation in signed_block.message.body.attestations: spec.on_attestation(store, attestation) - assert store.blocks[signed_block.message.hash_tree_root()] == 
signed_block.message + block_root = signed_block.message.hash_tree_root() + assert store.blocks[block_root] == signed_block.message + assert store.block_states[block_root].hash_tree_root() == signed_block.message.state_root test_steps.append({ 'checks': { 'time': int(store.time), @@ -113,3 +133,20 @@ def get_formatted_head_output(spec, store): 'slot': int(slot), 'root': encode_hex(head), } + + +def apply_next_epoch_with_attestations(spec, state, store, test_steps=None): + if test_steps is None: + test_steps = [] + + _, new_signed_blocks, post_state = next_epoch_with_attestations(spec, state, True, False) + for signed_block in new_signed_blocks: + block = signed_block.message + yield from tick_and_add_block(spec, store, signed_block, test_steps) + block_root = block.hash_tree_root() + assert store.blocks[block_root] == block + last_signed_block = signed_block + + assert store.block_states[block_root].hash_tree_root() == post_state.hash_tree_root() + + return post_state, store, last_signed_block diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py index bce886931..12b261e4e 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py @@ -11,12 +11,12 @@ from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.constants import MINIMAL from eth2spec.test.helpers.fork_choice import ( tick_and_run_on_attestation, - tick_and_run_on_block, + tick_and_add_block, get_anchor_root, get_genesis_forkchoice_store_and_block, get_formatted_head_output, on_tick_and_append_step, - run_on_block, + add_block, ) from eth2spec.test.helpers.state import ( next_epoch, @@ -68,12 +68,12 @@ def test_chain_no_attestations(spec, state): # On receiving a block of `GENESIS_SLOT + 1` slot block_1 = build_empty_block_for_next_slot(spec, state) signed_block_1 = state_transition_and_sign_block(spec, state, block_1) - yield from tick_and_run_on_block(spec, store, signed_block_1, test_steps) + yield from tick_and_add_block(spec, store, signed_block_1, test_steps) # On receiving a block of next epoch block_2 = build_empty_block_for_next_slot(spec, state) signed_block_2 = state_transition_and_sign_block(spec, state, block_2) - yield from tick_and_run_on_block(spec, store, signed_block_2, test_steps) + yield from tick_and_add_block(spec, store, signed_block_2, test_steps) assert spec.get_head(store) == spec.hash_tree_root(block_2) test_steps.append({ @@ -107,14 +107,14 @@ def test_split_tie_breaker_no_attestations(spec, state): block_1_state = genesis_state.copy() block_1 = build_empty_block_for_next_slot(spec, block_1_state) signed_block_1 = state_transition_and_sign_block(spec, block_1_state, block_1) - yield from tick_and_run_on_block(spec, store, signed_block_1, test_steps) + yield from tick_and_add_block(spec, store, signed_block_1, test_steps) # additional block at slot 1 block_2_state = genesis_state.copy() block_2 = build_empty_block_for_next_slot(spec, block_2_state) block_2.body.graffiti = b'\x42' * 32 signed_block_2 = state_transition_and_sign_block(spec, block_2_state, block_2) - yield from tick_and_run_on_block(spec, store, signed_block_2, test_steps) + yield from tick_and_add_block(spec, store, signed_block_2, test_steps) highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2)) assert spec.get_head(store) == highest_root @@ -150,14 +150,14 @@ def 
test_shorter_chain_but_heavier_weight(spec, state): for _ in range(3): long_block = build_empty_block_for_next_slot(spec, long_state) signed_long_block = state_transition_and_sign_block(spec, long_state, long_block) - yield from tick_and_run_on_block(spec, store, signed_long_block, test_steps) + yield from tick_and_add_block(spec, store, signed_long_block, test_steps) # build short tree short_state = genesis_state.copy() short_block = build_empty_block_for_next_slot(spec, short_state) short_block.body.graffiti = b'\x42' * 32 signed_short_block = state_transition_and_sign_block(spec, short_state, short_block) - yield from tick_and_run_on_block(spec, store, signed_short_block, test_steps) + yield from tick_and_add_block(spec, store, signed_short_block, test_steps) short_attestation = get_valid_attestation(spec, short_state, short_block.slot, signed=True) yield from tick_and_run_on_attestation(spec, store, short_attestation, test_steps) @@ -200,7 +200,7 @@ def test_filtered_block_tree(spec, state): current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time on_tick_and_append_step(spec, store, current_time, test_steps) for signed_block in signed_blocks: - yield from run_on_block(spec, store, signed_block, test_steps) + yield from add_block(spec, store, signed_block, test_steps) assert store.justified_checkpoint == state.current_justified_checkpoint @@ -247,7 +247,7 @@ def test_filtered_block_tree(spec, state): on_tick_and_append_step(spec, store, current_time, test_steps) # include rogue block and associated attestations in the store - yield from run_on_block(spec, store, signed_rogue_block, test_steps) + yield from add_block(spec, store, signed_rogue_block, test_steps) for attestation in attestations: yield from tick_and_run_on_attestation(spec, store, attestation, test_steps) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index e33c32e58..e9f2fae63 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -1,38 +1,30 @@ from eth2spec.utils.ssz.ssz_impl import hash_tree_root from eth2spec.test.context import MINIMAL, spec_state_test, with_all_phases, with_presets -from eth2spec.test.helpers.attestations import next_epoch_with_attestations -from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, + build_empty_block, + transition_unsigned_block, + sign_block, +) from eth2spec.test.helpers.fork_choice import ( get_genesis_forkchoice_store_and_block, on_tick_and_append_step, - run_on_block, - tick_and_run_on_block, + add_block, + tick_and_add_block, + apply_next_epoch_with_attestations, +) +from eth2spec.test.helpers.state import ( + next_epoch, + state_transition_and_sign_block, ) -from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block - - -def apply_next_epoch_with_attestations(spec, state, store, test_steps=None): - if test_steps is None: - test_steps = [] - - _, new_signed_blocks, post_state = next_epoch_with_attestations(spec, state, True, False) - for signed_block in new_signed_blocks: - block = signed_block.message - block_root = hash_tree_root(block) - store.blocks[block_root] = block - store.block_states[block_root] = post_state - yield from tick_and_run_on_block(spec, store, signed_block, test_steps) - last_signed_block = 
signed_block - - return post_state, store, last_signed_block @with_all_phases @spec_state_test def test_basic(spec, state): - # Initialization test_steps = [] + # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) yield 'anchor_state', state yield 'anchor_block', anchor_block @@ -43,14 +35,14 @@ def test_basic(spec, state): # On receiving a block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) - yield from tick_and_run_on_block(spec, store, signed_block, test_steps) + yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() # On receiving a block of next epoch store.time = current_time + spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) signed_block = state_transition_and_sign_block(spec, state, block) - yield from tick_and_run_on_block(spec, store, signed_block, test_steps) + yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() yield 'steps', test_steps @@ -74,6 +66,7 @@ def test_on_block_checkpoints(spec, state): # Run for 1 epoch with full attestations next_epoch(spec, state) on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store, test_steps) last_block_root = hash_tree_root(last_signed_block.message) assert spec.get_head(store) == last_block_root @@ -90,7 +83,7 @@ def test_on_block_checkpoints(spec, state): block = build_empty_block_for_next_slot(spec, fin_state) signed_block = state_transition_and_sign_block(spec, fin_state.copy(), block) - yield from tick_and_run_on_block(spec, store, signed_block, test_steps) + yield from tick_and_add_block(spec, store, signed_block, test_steps) assert spec.get_head(store) == signed_block.message.hash_tree_root() yield 'steps', test_steps @@ -107,233 +100,36 @@ def test_on_block_future_block(spec, state): on_tick_and_append_step(spec, store, current_time, test_steps) assert store.time == current_time - # do not tick time - + # Do NOT tick time to `GENESIS_SLOT + 1` slot # Fail receiving block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) - run_on_block(spec, store, signed_block, test_steps, valid=False) + yield from add_block(spec, store, signed_block, test_steps, valid=False) yield 'steps', test_steps -# @with_all_phases -# @spec_state_test -# def test_on_block_bad_parent_root(spec, state): -# test_steps = [] -# # Initialization -# store = get_genesis_forkchoice_store(spec, state) -# time = 100 -# on_tick_and_append_step(spec, store, time, test_steps) +@with_all_phases +@spec_state_test +def test_on_block_bad_parent_root(spec, state): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time -# # Fail receiving block of `GENESIS_SLOT + 1` slot -# block = build_empty_block_for_next_slot(spec, state) -# transition_unsigned_block(spec, 
state, block) -# block.state_root = state.hash_tree_root() + # Fail receiving block of `GENESIS_SLOT + 1` slot + block = build_empty_block_for_next_slot(spec, state) + transition_unsigned_block(spec, state, block) + block.state_root = state.hash_tree_root() -# block.parent_root = b'\x45' * 32 + block.parent_root = b'\x45' * 32 -# signed_block = sign_block(spec, state, block) + signed_block = sign_block(spec, state, block) -# run_on_block(spec, store, signed_block, test_steps, valid=False) + yield from add_block(spec, store, signed_block, test_steps, valid=False) - -# @with_all_phases -# @spec_state_test -# def test_on_block_before_finalized(spec, state): -# test_steps = [] -# # Initialization -# store = get_genesis_forkchoice_store(spec, state) -# time = 100 -# on_tick_and_append_step(spec, store, time, test_steps) - -# store.finalized_checkpoint = spec.Checkpoint( -# epoch=store.finalized_checkpoint.epoch + 2, -# root=store.finalized_checkpoint.root -# ) - -# # Fail receiving block of `GENESIS_SLOT + 1` slot -# block = build_empty_block_for_next_slot(spec, state) -# signed_block = state_transition_and_sign_block(spec, state, block) -# run_on_block(spec, store, signed_block, test_steps, valid=False) - - -# @with_all_phases -# @spec_state_test -# def test_on_block_finalized_skip_slots(spec, state): -# test_steps = [] -# # Initialization -# store = get_genesis_forkchoice_store(spec, state) -# time = 100 -# on_tick_and_append_step(spec, store, time, test_steps) - -# store.finalized_checkpoint = spec.Checkpoint( -# epoch=store.finalized_checkpoint.epoch + 2, -# root=store.finalized_checkpoint.root -# ) - -# # Build block that includes the skipped slots up to finality in chain -# block = build_empty_block(spec, state, spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) -# signed_block = state_transition_and_sign_block(spec, state, block) -# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) -# run_on_block(spec, store, signed_block, test_steps) - - -# @with_all_phases -# @spec_state_test -# def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): -# test_steps = [] -# # Initialization -# transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1) -# block = build_empty_block_for_next_slot(spec, state) -# transition_unsigned_block(spec, state, block) -# block.state_root = state.hash_tree_root() -# store = spec.get_forkchoice_store(state, block) -# store.finalized_checkpoint = spec.Checkpoint( -# epoch=store.finalized_checkpoint.epoch + 2, -# root=store.finalized_checkpoint.root -# ) - -# # First transition through the epoch to ensure no skipped slots -# state, store, _ = apply_next_epoch_with_attestations(spec, state, store) - -# # Now build a block at later slot than finalized epoch -# # Includes finalized block in chain, but not at appropriate skip slot -# block = build_empty_block(spec, state, spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) -# signed_block = state_transition_and_sign_block(spec, state, block) -# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) -# run_on_block(spec, store, signed_block, test_steps, valid=False) - - -# @with_all_phases -# @spec_state_test -# def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): -# test_steps = [] -# # Initialization -# store = get_genesis_forkchoice_store(spec, state) -# time = 0 -# on_tick_and_append_step(spec, store, time, test_steps) - -# 
next_epoch(spec, state) -# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) -# state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store) -# next_epoch(spec, state) -# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) -# last_block_root = hash_tree_root(last_signed_block.message) - -# # Mock the justified checkpoint -# just_state = store.block_states[last_block_root] -# new_justified = spec.Checkpoint( -# epoch=just_state.current_justified_checkpoint.epoch + 1, -# root=b'\x77' * 32, -# ) -# just_state.current_justified_checkpoint = new_justified - -# block = build_empty_block_for_next_slot(spec, just_state) -# signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block) -# assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH < spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED -# run_on_block(spec, store, signed_block, test_steps) - -# assert store.justified_checkpoint == new_justified - - -# @with_all_phases -# @spec_state_test -# def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): -# test_steps = [] -# # Initialization -# store = get_genesis_forkchoice_store(spec, state) -# time = 0 -# on_tick_and_append_step(spec, store, time, test_steps) - -# next_epoch(spec, state) -# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) -# state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store) -# next_epoch(spec, state) -# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) -# last_block_root = hash_tree_root(last_signed_block.message) - -# # Mock justified block in store -# just_block = build_empty_block_for_next_slot(spec, state) -# # Slot is same as justified checkpoint so does not trigger an override in the store -# just_block.slot = spec.compute_start_slot_at_epoch(store.justified_checkpoint.epoch) -# store.blocks[just_block.hash_tree_root()] = just_block - -# # Step time past safe slots -# time = store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT -# on_tick_and_append_step(spec, store, time, test_steps) -# assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - -# previously_justified = store.justified_checkpoint - -# # Add a series of new blocks with "better" justifications -# best_justified_checkpoint = spec.Checkpoint(epoch=0) -# for i in range(3, 0, -1): -# just_state = store.block_states[last_block_root] -# new_justified = spec.Checkpoint( -# epoch=previously_justified.epoch + i, -# root=just_block.hash_tree_root(), -# ) -# if new_justified.epoch > best_justified_checkpoint.epoch: -# best_justified_checkpoint = new_justified - -# just_state.current_justified_checkpoint = new_justified - -# block = build_empty_block_for_next_slot(spec, just_state) -# signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block) - -# run_on_block(spec, store, signed_block, test_steps) - -# assert store.justified_checkpoint == previously_justified -# # ensure the best from the series was stored -# assert store.best_justified_checkpoint == best_justified_checkpoint - - -# @with_all_phases -# @spec_state_test -# def test_on_block_outside_safe_slots_but_finality(spec, state): -# test_steps = [] -# # Initialization -# store = get_genesis_forkchoice_store(spec, state) -# time = 100 -# 
on_tick_and_append_step(spec, store, time, test_steps) - -# next_epoch(spec, state) -# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) -# state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store) -# next_epoch(spec, state) -# on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) -# last_block_root = hash_tree_root(last_signed_block.message) - -# # Mock justified block in store -# just_block = build_empty_block_for_next_slot(spec, state) -# # Slot is same as justified checkpoint so does not trigger an override in the store -# just_block.slot = spec.compute_start_slot_at_epoch(store.justified_checkpoint.epoch) -# store.blocks[just_block.hash_tree_root()] = just_block - -# # Step time past safe slots -# time = store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT -# on_tick_and_append_step(spec, store, time, test_steps) -# assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - -# # Mock justified and finalized update in state -# just_fin_state = store.block_states[last_block_root] -# new_justified = spec.Checkpoint( -# epoch=store.justified_checkpoint.epoch + 1, -# root=just_block.hash_tree_root(), -# ) -# new_finalized = spec.Checkpoint( -# epoch=store.finalized_checkpoint.epoch + 1, -# root=just_block.parent_root, -# ) -# just_fin_state.current_justified_checkpoint = new_justified -# just_fin_state.finalized_checkpoint = new_finalized - -# # Build and add block that includes the new justified/finalized info -# block = build_empty_block_for_next_slot(spec, just_fin_state) -# signed_block = state_transition_and_sign_block(spec, deepcopy(just_fin_state), block) - -# run_on_block(spec, store, signed_block, test_steps) - -# assert store.finalized_checkpoint == new_finalized -# assert store.justified_checkpoint == new_justified + yield 'steps', test_steps diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py index b1862d093..ce2ab95f4 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py @@ -1,123 +1,17 @@ from copy import deepcopy from eth2spec.utils.ssz.ssz_impl import hash_tree_root -from eth2spec.test.context import with_all_phases, spec_state_test -from eth2spec.test.helpers.attestations import next_epoch_with_attestations -from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block, transition_unsigned_block, \ - build_empty_block -from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store +from eth2spec.test.context import with_all_phases, spec_state_test, with_phases, PHASE0 +from eth2spec.test.helpers.block import build_empty_block_for_next_slot, transition_unsigned_block, \ + build_empty_block, sign_block +from eth2spec.test.helpers.fork_choice import ( + get_genesis_forkchoice_store, + run_on_block, + apply_next_epoch_with_attestations, +) from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block, transition_to -def run_on_block(spec, store, signed_block, valid=True): - if not valid: - try: - spec.on_block(store, signed_block) - except AssertionError: - return - else: - assert False - - spec.on_block(store, signed_block) - assert store.blocks[hash_tree_root(signed_block.message)] 
== signed_block.message - - -def apply_next_epoch_with_attestations(spec, state, store): - _, new_signed_blocks, post_state = next_epoch_with_attestations(spec, state, True, False) - for signed_block in new_signed_blocks: - block = signed_block.message - block_root = hash_tree_root(block) - store.blocks[block_root] = block - store.block_states[block_root] = post_state - last_signed_block = signed_block - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) - return post_state, store, last_signed_block - - -@with_all_phases -@spec_state_test -def test_basic(spec, state): - # Initialization - store = get_genesis_forkchoice_store(spec, state) - time = 100 - spec.on_tick(store, time) - assert store.time == time - - # On receiving a block of `GENESIS_SLOT + 1` slot - block = build_empty_block_for_next_slot(spec, state) - signed_block = state_transition_and_sign_block(spec, state, block) - run_on_block(spec, store, signed_block) - - # On receiving a block of next epoch - store.time = time + spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH - block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH) - signed_block = state_transition_and_sign_block(spec, state, block) - - run_on_block(spec, store, signed_block) - - # TODO: add tests for justified_root and finalized_root - - -@with_all_phases -@spec_state_test -def test_on_block_checkpoints(spec, state): - # Initialization - store = get_genesis_forkchoice_store(spec, state) - time = 100 - spec.on_tick(store, time) - - next_epoch(spec, state) - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) - state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store) - next_epoch(spec, state) - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) - last_block_root = hash_tree_root(last_signed_block.message) - - # Mock the finalized_checkpoint - fin_state = store.block_states[last_block_root] - fin_state.finalized_checkpoint = ( - store.block_states[last_block_root].current_justified_checkpoint - ) - - block = build_empty_block_for_next_slot(spec, fin_state) - signed_block = state_transition_and_sign_block(spec, deepcopy(fin_state), block) - run_on_block(spec, store, signed_block) - - -@with_all_phases -@spec_state_test -def test_on_block_future_block(spec, state): - # Initialization - store = get_genesis_forkchoice_store(spec, state) - - # do not tick time - - # Fail receiving block of `GENESIS_SLOT + 1` slot - block = build_empty_block_for_next_slot(spec, state) - signed_block = state_transition_and_sign_block(spec, state, block) - run_on_block(spec, store, signed_block, False) - - -@with_all_phases -@spec_state_test -def test_on_block_bad_parent_root(spec, state): - # Initialization - store = get_genesis_forkchoice_store(spec, state) - time = 100 - spec.on_tick(store, time) - - # Fail receiving block of `GENESIS_SLOT + 1` slot - block = build_empty_block_for_next_slot(spec, state) - transition_unsigned_block(spec, state, block) - block.state_root = state.hash_tree_root() - - block.parent_root = b'\x45' * 32 - - signed_block = sign_block(spec, state, block) - - run_on_block(spec, store, signed_block, False) - - @with_all_phases @spec_state_test def test_on_block_before_finalized(spec, state): @@ -134,7 +28,7 @@ def test_on_block_before_finalized(spec, state): # Fail receiving block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, state) signed_block = state_transition_and_sign_block(spec, state, block) - 
run_on_block(spec, store, signed_block, False) + run_on_block(spec, store, signed_block, valid=False) @with_all_phases @@ -166,23 +60,31 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): transition_unsigned_block(spec, state, block) block.state_root = state.hash_tree_root() store = spec.get_forkchoice_store(state, block) - store.finalized_checkpoint = spec.Checkpoint( - epoch=store.finalized_checkpoint.epoch + 2, - root=store.finalized_checkpoint.root - ) - # First transition through the epoch to ensure no skipped slots - state, store, _ = apply_next_epoch_with_attestations(spec, state, store) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + spec.on_tick(store, current_time) + assert store.time == current_time + + pre_finalized_checkpoint_epoch = store.finalized_checkpoint.epoch + + # Finalized + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations(spec, state, store) + assert store.finalized_checkpoint.epoch == pre_finalized_checkpoint_epoch + 1 # Now build a block at later slot than finalized epoch # Includes finalized block in chain, but not at appropriate skip slot - block = build_empty_block(spec, state, spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) - signed_block = state_transition_and_sign_block(spec, state, block) + pre_state = store.block_states[block.hash_tree_root()] + block = build_empty_block(spec, + state=pre_state, + slot=spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) + signed_block = sign_block(spec, pre_state, block) + spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, signed_block, False) + run_on_block(spec, store, signed_block, valid=False) -@with_all_phases +@with_phases([PHASE0]) @spec_state_test def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): # Initialization @@ -192,21 +94,33 @@ def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): next_epoch(spec, state) spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) - state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store) + state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store) next_epoch(spec, state) spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) - last_block_root = hash_tree_root(last_signed_block.message) + last_block = last_signed_block.message + last_block_root = last_block.hash_tree_root() - # Mock the justified checkpoint + # NOTE: Mock the justified checkpoint just_state = store.block_states[last_block_root] new_justified = spec.Checkpoint( epoch=just_state.current_justified_checkpoint.epoch + 1, root=b'\x77' * 32, ) - just_state.current_justified_checkpoint = new_justified + just_state.current_justified_checkpoint = new_justified # Mutate `store` + + assert store.block_states[last_block_root].hash_tree_root() == just_state.hash_tree_root() block = build_empty_block_for_next_slot(spec, just_state) - signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block) + + # NOTE: Mock store so that the modified state could be accessed + parent_block = last_signed_block.message.copy() + parent_block.state_root = just_state.hash_tree_root() + store.blocks[block.parent_root] = parent_block + store.block_states[block.parent_root] = just_state.copy() + assert block.parent_root in store.blocks.keys() + assert block.parent_root in 
store.block_states.keys() + + signed_block = state_transition_and_sign_block(spec, just_state.copy(), block) assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH < spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED run_on_block(spec, store, signed_block) @@ -223,10 +137,10 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): next_epoch(spec, state) spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) - state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store) + state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store) last_block_root = hash_tree_root(last_signed_block.message) - # Mock fictitious justified checkpoint in store + # NOTE: Mock fictitious justified checkpoint in store store.justified_checkpoint = spec.Checkpoint( epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot), root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000") @@ -248,6 +162,7 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): # Add a series of new blocks with "better" justifications best_justified_checkpoint = spec.Checkpoint(epoch=0) for i in range(3, 0, -1): + # Mutate store just_state = store.block_states[last_block_root] new_justified = spec.Checkpoint( epoch=previously_justified.epoch + i, @@ -261,6 +176,14 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): block = build_empty_block_for_next_slot(spec, just_state) signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block) + # NOTE: Mock store so that the modified state could be accessed + parent_block = store.blocks[last_block_root].copy() + parent_block.state_root = just_state.hash_tree_root() + store.blocks[block.parent_root] = parent_block + store.block_states[block.parent_root] = just_state.copy() + assert block.parent_root in store.blocks.keys() + assert block.parent_root in store.block_states.keys() + run_on_block(spec, store, signed_block) assert store.justified_checkpoint == previously_justified @@ -273,15 +196,15 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): def test_on_block_outside_safe_slots_but_finality(spec, state): # Initialization store = get_genesis_forkchoice_store(spec, state) - time = 100 + time = 0 spec.on_tick(store, time) next_epoch(spec, state) spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) - state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store) + state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store) last_block_root = hash_tree_root(last_signed_block.message) - # Mock fictitious justified checkpoint in store + # NOTE: Mock fictitious justified checkpoint in store store.justified_checkpoint = spec.Checkpoint( epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot), root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000") @@ -290,7 +213,7 @@ def test_on_block_outside_safe_slots_but_finality(spec, state): next_epoch(spec, state) spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) - # Create new higher justified checkpoint not in branch of store's justified checkpoint + # NOTE: Mock a new higher justified checkpoint not in branch of store's justified checkpoint just_block = build_empty_block_for_next_slot(spec, state) store.blocks[just_block.hash_tree_root()] = just_block @@ -298,7 +221,7 @@ 
def test_on_block_outside_safe_slots_but_finality(spec, state): spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT) assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - # Mock justified and finalized update in state + # NOTE: Mock justified and finalized update in state just_fin_state = store.block_states[last_block_root] new_justified = spec.Checkpoint( epoch=spec.compute_epoch_at_slot(just_block.slot) + 1, @@ -317,6 +240,14 @@ def test_on_block_outside_safe_slots_but_finality(spec, state): block = build_empty_block_for_next_slot(spec, just_fin_state) signed_block = state_transition_and_sign_block(spec, deepcopy(just_fin_state), block) + # NOTE: Mock store so that the modified state could be accessed + parent_block = last_signed_block.message.copy() + parent_block.state_root = just_fin_state.hash_tree_root() + store.blocks[block.parent_root] = parent_block + store.block_states[block.parent_root] = just_fin_state.copy() + assert block.parent_root in store.blocks.keys() + assert block.parent_root in store.block_states.keys() + run_on_block(spec, store, signed_block) assert store.finalized_checkpoint == new_finalized diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index 9b6325ccf..0ebdbf2c0 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -4,7 +4,7 @@ from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE if __name__ == "__main__": phase_0_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [ - # 'get_head', + 'get_head', 'on_block', ]} # No additional Altair specific finality tests, yet. From 2445fe5a7684fd2607129f3feea390b6d95cc264 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 18 Jun 2021 19:14:03 +0800 Subject: [PATCH 03/75] Add new test cases - `test_new_finalized_slot_is_not_justified_checkpoint_ancestor` - `test_new_finalized_slot_is_justified_checkpoint_ancestor` --- .../unittests/fork_choice/test_on_block.py | 141 +++++++++++++++++- 1 file changed, 139 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py index ce2ab95f4..195c53839 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py @@ -1,7 +1,8 @@ from copy import deepcopy from eth2spec.utils.ssz.ssz_impl import hash_tree_root -from eth2spec.test.context import with_all_phases, spec_state_test, with_phases, PHASE0 +from eth2spec.test.context import with_all_phases, spec_state_test +from eth2spec.test.helpers.attestations import next_epoch_with_attestations from eth2spec.test.helpers.block import build_empty_block_for_next_slot, transition_unsigned_block, \ build_empty_block, sign_block from eth2spec.test.helpers.fork_choice import ( @@ -84,7 +85,7 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): run_on_block(spec, store, signed_block, valid=False) -@with_phases([PHASE0]) +@with_all_phases @spec_state_test def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): # Initialization @@ -252,3 +253,139 @@ def test_on_block_outside_safe_slots_but_finality(spec, state): assert store.finalized_checkpoint == new_finalized assert store.justified_checkpoint == new_justified + + +@with_all_phases +@spec_state_test 
+def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): + """ + J: Justified + F: Finalized + pre-store: + epoch + [0] <- [1] <- [2] <- [3] <- [4] <- [5] + F J + + another_state: + [0] <- [1] <- [2] <- [3] <- [4] <- [5] + F J + """ + another_state = state.copy() + # Initialization + store = get_genesis_forkchoice_store(spec, state) + time = 0 + spec.on_tick(store, time) + + # Process state + next_epoch(spec, state) + _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) + for block in signed_blocks: + spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + run_on_block(spec, store, block) + next_epoch(spec, state) + for _ in range(2): + _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) + for block in signed_blocks: + spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + run_on_block(spec, store, block) + + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.justified_checkpoint.hash_tree_root() == state.current_justified_checkpoint.hash_tree_root() + + # Create another chain + # another_state = store.block_states[store.justified_checkpoint.root].copy() + next_epoch(spec, another_state) + spec.on_tick(store, store.time + another_state.slot * spec.config.SECONDS_PER_SLOT) + + all_blocks = [] + for _ in range(3): + _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True) + all_blocks += signed_blocks + + assert another_state.finalized_checkpoint.epoch == 2 + assert another_state.current_justified_checkpoint.epoch == 3 + assert state.finalized_checkpoint.hash_tree_root() != another_state.finalized_checkpoint.hash_tree_root() + assert ( + state.current_justified_checkpoint.hash_tree_root() + != another_state.current_justified_checkpoint.hash_tree_root() + ) + + pre_store_justified_checkpoint_root = store.justified_checkpoint.root + for block in all_blocks: + run_on_block(spec, store, block) + + finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot) + assert ancestor_at_finalized_slot != store.finalized_checkpoint.root + + assert store.finalized_checkpoint.hash_tree_root() == another_state.finalized_checkpoint.hash_tree_root() + assert store.justified_checkpoint.hash_tree_root() == another_state.current_justified_checkpoint.hash_tree_root() + + +@with_all_phases +@spec_state_test +def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): + """ + J: Justified + F: Finalized + pre-store: + epoch + [0] <- [1] <- [2] <- [3] <- [4] <- [5] + F J + + another_state: + <- [4] <- [5] + F+J + """ + # Initialization + store = get_genesis_forkchoice_store(spec, state) + time = 0 + spec.on_tick(store, time) + + # Process state + next_epoch(spec, state) + spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) + for block in signed_blocks: + spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + run_on_block(spec, store, block) + _, signed_blocks, state = next_epoch_with_attestations(spec, state, True, False) + for block in signed_blocks: + spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + run_on_block(spec, 
store, block) + next_epoch(spec, state) + spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + for _ in range(2): + _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) + for block in signed_blocks: + spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + run_on_block(spec, store, block) + + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + assert store.justified_checkpoint.hash_tree_root() == state.current_justified_checkpoint.hash_tree_root() + + # Create another chain + # Forking from epoch 3 + all_blocks = [] + slot = spec.compute_start_slot_at_epoch(3) + block_root = spec.get_block_root_at_slot(state, slot) + another_state = store.block_states[block_root].copy() + for _ in range(2): + _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True) + all_blocks += signed_blocks + + assert another_state.finalized_checkpoint.epoch == 3 + assert another_state.current_justified_checkpoint.epoch == 4 + + pre_store_justified_checkpoint_root = store.justified_checkpoint.root + for block in all_blocks: + run_on_block(spec, store, block) + + finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot) + assert ancestor_at_finalized_slot == store.finalized_checkpoint.root + + assert store.finalized_checkpoint.hash_tree_root() == another_state.finalized_checkpoint.hash_tree_root() + assert store.justified_checkpoint.hash_tree_root() != another_state.current_justified_checkpoint.hash_tree_root() From 7a9ae5733584e3fde5b304d7c606a2264c26f5d8 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 19 Jun 2021 02:13:02 +0800 Subject: [PATCH 04/75] Minor formatting. `True` -> `true`, `False` -> `false` --- tests/formats/fork_choice/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md index 199b93784..03a1a6e9c 100644 --- a/tests/formats/fork_choice/README.md +++ b/tests/formats/fork_choice/README.md @@ -30,8 +30,8 @@ The parameter that is required for executing `on_tick(store, time)`. ```yaml { tick: int -- to execute `on_tick(store, time)`. - valid: bool -- optional, default to `True`. - If it's `False`, this execution step is expected to be invalid. + valid: bool -- optional, default to `true`. + If it's `false`, this execution step is expected to be invalid. } ``` @@ -45,8 +45,8 @@ The parameter that is required for executing `on_attestation(store, attestation) { attestation: string -- the name of the `attestation_<32-byte-root>.ssz_snappy` file. To execute `on_attestation(store, attestation)` with the given attestation. - valid: bool -- optional, default to `True`. - If it's `False`, this execution step is expected to be invalid. + valid: bool -- optional, default to `true`. + If it's `false`, this execution step is expected to be invalid. } ``` The file is located in the same folder (see below). @@ -61,8 +61,8 @@ The parameter that is required for executing `on_block(store, block)`. { block: string -- the name of the `block_<32-byte-root>.ssz_snappy` file. To execute `on_block(store, block)` with the given attestation. - valid: bool -- optional, default to `True`. - If it's `False`, this execution step is expected to be invalid. 
+ valid: bool -- optional, default to `true`. + If it's `false`, this execution step is expected to be invalid. } ``` The file is located in the same folder (see below). From 83598af188cd75ec0ee30ef60a21e3d419229771 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 19 Jun 2021 06:29:01 +0800 Subject: [PATCH 05/75] Add `test_new_justified_is_later_than_store_justified` and fix test cases - Fix `on_tick` calls - Refactor test cases --- .../eth2spec/test/helpers/attestations.py | 32 +++ .../unittests/fork_choice/test_on_block.py | 221 +++++++++++++++--- 2 files changed, 223 insertions(+), 30 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py index c92860ffa..fd0e0d880 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py @@ -261,6 +261,38 @@ def next_epoch_with_attestations(spec, ) +def state_transition_with_signed_full_block(spec, state, fill_cur_epoch, fill_prev_epoch): + # Build a block with previous attestations + block = build_empty_block_for_next_slot(spec, state) + attestations = [] + + if fill_prev_epoch: + # current epoch + slots = state.slot % spec.SLOTS_PER_EPOCH + for slot_offset in range(slots): + target_slot = state.slot - slot_offset + attestations += _get_valid_attestation_at_slot( + state, + spec, + target_slot, + ) + + if fill_prev_epoch: + # attest previous epoch + slots = spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH + for slot_offset in range(1, slots): + target_slot = state.slot - (state.slot % spec.SLOTS_PER_EPOCH) - slot_offset + attestations += _get_valid_attestation_at_slot( + state, + spec, + target_slot, + ) + + block.body.attestations = attestations + signed_block = state_transition_and_sign_block(spec, state, block) + return signed_block + + def prepare_state_with_attestations(spec, state, participation_fn=None): """ Prepare state with attestations according to the ``participation_fn``. 
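For orientation, here is a rough usage sketch of the `state_transition_with_signed_full_block` helper added above. It is illustrative only and not part of the patch; it assumes a minimal-preset `spec`, a `store` built by `get_genesis_forkchoice_store`, and a `state` that has advanced a few slots into an epoch:

```python
# Hypothetical wiring, following the unittest-style helpers in this series.
# Tick the store up to the state's current slot, then apply a single block
# that carries the packed attestations for the current and previous epoch.
spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
signed_block = state_transition_with_signed_full_block(spec, state, True, True)
run_on_block(spec, store, signed_block)
```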
diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py index 195c53839..70b5a9706 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py @@ -1,8 +1,12 @@ from copy import deepcopy from eth2spec.utils.ssz.ssz_impl import hash_tree_root -from eth2spec.test.context import with_all_phases, spec_state_test -from eth2spec.test.helpers.attestations import next_epoch_with_attestations +from eth2spec.test.context import MINIMAL, with_all_phases, spec_state_test, with_presets +from eth2spec.test.helpers.attestations import ( + next_epoch_with_attestations, + next_slots_with_attestations, + state_transition_with_signed_full_block, +) from eth2spec.test.helpers.block import build_empty_block_for_next_slot, transition_unsigned_block, \ build_empty_block, sign_block from eth2spec.test.helpers.fork_choice import ( @@ -10,7 +14,7 @@ from eth2spec.test.helpers.fork_choice import ( run_on_block, apply_next_epoch_with_attestations, ) -from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block, transition_to +from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block, transition_to, next_slots @with_all_phases @@ -48,7 +52,7 @@ def test_on_block_finalized_skip_slots(spec, state): # Build block that includes the skipped slots up to finality in chain block = build_empty_block(spec, state, spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) signed_block = state_transition_and_sign_block(spec, state, block) - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) run_on_block(spec, store, signed_block) @@ -81,7 +85,7 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): slot=spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) signed_block = sign_block(spec, pre_state, block) - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) run_on_block(spec, store, signed_block, valid=False) @@ -94,10 +98,10 @@ def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): spec.on_tick(store, time) next_epoch(spec, state) - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store) next_epoch(spec, state) - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) last_block = last_signed_block.message last_block_root = last_block.hash_tree_root() @@ -137,7 +141,7 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): spec.on_tick(store, time) next_epoch(spec, state) - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store) last_block_root = hash_tree_root(last_signed_block.message) @@ -148,7 +152,7 @@ def 
test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
     )
 
     next_epoch(spec, state)
-    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
+    spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
 
     # Create new higher justified checkpoint not in branch of store's justified checkpoint
     just_block = build_empty_block_for_next_slot(spec, state)
@@ -201,7 +205,7 @@ def test_on_block_outside_safe_slots_but_finality(spec, state):
     spec.on_tick(store, time)
 
     next_epoch(spec, state)
-    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
+    spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
     state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store)
     last_block_root = hash_tree_root(last_signed_block.message)
 
@@ -212,7 +216,7 @@ def test_on_block_outside_safe_slots_but_finality(spec, state):
     )
 
     next_epoch(spec, state)
-    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
+    spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
 
     # NOTE: Mock a new higher justified checkpoint not in branch of store's justified checkpoint
     just_block = build_empty_block_for_next_slot(spec, state)
@@ -255,18 +259,166 @@ def test_on_block_outside_safe_slots_but_finality(spec, state):
 
     assert store.finalized_checkpoint == new_finalized
     assert store.justified_checkpoint == new_justified
+
+
+@with_all_phases
+@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch")
+@spec_state_test
+def test_new_justified_is_later_than_store_justified(spec, state):
+    """
+    J: Justified
+    F: Finalized
+    fork_1_state (forked from genesis):
+        epoch
+        [0] <- [1] <- [2] <- [3] <- [4]
+         F                    J
+
+    fork_2_state (forked from fork_1_state's epoch 2):
+        epoch
+              └──── [3] <- [4] <- [5] <- [6]
+               F                  J
+
+    fork_3_state (forked from genesis):
+        [0] <- [1] <- [2] <- [3] <- [4] <- [5]
+                              F      J
+    """
+    # The 1st fork, from genesis
+    fork_1_state = state.copy()
+    # The 3rd fork, from genesis
+    fork_3_state = state.copy()
+
+    # Initialization
+    store = get_genesis_forkchoice_store(spec, fork_1_state)
+    time = 0
+    spec.on_tick(store, time)
+
+    # ----- Process fork_1_state
+    # Skip epoch 0
+    next_epoch(spec, fork_1_state)
+    # Fill epoch 1 with previous epoch attestations
+    _, signed_blocks, fork_1_state = next_epoch_with_attestations(spec, fork_1_state, False, True)
+    for block in signed_blocks:
+        spec.on_tick(store, store.genesis_time + fork_1_state.slot * spec.config.SECONDS_PER_SLOT)
+        run_on_block(spec, store, block)
+
+    # Fork `fork_2_state` at the start of epoch 2
+    fork_2_state = fork_1_state.copy()
+    assert spec.get_current_epoch(fork_2_state) == 2
+
+    # Skip epoch 2
+    next_epoch(spec, fork_1_state)
+    # Fill epoch 3 & 4 with previous epoch attestations
+    for _ in range(2):
+        _, signed_blocks, fork_1_state = next_epoch_with_attestations(spec, fork_1_state, False, True)
+        for block in signed_blocks:
+            spec.on_tick(store, store.genesis_time + fork_1_state.slot * spec.config.SECONDS_PER_SLOT)
+            run_on_block(spec, store, block)
+
+    assert fork_1_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
+    assert fork_1_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert store.justified_checkpoint.hash_tree_root() == fork_1_state.current_justified_checkpoint.hash_tree_root()
+
+    # ------ fork_2_state: Create a chain to set store.best_justified_checkpoint
+    # NOTE: The goal is to make `store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch`
+    all_blocks = []
+
+    # Propose an empty block at epoch 2, 1st slot
+    block = build_empty_block_for_next_slot(spec, fork_2_state)
+    signed_block = state_transition_and_sign_block(spec, fork_2_state, block)
+    all_blocks.append(signed_block.copy())
+    assert fork_2_state.current_justified_checkpoint.epoch == 0
+
+    # Skip to epoch 4
+    for _ in range(2):
+        next_epoch(spec, fork_2_state)
+    assert fork_2_state.current_justified_checkpoint.epoch == 0
+
+    # Propose a block at epoch 4, 5th slot
+    # Propose a block at epoch 5, 5th slot
+    for _ in range(2):
+        next_epoch(spec, fork_2_state)
+        next_slots(spec, fork_2_state, 4)
+        signed_block = state_transition_with_signed_full_block(spec, fork_2_state, True, True)
+        all_blocks.append(signed_block.copy())
+        assert fork_2_state.current_justified_checkpoint.epoch == 0
+
+    # Propose a block at epoch 6, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot
+    next_epoch(spec, fork_2_state)
+    next_slots(spec, fork_2_state, spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2)
+    signed_block = state_transition_with_signed_full_block(spec, fork_2_state, True, True)
+    all_blocks.append(signed_block.copy())
+    assert fork_2_state.finalized_checkpoint.epoch == 0
+    assert fork_2_state.current_justified_checkpoint.epoch == 5
+
+    # Check SAFE_SLOTS_TO_UPDATE_JUSTIFIED
+    spec.on_tick(store, store.genesis_time + fork_2_state.slot * spec.config.SECONDS_PER_SLOT)
+    assert spec.compute_slots_since_epoch_start(spec.get_current_slot(store)) >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
+
+    # Apply blocks of `fork_2_state` to `store`
+    for block in all_blocks:
+        if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot):
+            spec.on_tick(store, store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT)
+        run_on_block(spec, store, block)
+
+    assert store.finalized_checkpoint.epoch == 0
+    assert store.justified_checkpoint.epoch == 3
+    assert store.best_justified_checkpoint.epoch == 5
+
+    # ------ fork_3_state: Create another chain to test the
+    # "Update justified if new justified is later than store justified" case
+    all_blocks = []
+    for _ in range(3):
+        next_epoch(spec, fork_3_state)
+
+    # epoch 3
+    _, signed_blocks, fork_3_state = next_epoch_with_attestations(spec, fork_3_state, True, True)
+    all_blocks += signed_blocks
+    assert fork_3_state.finalized_checkpoint.epoch == 0
+
+    # epoch 4, attest the first 5 blocks
+    _, blocks, fork_3_state = next_slots_with_attestations(spec, fork_3_state, 5, True, True)
+    all_blocks += blocks.copy()
+    assert fork_3_state.finalized_checkpoint.epoch == 0
+
+    # Propose a block at epoch 5, 5th slot
+    next_epoch(spec, fork_3_state)
+    next_slots(spec, fork_3_state, 4)
+    signed_block = state_transition_with_signed_full_block(spec, fork_3_state, True, True)
+    all_blocks.append(signed_block.copy())
+    assert fork_3_state.finalized_checkpoint.epoch == 0
+
+    # Propose a block at epoch 6, 5th slot
+    next_epoch(spec, fork_3_state)
+    next_slots(spec, fork_3_state, 4)
+    signed_block = state_transition_with_signed_full_block(spec, fork_3_state, True, True)
+    all_blocks.append(signed_block.copy())
+    assert fork_3_state.finalized_checkpoint.epoch == 3
+    assert fork_3_state.current_justified_checkpoint.epoch == 4
+
+    # Apply blocks of `fork_3_state` to `store`
+    for block in all_blocks:
+        if store.time < spec.compute_time_at_slot(fork_3_state, block.message.slot):
+            spec.on_tick(store, store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT)
+        run_on_block(spec, store, block)
+
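+    # NOTE: `fork_3_state` justified epoch 4, which is later than the store's current
+    # justified epoch 3, so `on_block` is expected to adopt it, while
+    # `best_justified_checkpoint` should keep pointing at fork_2's epoch-5 checkpoint,
+    # as the assertions below verify.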
+ assert store.finalized_checkpoint.hash_tree_root() == fork_3_state.finalized_checkpoint.hash_tree_root() + assert (store.justified_checkpoint.hash_tree_root() + == fork_3_state.current_justified_checkpoint.hash_tree_root() + != store.best_justified_checkpoint.hash_tree_root()) + assert (store.best_justified_checkpoint.hash_tree_root() + == fork_2_state.current_justified_checkpoint.hash_tree_root()) + + @with_all_phases @spec_state_test def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): """ J: Justified F: Finalized - pre-store: + state (forked from genesis): epoch [0] <- [1] <- [2] <- [3] <- [4] <- [5] F J - another_state: + another_state (forked from genesis): [0] <- [1] <- [2] <- [3] <- [4] <- [5] F J """ @@ -276,17 +428,22 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): time = 0 spec.on_tick(store, time) - # Process state + # ----- Process state + # Goal: make `store.finalized_checkpoint.epoch == 0` and `store.justified_checkpoint.epoch == 3` + # Skip epoch 0 next_epoch(spec, state) + # Fill epoch 1 with previous epoch attestations _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) for block in signed_blocks: - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) run_on_block(spec, store, block) + # Skip epoch 2 next_epoch(spec, state) + # Fill epoch 3 & 4 with previous epoch attestations for _ in range(2): _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) for block in signed_blocks: - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) run_on_block(spec, store, block) assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 @@ -294,11 +451,11 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): assert store.justified_checkpoint.hash_tree_root() == state.current_justified_checkpoint.hash_tree_root() # Create another chain - # another_state = store.block_states[store.justified_checkpoint.root].copy() - next_epoch(spec, another_state) - spec.on_tick(store, store.time + another_state.slot * spec.config.SECONDS_PER_SLOT) - + # Goal: make `another_state.finalized_checkpoint.epoch == 2` and `another_state.justified_checkpoint.epoch == 3` all_blocks = [] + # Skip epoch 0 + next_epoch(spec, another_state) + # Fill epoch 1 & 2 with previous + current epoch attestations for _ in range(3): _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True) all_blocks += signed_blocks @@ -310,9 +467,11 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): state.current_justified_checkpoint.hash_tree_root() != another_state.current_justified_checkpoint.hash_tree_root() ) - pre_store_justified_checkpoint_root = store.justified_checkpoint.root + + # Apply blocks of `another_state` to `store` for block in all_blocks: + # NOTE: Do not call `on_tick` here run_on_block(spec, store, block) finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) @@ -329,14 +488,14 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): """ J: Justified F: Finalized - pre-store: + state: epoch [0] <- [1] <- [2] <- [3] <- [4] <- [5] F J - another_state: - <- [4] <- [5] - F+J + another_state (forked from state at epoch 3): + └──── 
[4] <- [5] + F J """ # Initialization store = get_genesis_forkchoice_store(spec, state) @@ -345,21 +504,21 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): # Process state next_epoch(spec, state) - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) for block in signed_blocks: - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) run_on_block(spec, store, block) _, signed_blocks, state = next_epoch_with_attestations(spec, state, True, False) for block in signed_blocks: - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) run_on_block(spec, store, block) next_epoch(spec, state) - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) for _ in range(2): _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) for block in signed_blocks: - spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) run_on_block(spec, store, block) assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 @@ -381,6 +540,8 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): pre_store_justified_checkpoint_root = store.justified_checkpoint.root for block in all_blocks: + if store.time < spec.compute_time_at_slot(another_state, block.message.slot): + spec.on_tick(store, store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT) run_on_block(spec, store, block) finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) From 88be6cdf603b334d2d1f11daee3063c6ddbc4b5c Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 22 Jun 2021 20:55:52 +0800 Subject: [PATCH 06/75] Apply Danny's suggestions from code review Co-authored-by: Danny Ryan --- .../test/phase0/unittests/fork_choice/test_on_block.py | 8 ++++++-- tests/formats/fork_choice/README.md | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py index 70b5a9706..5ca362930 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py @@ -7,8 +7,12 @@ from eth2spec.test.helpers.attestations import ( next_slots_with_attestations, state_transition_with_signed_full_block, ) -from eth2spec.test.helpers.block import build_empty_block_for_next_slot, transition_unsigned_block, \ - build_empty_block, sign_block +from eth2spec.test.helpers.block import ( + build_empty_block, + build_empty_block_for_next_slot, + sign_block, + transition_unsigned_block, +) from eth2spec.test.helpers.fork_choice import ( get_genesis_forkchoice_store, run_on_block, diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md index 03a1a6e9c..90c0aafc7 100644 --- a/tests/formats/fork_choice/README.md +++ b/tests/formats/fork_choice/README.md @@ 
-29,7 +29,7 @@ The parameter that is required for executing `on_tick(store, time)`. ```yaml { - tick: int -- to execute `on_tick(store, time)`. + tick: int -- to execute `on_tick(store, time)`. valid: bool -- optional, default to `true`. If it's `false`, this execution step is expected to be invalid. } From 69a645aa8b428dcd47836e4efaa1d9b408853995 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 23 Jun 2021 03:38:17 +0800 Subject: [PATCH 07/75] Apply PR feedback --- .../unittests/fork_choice/test_on_block.py | 31 +++++-------------- 1 file changed, 8 insertions(+), 23 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py index 5ca362930..2f945f06c 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py @@ -26,8 +26,6 @@ from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_bl def test_on_block_before_finalized(spec, state): # Initialization store = get_genesis_forkchoice_store(spec, state) - time = 100 - spec.on_tick(store, time) store.finalized_checkpoint = spec.Checkpoint( epoch=store.finalized_checkpoint.epoch + 2, @@ -45,9 +43,8 @@ def test_on_block_before_finalized(spec, state): def test_on_block_finalized_skip_slots(spec, state): # Initialization store = get_genesis_forkchoice_store(spec, state) - time = 100 - spec.on_tick(store, time) + # Create a finalized chain store.finalized_checkpoint = spec.Checkpoint( epoch=store.finalized_checkpoint.epoch + 2, root=store.finalized_checkpoint.root @@ -98,16 +95,13 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): # Initialization store = get_genesis_forkchoice_store(spec, state) - time = 0 - spec.on_tick(store, time) next_epoch(spec, state) spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store) next_epoch(spec, state) spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - last_block = last_signed_block.message - last_block_root = last_block.hash_tree_root() + last_block_root = last_signed_block.message.hash_tree_root() # NOTE: Mock the justified checkpoint just_state = store.block_states[last_block_root] @@ -141,8 +135,6 @@ def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): # Initialization store = get_genesis_forkchoice_store(spec, state) - time = 0 - spec.on_tick(store, time) next_epoch(spec, state) spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) @@ -205,8 +197,6 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): def test_on_block_outside_safe_slots_but_finality(spec, state): # Initialization store = get_genesis_forkchoice_store(spec, state) - time = 0 - spec.on_tick(store, time) next_epoch(spec, state) spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) @@ -291,8 +281,6 @@ def test_new_justified_is_later_than_store_justified(spec, state): # Initialization store = get_genesis_forkchoice_store(spec, fork_1_state) - time = 0 - spec.on_tick(store, time) # ----- Process fork_1_state # Skip epoch 0 @@ 
-422,20 +410,21 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): [0] <- [1] <- [2] <- [3] <- [4] <- [5] F J - another_state (forked from genesis): - [0] <- [1] <- [2] <- [3] <- [4] <- [5] + another_state (forked from epoch 0): + └──── [1] <- [2] <- [3] <- [4] <- [5] F J """ - another_state = state.copy() # Initialization store = get_genesis_forkchoice_store(spec, state) - time = 0 - spec.on_tick(store, time) # ----- Process state # Goal: make `store.finalized_checkpoint.epoch == 0` and `store.justified_checkpoint.epoch == 3` # Skip epoch 0 next_epoch(spec, state) + + # Forking another_state + another_state = state.copy() + # Fill epoch 1 with previous epoch attestations _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) for block in signed_blocks: @@ -457,8 +446,6 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): # Create another chain # Goal: make `another_state.finalized_checkpoint.epoch == 2` and `another_state.justified_checkpoint.epoch == 3` all_blocks = [] - # Skip epoch 0 - next_epoch(spec, another_state) # Fill epoch 1 & 2 with previous + current epoch attestations for _ in range(3): _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True) @@ -503,8 +490,6 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): """ # Initialization store = get_genesis_forkchoice_store(spec, state) - time = 0 - spec.on_tick(store, time) # Process state next_epoch(spec, state) From f55afefe9025c103e1fc9f6a193891514b8f8c0e Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 23 Jun 2021 04:58:27 +0800 Subject: [PATCH 08/75] Move more tests from unittests to testgen tests - `test_on_block_before_finalized` - `test_on_block_finalized_skip_slots` - `test_on_block_finalized_skip_slots_not_in_skip_chain` --- .../test/phase0/fork_choice/test_on_block.py | 110 +++++++++++++++++- .../unittests/fork_choice/test_on_block.py | 74 +----------- 2 files changed, 106 insertions(+), 78 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index e9f2fae63..c54a7a7be 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -17,6 +17,7 @@ from eth2spec.test.helpers.fork_choice import ( from eth2spec.test.helpers.state import ( next_epoch, state_transition_and_sign_block, + transition_to, ) @@ -51,8 +52,8 @@ def test_basic(spec, state): @with_all_phases -@with_presets([MINIMAL], reason="too slow") @spec_state_test +@with_presets([MINIMAL], reason="too slow") def test_on_block_checkpoints(spec, state): test_steps = [] # Initialization @@ -76,10 +77,8 @@ def test_on_block_checkpoints(spec, state): on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Mock the finalized_checkpoint and build a block on it - fin_state = store.block_states[last_block_root] - fin_state.finalized_checkpoint = ( - store.block_states[last_block_root].current_justified_checkpoint - ) + fin_state = store.block_states[last_block_root].copy() + fin_state.finalized_checkpoint = store.block_states[last_block_root].current_justified_checkpoint.copy() block = build_empty_block_for_next_slot(spec, fin_state) signed_block = state_transition_and_sign_block(spec, fin_state.copy(), block) @@ -133,3 +132,104 @@ def 
test_on_block_bad_parent_root(spec, state): yield from add_block(spec, store, signed_block, test_steps, valid=False) yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_on_block_before_finalized(spec, state): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # Fork + another_state = state.copy() + + # Create a finalized chain + for _ in range(4): + state, store, _ = yield from apply_next_epoch_with_attestations(spec, state, store, test_steps) + assert store.finalized_checkpoint.epoch == 2 + + # Fail receiving block of `GENESIS_SLOT + 1` slot + block = build_empty_block_for_next_slot(spec, another_state) + block.body.graffiti = b'\x12' * 32 + signed_block = state_transition_and_sign_block(spec, another_state, block) + assert signed_block.message.hash_tree_root() not in store.blocks + yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False) + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_on_block_finalized_skip_slots(spec, state): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # Create a finalized chain + for _ in range(4): + state, store, _ = yield from apply_next_epoch_with_attestations(spec, state, store, test_steps) + assert store.finalized_checkpoint.epoch == 2 + + # Another chain + another_state = store.block_states[store.finalized_checkpoint.root].copy() + # Build block that includes the skipped slots up to finality in chain + block = build_empty_block(spec, + another_state, + spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) + block.body.graffiti = b'\x12' * 32 + signed_block = state_transition_and_sign_block(spec, another_state, block) + + yield from tick_and_add_block(spec, store, signed_block, test_steps) + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): + test_steps = [] + # Initialization + transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1) + block = build_empty_block_for_next_slot(spec, state) + transition_unsigned_block(spec, state, block) + block.state_root = state.hash_tree_root() + store = spec.get_forkchoice_store(state, block) + yield 'anchor_state', state + yield 'anchor_block', block + + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + pre_finalized_checkpoint_epoch = store.finalized_checkpoint.epoch + + # Finalized + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations(spec, state, store, test_steps) + assert store.finalized_checkpoint.epoch == pre_finalized_checkpoint_epoch + 1 + + # Now build a block at later slot than finalized epoch + # 
Includes finalized block in chain, but not at appropriate skip slot + pre_state = store.block_states[block.hash_tree_root()].copy() + block = build_empty_block(spec, + state=pre_state, + slot=spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) + block.body.graffiti = b'\x12' * 32 + signed_block = sign_block(spec, pre_state, block) + yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False) + + yield 'steps', test_steps diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py index 2f945f06c..18e23ade2 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py @@ -8,86 +8,14 @@ from eth2spec.test.helpers.attestations import ( state_transition_with_signed_full_block, ) from eth2spec.test.helpers.block import ( - build_empty_block, build_empty_block_for_next_slot, - sign_block, - transition_unsigned_block, ) from eth2spec.test.helpers.fork_choice import ( get_genesis_forkchoice_store, run_on_block, apply_next_epoch_with_attestations, ) -from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block, transition_to, next_slots - - -@with_all_phases -@spec_state_test -def test_on_block_before_finalized(spec, state): - # Initialization - store = get_genesis_forkchoice_store(spec, state) - - store.finalized_checkpoint = spec.Checkpoint( - epoch=store.finalized_checkpoint.epoch + 2, - root=store.finalized_checkpoint.root - ) - - # Fail receiving block of `GENESIS_SLOT + 1` slot - block = build_empty_block_for_next_slot(spec, state) - signed_block = state_transition_and_sign_block(spec, state, block) - run_on_block(spec, store, signed_block, valid=False) - - -@with_all_phases -@spec_state_test -def test_on_block_finalized_skip_slots(spec, state): - # Initialization - store = get_genesis_forkchoice_store(spec, state) - - # Create a finalized chain - store.finalized_checkpoint = spec.Checkpoint( - epoch=store.finalized_checkpoint.epoch + 2, - root=store.finalized_checkpoint.root - ) - - # Build block that includes the skipped slots up to finality in chain - block = build_empty_block(spec, state, spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) - signed_block = state_transition_and_sign_block(spec, state, block) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, signed_block) - - -@with_all_phases -@spec_state_test -def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): - # Initialization - transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1) - block = build_empty_block_for_next_slot(spec, state) - transition_unsigned_block(spec, state, block) - block.state_root = state.hash_tree_root() - store = spec.get_forkchoice_store(state, block) - - current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time - spec.on_tick(store, current_time) - assert store.time == current_time - - pre_finalized_checkpoint_epoch = store.finalized_checkpoint.epoch - - # Finalized - for _ in range(3): - state, store, _ = yield from apply_next_epoch_with_attestations(spec, state, store) - assert store.finalized_checkpoint.epoch == pre_finalized_checkpoint_epoch + 1 - - # Now build a block at later slot than finalized epoch - # Includes finalized block in chain, but not at appropriate skip slot - pre_state = 
store.block_states[block.hash_tree_root()] - block = build_empty_block(spec, - state=pre_state, - slot=spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2) - signed_block = sign_block(spec, pre_state, block) - - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, signed_block, valid=False) +from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block, next_slots @with_all_phases From 29a93f62853bb7f09e6448acd2e74fe2167dc667 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 25 Jun 2021 18:48:30 +0800 Subject: [PATCH 09/75] Move more unit tests to test vectors --- .../eth2spec/test/helpers/attestations.py | 73 ++- .../eth2spec/test/helpers/fork_choice.py | 104 +++- .../test/phase0/fork_choice/test_on_block.py | 466 +++++++++++++++++- .../unittests/fork_choice/test_on_block.py | 412 +--------------- 4 files changed, 608 insertions(+), 447 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py index fd0e0d880..ffd484ecd 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py @@ -217,30 +217,13 @@ def next_slots_with_attestations(spec, post_state = state.copy() signed_blocks = [] for _ in range(slot_count): - block = build_empty_block_for_next_slot(spec, post_state) - if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: - slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 - if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)): - attestations = _get_valid_attestation_at_slot( - post_state, - spec, - slot_to_attest, - participation_fn=participation_fn - ) - for attestation in attestations: - block.body.attestations.append(attestation) - if fill_prev_epoch: - slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 - attestations = _get_valid_attestation_at_slot( - post_state, - spec, - slot_to_attest, - participation_fn=participation_fn - ) - for attestation in attestations: - block.body.attestations.append(attestation) - - signed_block = state_transition_and_sign_block(spec, post_state, block) + signed_block = state_transition_with_full_block( + spec, + post_state, + fill_cur_epoch, + fill_prev_epoch, + participation_fn, + ) signed_blocks.append(signed_block) return state, signed_blocks, post_state @@ -249,7 +232,8 @@ def next_slots_with_attestations(spec, def next_epoch_with_attestations(spec, state, fill_cur_epoch, - fill_prev_epoch): + fill_prev_epoch, + participation_fn=None): assert state.slot % spec.SLOTS_PER_EPOCH == 0 return next_slots_with_attestations( @@ -258,15 +242,50 @@ def next_epoch_with_attestations(spec, spec.SLOTS_PER_EPOCH, fill_cur_epoch, fill_prev_epoch, + participation_fn, ) -def state_transition_with_signed_full_block(spec, state, fill_cur_epoch, fill_prev_epoch): +def state_transition_with_full_block(spec, state, fill_cur_epoch, fill_prev_epoch, participation_fn=None): + """ + Build and apply a block with attestions at the calculated `slot_to_attest` of current epoch and/or previous epoch. 
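+    Return the resulting signed block.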
+    """
+    block = build_empty_block_for_next_slot(spec, state)
+    if fill_cur_epoch and state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
+        slot_to_attest = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
+        if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(state)):
+            attestations = _get_valid_attestation_at_slot(
+                state,
+                spec,
+                slot_to_attest,
+                participation_fn=participation_fn
+            )
+            for attestation in attestations:
+                block.body.attestations.append(attestation)
+    if fill_prev_epoch:
+        slot_to_attest = state.slot - spec.SLOTS_PER_EPOCH + 1
+        attestations = _get_valid_attestation_at_slot(
+            state,
+            spec,
+            slot_to_attest,
+            participation_fn=participation_fn
+        )
+        for attestation in attestations:
+            block.body.attestations.append(attestation)
+
+    signed_block = state_transition_and_sign_block(spec, state, block)
+    return signed_block
+
+
+def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, fill_prev_epoch):
+    """
+    Build and apply a block with attestations at all valid slots of current epoch and/or previous epoch.
+    """
     # Build a block with previous attestations
     block = build_empty_block_for_next_slot(spec, state)
     attestations = []
 
-    if fill_prev_epoch:
+    if fill_cur_epoch:
         # current epoch
         slots = state.slot % spec.SLOTS_PER_EPOCH
         for slot_offset in range(slots):
diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
index bac3d1ff5..ec5793af5 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
@@ -1,5 +1,8 @@
 from eth_utils import encode_hex
-from eth2spec.test.helpers.attestations import next_epoch_with_attestations
+from eth2spec.test.helpers.attestations import (
+    next_epoch_with_attestations,
+    next_slots_with_attestations,
+)
 
 
 def get_anchor_root(spec, state):
@@ -19,23 +22,20 @@ def add_block_to_store(spec, store, signed_block):
     spec.on_block(store, signed_block)
 
 
-def tick_and_add_block(spec, store, signed_block, test_steps=None, valid=True):
-    if test_steps is None:
-        test_steps = []
-
+def tick_and_add_block(spec, store, signed_block, test_steps, valid=True, allow_invalid_attestations=False):
     pre_state = store.block_states[signed_block.message.parent_root]
     block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
     if store.time < block_time:
         on_tick_and_append_step(spec, store, block_time, test_steps)
 
-    yield from add_block(spec, store, signed_block, test_steps, valid=valid)
+    post_state = yield from add_block(
+        spec, store, signed_block, test_steps, valid=valid, allow_invalid_attestations=allow_invalid_attestations)
+
+    return post_state
 
 
-def tick_and_run_on_attestation(spec, store, attestation, test_steps=None):
-    if test_steps is None:
-        test_steps = []
-
+def tick_and_run_on_attestation(spec, store, attestation, test_steps):
     parent_block = store.blocks[attestation.data.beacon_block_root]
     pre_state = store.block_states[spec.hash_tree_root(parent_block)]
     block_time = pre_state.genesis_time + parent_block.slot * spec.config.SECONDS_PER_SLOT
@@ -50,6 +50,37 @@ def tick_and_run_on_attestation(spec, store, attestation, test_steps=None):
     test_steps.append({'attestation': get_attestation_file_name(attestation)})
 
 
+def add_attestation(spec, store, attestation, test_steps, valid=True):
+    yield get_attestation_file_name(attestation), attestation
+
+    if not valid:
+        try:
+            run_on_attestation(spec, store, attestation, valid=True)
+        except
AssertionError: + test_steps.append({ + 'attestation': get_attestation_file_name(attestation), + 'valid': False, + }) + return + else: + assert False + + run_on_attestation(spec, store, attestation, valid=True) + test_steps.append({'attestation': get_attestation_file_name(attestation)}) + + +def run_on_attestation(spec, store, attestation, valid=True): + if not valid: + try: + spec.on_attestation(store, attestation) + except AssertionError: + return + else: + assert False + + spec.on_attestation(store, attestation) + + def get_genesis_forkchoice_store(spec, genesis_state): store, _ = get_genesis_forkchoice_store_and_block(spec, genesis_state) return store @@ -87,10 +118,10 @@ def run_on_block(spec, store, signed_block, valid=True): assert store.blocks[signed_block.message.hash_tree_root()] == signed_block.message -def add_block(spec, store, signed_block, test_steps=None, valid=True): - if test_steps is None: - test_steps = [] - +def add_block(spec, store, signed_block, test_steps, valid=True, allow_invalid_attestations=False): + """ + Run on_block and on_attestation + """ yield get_block_file_name(signed_block), signed_block if not valid: @@ -109,8 +140,14 @@ def add_block(spec, store, signed_block, test_steps=None, valid=True): test_steps.append({'block': get_block_file_name(signed_block)}) # An on_block step implies receiving block's attestations - for attestation in signed_block.message.body.attestations: - spec.on_attestation(store, attestation) + try: + for attestation in signed_block.message.body.attestations: + run_on_attestation(spec, store, attestation, valid=True) + except AssertionError: + if allow_invalid_attestations: + pass + else: + raise block_root = signed_block.message.hash_tree_root() assert store.blocks[block_root] == signed_block.message @@ -125,6 +162,8 @@ def add_block(spec, store, signed_block, test_steps=None, valid=True): } }) + return store.block_states[signed_block.message.hash_tree_root()] + def get_formatted_head_output(spec, store): head = spec.get_head(store) @@ -135,11 +174,40 @@ def get_formatted_head_output(spec, store): } -def apply_next_epoch_with_attestations(spec, state, store, test_steps=None): +def apply_next_epoch_with_attestations(spec, + state, + store, + fill_cur_epoch, + fill_prev_epoch, + participation_fn=None, + test_steps=None): if test_steps is None: test_steps = [] - _, new_signed_blocks, post_state = next_epoch_with_attestations(spec, state, True, False) + _, new_signed_blocks, post_state = next_epoch_with_attestations( + spec, state, fill_cur_epoch, fill_prev_epoch, participation_fn=participation_fn) + for signed_block in new_signed_blocks: + block = signed_block.message + yield from tick_and_add_block(spec, store, signed_block, test_steps) + block_root = block.hash_tree_root() + assert store.blocks[block_root] == block + last_signed_block = signed_block + + assert store.block_states[block_root].hash_tree_root() == post_state.hash_tree_root() + + return post_state, store, last_signed_block + + +def apply_next_slots_with_attestations(spec, + state, + store, + slots, + fill_cur_epoch, + fill_prev_epoch, + test_steps, + participation_fn=None): + _, new_signed_blocks, post_state = next_slots_with_attestations( + spec, state, slots, fill_cur_epoch, fill_prev_epoch, participation_fn=participation_fn) for signed_block in new_signed_blocks: block = signed_block.message yield from tick_and_add_block(spec, store, signed_block, test_steps) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py 
b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index c54a7a7be..c3374d4f8 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -1,6 +1,13 @@ -from eth2spec.utils.ssz.ssz_impl import hash_tree_root +import random +from eth2spec.utils.ssz.ssz_impl import hash_tree_root from eth2spec.test.context import MINIMAL, spec_state_test, with_all_phases, with_presets +from eth2spec.test.helpers.attestations import ( + next_epoch_with_attestations, + next_slots_with_attestations, + state_transition_with_full_block, + state_transition_with_full_attestations_block, +) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, build_empty_block, @@ -13,14 +20,27 @@ from eth2spec.test.helpers.fork_choice import ( add_block, tick_and_add_block, apply_next_epoch_with_attestations, + apply_next_slots_with_attestations, ) from eth2spec.test.helpers.state import ( next_epoch, + next_slots, state_transition_and_sign_block, transition_to, ) +rng = random.Random(2020) + + +def _drop_random_one_third(_slot, _index, indices): + committee_len = len(indices) + assert committee_len >= 3 + filter_len = committee_len // 3 + participant_count = committee_len - filter_len + return rng.sample(indices, participant_count) + + @with_all_phases @spec_state_test def test_basic(spec, state): @@ -68,7 +88,8 @@ def test_on_block_checkpoints(spec, state): next_epoch(spec, state) on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) - state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store, test_steps) + state, store, last_signed_block = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False, test_steps=test_steps) last_block_root = hash_tree_root(last_signed_block.message) assert spec.get_head(store) == last_block_root @@ -152,7 +173,8 @@ def test_on_block_before_finalized(spec, state): # Create a finalized chain for _ in range(4): - state, store, _ = yield from apply_next_epoch_with_attestations(spec, state, store, test_steps) + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False, test_steps=test_steps) assert store.finalized_checkpoint.epoch == 2 # Fail receiving block of `GENESIS_SLOT + 1` slot @@ -180,7 +202,8 @@ def test_on_block_finalized_skip_slots(spec, state): # Create a finalized chain for _ in range(4): - state, store, _ = yield from apply_next_epoch_with_attestations(spec, state, store, test_steps) + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False, test_steps=test_steps) assert store.finalized_checkpoint.epoch == 2 # Another chain @@ -219,7 +242,8 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): # Finalized for _ in range(3): - state, store, _ = yield from apply_next_epoch_with_attestations(spec, state, store, test_steps) + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False, test_steps=test_steps) assert store.finalized_checkpoint.epoch == pre_finalized_checkpoint_epoch + 1 # Now build a block at later slot than finalized epoch @@ -233,3 +257,435 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False) yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], 
reason="mainnet config requires too many pre-generated public/private keys") +def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): + """ + Test `should_update_justified_checkpoint`: + compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # Skip epoch 0 & 1 + for _ in range(2): + next_epoch(spec, state) + # Fill epoch 2 + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False, test_steps=test_steps) + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 + # Skip epoch 3 & 4 + for _ in range(2): + next_epoch(spec, state) + # Epoch 5: Attest current epoch + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False, participation_fn=_drop_random_one_third, test_steps=test_steps) + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 + assert state.current_justified_checkpoint.epoch == 2 + assert store.justified_checkpoint.epoch == 2 + assert state.current_justified_checkpoint == store.justified_checkpoint + + # Skip epoch 6 + next_epoch(spec, state) + + pre_state = state.copy() + + # Build a block to justify epoch 5 + signed_block = state_transition_with_full_block(spec, state, True, True) + assert state.finalized_checkpoint.epoch == 0 + assert state.current_justified_checkpoint.epoch == 5 + assert state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch + assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH < spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + # Run on_block + yield from tick_and_add_block(spec, store, signed_block, test_steps) + # Ensure justified_checkpoint has been changed but finality is unchanged + assert store.justified_checkpoint.epoch == 5 + assert store.justified_checkpoint == state.current_justified_checkpoint + assert store.finalized_checkpoint.epoch == pre_state.finalized_checkpoint.epoch == 0 + + yield 'steps', test_steps + + +@with_all_phases +@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch") +@spec_state_test +def test_on_block_outside_safe_slots_but_finality(spec, state): + """ + Test `should_update_justified_checkpoint` case + - compute_slots_since_epoch_start(get_current_slot(store)) > SAFE_SLOTS_TO_UPDATE_JUSTIFIED + - new_justified_checkpoint and store.justified_checkpoint.root are NOT conflicting + + Thus should_update_justified_checkpoint returns True. + + Part of this script is similar to `test_new_justified_is_later_than_store_justified`. 
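+    Expected outcome: the store ends with finalized epoch 7 and justified epoch 8.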
+ """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # Skip epoch 0 + next_epoch(spec, state) + # Fill epoch 1 to 3, attest current epoch + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False, test_steps=test_steps) + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Skip epoch 4-6 + for _ in range(3): + next_epoch(spec, state) + + # epoch 7 + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + assert state.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == 7 + + # epoch 8, attest the first 5 blocks + state, store, _ = yield from apply_next_slots_with_attestations( + spec, state, store, 5, True, True, test_steps) + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 7 + + # Propose a block at epoch 9, 5th slot + next_epoch(spec, state) + next_slots(spec, state, 4) + signed_block = state_transition_with_full_attestations_block(spec, state, True, True) + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 7 + + # Propose an empty block at epoch 10, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot + # This block would trigger justification and finality updates on store + next_epoch(spec, state) + next_slots(spec, state, 4) + block = build_empty_block_for_next_slot(spec, state) + signed_block = state_transition_and_sign_block(spec, state, block) + assert state.finalized_checkpoint.epoch == 7 + assert state.current_justified_checkpoint.epoch == 8 + # Step time past safe slots and run on_block + if store.time < spec.compute_time_at_slot(state, signed_block.message.slot): + time = store.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT + on_tick_and_append_step(spec, store, time, test_steps) + assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + yield from add_block(spec, store, signed_block, test_steps) + + # Ensure justified_checkpoint finality has been changed + assert store.finalized_checkpoint.epoch == 7 + assert store.finalized_checkpoint == state.finalized_checkpoint + assert store.justified_checkpoint.epoch == 8 + assert store.justified_checkpoint == state.current_justified_checkpoint + + yield 'steps', test_steps + + +@with_all_phases +@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch") +@spec_state_test +def test_new_justified_is_later_than_store_justified(spec, state): + """ + J: Justified + F: Finalized + fork_1_state (forked from genesis): + epoch + [0] <- [1] <- [2] <- [3] <- [4] + F J + + fork_2_state (forked from fork_1_state's epoch 2): + epoch + └──── [3] <- [4] <- [5] <- [6] + F J + + fork_3_state (forked from genesis): + [0] <- [1] <- [2] <- [3] <- [4] <- [5] 
+ F J + """ + # The 1st fork, from genesis + fork_1_state = state.copy() + # The 3rd fork, from genesis + fork_3_state = state.copy() + + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # ----- Process fork_1_state + # Skip epoch 0 + next_epoch(spec, fork_1_state) + # Fill epoch 1 with previous epoch attestations + fork_1_state, store, _ = yield from apply_next_epoch_with_attestations( + spec, fork_1_state, store, False, True, test_steps=test_steps) + + # Fork `fork_2_state` at the start of epoch 2 + fork_2_state = fork_1_state.copy() + assert spec.get_current_epoch(fork_2_state) == 2 + + # Skip epoch 2 + next_epoch(spec, fork_1_state) + # # Fill epoch 3 & 4 with previous epoch attestations + for _ in range(2): + fork_1_state, store, _ = yield from apply_next_epoch_with_attestations( + spec, fork_1_state, store, False, True, test_steps=test_steps) + + assert fork_1_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 + assert fork_1_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.justified_checkpoint == fork_1_state.current_justified_checkpoint + + # ------ fork_2_state: Create a chain to set store.best_justified_checkpoint + # NOTE: The goal is to make `store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch` + all_blocks = [] + + # Proposed an empty block at epoch 2, 1st slot + block = build_empty_block_for_next_slot(spec, fork_2_state) + signed_block = state_transition_and_sign_block(spec, fork_2_state, block) + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert fork_2_state.current_justified_checkpoint.epoch == 0 + + # Skip to epoch 4 + for _ in range(2): + next_epoch(spec, fork_2_state) + assert fork_2_state.current_justified_checkpoint.epoch == 0 + + # Propose a block at epoch 4, 5th slot + # Propose a block at epoch 5, 5th slot + for _ in range(2): + next_epoch(spec, fork_2_state) + next_slots(spec, fork_2_state, 4) + signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True) + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert fork_2_state.current_justified_checkpoint.epoch == 0 + + # Propose a block at epoch 6, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot + next_epoch(spec, fork_2_state) + next_slots(spec, fork_2_state, spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2) + signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True) + assert fork_2_state.finalized_checkpoint.epoch == 0 + assert fork_2_state.current_justified_checkpoint.epoch == 5 + # Check SAFE_SLOTS_TO_UPDATE_JUSTIFIED + spec.on_tick(store, store.genesis_time + fork_2_state.slot * spec.config.SECONDS_PER_SLOT) + assert spec.compute_slots_since_epoch_start(spec.get_current_slot(store)) >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + # Run on_block + yield from add_block(spec, store, signed_block, test_steps) + assert store.finalized_checkpoint.epoch == 0 + assert store.justified_checkpoint.epoch == 3 + assert store.best_justified_checkpoint.epoch == 5 + + # ------ fork_3_state: Create another chain to test the + # "Update justified if new justified is later than store justified" case + all_blocks = [] + for _ in range(3): + 
next_epoch(spec, fork_3_state) + + # epoch 3 + _, signed_blocks, fork_3_state = next_epoch_with_attestations(spec, fork_3_state, True, True) + all_blocks += signed_blocks + assert fork_3_state.finalized_checkpoint.epoch == 0 + + # epoch 4, attest the first 5 blocks + _, blocks, fork_3_state = next_slots_with_attestations(spec, fork_3_state, 5, True, True) + all_blocks += blocks.copy() + assert fork_3_state.finalized_checkpoint.epoch == 0 + + # Propose a block at epoch 5, 5th slot + next_epoch(spec, fork_3_state) + next_slots(spec, fork_3_state, 4) + signed_block = state_transition_with_full_block(spec, fork_3_state, True, True) + all_blocks.append(signed_block.copy()) + assert fork_3_state.finalized_checkpoint.epoch == 0 + + # Propose a block at epoch 6, 5th slot + next_epoch(spec, fork_3_state) + next_slots(spec, fork_3_state, 4) + signed_block = state_transition_with_full_block(spec, fork_3_state, True, True) + all_blocks.append(signed_block.copy()) + assert fork_3_state.finalized_checkpoint.epoch == 3 + assert fork_3_state.current_justified_checkpoint.epoch == 4 + + # FIXME: pending on the `on_block`, `on_attestation` fix + # # Apply blocks of `fork_3_state` to `store` + # for block in all_blocks: + # if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot): + # spec.on_tick(store, store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT) + # # valid_attestations=False because the attestations are outdated (older than previous epoch) + # yield from add_block(spec, store, block, test_steps, allow_invalid_attestations=False) + + # assert store.finalized_checkpoint == fork_3_state.finalized_checkpoint + # assert (store.justified_checkpoint + # == fork_3_state.current_justified_checkpoint + # != store.best_justified_checkpoint) + # assert (store.best_justified_checkpoint + # == fork_2_state.current_justified_checkpoint) + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): + """ + J: Justified + F: Finalized + state (forked from genesis): + epoch + [0] <- [1] <- [2] <- [3] <- [4] <- [5] + F J + + another_state (forked from epoch 0): + └──── [1] <- [2] <- [3] <- [4] <- [5] + F J + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # ----- Process state + # Goal: make `store.finalized_checkpoint.epoch == 0` and `store.justified_checkpoint.epoch == 3` + # Skip epoch 0 + next_epoch(spec, state) + + # Forking another_state + another_state = state.copy() + + # Fill epoch 1 with previous epoch attestations + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, False, True, test_steps=test_steps) + # Skip epoch 2 + next_epoch(spec, state) + # Fill epoch 3 & 4 with previous epoch attestations + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, False, True, test_steps=test_steps) + + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.justified_checkpoint == state.current_justified_checkpoint + + # Create another chain + # Goal: make 
`another_state.finalized_checkpoint.epoch == 2` and `another_state.justified_checkpoint.epoch == 3` + all_blocks = [] + # Fill epoch 1 & 2 with previous + current epoch attestations + for _ in range(3): + _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True) + all_blocks += signed_blocks + + assert another_state.finalized_checkpoint.epoch == 2 + assert another_state.current_justified_checkpoint.epoch == 3 + assert state.finalized_checkpoint.hash_tree_root() != another_state.finalized_checkpoint.hash_tree_root() + assert ( + state.current_justified_checkpoint.hash_tree_root() + != another_state.current_justified_checkpoint.hash_tree_root() + ) + # pre_store_justified_checkpoint_root = store.justified_checkpoint.root + + # FIXME: pending on the `on_block`, `on_attestation` fix + # # Apply blocks of `another_state` to `store` + # for block in all_blocks: + # # NOTE: Do not call `on_tick` here + # yield from add_block(spec, store, block, test_steps, allow_invalid_attestations=True) + + # finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + # ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot) + # assert ancestor_at_finalized_slot != store.finalized_checkpoint.root + + # assert store.finalized_checkpoint == another_state.finalized_checkpoint + # assert store.justified_checkpoint == another_state.current_justified_checkpoint + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): + """ + J: Justified + F: Finalized + state: + epoch + [0] <- [1] <- [2] <- [3] <- [4] <- [5] + F J + + another_state (forked from state at epoch 3): + └──── [4] <- [5] + F J + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # Process state + next_epoch(spec, state) + spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) + + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, False, True, test_steps=test_steps) + + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False, test_steps=test_steps) + next_epoch(spec, state) + + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, False, True, test_steps=test_steps) + + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + assert store.justified_checkpoint == state.current_justified_checkpoint + + # Create another chain + # Forking from epoch 3 + all_blocks = [] + slot = spec.compute_start_slot_at_epoch(3) + block_root = spec.get_block_root_at_slot(state, slot) + another_state = store.block_states[block_root].copy() + for _ in range(2): + _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True) + all_blocks += signed_blocks + + assert another_state.finalized_checkpoint.epoch == 3 + assert another_state.current_justified_checkpoint.epoch == 4 + + pre_store_justified_checkpoint_root = store.justified_checkpoint.root + for block in 
all_blocks: + # FIXME: Once `on_block` and `on_attestation` logic is fixed, + # fix test case and remove allow_invalid_attestations flag + yield from tick_and_add_block(spec, store, block, test_steps, allow_invalid_attestations=True) + + finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot) + assert ancestor_at_finalized_slot == store.finalized_checkpoint.root + + assert store.finalized_checkpoint == another_state.finalized_checkpoint + assert store.justified_checkpoint != another_state.current_justified_checkpoint + + yield 'steps', test_steps diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py index 18e23ade2..92382c884 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py @@ -1,11 +1,9 @@ from copy import deepcopy -from eth2spec.utils.ssz.ssz_impl import hash_tree_root -from eth2spec.test.context import MINIMAL, with_all_phases, spec_state_test, with_presets -from eth2spec.test.helpers.attestations import ( - next_epoch_with_attestations, - next_slots_with_attestations, - state_transition_with_signed_full_block, +from eth2spec.utils.ssz.ssz_impl import hash_tree_root +from eth2spec.test.context import ( + spec_state_test, + with_all_phases, ) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, @@ -15,58 +13,25 @@ from eth2spec.test.helpers.fork_choice import ( run_on_block, apply_next_epoch_with_attestations, ) -from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block, next_slots - - -@with_all_phases -@spec_state_test -def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): - # Initialization - store = get_genesis_forkchoice_store(spec, state) - - next_epoch(spec, state) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store) - next_epoch(spec, state) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - last_block_root = last_signed_block.message.hash_tree_root() - - # NOTE: Mock the justified checkpoint - just_state = store.block_states[last_block_root] - new_justified = spec.Checkpoint( - epoch=just_state.current_justified_checkpoint.epoch + 1, - root=b'\x77' * 32, - ) - just_state.current_justified_checkpoint = new_justified # Mutate `store` - - assert store.block_states[last_block_root].hash_tree_root() == just_state.hash_tree_root() - - block = build_empty_block_for_next_slot(spec, just_state) - - # NOTE: Mock store so that the modified state could be accessed - parent_block = last_signed_block.message.copy() - parent_block.state_root = just_state.hash_tree_root() - store.blocks[block.parent_root] = parent_block - store.block_states[block.parent_root] = just_state.copy() - assert block.parent_root in store.blocks.keys() - assert block.parent_root in store.block_states.keys() - - signed_block = state_transition_and_sign_block(spec, just_state.copy(), block) - assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH < spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - run_on_block(spec, store, signed_block) - - assert store.justified_checkpoint == new_justified +from 
eth2spec.test.helpers.state import ( + next_epoch, + state_transition_and_sign_block, +) @with_all_phases @spec_state_test def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): + """ + NOTE: test_new_justified_is_later_than_store_justified also tests best_justified_checkpoint + """ # Initialization store = get_genesis_forkchoice_store(spec, state) next_epoch(spec, state) spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store) + state, store, last_signed_block = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False) last_block_root = hash_tree_root(last_signed_block.message) # NOTE: Mock fictitious justified checkpoint in store @@ -86,6 +51,7 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT) assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + previously_finalized = store.finalized_checkpoint previously_justified = store.justified_checkpoint # Add a series of new blocks with "better" justifications @@ -115,355 +81,7 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): run_on_block(spec, store, signed_block) + assert store.finalized_checkpoint == previously_finalized assert store.justified_checkpoint == previously_justified # ensure the best from the series was stored assert store.best_justified_checkpoint == best_justified_checkpoint - - -@with_all_phases -@spec_state_test -def test_on_block_outside_safe_slots_but_finality(spec, state): - # Initialization - store = get_genesis_forkchoice_store(spec, state) - - next_epoch(spec, state) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - state, store, last_signed_block = yield from apply_next_epoch_with_attestations(spec, state, store) - last_block_root = hash_tree_root(last_signed_block.message) - - # NOTE: Mock fictitious justified checkpoint in store - store.justified_checkpoint = spec.Checkpoint( - epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot), - root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000") - ) - - next_epoch(spec, state) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - - # NOTE: Mock a new higher justified checkpoint not in branch of store's justified checkpoint - just_block = build_empty_block_for_next_slot(spec, state) - store.blocks[just_block.hash_tree_root()] = just_block - - # Step time past safe slots - spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT) - assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - - # NOTE: Mock justified and finalized update in state - just_fin_state = store.block_states[last_block_root] - new_justified = spec.Checkpoint( - epoch=spec.compute_epoch_at_slot(just_block.slot) + 1, - root=just_block.hash_tree_root(), - ) - assert new_justified.epoch > store.justified_checkpoint.epoch - new_finalized = spec.Checkpoint( - epoch=spec.compute_epoch_at_slot(just_block.slot), - root=just_block.parent_root, - ) - assert new_finalized.epoch > store.finalized_checkpoint.epoch - just_fin_state.current_justified_checkpoint = new_justified - just_fin_state.finalized_checkpoint = new_finalized - - # Build 
and add block that includes the new justified/finalized info - block = build_empty_block_for_next_slot(spec, just_fin_state) - signed_block = state_transition_and_sign_block(spec, deepcopy(just_fin_state), block) - - # NOTE: Mock store so that the modified state could be accessed - parent_block = last_signed_block.message.copy() - parent_block.state_root = just_fin_state.hash_tree_root() - store.blocks[block.parent_root] = parent_block - store.block_states[block.parent_root] = just_fin_state.copy() - assert block.parent_root in store.blocks.keys() - assert block.parent_root in store.block_states.keys() - - run_on_block(spec, store, signed_block) - - assert store.finalized_checkpoint == new_finalized - assert store.justified_checkpoint == new_justified - - -@with_all_phases -@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch") -@spec_state_test -def test_new_justified_is_later_than_store_justified(spec, state): - """ - J: Justified - F: Finalized - fork_1_state (forked from genesis): - epoch - [0] <- [1] <- [2] <- [3] <- [4] - F J - - fork_2_state (forked from fork_1_state's epoch 2): - epoch - └──── [3] <- [4] <- [5] <- [6] - F J - - fork_3_state (forked from genesis): - [0] <- [1] <- [2] <- [3] <- [4] <- [5] - F J - """ - # The 1st fork, from genesis - fork_1_state = state.copy() - # The 3rd fork, from genesis - fork_3_state = state.copy() - - # Initialization - store = get_genesis_forkchoice_store(spec, fork_1_state) - - # ----- Process fork_1_state - # Skip epoch 0 - next_epoch(spec, fork_1_state) - # Fill epoch 1 with previous epoch attestations - _, signed_blocks, fork_1_state = next_epoch_with_attestations(spec, fork_1_state, False, True) - for block in signed_blocks: - spec.on_tick(store, store.genesis_time + fork_1_state.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, block) - - # Fork `fork_2_state` at the start of epoch 2 - fork_2_state = fork_1_state.copy() - assert spec.get_current_epoch(fork_2_state) == 2 - - # Skip epoch 2 - next_epoch(spec, fork_1_state) - # # Fill epoch 3 & 4 with previous epoch attestations - for _ in range(2): - _, signed_blocks, fork_1_state = next_epoch_with_attestations(spec, fork_1_state, False, True) - for block in signed_blocks: - spec.on_tick(store, store.genesis_time + fork_1_state.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, block) - - assert fork_1_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 - assert fork_1_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 - assert store.justified_checkpoint.hash_tree_root() == fork_1_state.current_justified_checkpoint.hash_tree_root() - - # ------ fork_2_state: Create a chain to set store.best_justified_checkpoint - # NOTE: The goal is to make `store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch` - all_blocks = [] - - # Proposed an empty block at epoch 2, 1st slot - block = build_empty_block_for_next_slot(spec, fork_2_state) - signed_block = state_transition_and_sign_block(spec, fork_2_state, block) - all_blocks.append(signed_block.copy()) - assert fork_2_state.current_justified_checkpoint.epoch == 0 - - # Skip to epoch 4 - for _ in range(2): - next_epoch(spec, fork_2_state) - assert fork_2_state.current_justified_checkpoint.epoch == 0 - - # Propose a block at epoch 4, 5th slot - # Propose a block at epoch 5, 5th slot - for _ in range(2): - next_epoch(spec, fork_2_state) - next_slots(spec, fork_2_state, 4) - signed_block = 
state_transition_with_signed_full_block(spec, fork_2_state, True, True) - all_blocks.append(signed_block.copy()) - assert fork_2_state.current_justified_checkpoint.epoch == 0 - - # Propose a block at epoch 6, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot - next_epoch(spec, fork_2_state) - next_slots(spec, fork_2_state, spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2) - signed_block = state_transition_with_signed_full_block(spec, fork_2_state, True, True) - all_blocks.append(signed_block.copy()) - assert fork_2_state.finalized_checkpoint.epoch == 0 - assert fork_2_state.current_justified_checkpoint.epoch == 5 - - # Check SAFE_SLOTS_TO_UPDATE_JUSTIFIED - spec.on_tick(store, store.genesis_time + fork_2_state.slot * spec.config.SECONDS_PER_SLOT) - assert spec.compute_slots_since_epoch_start(spec.get_current_slot(store)) >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - - # Apply blocks of `fork_3_state` to `store` - for block in all_blocks: - if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot): - spec.on_tick(store, store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, block) - - assert store.finalized_checkpoint.epoch == 0 - assert store.justified_checkpoint.epoch == 3 - assert store.best_justified_checkpoint.epoch == 5 - - # ------ fork_3_state: Create another chain to test the - # "Update justified if new justified is later than store justified" case - all_blocks = [] - for _ in range(3): - next_epoch(spec, fork_3_state) - - # epoch 3 - _, signed_blocks, fork_3_state = next_epoch_with_attestations(spec, fork_3_state, True, True) - all_blocks += signed_blocks - assert fork_3_state.finalized_checkpoint.epoch == 0 - - # epoch 4, attest the first 5 blocks - _, blocks, fork_3_state = next_slots_with_attestations(spec, fork_3_state, 5, True, True) - all_blocks += blocks.copy() - assert fork_3_state.finalized_checkpoint.epoch == 0 - - # Propose a block at epoch 5, 5th slot - next_epoch(spec, fork_3_state) - next_slots(spec, fork_3_state, 4) - signed_block = state_transition_with_signed_full_block(spec, fork_3_state, True, True) - all_blocks.append(signed_block.copy()) - assert fork_3_state.finalized_checkpoint.epoch == 0 - - # Propose a block at epoch 6, 5th slot - next_epoch(spec, fork_3_state) - next_slots(spec, fork_3_state, 4) - signed_block = state_transition_with_signed_full_block(spec, fork_3_state, True, True) - all_blocks.append(signed_block.copy()) - assert fork_3_state.finalized_checkpoint.epoch == 3 - assert fork_3_state.current_justified_checkpoint.epoch == 4 - - # Apply blocks of `fork_3_state` to `store` - for block in all_blocks: - if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot): - spec.on_tick(store, store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, block) - - assert store.finalized_checkpoint.hash_tree_root() == fork_3_state.finalized_checkpoint.hash_tree_root() - assert (store.justified_checkpoint.hash_tree_root() - == fork_3_state.current_justified_checkpoint.hash_tree_root() - != store.best_justified_checkpoint.hash_tree_root()) - assert (store.best_justified_checkpoint.hash_tree_root() - == fork_2_state.current_justified_checkpoint.hash_tree_root()) - - -@with_all_phases -@spec_state_test -def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): - """ - J: Justified - F: Finalized - state (forked from genesis): - epoch - [0] <- [1] <- [2] <- [3] <- [4] <- [5] - F J - - another_state (forked from epoch 0): - └──── [1] <- [2] <- [3] 
<- [4] <- [5] - F J - """ - # Initialization - store = get_genesis_forkchoice_store(spec, state) - - # ----- Process state - # Goal: make `store.finalized_checkpoint.epoch == 0` and `store.justified_checkpoint.epoch == 3` - # Skip epoch 0 - next_epoch(spec, state) - - # Forking another_state - another_state = state.copy() - - # Fill epoch 1 with previous epoch attestations - _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) - for block in signed_blocks: - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, block) - # Skip epoch 2 - next_epoch(spec, state) - # Fill epoch 3 & 4 with previous epoch attestations - for _ in range(2): - _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) - for block in signed_blocks: - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, block) - - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 - assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 - assert store.justified_checkpoint.hash_tree_root() == state.current_justified_checkpoint.hash_tree_root() - - # Create another chain - # Goal: make `another_state.finalized_checkpoint.epoch == 2` and `another_state.justified_checkpoint.epoch == 3` - all_blocks = [] - # Fill epoch 1 & 2 with previous + current epoch attestations - for _ in range(3): - _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True) - all_blocks += signed_blocks - - assert another_state.finalized_checkpoint.epoch == 2 - assert another_state.current_justified_checkpoint.epoch == 3 - assert state.finalized_checkpoint.hash_tree_root() != another_state.finalized_checkpoint.hash_tree_root() - assert ( - state.current_justified_checkpoint.hash_tree_root() - != another_state.current_justified_checkpoint.hash_tree_root() - ) - pre_store_justified_checkpoint_root = store.justified_checkpoint.root - - # Apply blocks of `another_state` to `store` - for block in all_blocks: - # NOTE: Do not call `on_tick` here - run_on_block(spec, store, block) - - finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot) - assert ancestor_at_finalized_slot != store.finalized_checkpoint.root - - assert store.finalized_checkpoint.hash_tree_root() == another_state.finalized_checkpoint.hash_tree_root() - assert store.justified_checkpoint.hash_tree_root() == another_state.current_justified_checkpoint.hash_tree_root() - - -@with_all_phases -@spec_state_test -def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): - """ - J: Justified - F: Finalized - state: - epoch - [0] <- [1] <- [2] <- [3] <- [4] <- [5] - F J - - another_state (forked from state at epoch 3): - └──── [4] <- [5] - F J - """ - # Initialization - store = get_genesis_forkchoice_store(spec, state) - - # Process state - next_epoch(spec, state) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) - for block in signed_blocks: - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, block) - _, signed_blocks, state = next_epoch_with_attestations(spec, state, True, False) - for block in signed_blocks: - 
spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, block) - next_epoch(spec, state) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - for _ in range(2): - _, signed_blocks, state = next_epoch_with_attestations(spec, state, False, True) - for block in signed_blocks: - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, block) - - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 - assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 - assert store.justified_checkpoint.hash_tree_root() == state.current_justified_checkpoint.hash_tree_root() - - # Create another chain - # Forking from epoch 3 - all_blocks = [] - slot = spec.compute_start_slot_at_epoch(3) - block_root = spec.get_block_root_at_slot(state, slot) - another_state = store.block_states[block_root].copy() - for _ in range(2): - _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True) - all_blocks += signed_blocks - - assert another_state.finalized_checkpoint.epoch == 3 - assert another_state.current_justified_checkpoint.epoch == 4 - - pre_store_justified_checkpoint_root = store.justified_checkpoint.root - for block in all_blocks: - if store.time < spec.compute_time_at_slot(another_state, block.message.slot): - spec.on_tick(store, store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT) - run_on_block(spec, store, block) - - finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot) - assert ancestor_at_finalized_slot == store.finalized_checkpoint.root - - assert store.finalized_checkpoint.hash_tree_root() == another_state.finalized_checkpoint.hash_tree_root() - assert store.justified_checkpoint.hash_tree_root() != another_state.current_justified_checkpoint.hash_tree_root() From 27763bdd869d98193e7be0a8b39d312f0bb6d1d4 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 30 Jun 2021 05:40:26 +0800 Subject: [PATCH 10/75] clean up --- .../test/phase0/fork_choice/test_on_block.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index c3374d4f8..634610fe7 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -86,7 +86,7 @@ def test_on_block_checkpoints(spec, state): # Run for 1 epoch with full attestations next_epoch(spec, state) - on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) state, store, last_signed_block = yield from apply_next_epoch_with_attestations( spec, state, store, True, False, test_steps=test_steps) @@ -95,7 +95,7 @@ def test_on_block_checkpoints(spec, state): # Forward 1 epoch next_epoch(spec, state) - on_tick_and_append_step(spec, store, store.time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) # Mock the finalized_checkpoint and build a block on it 
fin_state = store.block_states[last_block_root].copy() @@ -596,11 +596,9 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): assert another_state.finalized_checkpoint.epoch == 2 assert another_state.current_justified_checkpoint.epoch == 3 - assert state.finalized_checkpoint.hash_tree_root() != another_state.finalized_checkpoint.hash_tree_root() - assert ( - state.current_justified_checkpoint.hash_tree_root() - != another_state.current_justified_checkpoint.hash_tree_root() - ) + assert state.finalized_checkpoint != another_state.finalized_checkpoint + assert state.current_justified_checkpoint != another_state.current_justified_checkpoint + # pre_store_justified_checkpoint_root = store.justified_checkpoint.root # FIXME: pending on the `on_block`, `on_attestation` fix From 5582490364031ca014f79084b36ce14a52358899 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 14 Jul 2021 13:40:34 +0200 Subject: [PATCH 11/75] Apply missing spec_test decorators, to handle generator_mode flag --- .../altair/block_processing/test_process_sync_aggregate.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py index b9ac8a954..ad285f43c 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py @@ -26,6 +26,7 @@ from eth2spec.test.context import ( with_presets, spec_state_test, always_bls, + spec_test, ) from eth2spec.utils.hash_function import hash @@ -534,6 +535,7 @@ def test_random_all_but_one_participating_with_duplicates(spec, state): @with_altair_and_later @with_presets([MAINNET], reason="to create duplicate committee") +@spec_test @with_custom_state(balances_fn=misc_balances, threshold_fn=default_activation_threshold) @single_phase def test_random_misc_balances_and_half_participation_with_duplicates(spec, state): @@ -596,6 +598,7 @@ def test_random_all_but_one_participating_without_duplicates(spec, state): @with_altair_and_later @with_presets([MINIMAL], reason="to create nonduplicate committee") +@spec_test @with_custom_state(balances_fn=misc_balances, threshold_fn=default_activation_threshold) @single_phase def test_random_misc_balances_and_half_participation_without_duplicates(spec, state): From c420968f664d6400ebfadab56ebdc9a3bb66e93a Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 14 Jul 2021 11:08:54 -0600 Subject: [PATCH 12/75] fix last beta.1 mainnet test --- .../altair/epoch_processing/test_process_inactivity_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py index fd1bf3c57..634c7bc48 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py @@ -218,7 +218,7 @@ def test_some_slashed_zero_scores_full_participation(spec, state): @spec_state_test @leaking() def test_some_slashed_zero_scores_full_participation_leaking(spec, state): - slash_some_validators(spec, state, rng=Random(33221)) + slash_some_validators(spec, state, rng=Random(332243)) yield from run_inactivity_scores_test( spec, state, 
set_full_participation, zero_inactivity_scores, From a4a050e97af47bde6ee1aa0d1a8facec6c4fed6d Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Fri, 16 Jul 2021 19:16:32 +0600 Subject: [PATCH 13/75] Enforce terminal PoW block to be on the cusp --- setup.py | 2 +- specs/merge/fork-choice.md | 9 ++++++--- specs/merge/validator.md | 3 ++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index fa73e0824..c9b22b1a9 100644 --- a/setup.py +++ b/setup.py @@ -509,7 +509,7 @@ ExecutionState = Any def get_pow_block(hash: Bytes32) -> PowBlock: - return PowBlock(block_hash=hash, is_valid=True, is_processed=True, + return PowBlock(block_hash=hash, parent_hash=Bytes32(), is_valid=True, is_processed=True, total_difficulty=uint256(0), difficulty=uint256(0)) diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index d0f327137..e21b54a7e 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -82,6 +82,7 @@ class TransitionStore(object): @dataclass class PowBlock(object): block_hash: Hash32 + parent_hash: Hash32 is_processed: boolean is_valid: boolean total_difficulty: uint256 @@ -99,9 +100,10 @@ Let `get_pow_block(block_hash: Hash32) -> PowBlock` be the function that given t Used by fork-choice handler, `on_block`. ```python -def is_valid_terminal_pow_block(transition_store: TransitionStore, block: PowBlock) -> bool: +def is_valid_terminal_pow_block(transition_store: TransitionStore, block: PowBlock, parent: PowBlock) -> bool: is_total_difficulty_reached = block.total_difficulty >= transition_store.transition_total_difficulty - return block.is_valid and is_total_difficulty_reached + is_parent_total_difficulty_valid = parent.total_difficulty < transition_store.transition_total_difficulty + return block.is_valid and is_total_difficulty_reached and is_parent_total_difficulty_valid ``` ## Updated fork-choice handlers @@ -130,8 +132,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock, transition_store: Tr if (transition_store is not None) and is_merge_block(pre_state, block): # Delay consideration of block until PoW block is processed by the PoW node pow_block = get_pow_block(block.body.execution_payload.parent_hash) + pow_parent = get_pow_block(pow_block.parent_hash) assert pow_block.is_processed - assert is_valid_terminal_pow_block(transition_store, pow_block) + assert is_valid_terminal_pow_block(transition_store, pow_block, pow_parent) # Check the block is valid and compute the post-state state = pre_state.copy() diff --git a/specs/merge/validator.md b/specs/merge/validator.md index 97e6a1deb..be8f8b274 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -90,7 +90,8 @@ def get_execution_payload(state: BeaconState, execution_engine: ExecutionEngine) -> ExecutionPayload: if not is_merge_complete(state): pow_block = get_pow_chain_head() - if not is_valid_terminal_pow_block(transition_store, pow_block): + pow_parent = get_pow_block(pow_block.parent_hash) + if not is_valid_terminal_pow_block(transition_store, pow_block, pow_parent): # Pre-merge, empty payload return ExecutionPayload() else: From f668b2b43342e3fe0c93c24f91357a3f62111fc9 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Sat, 17 Jul 2021 16:26:18 +1000 Subject: [PATCH 14/75] Add tests for SyncAggregate with no participants and all zero signature. 
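These cases exercise a special case in the Altair BLS extensions: the G2 point
at infinity is a valid aggregate signature only for an empty participant set,
and an all-zero 96-byte signature is not the infinity-point encoding, so it
must be rejected even when no participants are set. For reference, the wrapper
under test is defined in specs/altair/bls.md approximately as:

```python
def eth_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
    """
    Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty.
    """
    # Special case: no participants -> only the infinity-point signature verifies
    if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY:
        return True
    return bls.FastAggregateVerify(pubkeys, message, signature)
```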
--- .../test_process_sync_aggregate.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py index ad285f43c..effd01047 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py @@ -113,6 +113,21 @@ def test_invalid_signature_missing_participant(spec, state): yield from run_sync_committee_processing(spec, state, block, expect_exception=True) +@with_altair_and_later +@spec_state_test +@always_bls +def test_invalid_signature_no_participants(spec, state): + committee_indices = compute_committee_indices(spec, state, state.current_sync_committee) + + block = build_empty_block_for_next_slot(spec, state) + # Exclude one participant whose signature was included. + block.body.sync_aggregate = spec.SyncAggregate( + sync_committee_bits=[False for _ in committee_indices], + sync_committee_signature=b'\x00' * 96 + ) + yield from run_sync_committee_processing(spec, state, block, expect_exception=True) + + @with_altair_and_later @spec_state_test @always_bls From add00ad2e090baba86971f3b68b13f9ecd15c72c Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Sat, 17 Jul 2021 12:33:06 +0600 Subject: [PATCH 15/75] Replace get_pow_chain_head with get_pow_block_at_total_difficulty in validator.md --- setup.py | 1 + specs/merge/validator.md | 28 +++++++++++++++++----------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/setup.py b/setup.py index c9b22b1a9..69ae05666 100644 --- a/setup.py +++ b/setup.py @@ -496,6 +496,7 @@ class MergeSpecBuilder(Phase0SpecBuilder): from typing import Protocol from eth2spec.phase0 import {preset_name} as phase0 from eth2spec.utils.ssz.ssz_typing import Bytes20, ByteList, ByteVector, uint256, Union +from typing import Union as PyUnion ''' @classmethod diff --git a/specs/merge/validator.md b/specs/merge/validator.md index be8f8b274..22569887e 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -19,7 +19,6 @@ - [Block proposal](#block-proposal) - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) - [Execution Payload](#execution-payload) - - [`get_pow_chain_head`](#get_pow_chain_head) @@ -63,13 +62,20 @@ All validator responsibilities remain unchanged other than those noted below. Na ##### Execution Payload -###### `get_pow_chain_head` - -Let `get_pow_chain_head() -> PowBlock` be the function that returns the head of the PoW chain. The body of the function is implementation specific. 
- -* Set `block.body.execution_payload = get_execution_payload(state, transition_store, execution_engine)` where: +* Set `block.body.execution_payload = get_execution_payload(state, transition_store, execution_engine, pow_chain)` where: ```python +def get_pow_block_at_total_difficulty(total_difficulty: uint256, + pow_chain: Sequence[PowBlock]) -> PyUnion[PowBlock, None]: + # `pow_chain` abstractly represents all blocks in the PoW chain + for block in pow_chain: + parent = get_pow_block(block.parent_hash) + if block.total_difficulty >= total_difficulty and parent.total_difficulty < total_difficulty: + return block + + return None + + def compute_randao_mix(state: BeaconState, randao_reveal: BLSSignature) -> Bytes32: epoch = get_current_epoch(state) return xor(get_randao_mix(state, epoch), hash(randao_reveal)) @@ -87,16 +93,16 @@ def produce_execution_payload(state: BeaconState, def get_execution_payload(state: BeaconState, transition_store: TransitionStore, randao_reveal: BLSSignature, - execution_engine: ExecutionEngine) -> ExecutionPayload: + execution_engine: ExecutionEngine, + pow_chain: Sequence[PowBlock]) -> ExecutionPayload: if not is_merge_complete(state): - pow_block = get_pow_chain_head() - pow_parent = get_pow_block(pow_block.parent_hash) - if not is_valid_terminal_pow_block(transition_store, pow_block, pow_parent): + terminal_pow_block = get_pow_block_at_total_difficulty(transition_store.transition_total_difficulty, pow_chain) + if terminal_pow_block is None: # Pre-merge, empty payload return ExecutionPayload() else: # Signify merge via producing on top of the last PoW block - return produce_execution_payload(state, pow_block.block_hash, randao_reveal, execution_engine) + return produce_execution_payload(state, terminal_pow_block.block_hash, randao_reveal, execution_engine) # Post-merge, normal payload parent_hash = state.latest_execution_payload_header.block_hash From 11d54af89d382ea14de42593f87eaf82890c3d49 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Sat, 17 Jul 2021 16:34:41 +1000 Subject: [PATCH 16/75] Add test to confirm infinite signature is invalid when there are participants. --- .../test_process_sync_aggregate.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py index effd01047..2557715e4 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py @@ -128,6 +128,21 @@ def test_invalid_signature_no_participants(spec, state): yield from run_sync_committee_processing(spec, state, block, expect_exception=True) +@with_altair_and_later +@spec_state_test +@always_bls +def test_invalid_signature_infinite_signature_with_participants(spec, state): + committee_indices = compute_committee_indices(spec, state, state.current_sync_committee) + + block = build_empty_block_for_next_slot(spec, state) + # Exclude one participant whose signature was included. 
+ block.body.sync_aggregate = spec.SyncAggregate( + sync_committee_bits=[True for _ in committee_indices], + sync_committee_signature=spec.G2_POINT_AT_INFINITY + ) + yield from run_sync_committee_processing(spec, state, block, expect_exception=True) + + @with_altair_and_later @spec_state_test @always_bls From f16cfe7c3a5933876d0e2b2006a8d5a57b9f3d03 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 17 Jul 2021 14:46:25 +0200 Subject: [PATCH 17/75] update sync aggregate tests --- .../test_process_sync_aggregate.py | 29 +++++++++++++------ 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py index 2557715e4..e4176ee58 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py @@ -117,27 +117,38 @@ def test_invalid_signature_missing_participant(spec, state): @spec_state_test @always_bls def test_invalid_signature_no_participants(spec, state): - committee_indices = compute_committee_indices(spec, state, state.current_sync_committee) - block = build_empty_block_for_next_slot(spec, state) - # Exclude one participant whose signature was included. + # No participants is an allowed case, but needs a specific signature, not the full-zeroed signature. block.body.sync_aggregate = spec.SyncAggregate( - sync_committee_bits=[False for _ in committee_indices], + sync_committee_bits=[False] * len(block.body.sync_aggregate.sync_committee_bits), sync_committee_signature=b'\x00' * 96 ) yield from run_sync_committee_processing(spec, state, block, expect_exception=True) +# No-participants, with valid signature, is tested in test_sync_committee_rewards_empty_participants already. + + +@with_altair_and_later +@spec_state_test +@always_bls +def test_invalid_signature_infinite_signature_with_all_participants(spec, state): + block = build_empty_block_for_next_slot(spec, state) + # Include all participants, try the special-case signature for no-participants + block.body.sync_aggregate = spec.SyncAggregate( + sync_committee_bits=[True] * len(block.body.sync_aggregate.sync_committee_bits), + sync_committee_signature=spec.G2_POINT_AT_INFINITY + ) + yield from run_sync_committee_processing(spec, state, block, expect_exception=True) + @with_altair_and_later @spec_state_test @always_bls -def test_invalid_signature_infinite_signature_with_participants(spec, state): - committee_indices = compute_committee_indices(spec, state, state.current_sync_committee) - +def test_invalid_signature_infinite_signature_with_single_participant(spec, state): block = build_empty_block_for_next_slot(spec, state) - # Exclude one participant whose signature was included. + # Try include a single participant with the special-case signature for no-participants. 
block.body.sync_aggregate = spec.SyncAggregate( - sync_committee_bits=[True for _ in committee_indices], + sync_committee_bits=[True] + ([False] * (len(block.body.sync_aggregate.sync_committee_bits) - 1)), sync_committee_signature=spec.G2_POINT_AT_INFINITY ) yield from run_sync_committee_processing(spec, state, block, expect_exception=True) From 5ad36bd3d5272eff047cfcd42cc1e7f01b22168e Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 17 Jul 2021 15:55:38 +0200 Subject: [PATCH 18/75] Update remerkleable to v0.1.22: list lookup speed improvement --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index fa73e0824..1b9cbefbb 100644 --- a/setup.py +++ b/setup.py @@ -1024,7 +1024,7 @@ setup( "py_ecc==5.2.0", "milagro_bls_binding==1.6.3", "dataclasses==0.6", - "remerkleable==0.1.21", + "remerkleable==0.1.22", RUAMEL_YAML_VERSION, "lru-dict==1.1.6", MARKO_VERSION, From 1e484b8d98fc55afbe686e2e4e635d6b3f1d19ef Mon Sep 17 00:00:00 2001 From: Piotr Chromiec Date: Mon, 19 Jul 2021 14:48:30 +0200 Subject: [PATCH 19/75] README typo fix --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d1a0b2876..f5ac598b6 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ This repository hosts the current Eth2 specifications. Discussions about design [![GitHub release](https://img.shields.io/github/v/release/ethereum/eth2.0-specs)](https://github.com/ethereum/eth2.0-specs/releases/) [![PyPI version](https://badge.fury.io/py/eth2spec.svg)](https://badge.fury.io/py/eth2spec) -Core specifications for Eth2 clients be found in [specs](specs/). These are divided into features. +Core specifications for Eth2 clients can be found in [specs](specs/). These are divided into features. Features are researched and developed in parallel, and then consolidated into sequential upgrades when ready. The current features are: From 758b828ecb5520f6821cdb57d60be0c8bb982ffa Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Mon, 19 Jul 2021 14:19:44 -0700 Subject: [PATCH 20/75] Update slashing helper to avoid proposer. Fixes #2521. --- .../test_process_inactivity_updates.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py index 634c7bc48..f7d2fa9c8 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py @@ -195,17 +195,24 @@ def test_random_inactivity_scores_full_participation_leaking(spec, state): assert spec.is_in_inactivity_leak(state) -def slash_some_validators(spec, state, rng=Random(40404040)): +def slash_some_validators_for_inactivity_scores_test(spec, state, rng=Random(40404040)): + # ``run_inactivity_scores_test`` runs at the next epoch from `state`. 
+ # We retrieve the proposer of this future state to avoid + # accidentally slashing that validator + future_state = state.copy() + next_epoch_via_block(spec, future_state) + + proposer_index = spec.get_beacon_proposer_index(future_state) # Slash ~1/4 of validaors for validator_index in range(len(state.validators)): - if rng.choice(range(4)) == 0: + if rng.choice(range(4)) == 0 and validator_index != proposer_index: spec.slash_validator(state, validator_index) @with_altair_and_later @spec_state_test def test_some_slashed_zero_scores_full_participation(spec, state): - slash_some_validators(spec, state, rng=Random(33429)) + slash_some_validators_for_inactivity_scores_test(spec, state, rng=Random(33429)) yield from run_inactivity_scores_test( spec, state, set_full_participation, zero_inactivity_scores, @@ -218,7 +225,7 @@ def test_some_slashed_zero_scores_full_participation(spec, state): @spec_state_test @leaking() def test_some_slashed_zero_scores_full_participation_leaking(spec, state): - slash_some_validators(spec, state, rng=Random(332243)) + slash_some_validators_for_inactivity_scores_test(spec, state, rng=Random(332243)) yield from run_inactivity_scores_test( spec, state, set_full_participation, zero_inactivity_scores, @@ -239,7 +246,7 @@ def test_some_slashed_zero_scores_full_participation_leaking(spec, state): @spec_state_test def test_some_slashed_full_random(spec, state): rng = Random(1010222) - slash_some_validators(spec, state, rng=rng) + slash_some_validators_for_inactivity_scores_test(spec, state, rng=rng) yield from run_inactivity_scores_test( spec, state, randomize_attestation_participation, randomize_inactivity_scores, rng=rng, @@ -251,7 +258,7 @@ def test_some_slashed_full_random(spec, state): @leaking() def test_some_slashed_full_random_leaking(spec, state): rng = Random(1102233) - slash_some_validators(spec, state, rng=rng) + slash_some_validators_for_inactivity_scores_test(spec, state, rng=rng) yield from run_inactivity_scores_test( spec, state, randomize_previous_epoch_participation, randomize_inactivity_scores, rng=rng, From 65f6aa1b33b84a94b613a76fc9c6f3122e0e0344 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 20 Jul 2021 17:37:52 +0600 Subject: [PATCH 21/75] Replace PyUnion with Optional --- setup.py | 1 - specs/merge/validator.md | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 69ae05666..c9b22b1a9 100644 --- a/setup.py +++ b/setup.py @@ -496,7 +496,6 @@ class MergeSpecBuilder(Phase0SpecBuilder): from typing import Protocol from eth2spec.phase0 import {preset_name} as phase0 from eth2spec.utils.ssz.ssz_typing import Bytes20, ByteList, ByteVector, uint256, Union -from typing import Union as PyUnion ''' @classmethod diff --git a/specs/merge/validator.md b/specs/merge/validator.md index 22569887e..2fd7bcc22 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -65,8 +65,7 @@ All validator responsibilities remain unchanged other than those noted below. 
Na * Set `block.body.execution_payload = get_execution_payload(state, transition_store, execution_engine, pow_chain)` where: ```python -def get_pow_block_at_total_difficulty(total_difficulty: uint256, - pow_chain: Sequence[PowBlock]) -> PyUnion[PowBlock, None]: +def get_pow_block_at_total_difficulty(total_difficulty: uint256, pow_chain: Sequence[PowBlock]) -> Optional[PowBlock]: # `pow_chain` abstractly represents all blocks in the PoW chain for block in pow_chain: parent = get_pow_block(block.parent_hash) From 02a9fc460e4bbb806e7b18ae80e0bbaac810651b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 21 Jul 2021 11:03:19 -0600 Subject: [PATCH 22/75] require aggregation bits to have at least one participant in sync contributions --- specs/altair/p2p-interface.md | 2 ++ specs/altair/validator.md | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index e5e391ff5..ba8d09c04 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -139,6 +139,8 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64 - _[IGNORE]_ The contribution's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `contribution.slot == current_slot`. - _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`. +- _[REJECT]_ The contribution has participants -- + that is, `len(set(bit for bit in contribution.aggregation_bits if bit == True)) >= 1`. - _[REJECT]_ `contribution_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_sync_committee_aggregator(contribution_and_proof.selection_proof)` returns `True`. - _[REJECT]_ The aggregator's validator index is in the declared subcommittee of the current sync committee -- i.e. `state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index)`. diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 8742900bc..badef2de5 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -354,7 +354,7 @@ def is_sync_committee_aggregator(signature: BLSSignature) -> bool: If a validator is selected to aggregate the `SyncCommitteeMessage`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`. -Given all of the (valid) collected `sync_committee_messages: Set[SyncCommitteeMessage]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields: +Collect all of the (valid) `sync_committee_messages: Set[SyncCommitteeMessage]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator. 
If `len(sync_committee_messages) > 0`, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields: ###### Slot From f7a2a973acd9005a64abbf5f164d6b879cb1d6d9 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 21 Jul 2021 15:51:59 -0600 Subject: [PATCH 23/75] Update specs/altair/p2p-interface.md Co-authored-by: Alex Stokes --- specs/altair/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index ba8d09c04..6820fd10a 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -140,7 +140,7 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64 - _[IGNORE]_ The contribution's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `contribution.slot == current_slot`. - _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`. - _[REJECT]_ The contribution has participants -- - that is, `len(set(bit for bit in contribution.aggregation_bits if bit == True)) >= 1`. + that is, `any(contribution.aggregation_bits)`. - _[REJECT]_ `contribution_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_sync_committee_aggregator(contribution_and_proof.selection_proof)` returns `True`. - _[REJECT]_ The aggregator's validator index is in the declared subcommittee of the current sync committee -- i.e. `state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index)`. From 17fad2dea2ee7e0747d346cf600590721b9b9c59 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 22 Jul 2021 16:36:41 +0200 Subject: [PATCH 24/75] rebase The Merge onto Altair base functionality --- setup.py | 28 ++++----- specs/merge/beacon-chain.md | 58 +++++++++++++++++-- specs/merge/fork.md | 21 ++++--- specs/merge/validator.md | 8 +-- specs/sharding/beacon-chain.md | 21 +++---- specs/sharding/p2p-interface.md | 3 +- tests/core/pyspec/eth2spec/test/context.py | 20 ++----- .../eth2spec/test/helpers/block_processing.py | 1 + .../eth2spec/test/helpers/epoch_processing.py | 2 +- .../pyspec/eth2spec/test/helpers/genesis.py | 14 +++-- .../eth2spec/test/helpers/merge/fork.py | 8 ++- .../test/merge/fork/test_merge_fork_basic.py | 16 ++--- .../test/merge/fork/test_merge_fork_random.py | 20 +++---- 13 files changed, 132 insertions(+), 88 deletions(-) diff --git a/setup.py b/setup.py index 552c69740..4c6969a26 100644 --- a/setup.py +++ b/setup.py @@ -447,7 +447,7 @@ class AltairSpecBuilder(Phase0SpecBuilder): @classmethod def imports(cls, preset_name: str) -> str: return super().imports(preset_name) + '\n' + f''' -from typing import NewType, Union +from typing import NewType, Union as PyUnion from eth2spec.phase0 import {preset_name} as phase0 from eth2spec.utils.ssz.ssz_typing import Path @@ -463,7 +463,7 @@ GeneralizedIndex = NewType('GeneralizedIndex', int) @classmethod def sundry_functions(cls) -> str: return super().sundry_functions() + '\n\n' + ''' -def get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex: +def get_generalized_index(ssz_class: Any, *path: Sequence[PyUnion[int, SSZVariableName]]) -> GeneralizedIndex: ssz_path = Path(ssz_class) for item in path: ssz_path = ssz_path / item @@ -487,14 +487,14 @@ def get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariable # # MergeSpecBuilder 
# -class MergeSpecBuilder(Phase0SpecBuilder): +class MergeSpecBuilder(AltairSpecBuilder): fork: str = MERGE @classmethod def imports(cls, preset_name: str): return super().imports(preset_name) + f''' from typing import Protocol -from eth2spec.phase0 import {preset_name} as phase0 +from eth2spec.altair import {preset_name} as altair from eth2spec.utils.ssz.ssz_typing import Bytes20, ByteList, ByteVector, uint256, Union ''' @@ -844,19 +844,15 @@ class PySpecCommand(Command): if len(self.md_doc_paths) == 0: print("no paths were specified, using default markdown file paths for pyspec" " build (spec fork: %s)" % self.spec_fork) - if self.spec_fork == PHASE0: + if self.spec_fork in (PHASE0, ALTAIR, MERGE): self.md_doc_paths = """ specs/phase0/beacon-chain.md specs/phase0/fork-choice.md specs/phase0/validator.md specs/phase0/weak-subjectivity.md """ - elif self.spec_fork == ALTAIR: - self.md_doc_paths = """ - specs/phase0/beacon-chain.md - specs/phase0/fork-choice.md - specs/phase0/validator.md - specs/phase0/weak-subjectivity.md + if self.spec_fork in (ALTAIR, MERGE): + self.md_doc_paths += """ specs/altair/beacon-chain.md specs/altair/bls.md specs/altair/fork.md @@ -864,18 +860,14 @@ class PySpecCommand(Command): specs/altair/p2p-interface.md specs/altair/sync-protocol.md """ - elif self.spec_fork == MERGE: - self.md_doc_paths = """ - specs/phase0/beacon-chain.md - specs/phase0/fork-choice.md - specs/phase0/validator.md - specs/phase0/weak-subjectivity.md + if self.spec_fork == MERGE: + self.md_doc_paths += """ specs/merge/beacon-chain.md specs/merge/fork.md specs/merge/fork-choice.md specs/merge/validator.md """ - else: + if len(self.md_doc_paths) == 0: raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork) self.parsed_md_doc_paths = self.md_doc_paths.split() diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 59ec3ce48..aa2e3b334 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -1,7 +1,5 @@ # Ethereum 2.0 The Merge -**Warning**: This document is currently based on [Phase 0](../phase0/beacon-chain.md) and will be rebased on [Altair](../altair/beacon-chain.md). - **Notice**: This document is a work-in-progress for researchers and implementers. 
 ## Table of contents
@@ -69,7 +67,17 @@ This patch adds transaction execution to the beacon chain as part of the Merge f
 #### `BeaconBlockBody`
 
 ```python
-class BeaconBlockBody(phase0.BeaconBlockBody):
+class BeaconBlockBody(Container):
+    randao_reveal: BLSSignature
+    eth1_data: Eth1Data  # Eth1 data vote
+    graffiti: Bytes32  # Arbitrary data
+    # Operations
+    proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
+    attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
+    attestations: List[Attestation, MAX_ATTESTATIONS]
+    deposits: List[Deposit, MAX_DEPOSITS]
+    voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
+    sync_aggregate: SyncAggregate
     # Execution
     execution_payload: ExecutionPayload  # [New in Merge]
 ```
@@ -77,7 +85,41 @@ class BeaconBlockBody(phase0.BeaconBlockBody):
 #### `BeaconState`
 
 ```python
-class BeaconState(phase0.BeaconState):
+class BeaconState(altair.BeaconState):
+    # Versioning
+    genesis_time: uint64
+    genesis_validators_root: Root
+    slot: Slot
+    fork: Fork
+    # History
+    latest_block_header: BeaconBlockHeader
+    block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+    state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+    historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
+    # Eth1
+    eth1_data: Eth1Data
+    eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
+    eth1_deposit_index: uint64
+    # Registry
+    validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
+    balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
+    # Randomness
+    randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
+    # Slashings
+    slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR]  # Per-epoch sums of slashed effective balances
+    # Participation
+    previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
+    current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
+    # Finality
+    justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH]  # Bit set for every recent justified epoch
+    previous_justified_checkpoint: Checkpoint
+    current_justified_checkpoint: Checkpoint
+    finalized_checkpoint: Checkpoint
+    # Inactivity
+    inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT]
+    # Sync
+    current_sync_committee: SyncCommittee
+    next_sync_committee: SyncCommittee
     # Execution
     latest_execution_payload_header: ExecutionPayloadHeader  # [New in Merge]
 ```
@@ -190,6 +232,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
     process_randao(state, block.body)
     process_eth1_data(state, block.body)
     process_operations(state, block.body)
+    process_sync_aggregate(state, block.body.sync_aggregate)
     if is_execution_enabled(state, block.body):
         process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)  # [New in Merge]
 ```
@@ -232,7 +275,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
 
 *Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Merge testing only.
 
-*Note*: The function `initialize_beacon_state_from_eth1` is modified to use `MERGE_FORK_VERSION` and initialize `latest_execution_payload_header`.
+*Note*: The function `initialize_beacon_state_from_eth1` is modified: (1) using `MERGE_FORK_VERSION` as the current fork version, (2) utilizing the Merge `BeaconBlockBody` when constructing the initial `latest_block_header`, and (3) initializing `latest_execution_payload_header`.
 ```python
 def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
@@ -269,6 +312,11 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
     # Set genesis validators root for domain separation and chain versioning
     state.genesis_validators_root = hash_tree_root(state.validators)
 
+    # Fill in sync committees
+    # Note: A duplicate committee is assigned for the current and next committee at genesis
+    state.current_sync_committee = get_next_sync_committee(state)
+    state.next_sync_committee = get_next_sync_committee(state)
+
     # [New in Merge] Initialize the execution payload header (with block number set to 0)
     state.latest_execution_payload_header.block_hash = eth1_block_hash
     state.latest_execution_payload_header.timestamp = eth1_timestamp
diff --git a/specs/merge/fork.md b/specs/merge/fork.md
index 1f2ea7fff..5c5c40a64 100644
--- a/specs/merge/fork.md
+++ b/specs/merge/fork.md
@@ -43,15 +43,18 @@ Note that for the pure Merge networks, we don't apply `upgrade_to_merge` since i
 
 ### Upgrading the state
 
-If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH`, an irregular state change is made to upgrade to Merge.
+As with the Phase0-to-Altair upgrade, the `state_transition` function is modified to upgrade the `BeaconState`.
+The `BeaconState` upgrade runs as part of `process_slots`; slots with missing block proposals do not affect the upgrade time.
+If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH`, an irregular state change is made to upgrade to Merge.
 The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `MERGE_FORK_EPOCH * SLOTS_PER_EPOCH`.
-Care must be taken when transitioning through the fork boundary as implementations will need a modified [state transition function](../phase0/beacon-chain.md#beacon-chain-state-transition-function) that deviates from the Phase 0 document.
-In particular, the outer `state_transition` function defined in the Phase 0 document will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead the logic must be within `process_slots`.
+
+When multiple upgrades are scheduled for the same epoch (common for test networks),
+all the upgrades run in sequence before resuming the regular state transition.
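
For intuition, here is a rough sketch of how an implementation can fold these upgrade checks into the `process_slots` loop, so that skipped slots cannot bypass the fork boundary and same-epoch upgrades run back-to-back. This is illustrative only: `process_slots_with_upgrades` is not a spec function, and the distinct pre/post-fork `BeaconState` types that real implementations track are glossed over here.

```python
def process_slots_with_upgrades(state: BeaconState, slot: Slot) -> BeaconState:
    assert state.slot < slot
    while state.slot < slot:
        process_slot(state)
        # Process epoch on the start slot of the next epoch
        if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
            process_epoch(state)
        state.slot = Slot(state.slot + 1)
        # Upgrade checks live inside the loop: even if every block in the
        # epoch is skipped, the state is still upgraded at the exact fork slot.
        if state.slot % SLOTS_PER_EPOCH == 0:
            if compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH:
                state = upgrade_to_altair(state)
            if compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH:
                state = upgrade_to_merge(state)
    return state
```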
```python -def upgrade_to_merge(pre: phase0.BeaconState) -> BeaconState: - epoch = phase0.get_current_epoch(pre) +def upgrade_to_merge(pre: altair.BeaconState) -> BeaconState: + epoch = altair.get_current_epoch(pre) post = BeaconState( # Versioning genesis_time=pre.genesis_time, @@ -78,14 +81,16 @@ def upgrade_to_merge(pre: phase0.BeaconState) -> BeaconState: randao_mixes=pre.randao_mixes, # Slashings slashings=pre.slashings, - # Attestations - previous_epoch_attestations=pre.previous_epoch_attestations, - current_epoch_attestations=pre.current_epoch_attestations, + # Participation + previous_epoch_participation=pre.previous_epoch_participation, + current_epoch_participation=pre.current_epoch_participation, # Finality justification_bits=pre.justification_bits, previous_justified_checkpoint=pre.previous_justified_checkpoint, current_justified_checkpoint=pre.current_justified_checkpoint, finalized_checkpoint=pre.finalized_checkpoint, + # Inactivity + inactivity_scores=pre.inactivity_scores, # Execution-layer latest_execution_payload_header=ExecutionPayloadHeader(), ) diff --git a/specs/merge/validator.md b/specs/merge/validator.md index 2fd7bcc22..baf716760 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -1,7 +1,5 @@ # Ethereum 2.0 The Merge -**Warning:** This document is currently based on [Phase 0](../phase0/validator.md) but will be rebased to [Altair](../altair/validator.md) once the latter is shipped. - **Notice**: This document is a work-in-progress for researchers and implementers. ## Table of contents @@ -29,9 +27,11 @@ This document represents the changes to be made in the code of an "honest valida ## Prerequisites -This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md). All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden. +This document is an extension of the [Altair -- Honest Validator](../altair/validator.md) guide. +All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [The Merge](./beacon-chain.md) are requisite for this document and used throughout. Please see related Beacon Chain doc before continuing and use them as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [The Merge](./beacon-chain.md) are requisite for this document and used throughout. +Please see related Beacon Chain doc before continuing and use them as a reference throughout. 
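
For instance, several duties below hinge on whether the transition has completed, which the Merge Beacon Chain doc captures with a predicate along these lines (reproduced here as a sketch for orientation; that document holds the authoritative definition):

```python
def is_merge_complete(state: BeaconState) -> bool:
    # The transition is complete once a non-default execution payload header is recorded.
    return state.latest_execution_payload_header != ExecutionPayloadHeader()
```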
## Protocols diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 5a460855b..4e88a8971 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -179,7 +179,7 @@ class BeaconBlockBody(merge.BeaconBlockBody): # [extends The Merge block body] ### `BeaconState` ```python -class BeaconState(merge.BeaconState): # [extends The Merge state] +class BeaconState(merge.BeaconState): # [Updated fields] (Warning: this changes with Altair, Sharding will rebase to use participation-flags) previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] @@ -494,9 +494,9 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: process_randao(state, block.body) process_eth1_data(state, block.body) process_operations(state, block.body) # [Modified in Sharding] - # Pre-merge, skip execution payload processing - if is_execution_enabled(state, block): - process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [New in Merge] + process_sync_aggregate(state, block.body.sync_aggregate) + # is_execution_enabled is omitted, execution is enabled by default. + process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) ``` #### Operations @@ -527,7 +527,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: - phase0.process_attestation(state, attestation) + altair.process_attestation(state, attestation) update_pending_shard_work(state, attestation) ``` @@ -681,25 +681,26 @@ This epoch transition overrides the Merge epoch transition: ```python def process_epoch(state: BeaconState) -> None: - # Sharding + # Sharding pre-processing process_pending_shard_confirmations(state) charge_confirmed_shard_fees(state) reset_pending_shard_work(state) - # Phase0 + # Base functionality process_justification_and_finalization(state) + process_inactivity_updates(state) process_rewards_and_penalties(state) process_registry_updates(state) process_slashings(state) - - # Final updates process_eth1_data_reset(state) process_effective_balance_updates(state) process_slashings_reset(state) process_randao_mixes_reset(state) process_historical_roots_update(state) - process_participation_record_updates(state) + process_participation_flag_updates(state) + process_sync_committee_updates(state) + # Sharding post-processing process_shard_epoch_increment(state) ``` diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 51dbfd5a6..baf9494f2 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -29,8 +29,7 @@ ## Introduction -The specification of these changes continues in the same format as the [Phase0](../phase0/p2p-interface.md) and -[Altair](../altair/p2p-interface.md) network specifications, and assumes them as pre-requisite. +The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. The adjustments and additions for Shards are outlined in this document. 
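
As in previous upgrades, topic strings follow the Phase 0 `/eth2/ForkDigestValue/Name/Encoding` convention. A rough sketch of how a shard blob subnet topic could be assembled (illustrative only; `shard_blob_topic` is not a spec function):

```python
def shard_blob_topic(fork_digest: str, subnet_id: int) -> str:
    # Phase 0 gossip topic convention: /eth2/ForkDigestValue/Name/Encoding
    return f"/eth2/{fork_digest}/shard_blob_{subnet_id}/ssz_snappy"
```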
## Constants diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 362f64abf..7fddc0762 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -347,10 +347,6 @@ def with_phases(phases, other_phases=None): preset_name = kw.pop('preset') targets = spec_targets[preset_name] - # TODO: test state is dependent on phase0 but is immediately transitioned to later phases. - # A new state-creation helper for later phases may be in place, and then tests can run without phase0 - available_phases.add(PHASE0) - # Populate all phases for multi-phase tests phase_dir = {} if PHASE0 in available_phases: @@ -433,23 +429,15 @@ def with_config_overrides(config_overrides): def is_post_altair(spec): - if spec.fork == MERGE: # TODO: remove parallel Altair-Merge condition after rebase. - return False - if spec.fork in FORKS_BEFORE_ALTAIR: - return False - return True + return spec.fork not in FORKS_BEFORE_ALTAIR def is_post_merge(spec): - if spec.fork == ALTAIR: # TODO: remove parallel Altair-Merge condition after rebase. - return False - if spec.fork in FORKS_BEFORE_MERGE: - return False - return True + return spec.fork not in FORKS_BEFORE_MERGE -with_altair_and_later = with_phases([ALTAIR]) # TODO: include Merge, but not until Merge work is rebased. -with_merge_and_later = with_phases([MERGE]) +with_altair_and_later = with_phases([ALTAIR, MERGE]) +with_merge_and_later = with_phases([MERGE]) # TODO: include sharding when spec stabilizes. def fork_transition_test(pre_fork_name, post_fork_name, fork_epoch=None): diff --git a/tests/core/pyspec/eth2spec/test/helpers/block_processing.py b/tests/core/pyspec/eth2spec/test/helpers/block_processing.py index d2ec4a111..e82f62ed0 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/block_processing.py +++ b/tests/core/pyspec/eth2spec/test/helpers/block_processing.py @@ -30,6 +30,7 @@ def get_process_calls(spec): # Merge 'process_application_payload': lambda state, block: spec.process_application_payload(state, block.body), + # TODO: add sharding processing functions when spec stabilizes. # Custody Game 'process_custody_game_operations': lambda state, block: spec.process_custody_game_operations(state, block.body), diff --git a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py index c783692fc..eed259e81 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py +++ b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py @@ -28,7 +28,7 @@ def get_process_calls(spec): 'process_participation_record_updates' ), 'process_sync_committee_updates', # altair - 'process_shard_epoch_increment' # sharding + # TODO: add sharding processing functions when spec stabilizes. 
] diff --git a/tests/core/pyspec/eth2spec/test/helpers/genesis.py b/tests/core/pyspec/eth2spec/test/helpers/genesis.py index 8617ce9f3..a9eb59f67 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/genesis.py +++ b/tests/core/pyspec/eth2spec/test/helpers/genesis.py @@ -1,7 +1,6 @@ from eth2spec.test.helpers.constants import ( - ALTAIR, - FORKS_BEFORE_ALTAIR, - MERGE, + ALTAIR, MERGE, + FORKS_BEFORE_ALTAIR, FORKS_BEFORE_MERGE, ) from eth2spec.test.helpers.keys import pubkeys @@ -25,11 +24,13 @@ def create_genesis_state(spec, validator_balances, activation_threshold): deposit_root = b'\x42' * 32 eth1_block_hash = b'\xda' * 32 + previous_version = spec.config.GENESIS_FORK_VERSION current_version = spec.config.GENESIS_FORK_VERSION if spec.fork == ALTAIR: current_version = spec.config.ALTAIR_FORK_VERSION elif spec.fork == MERGE: + previous_version = spec.config.ALTAIR_FORK_VERSION current_version = spec.config.MERGE_FORK_VERSION state = spec.BeaconState( @@ -41,7 +42,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold): block_hash=eth1_block_hash, ), fork=spec.Fork( - previous_version=spec.config.GENESIS_FORK_VERSION, + previous_version=previous_version, current_version=current_version, epoch=spec.GENESIS_EPOCH, ), @@ -73,4 +74,9 @@ def create_genesis_state(spec, validator_balances, activation_threshold): state.current_sync_committee = spec.get_next_sync_committee(state) state.next_sync_committee = spec.get_next_sync_committee(state) + if spec.fork not in FORKS_BEFORE_MERGE: + # Initialize the execution payload header (with block number and genesis time set to 0) + state.latest_execution_payload_header.block_hash = eth1_block_hash + state.latest_execution_payload_header.random = eth1_block_hash + return state diff --git a/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py b/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py index 9b7f89366..3a55dc88c 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py +++ b/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py @@ -24,10 +24,14 @@ def run_fork_test(post_spec, pre_state): 'randao_mixes', # Slashings 'slashings', - # Attestations - 'previous_epoch_attestations', 'current_epoch_attestations', + # Participation + 'previous_epoch_participation', 'current_epoch_participation', # Finality 'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint', + # Inactivity + 'inactivity_scores', + # Sync + 'current_sync_committee', 'next_sync_committee' ] for field in stable_fields: assert getattr(pre_state, field) == getattr(post_state, field) diff --git a/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py index 066a656a8..d92b0015c 100644 --- a/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py @@ -7,7 +7,7 @@ from eth2spec.test.context import ( ) from eth2spec.test.utils import with_meta_tags from eth2spec.test.helpers.constants import ( - PHASE0, MERGE, + ALTAIR, MERGE, MINIMAL, ) from eth2spec.test.helpers.state import ( @@ -20,7 +20,7 @@ from eth2spec.test.helpers.merge.fork import ( ) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_state @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -28,7 +28,7 @@ def test_fork_base_state(spec, phases, state): yield from run_fork_test(phases[MERGE], state) 
-@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_state @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -37,7 +37,7 @@ def test_fork_next_epoch(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_state @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -46,7 +46,7 @@ def test_fork_next_epoch_with_block(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_state @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -56,7 +56,7 @@ def test_fork_many_next_epoch(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @spec_test @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -64,7 +64,7 @@ def test_fork_random_low_balances(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @spec_test @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -72,7 +72,7 @@ def test_fork_random_misc_balances(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) diff --git a/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py index d790acd3a..cabde150f 100644 --- a/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py +++ b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py @@ -9,7 +9,7 @@ from eth2spec.test.context import ( ) from eth2spec.test.utils import with_meta_tags from eth2spec.test.helpers.constants import ( - PHASE0, MERGE, + ALTAIR, MERGE, MINIMAL, ) from eth2spec.test.helpers.merge.fork import ( @@ -22,7 +22,7 @@ from eth2spec.test.helpers.random import ( ) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_state @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -31,7 +31,7 @@ def test_merge_fork_random_0(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_state @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -40,7 +40,7 @@ def test_merge_fork_random_1(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_state @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -49,7 +49,7 @@ def test_merge_fork_random_2(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) 
+@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_state @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -58,7 +58,7 @@ def test_merge_fork_random_3(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_state @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -69,7 +69,7 @@ def test_merge_fork_random_duplicate_attestations(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_state @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -91,7 +91,7 @@ def test_merge_fork_random_mismatched_attestations(spec, phases, state): yield from run_fork_test(phases[MERGE], state_0) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -100,7 +100,7 @@ def test_merge_fork_random_low_balances(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @with_meta_tags(MERGE_FORK_TEST_META_TAGS) @@ -109,7 +109,7 @@ def test_merge_fork_random_misc_balances(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_phases(phases=[ALTAIR], other_phases=[MERGE]) @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test From 3b34f16e5a6d61cc67d6ffec8f4f4e70f604adca Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 22 Jul 2021 11:07:04 -0600 Subject: [PATCH 25/75] add base merge p2p spec --- specs/merge/p2p-interface.md | 111 +++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 specs/merge/p2p-interface.md diff --git a/specs/merge/p2p-interface.md b/specs/merge/p2p-interface.md new file mode 100644 index 000000000..4af3f8e10 --- /dev/null +++ b/specs/merge/p2p-interface.md @@ -0,0 +1,111 @@ +# Ethereum Merge networking specification + +This document contains the networking specification for Ethereum 2.0 clients added during the Merge deployment. + +The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. This document should be viewed as additive to the documents from [Phase 0](../phase0/p2p-interface.md) and from [Altair](../altair/p2p-interface.md) +and will be referred to as the "Phase 0 document" and "Altair document" respectively, hereafter. +Readers should understand the Phase 0 and Altair documents and use them as a basis to understand the changes outlined in this document. 
+
+## Table of contents
+
+<!-- START doctoc generated TOC please keep comment here to allow auto update -->
+<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
+
+- [Warning](#warning)
+- [Modifications in the Merge](#modifications-in-the-merge)
+  - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
+    - [Topics and messages](#topics-and-messages)
+      - [Global topics](#global-topics)
+        - [`beacon_block`](#beacon_block)
+    - [Transitioning the gossip](#transitioning-the-gossip)
+  - [The Req/Resp domain](#the-reqresp-domain)
+    - [Messages](#messages)
+      - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
+      - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
+
+<!-- END doctoc generated TOC please keep comment here to allow auto update -->
+
+## Warning
+
+This document is currently illustrative for early Merge testnets and some parts are subject to change.
+Refer to the note in the [validator guide](./validator.md) for further details.
+
+# Modifications in the Merge
+
+## The gossip domain: gossipsub
+
+Some gossip meshes are upgraded in the Merge to support upgraded types.
+
+### Topics and messages
+
+Topics follow the same specification as in prior upgrades.
+All topics remain stable except the beacon block topic, which is updated with the modified type.
+
+The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 and Altair documents.
+
+The derivation of the `message-id` remains stable.
+
+The new topics, along with the type of the `data` field of a gossipsub message, are given in this table:
+
+| Name | Message Type |
+| - | - |
+| `beacon_block` | `SignedBeaconBlock` (modified) |
+
+Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
+
+#### Global topics
+
+The Merge changes the type of the global beacon block topic.
+
+##### `beacon_block`
+
+The existing specification for this topic does not change from prior upgrades,
+but the type of the payload does change to the (modified) `SignedBeaconBlock` found in the Merge.
+This type changes due to the addition of `execution_payload` to the inner `BeaconBlockBody`.
+
+See the Merge [state transition document](./beacon-chain.md#beaconblockbody) for further details.
+
+### Transitioning the gossip
+
+See the gossip transition details in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip)
+for how to handle transitioning gossip topics for the Merge.
+
+## The Req/Resp domain
+
+### Messages
+
+#### BeaconBlocksByRange v2
+
+**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
+
+Request and Response remain unchanged.
+`MERGE_FORK_VERSION` is used as an additional `context` to specify the Merge block type.
+
+Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Chunk SSZ type |
+| ------------------------ | -------------------------- |
+| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
+| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
+| `MERGE_FORK_VERSION` | `merge.SignedBeaconBlock` |
+
+#### BeaconBlocksByRoot v2
+
+**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
+
+Request and Response remain unchanged.
+`MERGE_FORK_VERSION` is used as an additional `context` to specify the Merge block type.
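
Concretely, a client can use the fork digest received as `context` to select the SSZ type for decoding each chunk, per the table below. A sketch of such dispatch (the helper name and dictionary-based lookup are illustrative assumptions, not spec definitions):

```python
def chunk_block_type(context: ForkDigest, genesis_validators_root: Root):
    # Map each known fork digest to the SSZ block type served under that context,
    # mirroring the per-context tables in this section.
    digest_to_type = {
        compute_fork_digest(GENESIS_FORK_VERSION, genesis_validators_root): phase0.SignedBeaconBlock,
        compute_fork_digest(ALTAIR_FORK_VERSION, genesis_validators_root): altair.SignedBeaconBlock,
        compute_fork_digest(MERGE_FORK_VERSION, genesis_validators_root): merge.SignedBeaconBlock,
    }
    return digest_to_type[context]
```

The same dispatch applies to BeaconBlocksByRange above and BeaconBlocksByRoot here.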
+ +Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: + +[1]: # (eth2spec: skip) + +| `fork_version` | Chunk SSZ type | +| ------------------------ | -------------------------- | +| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` | +| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` | +| `MERGE_FORK_VERSION` | `merge.SignedBeaconBlock` | From bb0848b6f6d07ff0eaad16c039e9010f93c85514 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 22 Jul 2021 19:58:10 +0200 Subject: [PATCH 26/75] carry over current and previous_sync_committee in Merge state upgrader, review from @djrtwo --- specs/merge/fork.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/specs/merge/fork.md b/specs/merge/fork.md index 5c5c40a64..c89e00cf9 100644 --- a/specs/merge/fork.md +++ b/specs/merge/fork.md @@ -91,6 +91,9 @@ def upgrade_to_merge(pre: altair.BeaconState) -> BeaconState: finalized_checkpoint=pre.finalized_checkpoint, # Inactivity inactivity_scores=pre.inactivity_scores, + # Sync + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, # Execution-layer latest_execution_payload_header=ExecutionPayloadHeader(), ) From 8318441474c045255deb48e708f6ebfd43edc240 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 22 Jul 2021 19:59:15 +0200 Subject: [PATCH 27/75] Fix remaining merge-rebase-altair tests --- .../pyspec/eth2spec/test/helpers/constants.py | 7 +--- .../eth2spec/test/helpers/merge/fork.py | 3 -- .../test/merge/fork/test_merge_fork_random.py | 38 +------------------ 3 files changed, 3 insertions(+), 45 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/helpers/constants.py b/tests/core/pyspec/eth2spec/test/helpers/constants.py index 8f116dc3d..5ab847327 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/constants.py +++ b/tests/core/pyspec/eth2spec/test/helpers/constants.py @@ -18,12 +18,9 @@ DAS = SpecForkName('das') ALL_PHASES = (PHASE0, ALTAIR, MERGE) # The forks that output to the test vectors. TESTGEN_FORKS = (PHASE0, ALTAIR, MERGE) -# TODO: everything runs in parallel to Altair. -# After features are rebased on the Altair fork, this can be reduced to just PHASE0. -FORKS_BEFORE_ALTAIR = (PHASE0, MERGE, SHARDING, CUSTODY_GAME, DAS) -# TODO: when rebasing Merge onto Altair, add ALTAIR to this tuple. 
-FORKS_BEFORE_MERGE = (PHASE0,) +FORKS_BEFORE_ALTAIR = (PHASE0,) +FORKS_BEFORE_MERGE = (PHASE0, ALTAIR) # # Config diff --git a/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py b/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py index 3a55dc88c..5a45e8565 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py +++ b/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py @@ -4,9 +4,6 @@ MERGE_FORK_TEST_META_TAGS = { def run_fork_test(post_spec, pre_state): - # Clean up state to be more realistic - pre_state.current_epoch_attestations = [] - yield 'pre', pre_state post_state = post_spec.upgrade_to_merge(pre_state) diff --git a/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py index cabde150f..20101fac4 100644 --- a/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py +++ b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py @@ -16,10 +16,7 @@ from eth2spec.test.helpers.merge.fork import ( MERGE_FORK_TEST_META_TAGS, run_fork_test, ) -from eth2spec.test.helpers.random import ( - randomize_state, - randomize_attestation_participation, -) +from eth2spec.test.helpers.random import randomize_state @with_phases(phases=[ALTAIR], other_phases=[MERGE]) @@ -58,39 +55,6 @@ def test_merge_fork_random_3(spec, phases, state): yield from run_fork_test(phases[MERGE], state) -@with_phases(phases=[ALTAIR], other_phases=[MERGE]) -@spec_test -@with_state -@with_meta_tags(MERGE_FORK_TEST_META_TAGS) -def test_merge_fork_random_duplicate_attestations(spec, phases, state): - randomize_state(spec, state, rng=Random(1111)) - # Note: `run_fork_test` empties `current_epoch_attestations` - state.previous_epoch_attestations = state.previous_epoch_attestations + state.previous_epoch_attestations - yield from run_fork_test(phases[MERGE], state) - - -@with_phases(phases=[ALTAIR], other_phases=[MERGE]) -@spec_test -@with_state -@with_meta_tags(MERGE_FORK_TEST_META_TAGS) -def test_merge_fork_random_mismatched_attestations(spec, phases, state): - # Create a random state - randomize_state(spec, state, rng=Random(2222)) - - # Now make two copies - state_0 = state.copy() - state_1 = state.copy() - - # Randomize attestation participation of both - randomize_attestation_participation(spec, state_0, rng=Random(3333)) - randomize_attestation_participation(spec, state_1, rng=Random(4444)) - - # Note: `run_fork_test` empties `current_epoch_attestations` - # Use pending attestations from both random states in a single state for testing - state_0.previous_epoch_attestations = state_0.previous_epoch_attestations + state_1.previous_epoch_attestations - yield from run_fork_test(phases[MERGE], state_0) - - @with_phases(phases=[ALTAIR], other_phases=[MERGE]) @spec_test @with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) From bf6ad465ce63b05d067dc8207dea96e8958f01b4 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 22 Jul 2021 20:01:50 +0200 Subject: [PATCH 28/75] remove old BeaconState extension --- specs/merge/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index aa2e3b334..01c54e017 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -85,7 +85,7 @@ class BeaconBlockBody(Container): #### `BeaconState` ```python -class BeaconState(altair.BeaconState): +class BeaconState(Container): # Versioning genesis_time: uint64 
     genesis_validators_root: Root

From 06b8bb1a5785bfd928b0cf907203ef660305cc91 Mon Sep 17 00:00:00 2001
From: Anton Nashatyrev
Date: Mon, 21 Jun 2021 20:27:29 +0300
Subject: [PATCH 29/75] Simplify get_start_shard function

---
 specs/sharding/beacon-chain.md | 33 ++++-----------------------------
 1 file changed, 4 insertions(+), 29 deletions(-)

diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index 4e88a8971..517892b8c 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -58,7 +58,6 @@
   - [`process_pending_shard_confirmations`](#process_pending_shard_confirmations)
   - [`charge_confirmed_shard_fees`](#charge_confirmed_shard_fees)
   - [`reset_pending_shard_work`](#reset_pending_shard_work)
-  - [`process_shard_epoch_increment`](#process_shard_epoch_increment)
@@ -186,7 +185,6 @@ class BeaconState(merge.BeaconState):
     # A ring buffer of the latest slots, with information per active shard.
     shard_buffer: Vector[List[ShardWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS]
     shard_gasprice: uint64
-    current_epoch_start_shard: Shard
 ```
 
 ## New containers
@@ -447,22 +445,10 @@ def get_start_shard(state: BeaconState, slot: Slot) -> Shard:
     """
     Return the start shard at ``slot``.
     """
-    current_epoch_start_slot = compute_start_slot_at_epoch(get_current_epoch(state))
-    shard = state.current_epoch_start_shard
-    if slot > current_epoch_start_slot:
-        # Current epoch or the next epoch lookahead
-        for _slot in range(current_epoch_start_slot, slot):
-            committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(_slot)))
-            active_shard_count = get_active_shard_count(state, compute_epoch_at_slot(Slot(_slot)))
-            shard = (shard + committee_count) % active_shard_count
-    elif slot < current_epoch_start_slot:
-        # Previous epoch
-        for _slot in list(range(slot, current_epoch_start_slot))[::-1]:
-            committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(_slot)))
-            active_shard_count = get_active_shard_count(state, compute_epoch_at_slot(Slot(_slot)))
-            # Ensure positive
-            shard = (shard + active_shard_count - committee_count) % active_shard_count
-    return Shard(shard)
+    epoch = compute_epoch_at_slot(slot)
+    committee_count = get_committee_count_per_slot(state, epoch)
+    active_shard_count = get_active_shard_count(state, epoch)
+    return Shard(committee_count * slot % active_shard_count)
 ```
 
 #### `compute_shard_from_committee_index`
@@ -699,9 +685,6 @@ def process_epoch(state: BeaconState) -> None:
     process_historical_roots_update(state)
     process_participation_flag_updates(state)
     process_sync_committee_updates(state)
-
-    # Sharding post-processing
-    process_shard_epoch_increment(state)
 ```
 
 #### `process_pending_shard_confirmations`
@@ -800,11 +783,3 @@ def reset_pending_shard_work(state: BeaconState) -> None:
             )
             # a shard without committee available defaults to SHARD_WORK_UNCONFIRMED.
``` - -#### `process_shard_epoch_increment` - -```python -def process_shard_epoch_increment(state: BeaconState) -> None: - # Update current_epoch_start_shard - state.current_epoch_start_shard = get_start_shard(state, Slot(state.slot + 1)) -``` From bc936768c74cd8afb72fd383dd76750ebdc00c8e Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 17 Jun 2021 23:17:47 +0200 Subject: [PATCH 30/75] global selection of shard proposers --- specs/sharding/beacon-chain.md | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 517892b8c..41497d8d8 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -43,7 +43,6 @@ - [Beacon state accessors](#beacon-state-accessors) - [Updated `get_committee_count_per_slot`](#updated-get_committee_count_per_slot) - [`get_active_shard_count`](#get_active_shard_count) - - [`get_shard_committee`](#get_shard_committee) - [`compute_proposer_index`](#compute_proposer_index) - [`get_shard_proposer_index`](#get_shard_proposer_index) - [`get_start_shard`](#get_start_shard) @@ -369,24 +368,6 @@ def get_active_shard_count(state: BeaconState, epoch: Epoch) -> uint64: return INITIAL_ACTIVE_SHARDS ``` -#### `get_shard_committee` - -```python -def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]: - """ - Return the shard committee of the given ``epoch`` of the given ``shard``. - """ - source_epoch = compute_committee_source_epoch(epoch, SHARD_COMMITTEE_PERIOD) - active_validator_indices = get_active_validator_indices(beacon_state, source_epoch) - seed = get_seed(beacon_state, source_epoch, DOMAIN_SHARD_COMMITTEE) - return compute_committee( - indices=active_validator_indices, - seed=seed, - index=shard, - count=get_active_shard_count(beacon_state, epoch), - ) -``` - #### `compute_proposer_index` Updated version to get a proposer index that will only allow proposers with a certain minimum balance, @@ -423,8 +404,7 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard Return the proposer's index of shard block at ``slot``. 
""" epoch = compute_epoch_at_slot(slot) - committee = get_shard_committee(beacon_state, epoch, shard) - seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_PROPOSER) + uint_to_bytes(slot)) + seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_PROPOSER) + uint_to_bytes(slot) + uint_to_bytes(shard)) # Proposer must have sufficient balance to pay for worst case fee burn EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = ( @@ -435,7 +415,8 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard beacon_state.shard_gasprice * MAX_SAMPLES_PER_BLOCK // TARGET_SAMPLES_PER_BLOCK + EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION ) - return compute_proposer_index(beacon_state, committee, seed, min_effective_balance) + indices = get_active_validator_indices(state, epoch) + return compute_proposer_index(beacon_state, indices, seed, min_effective_balance) ``` #### `get_start_shard` From 4b2523961723717c49a61e6f5587d4802211a33d Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 18 Jun 2021 02:21:21 +0200 Subject: [PATCH 31/75] builders make blobs, proposers make blocks --- specs/sharding/beacon-chain.md | 264 +++++++++++++++++++++++++++----- specs/sharding/p2p-interface.md | 45 ------ 2 files changed, 224 insertions(+), 85 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 41497d8d8..acba8e2a5 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -9,6 +9,7 @@ - [Introduction](#introduction) + - [Glossary](#glossary) - [Custom types](#custom-types) - [Constants](#constants) - [Misc](#misc) @@ -23,15 +24,25 @@ - [Updated containers](#updated-containers) - [`AttestationData`](#attestationdata) - [`BeaconBlockBody`](#beaconblockbody) + - [`Builder`](#builder) - [`BeaconState`](#beaconstate) - [New containers](#new-containers) - [`DataCommitment`](#datacommitment) + - [ShardBlobBody](#shardblobbody) - [`ShardBlobBodySummary`](#shardblobbodysummary) + - [`ShardBlob`](#shardblob) - [`ShardBlobHeader`](#shardblobheader) + - [`SignedShardBlob`](#signedshardblob) - [`SignedShardBlobHeader`](#signedshardblobheader) + - [ShardBlock](#shardblock) + - [`ShardBlockHeader`](#shardblockheader) + - [`SignedShardBlock`](#signedshardblock) + - [`SignedShardBlockHeader`](#signedshardblockheader) - [`PendingShardHeader`](#pendingshardheader) - [`ShardBlobReference`](#shardblobreference) - [`SignedShardBlobReference`](#signedshardblobreference) + - [`ShardBlockReference`](#shardblockreference) + - [`SignedShardBlockReference`](#signedshardblockreference) - [`ShardProposerSlashing`](#shardproposerslashing) - [`ShardWork`](#shardwork) - [Helper functions](#helper-functions) @@ -51,6 +62,7 @@ - [Block processing](#block-processing) - [Operations](#operations) - [Extended Attestation processing](#extended-attestation-processing) + - [`charge_builder`](#charge_builder) - [`process_shard_header`](#process_shard_header) - [`process_shard_proposer_slashing`](#process_shard_proposer_slashing) - [Epoch transition](#epoch-transition) @@ -68,6 +80,13 @@ This document describes the extensions made to the Phase 0 design of The Beacon based on the ideas [here](https://hackmd.io/G-Iy5jqyT7CXWEz8Ssos8g) and more broadly [here](https://arxiv.org/abs/1809.09044), using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. 
+### Glossary + +- **Data**: A list of KZG points, to translate a byte string into +- **Blob**: Data with commitments and meta-data, like a flattened bundle of L2 transactions. +- **Builder**: Builds blobs and bids for proposal slots with fee-paying blob-headers, responsible for availability. +- **Shard proposer**: Validator, selects a signed blob-header, taking bids for shard data opportunity. +- **Shard block**: Unique per `(slot, shard, proposer)`, selected signed blob ## Custom types @@ -78,6 +97,7 @@ We define the following Python custom types for type hinting and readability: | `Shard` | `uint64` | A shard number | | `BLSCommitment` | `Bytes48` | A G1 curve point | | `BLSPoint` | `uint256` | A number `x` in the range `0 <= x < MODULUS` | +| `BuilderIndex` | `uint64` | Builder registry index | ## Constants @@ -97,7 +117,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | | `DOMAIN_SHARD_PROPOSER` | `DomainType('0x80000000')` | -| `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` | +| `DOMAIN_SHARD_BUILDER` | `DomainType('0x81000000')` | ### Shard Work Status @@ -118,6 +138,7 @@ The following values are (non-configurable) constants used throughout the specif | `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block | | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | | | `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (= 256) | Number of slots for which shard commitments and confirmation status is directly available in the state | +| `BUILDER_REGISTRY_LIMIT` | `uint64(2**40)` (= 1,099,511,627,776) | builders | ### Shard block samples @@ -162,8 +183,8 @@ class AttestationData(Container): # FFG vote source: Checkpoint target: Checkpoint - # Shard header root - shard_header_root: Root # [New in Sharding] + # Hash-tree-root of ShardBlock + shard_block_root: Root # [New in Sharding] ``` ### `BeaconBlockBody` @@ -171,7 +192,16 @@ class AttestationData(Container): ```python class BeaconBlockBody(merge.BeaconBlockBody): # [extends The Merge block body] shard_proposer_slashings: List[ShardProposerSlashing, MAX_SHARD_PROPOSER_SLASHINGS] - shard_headers: List[SignedShardBlobHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD] + shard_headers: List[SignedShardBlockHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD] +``` + +### `Builder` + +```python +class Builder(Container): + pubkey: BLSPubkey + # TODO: fields for either an expiry mechanism (refunding execution account with remaining balance) + # and/or a builder-transaction mechanism. ``` ### `BeaconState` @@ -182,6 +212,9 @@ class BeaconState(merge.BeaconState): previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # [New fields] + # Builder registry. + builders: List[Builder, BUILDER_REGISTRY_LIMIT] + builder_balances: List[Gwei, BUILDER_REGISTRY_LIMIT] # A ring buffer of the latest slots, with information per active shard. shard_buffer: Vector[List[ShardWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS] shard_gasprice: uint64 @@ -189,9 +222,6 @@ class BeaconState(merge.BeaconState): ## New containers -The shard data itself is network-layer only, and can be found in the [P2P specification](./p2p-interface.md). -The beacon chain registers just the commitments of the shard data. 
- ### `DataCommitment` ```python @@ -202,8 +232,33 @@ class DataCommitment(Container): length: uint64 ``` +### ShardBlobBody + +Unsigned shard data, bundled by a shard-builder. +Unique, signing different bodies as shard proposer for the same `(slot, shard)` is slashable. + +```python +class ShardBlobBody(Container): + # The actual data commitment + commitment: DataCommitment + # Proof that the degree < commitment.length + degree_proof: BLSCommitment + # The actual data. Should match the commitment and degree proof. + data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOCK] + # Latest block root of the Beacon Chain, before shard_blob.slot + beacon_block_root: Root + # Builder of the data, pays data-fee to proposer + builder_index: BuilderIndex + # TODO: fee payment amount fields (EIP 1559 like) +``` + ### `ShardBlobBodySummary` +Summary version of the `ShardBlobBody`, omitting the data payload, while preserving the data-commitments. + +The commitments are not further collapsed to a single hash, +to avoid an extra network roundtrip between proposer and builder, to include the header on-chain more quickly. + ```python class ShardBlobBodySummary(Container): # The actual data commitment @@ -214,36 +269,110 @@ class ShardBlobBodySummary(Container): data_root: Root # Latest block root of the Beacon Chain, before shard_blob.slot beacon_block_root: Root + # Builder of the data, pays data-fee to proposer + builder_index: BuilderIndex + # TODO: fee payment amount fields (EIP 1559 like) +``` + +### `ShardBlob` + +`ShardBlobBody` wrapped with the header data that is unique to the shard blob proposal. + +```python +class ShardBlob(Container): + slot: Slot + shard: Shard + # Blob contents + body: ShardBlobBody ``` ### `ShardBlobHeader` +Header version of `ShardBlob`. Separates designation (slot, shard) and contents (blob). + ```python class ShardBlobHeader(Container): - # Slot and shard that this header is intended for slot: Slot shard: Shard - # SSZ-summary of ShardBlobBody + # Blob contents, without the full data body_summary: ShardBlobBodySummary - # Proposer of the shard-blob - proposer_index: ValidatorIndex +``` + +### `SignedShardBlob` + +Full blob data, signed by the shard builder, ensuring fee payment. + +```python +class SignedShardBlob(Container): + message: ShardBlob + signature: BLSSignature ``` ### `SignedShardBlobHeader` +Header of the blob, the signature is equally applicable to `SignedShardBlob`. +Shard proposers can accept `SignedShardBlobHeader` as a data-transaction. + ```python class SignedShardBlobHeader(Container): message: ShardBlobHeader signature: BLSSignature ``` +### ShardBlock + +Full blob data signed by builder, to be confirmed by proxy as `ShardBlockHeader`. + +```python +class ShardBlock(Container): + # Shard data with fee payment by bundle builder + signed_blob: SignedShardBlob + # Proposer of the shard-blob + proposer_index: ValidatorIndex +``` + +### `ShardBlockHeader` + +Header version of `ShardBlock`, selecting a `SignedShardBlobHeader`. + +```python +class ShardBlockHeader(Container): + # Shard commitments and fee payment by blob builder + signed_blob_header: SignedShardBlobHeader + # Proposer of the shard-blob + proposer_index: ValidatorIndex +``` + +### `SignedShardBlock` + +Shard blob, signed for payment, and signed for proposal. Propagated to attesters. 
+ +```python +class SignedShardBlock(Container): + message: ShardBlock + signature: BLSSignature +``` + +### `SignedShardBlockHeader` + +Header version of `SignedShardBlock`, substituting the full data within the blob for just the hash-tree-root. + +The signature is equally applicable to `SignedShardBlock`, +which the builder can publish as soon as the signed header is seen. + +```python +class SignedShardBlockHeader(Container): + message: ShardBlockHeader + signature: BLSSignature +``` + ### `PendingShardHeader` ```python class PendingShardHeader(Container): # KZG10 commitment to the data commitment: DataCommitment - # hash_tree_root of the ShardHeader (stored so that attestations can be checked against it) + # hash_tree_root of the ShardBlockHeader (stored so that attestations can be checked against it) root: Root # Who voted for the header votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] @@ -255,31 +384,50 @@ class PendingShardHeader(Container): ### `ShardBlobReference` +Reference version of `ShardBlobHeader`, substituting the body for just a hash-tree-root. + ```python class ShardBlobReference(Container): - # Slot and shard that this reference is intended for slot: Slot shard: Shard - # Hash-tree-root of ShardBlobBody + # Blob hash-tree-root for reference, enough for uniqueness body_root: Root - # Proposer of the shard-blob - proposer_index: ValidatorIndex ``` ### `SignedShardBlobReference` +`ShardBlobReference`, signed by the blob builder. The builder-signature is part of the block identity. + ```python class SignedShardBlobReference(Container): message: ShardBlobReference signature: BLSSignature ``` +### `ShardBlockReference` + +```python +class ShardBlockReference(Container): + # Blob, minimized for efficient slashing + signed_blob_reference: SignedShardBlobReference + # Proposer of the shard-blob + proposer_index: ValidatorIndex +``` + +### `SignedShardBlockReference` + +```python +class SignedShardBlockReference(Container): + message: ShardBlockReference + signature: BLSSignature +``` + ### `ShardProposerSlashing` ```python class ShardProposerSlashing(Container): - signed_reference_1: SignedShardBlobReference - signed_reference_2: SignedShardBlobReference + signed_reference_1: SignedShardBlockReference + signed_reference_2: SignedShardBlockReference ``` ### `ShardWork` @@ -516,7 +664,7 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N current_headers: Sequence[PendingShardHeader] = committee_work.status.value # Find the corresponding header, abort if it cannot be found - header_index = [header.root for header in current_headers].index(attestation.data.shard_header_root) + header_index = [header.root for header in current_headers].index(attestation.data.shard_block_root) pending_header: PendingShardHeader = current_headers[header_index] full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) @@ -554,36 +702,63 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N ) ``` + +#### `charge_builder` + +```python +def charge_builder(state: BeaconState, index: BuilderIndex, fee: Gwei) -> None: + """ + Decrease the builder balance at index ``index`` by ``fee``, with underflow check. 
+ """ + assert state.builder_balances[index] >= fee + state.builder_balances[index] -= fee +``` + ##### `process_shard_header` ```python -def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None: - header = signed_header.message +def process_shard_header(state: BeaconState, signed_block_header: SignedShardBlockHeader) -> None: + block_header: ShardBlockHeader = signed_block_header.message + signed_blob_header: SignedShardBlobHeader = block_header.signed_blob_header + blob_header: ShardBlobHeader = signed_blob_header.message + slot = blob_header.slot + shard = blob_header.shard + # Verify the header is not 0, and not from the future. - assert Slot(0) < header.slot <= state.slot - header_epoch = compute_epoch_at_slot(header.slot) + assert Slot(0) < slot <= state.slot + header_epoch = compute_epoch_at_slot(slot) # Verify that the header is within the processing time window assert header_epoch in [get_previous_epoch(state), get_current_epoch(state)] # Verify that the shard is active - assert header.shard < get_active_shard_count(state, header_epoch) + assert shard < get_active_shard_count(state, header_epoch) # Verify that the block root matches, # to ensure the header will only be included in this specific Beacon Chain sub-tree. - assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1) + assert blob_header.body_summary.beacon_block_root == get_block_root_at_slot(state, slot - 1) # Check that this data is still pending - committee_work = state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.shard] + committee_work = state.shard_buffer[slot % SHARD_STATE_MEMORY_SLOTS][shard] assert committee_work.status.selector == SHARD_WORK_PENDING # Check that this header is not yet in the pending list current_headers: List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] = committee_work.status.value - header_root = hash_tree_root(header) + header_root = hash_tree_root(block_header) assert header_root not in [pending_header.root for pending_header in current_headers] # Verify proposer - assert header.proposer_index == get_shard_proposer_index(state, header.slot, header.shard) - # Verify signature - signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_PROPOSER)) - assert bls.Verify(state.validators[header.proposer_index].pubkey, signing_root, signed_header.signature) + assert block_header.proposer_index == get_shard_proposer_index(state, slot, shard) + # Verify proposer signature + block_signing_root = compute_signing_root(block_header, get_domain(state, DOMAIN_SHARD_PROPOSER)) + proposer_pubkey = state.validators[block_header.proposer_index].pubkey + assert bls.Verify(proposer_pubkey, block_signing_root, signed_block_header.signature) + + # Verify builder requirements + blob_summary: ShardBlobBodySummary = blob_header.body_summary + builder_index = blob_summary.builder_index + + # Verify builder signature + builder = state.builders[builder_index] + blob_signing_root = compute_signing_root(blob_header, get_domain(state, DOMAIN_SHARD_BUILDER)) # TODO new constant + assert bls.Verify(builder.pubkey, blob_signing_root, signed_blob_header.signature) # Verify the length by verifying the degree. 
body_summary = header.body_summary @@ -594,12 +769,16 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade == bls.Pairing(body_summary.commitment.point, G2_SETUP[-body_summary.commitment.length]) ) + # Charge builder, with hard balance requirement + fee = Gwei(123) # TODO + charge_builder(state, builder_index, fee) + # Initialize the pending header - index = compute_committee_index_from_shard(state, header.slot, header.shard) - committee_length = len(get_beacon_committee(state, header.slot, index)) + index = compute_committee_index_from_shard(state, slot, shard) + committee_length = len(get_beacon_committee(state, slot, index)) initial_votes = Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length) pending_header = PendingShardHeader( - commitment=body_summary.commitment, + commitment=blob_summary.commitment, root=header_root, votes=initial_votes, weight=0, @@ -619,17 +798,22 @@ The goal is to ensure that a proof can only be constructed if `deg(B) < l` (ther ```python def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: ShardProposerSlashing) -> None: - reference_1 = proposer_slashing.signed_reference_1.message - reference_2 = proposer_slashing.signed_reference_2.message + reference_1: ShardBlockReference = proposer_slashing.signed_reference_1.message + reference_2 : ShardBlockReference = proposer_slashing.signed_reference_2.message + blob_1 = reference_1.signed_blob_reference.message + blob_2 = reference_2.signed_blob_reference.message # Verify header slots match - assert reference_1.slot == reference_2.slot + assert blob_1.slot == blob_2.slot # Verify header shards match - assert reference_1.shard == reference_2.shard + assert blob_1.shard == blob_2.shard # Verify header proposer indices match assert reference_1.proposer_index == reference_2.proposer_index - # Verify the headers are different (i.e. different body) - assert reference_1 != reference_2 + # Verify the headers are different (i.e. different body, or different builder signature) + assert ( + blob_1.body_root != blob_2.body_root + or reference_1.signed_blob_reference.signature != reference_2.signed_blob_reference.signature + ) # Verify the proposer is slashable proposer = state.validators[reference_1.proposer_index] assert is_slashable_validator(proposer, get_current_epoch(state)) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index baf9494f2..d00321f36 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -11,10 +11,6 @@ - [Introduction](#introduction) - [Constants](#constants) - [Misc](#misc) -- [New containers](#new-containers) - - [ShardBlobBody](#shardblobbody) - - [ShardBlob](#shardblob) - - [SignedShardBlob](#signedshardblob) - [Gossip domain](#gossip-domain) - [Topics and messages](#topics-and-messages) - [Shard blob subnets](#shard-blob-subnets) @@ -40,47 +36,6 @@ The adjustments and additions for Shards are outlined in this document. | ---- | ----- | ----------- | | `SHARD_BLOB_SUBNET_COUNT` | `64` | The number of `shard_blob_{subnet_id}` subnets used in the gossipsub protocol. | -## New containers - -### ShardBlobBody - -```python -class ShardBlobBody(Container): - # The actual data commitment - commitment: DataCommitment - # Proof that the degree < commitment.length - degree_proof: BLSCommitment - # The actual data. Should match the commitment and degree proof. 
- data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOCK] - # Latest block root of the Beacon Chain, before shard_blob.slot - beacon_block_root: Root -``` - -The user MUST always verify the commitments in the `body` are valid for the `data` in the `body`. - -### ShardBlob - -```python -class ShardBlob(Container): - # Slot and shard that this blob is intended for - slot: Slot - shard: Shard - # Shard data with related commitments and beacon anchor - body: ShardBlobBody - # Proposer of the shard-blob - proposer_index: ValidatorIndex -``` - -This is the expanded form of the `ShardBlobHeader` type. - -### SignedShardBlob - -```python -class SignedShardBlob(Container): - message: ShardBlob - signature: BLSSignature -``` - ## Gossip domain ### Topics and messages From 9e10f582993fc1f48f01c6b57e737c34b84ce4c5 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 18 Jun 2021 02:48:58 +0200 Subject: [PATCH 32/75] update networking spec --- specs/sharding/p2p-interface.md | 85 ++++++++++++++++++++------------- 1 file changed, 53 insertions(+), 32 deletions(-) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index d00321f36..94dd75128 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -14,7 +14,7 @@ - [Gossip domain](#gossip-domain) - [Topics and messages](#topics-and-messages) - [Shard blob subnets](#shard-blob-subnets) - - [`shard_blob_{subnet_id}`](#shard_blob_subnet_id) + - [`shard_block_{subnet_id}`](#shard_block_subnet_id) - [Global topics](#global-topics) - [`shard_header`](#shard_header) - [`shard_proposer_slashing`](#shard_proposer_slashing) @@ -34,7 +34,7 @@ The adjustments and additions for Shards are outlined in this document. | Name | Value | Description | | ---- | ----- | ----------- | -| `SHARD_BLOB_SUBNET_COUNT` | `64` | The number of `shard_blob_{subnet_id}` subnets used in the gossipsub protocol. | +| `SHARD_BLOCK_SUBNET_COUNT` | `64` | The number of `shard_block_{subnet_id}` subnets used in the gossipsub protocol. | ## Gossip domain @@ -44,22 +44,22 @@ Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface. | Name | Message Type | |----------------------------------|---------------------------| -| `shard_blob_{subnet_id}` | `SignedShardBlob` | -| `shard_header` | `SignedShardBlobHeader` | +| `shard_block_{subnet_id}` | `SignedShardBlock` | +| `shard_block_header` | `SignedShardBlockHeader` | | `shard_proposer_slashing` | `ShardProposerSlashing` | The [DAS network specification](./das-p2p.md) defines additional topics. -#### Shard blob subnets +#### Shard block subnets -Shard blob subnets are used to propagate shard blobs to subsections of the network. +Shard block subnets are used by builders to make their blobs available after selection by shard proposers. -##### `shard_blob_{subnet_id}` +##### `shard_block_{subnet_id}` -Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{subnet_id}` subnets. +Shard block data, in the form of a `SignedShardBlock` is published to the `shard_block_{subnet_id}` subnets. ```python -def compute_subnet_for_shard_blob(state: BeaconState, slot: Slot, shard: Shard) -> uint64: +def compute_subnet_for_shard_block(state: BeaconState, slot: Slot, shard: Shard) -> uint64: """ Compute the correct subnet for a shard blob publication. Note, this mimics compute_subnet_for_attestation(). 
@@ -69,11 +69,19 @@ def compute_subnet_for_shard_blob(state: BeaconState, slot: Slot, shard: Shard) slots_since_epoch_start = Slot(slot % SLOTS_PER_EPOCH) committees_since_epoch_start = committees_per_slot * slots_since_epoch_start - return uint64((committees_since_epoch_start + committee_index) % SHARD_BLOB_SUBNET_COUNT) + return uint64((committees_since_epoch_start + committee_index) % SHARD_BLOCK_SUBNET_COUNT) ``` -The following validations MUST pass before forwarding the `signed_blob` (with inner `message` as `blob`) on the horizontal subnet or creating samples for it. -- _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- +The following validations MUST pass before forwarding the `signed_block` on the horizontal subnet or creating samples for it. + +We define some aliases to the nested contents of `signed_block`: +```python +block: ShardBlock = signed_block.message +signed_blob: SignedShardBlob = block.signed_blob +blob: ShardBlob = signed_blob.message +``` + +- _[IGNORE]_ The `block` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `blob.slot <= current_slot` (a client MAY queue future blobs for processing at the appropriate slot). - _[IGNORE]_ The `blob` is new enough to be still be processed -- @@ -81,36 +89,49 @@ The following validations MUST pass before forwarding the `signed_blob` (with in - _[REJECT]_ The shard should have a committee at slot -- i.e. validate that `compute_committee_index_from_shard(state, blob.slot, blob.shard)` doesn't raise an error - _[REJECT]_ The shard blob is for the correct subnet -- - i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id` -- _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination. -- _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large. + i.e. `compute_subnet_for_shard_block(state, blob.slot, blob.shard) == subnet_id` +- _[IGNORE]_ The block is the first block with valid signature received for the `(block.proposer_index, blob.slot, blob.shard)` combination. +- _[REJECT]_ The blob is not too large, the data MUST NOT be larger than the SSZ list-limit, and a client MAY be more strict. - _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. -- _[REJECT]_ The proposer signature, `signed_blob.signature`, is valid with respect to the `proposer_index` pubkey. -- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's slot +- _[REJECT]_ The block proposer signature, `signed_block.signature`, is valid with respect to the `proposer_index` pubkey. +- _[REJECT]_ The blob builder exists and has sufficient balance to back the fee payment. +- _[REJECT]_ The blob builder signature, `signed_blob.signature`, is valid with respect to the `builder_index` pubkey. +- _[REJECT]_ The block is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `blob.body.beacon_block_root`/`slot`). 
If the `proposer_index` cannot immediately be verified against the expected shuffling, - the block MAY be queued for later processing while proposers for the blob's branch are calculated -- + the block MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. #### Global topics -There are two additional global topics for Sharding, one is used to propagate shard blob headers (`shard_header`) to -all nodes on the network. Another one is used to propagate validator message (`shard_proposer_slashing`). +There are two additional global topics for Sharding. -##### `shard_header` +One is used to propagate shard block headers (`shard_block_header`) to all nodes on the network. +Another one is used to propagate shard proposer slashings (`shard_proposer_slashing`). -Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_header` subnet. +##### `shard_block_header` -The following validations MUST pass before forwarding the `signed_shard_blob_header` (with inner `message` as `header`) on the network. -- _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. validate that `header.slot <= current_slot` +Shard header data, in the form of a `SignedShardBlockHeader` is published to the global `shard_block_header` subnet. +Shard block headers select shard blob bids by builders, and should be timely to ensure builders can publish the full shard block timely. + +The following validations MUST pass before forwarding the `signed_block_header` (with inner `message` as `header`) on the network. + +We define some aliases to the nested contents of `signed_block_header`: +```python +block_header: ShardBlockHeader = signed_block_header.message +signed_blob_header: SignedShardBlobHeader = header.signed_blob_header +blob_header: ShardBlobHeader = signed_blob_header.message +``` + +- _[IGNORE]_ The header is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- + i.e. validate that `blob_header.slot <= current_slot` (a client MAY queue future headers for processing at the appropriate slot). -- _[IGNORE]_ The `header` is new enough to be still be processed -- - i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)` -- _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination. -- _[REJECT]_ The shard should have a committee at slot -- - i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error -- _[REJECT]_ The proposer signature, `signed_shard_blob_header.signature`, is valid with respect to the `proposer_index` pubkey. +- _[IGNORE]_ The header is new enough to be still be processed -- + i.e. validate that `compute_epoch_at_slot(blob_header.slot) >= get_previous_epoch(state)` +- _[IGNORE]_ The header is the first header with valid signature received for the `(block_header.proposer_index, blob_header.slot, blob_header.shard)` combination. +- _[REJECT]_ The `shard` MUST have a committee at the `slot` -- + i.e. validate that `compute_committee_index_from_shard(state, blob_header.slot, blob_header.shard)` doesn't raise an error +- _[REJECT]_ The proposer signature, `signed_shard_block_header.signature`, is valid with respect to the `block_header.proposer_index` pubkey. 
- _[REJECT]_ The header is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, @@ -124,6 +145,6 @@ Shard proposer slashings, in the form of `ShardProposerSlashing`, are published The following validations MUST pass before forwarding the `shard_proposer_slashing` on to the network. - _[IGNORE]_ The shard proposer slashing is the first valid shard proposer slashing received - for the proposer with index `proposer_slashing.signed_header_1.message.proposer_index`. + for the proposer with index `proposer_slashing.signed_reference_1.message.proposer_index`. The `slot` and `shard` are ignored, there are no per-shard slashings. - _[REJECT]_ All of the conditions within `process_shard_proposer_slashing` pass validation. From b3d5858cc9c42db65005206e6f3f1f31ce983209 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 18 Jun 2021 04:02:06 +0200 Subject: [PATCH 33/75] update data fee payment, todo --- specs/sharding/beacon-chain.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index acba8e2a5..fe0c53bc4 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -770,8 +770,11 @@ def process_shard_header(state: BeaconState, signed_block_header: SignedShardBlo ) # Charge builder, with hard balance requirement - fee = Gwei(123) # TODO + fee = Gwei(123) # TODO EIP 1559 like fee? Burn some of it? charge_builder(state, builder_index, fee) + # TODO: proposer is charged for confirmed headers (see charge_confirmed_shard_fees). + # Need to align incentive, so proposer does not gain from including unconfirmed headers + increase_balance(state, block_header.proposer_index, fee) # Initialize the pending header index = compute_committee_index_from_shard(state, slot, shard) From 2a105f45819e262f27a3ea14279c5a5ca2a3cdd7 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 18 Jun 2021 04:02:34 +0200 Subject: [PATCH 34/75] fix toc --- specs/sharding/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 94dd75128..e9b561312 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -13,10 +13,10 @@ - [Misc](#misc) - [Gossip domain](#gossip-domain) - [Topics and messages](#topics-and-messages) - - [Shard blob subnets](#shard-blob-subnets) + - [Shard block subnets](#shard-block-subnets) - [`shard_block_{subnet_id}`](#shard_block_subnet_id) - [Global topics](#global-topics) - - [`shard_header`](#shard_header) + - [`shard_block_header`](#shard_block_header) - [`shard_proposer_slashing`](#shard_proposer_slashing) From 5726cb9374bf31b8bc208f4703ee9109fb709367 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 21 Jun 2021 00:55:45 +0200 Subject: [PATCH 35/75] aggregate builder and proposer for simplified typing and optimized verification --- specs/sharding/beacon-chain.md | 201 +++++++++++---------------------- 1 file changed, 63 insertions(+), 138 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index fe0c53bc4..ebfaa89eb 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -34,15 +34,8 @@ - [`ShardBlobHeader`](#shardblobheader) - [`SignedShardBlob`](#signedshardblob) - 
[`SignedShardBlobHeader`](#signedshardblobheader) - - [ShardBlock](#shardblock) - - [`ShardBlockHeader`](#shardblockheader) - - [`SignedShardBlock`](#signedshardblock) - - [`SignedShardBlockHeader`](#signedshardblockheader) - [`PendingShardHeader`](#pendingshardheader) - [`ShardBlobReference`](#shardblobreference) - - [`SignedShardBlobReference`](#signedshardblobreference) - - [`ShardBlockReference`](#shardblockreference) - - [`SignedShardBlockReference`](#signedshardblockreference) - [`ShardProposerSlashing`](#shardproposerslashing) - [`ShardWork`](#shardwork) - [Helper functions](#helper-functions) @@ -116,8 +109,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `DOMAIN_SHARD_PROPOSER` | `DomainType('0x80000000')` | -| `DOMAIN_SHARD_BUILDER` | `DomainType('0x81000000')` | +| `DOMAIN_SHARD_BLOB` | `DomainType('0x80000000')` | ### Shard Work Status @@ -247,8 +239,6 @@ class ShardBlobBody(Container): data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOCK] # Latest block root of the Beacon Chain, before shard_blob.slot beacon_block_root: Root - # Builder of the data, pays data-fee to proposer - builder_index: BuilderIndex # TODO: fee payment amount fields (EIP 1559 like) ``` @@ -269,8 +259,6 @@ class ShardBlobBodySummary(Container): data_root: Root # Latest block root of the Beacon Chain, before shard_blob.slot beacon_block_root: Root - # Builder of the data, pays data-fee to proposer - builder_index: BuilderIndex # TODO: fee payment amount fields (EIP 1559 like) ``` @@ -282,18 +270,26 @@ class ShardBlobBodySummary(Container): class ShardBlob(Container): slot: Slot shard: Shard + # Proposer of the shard-blob + proposer_index: ValidatorIndex + # Builder of the data, pays data-fee to proposer + builder_index: BuilderIndex # Blob contents body: ShardBlobBody ``` ### `ShardBlobHeader` -Header version of `ShardBlob`. Separates designation (slot, shard) and contents (blob). +Header version of `ShardBlob`. ```python class ShardBlobHeader(Container): slot: Slot shard: Shard + # Proposer of the shard-blob + proposer_index: ValidatorIndex + # Builder of the data, pays data-fee to proposer + builder_index: BuilderIndex # Blob contents, without the full data body_summary: ShardBlobBodySummary ``` @@ -316,53 +312,8 @@ Shard proposers can accept `SignedShardBlobHeader` as a data-transaction. ```python class SignedShardBlobHeader(Container): message: ShardBlobHeader - signature: BLSSignature -``` - -### ShardBlock - -Full blob data signed by builder, to be confirmed by proxy as `ShardBlockHeader`. - -```python -class ShardBlock(Container): - # Shard data with fee payment by bundle builder - signed_blob: SignedShardBlob - # Proposer of the shard-blob - proposer_index: ValidatorIndex -``` - -### `ShardBlockHeader` - -Header version of `ShardBlock`, selecting a `SignedShardBlobHeader`. - -```python -class ShardBlockHeader(Container): - # Shard commitments and fee payment by blob builder - signed_blob_header: SignedShardBlobHeader - # Proposer of the shard-blob - proposer_index: ValidatorIndex -``` - -### `SignedShardBlock` - -Shard blob, signed for payment, and signed for proposal. Propagated to attesters. - -```python -class SignedShardBlock(Container): - message: ShardBlock - signature: BLSSignature -``` - -### `SignedShardBlockHeader` - -Header version of `SignedShardBlock`, substituting the full data within the blob for just the hash-tree-root. 
- -The signature is equally applicable to `SignedShardBlock`, -which the builder can publish as soon as the signed header is seen. - -```python -class SignedShardBlockHeader(Container): - message: ShardBlockHeader + # Signature by builder. + # Once accepted by proposer, the signatures is the aggregate of both. signature: BLSSignature ``` @@ -390,44 +341,26 @@ Reference version of `ShardBlobHeader`, substituting the body for just a hash-tr class ShardBlobReference(Container): slot: Slot shard: Shard - # Blob hash-tree-root for reference, enough for uniqueness - body_root: Root -``` - -### `SignedShardBlobReference` - -`ShardBlobReference`, signed by the blob builder. The builder-signature is part of the block identity. - -```python -class SignedShardBlobReference(Container): - message: ShardBlobReference - signature: BLSSignature -``` - -### `ShardBlockReference` - -```python -class ShardBlockReference(Container): - # Blob, minimized for efficient slashing - signed_blob_reference: SignedShardBlobReference # Proposer of the shard-blob proposer_index: ValidatorIndex -``` - -### `SignedShardBlockReference` - -```python -class SignedShardBlockReference(Container): - message: ShardBlockReference - signature: BLSSignature + # Builder of the data + builder_index: BuilderIndex + # Blob hash-tree-root for slashing reference + body_root: Root ``` ### `ShardProposerSlashing` ```python class ShardProposerSlashing(Container): - signed_reference_1: SignedShardBlockReference - signed_reference_2: SignedShardBlockReference + slot: Slot + shard: Shard + proposer_index: ValidatorIndex + builder_index: BuilderIndex + body_root_1: Root + body_root_2: Root + signature_1: BLSSignature + signature_2: BLSSignature ``` ### `ShardWork` @@ -552,7 +485,7 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard Return the proposer's index of shard block at ``slot``. """ epoch = compute_epoch_at_slot(slot) - seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_PROPOSER) + uint_to_bytes(slot) + uint_to_bytes(shard)) + seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_BLOB) + uint_to_bytes(slot) + uint_to_bytes(shard)) # Proposer must have sufficient balance to pay for worst case fee burn EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = ( @@ -717,12 +650,10 @@ def charge_builder(state: BeaconState, index: BuilderIndex, fee: Gwei) -> None: ##### `process_shard_header` ```python -def process_shard_header(state: BeaconState, signed_block_header: SignedShardBlockHeader) -> None: - block_header: ShardBlockHeader = signed_block_header.message - signed_blob_header: SignedShardBlobHeader = block_header.signed_blob_header - blob_header: ShardBlobHeader = signed_blob_header.message - slot = blob_header.slot - shard = blob_header.shard +def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None: + header: ShardBlobHeader = signed_header.message + slot = header.slot + shard = header.shard # Verify the header is not 0, and not from the future. assert Slot(0) < slot <= state.slot @@ -733,7 +664,7 @@ def process_shard_header(state: BeaconState, signed_block_header: SignedShardBlo assert shard < get_active_shard_count(state, header_epoch) # Verify that the block root matches, # to ensure the header will only be included in this specific Beacon Chain sub-tree. 
- assert blob_header.body_summary.beacon_block_root == get_block_root_at_slot(state, slot - 1) + assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, slot - 1) # Check that this data is still pending committee_work = state.shard_buffer[slot % SHARD_STATE_MEMORY_SLOTS][shard] @@ -741,24 +672,17 @@ def process_shard_header(state: BeaconState, signed_block_header: SignedShardBlo # Check that this header is not yet in the pending list current_headers: List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] = committee_work.status.value - header_root = hash_tree_root(block_header) + header_root = hash_tree_root(header) assert header_root not in [pending_header.root for pending_header in current_headers] - # Verify proposer - assert block_header.proposer_index == get_shard_proposer_index(state, slot, shard) - # Verify proposer signature - block_signing_root = compute_signing_root(block_header, get_domain(state, DOMAIN_SHARD_PROPOSER)) - proposer_pubkey = state.validators[block_header.proposer_index].pubkey - assert bls.Verify(proposer_pubkey, block_signing_root, signed_block_header.signature) + # Verify proposer matches + assert header.proposer_index == get_shard_proposer_index(state, slot, shard) - # Verify builder requirements - blob_summary: ShardBlobBodySummary = blob_header.body_summary - builder_index = blob_summary.builder_index - - # Verify builder signature - builder = state.builders[builder_index] - blob_signing_root = compute_signing_root(blob_header, get_domain(state, DOMAIN_SHARD_BUILDER)) # TODO new constant - assert bls.Verify(builder.pubkey, blob_signing_root, signed_blob_header.signature) + # Verify builder and proposer aggregate signature + blob_signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_BLOB)) + builder_pubkey = state.builders[header.builder_index].pubkey + proposer_pubkey = state.validators[header.proposer_index].pubkey + assert bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_header.signature) # Verify the length by verifying the degree. body_summary = header.body_summary @@ -771,10 +695,10 @@ def process_shard_header(state: BeaconState, signed_block_header: SignedShardBlo # Charge builder, with hard balance requirement fee = Gwei(123) # TODO EIP 1559 like fee? Burn some of it? - charge_builder(state, builder_index, fee) + charge_builder(state, header.builder_index, fee) # TODO: proposer is charged for confirmed headers (see charge_confirmed_shard_fees). 
# Need to align incentive, so proposer does not gain from including unconfirmed headers - increase_balance(state, block_header.proposer_index, fee) + increase_balance(state, blob_header.proposer_index, fee) # Initialize the pending header index = compute_committee_index_from_shard(state, slot, shard) @@ -801,32 +725,33 @@ The goal is to ensure that a proof can only be constructed if `deg(B) < l` (ther ```python def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: ShardProposerSlashing) -> None: - reference_1: ShardBlockReference = proposer_slashing.signed_reference_1.message - reference_2 : ShardBlockReference = proposer_slashing.signed_reference_2.message - blob_1 = reference_1.signed_blob_reference.message - blob_2 = reference_2.signed_blob_reference.message + # Verify the headers are different + assert proposer_slashing.body_root_1 != proposer_slashing.body_root_2 + + slot = proposer_slashing.slot + shard = proposer_slashing.shard + proposer_index = proposer_slashing.proposer_index + builder_index = proposer_slashing.builder_index - # Verify header slots match - assert blob_1.slot == blob_2.slot - # Verify header shards match - assert blob_1.shard == blob_2.shard - # Verify header proposer indices match - assert reference_1.proposer_index == reference_2.proposer_index - # Verify the headers are different (i.e. different body, or different builder signature) - assert ( - blob_1.body_root != blob_2.body_root - or reference_1.signed_blob_reference.signature != reference_2.signed_blob_reference.signature - ) # Verify the proposer is slashable - proposer = state.validators[reference_1.proposer_index] + proposer = state.validators[proposer_index] assert is_slashable_validator(proposer, get_current_epoch(state)) - # Verify signatures - for signed_header in (proposer_slashing.signed_reference_1, proposer_slashing.signed_reference_2): - domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_at_slot(signed_header.message.slot)) - signing_root = compute_signing_root(signed_header.message, domain) - assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature) - slash_validator(state, reference_1.proposer_index) + reference_1 = ShardBlobReference(slot=slot, shard=shard, + proposer_index=proposer_index, builder_index=builder_index, + body_root= proposer_slashing.body_root_1) + reference_2 = ShardBlobReference(slot=slot, shard=shard, + proposer_index=proposer_index, builder_index=builder_index, + body_root= proposer_slashing.body_root_2) + proposer_pubkey = proposer.pubkey + builder_pubkey = state.builders[builder_index].pubkey + domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_at_slot(slot)) + signing_root_1 = compute_signing_root(reference_1, domain) + signing_root_2 = compute_signing_root(reference_2, domain) + assert bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], signing_root_1, proposer_slashing.signature_1) + assert bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], signing_root_2, proposer_slashing.signature_2) + + slash_validator(state, proposer_index) ``` ### Epoch transition From 5034e2d7bc194613a11e63359f4dcff2d1dd993a Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 21 Jun 2021 23:33:10 +0200 Subject: [PATCH 36/75] update shard spec wording + fix shard slashings --- specs/sharding/beacon-chain.md | 54 ++++++++++++++++++---------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index ebfaa89eb..4bf5f7a03 100644 --- 
a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -78,8 +78,7 @@ using KZG10 commitments to commit to data to remove any need for fraud proofs (a - **Data**: A list of KZG points, to translate a byte string into - **Blob**: Data with commitments and meta-data, like a flattened bundle of L2 transactions. - **Builder**: Builds blobs and bids for proposal slots with fee-paying blob-headers, responsible for availability. -- **Shard proposer**: Validator, selects a signed blob-header, taking bids for shard data opportunity. -- **Shard block**: Unique per `(slot, shard, proposer)`, selected signed blob +- **Shard proposer**: Validator, taking bids for shard data opportunity, co-signs with builder to propose the blob. ## Custom types @@ -132,12 +131,12 @@ The following values are (non-configurable) constants used throughout the specif | `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (= 256) | Number of slots for which shard commitments and confirmation status is directly available in the state | | `BUILDER_REGISTRY_LIMIT` | `uint64(2**40)` (= 1,099,511,627,776) | builders | -### Shard block samples +### Shard blob samples | Name | Value | Notes | | - | - | - | -| `MAX_SAMPLES_PER_BLOCK` | `uint64(2**11)` (= 2,048) | 248 * 2,048 = 507,904 bytes | -| `TARGET_SAMPLES_PER_BLOCK` | `uint64(2**10)` (= 1,024) | 248 * 1,024 = 253,952 bytes | +| `MAX_SAMPLES_PER_BLOB` | `uint64(2**11)` (= 2,048) | 248 * 2,048 = 507,904 bytes | +| `TARGET_SAMPLES_PER_BLOB` | `uint64(2**10)` (= 1,024) | 248 * 1,024 = 253,952 bytes | ### Precomputed size verification points @@ -145,7 +144,7 @@ The following values are (non-configurable) constants used throughout the specif | - | - | | `G1_SETUP` | Type `List[G1]`. The G1-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. | | `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]` | -| `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // int(MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE), MODULUS)` | +| `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // int(MAX_SAMPLES_PER_BLOB * POINTS_PER_SAMPLE), MODULUS)` | ### Gwei values @@ -176,7 +175,7 @@ class AttestationData(Container): source: Checkpoint target: Checkpoint # Hash-tree-root of ShardBlock - shard_block_root: Root # [New in Sharding] + shard_blob_root: Root # [New in Sharding] ``` ### `BeaconBlockBody` @@ -236,7 +235,7 @@ class ShardBlobBody(Container): # Proof that the degree < commitment.length degree_proof: BLSCommitment # The actual data. Should match the commitment and degree proof. - data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOCK] + data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOB] # Latest block root of the Beacon Chain, before shard_blob.slot beacon_block_root: Root # TODO: fee payment amount fields (EIP 1559 like) @@ -296,7 +295,7 @@ class ShardBlobHeader(Container): ### `SignedShardBlob` -Full blob data, signed by the shard builder, ensuring fee payment. +Full blob data, signed by the shard builder (ensuring fee payment) and shard proposer (ensuring a single proposal). ```python class SignedShardBlob(Container): @@ -307,7 +306,7 @@ class SignedShardBlob(Container): ### `SignedShardBlobHeader` Header of the blob, the signature is equally applicable to `SignedShardBlob`. -Shard proposers can accept `SignedShardBlobHeader` as a data-transaction. +Shard proposers can accept `SignedShardBlobHeader` as a data-transaction by co-signing the header. 
```python class SignedShardBlobHeader(Container): @@ -356,7 +355,8 @@ class ShardProposerSlashing(Container): slot: Slot shard: Shard proposer_index: ValidatorIndex - builder_index: BuilderIndex + builder_index_1: BuilderIndex + builder_index_2: BuilderIndex body_root_1: Root body_root_2: Root signature_1: BLSSignature @@ -400,13 +400,13 @@ def compute_previous_slot(slot: Slot) -> Slot: ```python def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint64, adjustment_quotient: uint64) -> Gwei: - if shard_block_length > TARGET_SAMPLES_PER_BLOCK: - delta = max(1, prev_gasprice * (shard_block_length - TARGET_SAMPLES_PER_BLOCK) - // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient) + if shard_block_length > TARGET_SAMPLES_PER_BLOB: + delta = max(1, prev_gasprice * (shard_block_length - TARGET_SAMPLES_PER_BLOB) + // TARGET_SAMPLES_PER_BLOB // adjustment_quotient) return min(prev_gasprice + delta, MAX_GASPRICE) else: - delta = max(1, prev_gasprice * (TARGET_SAMPLES_PER_BLOCK - shard_block_length) - // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient) + delta = max(1, prev_gasprice * (TARGET_SAMPLES_PER_BLOB - shard_block_length) + // TARGET_SAMPLES_PER_BLOB // adjustment_quotient) return max(prev_gasprice, MIN_GASPRICE + delta) - delta ``` @@ -493,7 +493,7 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard * HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT ) min_effective_balance = ( - beacon_state.shard_gasprice * MAX_SAMPLES_PER_BLOCK // TARGET_SAMPLES_PER_BLOCK + beacon_state.shard_gasprice * MAX_SAMPLES_PER_BLOB // TARGET_SAMPLES_PER_BLOB + EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION ) indices = get_active_validator_indices(state, epoch) @@ -597,7 +597,7 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N current_headers: Sequence[PendingShardHeader] = committee_work.status.value # Find the corresponding header, abort if it cannot be found - header_index = [header.root for header in current_headers].index(attestation.data.shard_block_root) + header_index = [header.root for header in current_headers].index(attestation.data.shard_blob_root) pending_header: PendingShardHeader = current_headers[header_index] full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) @@ -731,25 +731,27 @@ def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: Shard slot = proposer_slashing.slot shard = proposer_slashing.shard proposer_index = proposer_slashing.proposer_index - builder_index = proposer_slashing.builder_index # Verify the proposer is slashable proposer = state.validators[proposer_index] assert is_slashable_validator(proposer, get_current_epoch(state)) reference_1 = ShardBlobReference(slot=slot, shard=shard, - proposer_index=proposer_index, builder_index=builder_index, + proposer_index=proposer_index, + builder_index=proposer_slashing.builder_index_1, body_root= proposer_slashing.body_root_1) reference_2 = ShardBlobReference(slot=slot, shard=shard, - proposer_index=proposer_index, builder_index=builder_index, + proposer_index=proposer_index, + builder_index=proposer_slashing.builder_index_1, body_root= proposer_slashing.body_root_2) - proposer_pubkey = proposer.pubkey - builder_pubkey = state.builders[builder_index].pubkey + # The builders are not slashed, the proposer co-signed with them + builder_pubkey_1 = state.builders[proposer_slashing.builder_index_1].pubkey + builder_pubkey_2 = state.builders[proposer_slashing.builder_index_2].pubkey domain = 
get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_at_slot(slot)) signing_root_1 = compute_signing_root(reference_1, domain) signing_root_2 = compute_signing_root(reference_2, domain) - assert bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], signing_root_1, proposer_slashing.signature_1) - assert bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], signing_root_2, proposer_slashing.signature_2) + assert bls.FastAggregateVerify([builder_pubkey_1, proposer.pubkey], signing_root_1, proposer_slashing.signature_1) + assert bls.FastAggregateVerify([builder_pubkey_2, proposer.pubkey], signing_root_2, proposer_slashing.signature_2) slash_validator(state, proposer_index) ``` @@ -828,7 +830,7 @@ def charge_confirmed_shard_fees(state: BeaconState) -> None: proposer = get_shard_proposer_index(state, slot, Shard(shard_index)) fee = ( (state.shard_gasprice * commitment.length) - // TARGET_SAMPLES_PER_BLOCK + // TARGET_SAMPLES_PER_BLOB ) decrease_balance(state, proposer, fee) From b25afc88fd2686f9d9e94447b7db6f3e25d391da Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 22 Jun 2021 00:10:47 +0200 Subject: [PATCH 37/75] update networking spec with aggregate proposer/builder types, update TOCs --- specs/sharding/beacon-chain.md | 2 +- specs/sharding/p2p-interface.md | 140 +++++++++++++++++++------------- 2 files changed, 83 insertions(+), 59 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 4bf5f7a03..6378389a7 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -17,7 +17,7 @@ - [Shard Work Status](#shard-work-status) - [Preset](#preset) - [Misc](#misc-1) - - [Shard block samples](#shard-block-samples) + - [Shard blob samples](#shard-blob-samples) - [Precomputed size verification points](#precomputed-size-verification-points) - [Gwei values](#gwei-values) - [Configuration](#configuration) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index e9b561312..4eb3f6f2e 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -13,10 +13,11 @@ - [Misc](#misc) - [Gossip domain](#gossip-domain) - [Topics and messages](#topics-and-messages) - - [Shard block subnets](#shard-block-subnets) - - [`shard_block_{subnet_id}`](#shard_block_subnet_id) + - [Shard blob subnets](#shard-blob-subnets) + - [`shard_blob_{subnet_id}`](#shard_blob_subnet_id) - [Global topics](#global-topics) - - [`shard_block_header`](#shard_block_header) + - [`shard_blob_header`](#shard_blob_header) + - [`shard_blob_tx`](#shard_blob_tx) - [`shard_proposer_slashing`](#shard_proposer_slashing) @@ -34,7 +35,7 @@ The adjustments and additions for Shards are outlined in this document. | Name | Value | Description | | ---- | ----- | ----------- | -| `SHARD_BLOCK_SUBNET_COUNT` | `64` | The number of `shard_block_{subnet_id}` subnets used in the gossipsub protocol. | +| `SHARD_BLOB_SUBNET_COUNT` | `64` | The number of `shard_blob_{subnet_id}` subnets used in the gossipsub protocol. | ## Gossip domain @@ -42,24 +43,25 @@ The adjustments and additions for Shards are outlined in this document. 
Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are: -| Name | Message Type | -|----------------------------------|---------------------------| -| `shard_block_{subnet_id}` | `SignedShardBlock` | -| `shard_block_header` | `SignedShardBlockHeader` | -| `shard_proposer_slashing` | `ShardProposerSlashing` | +| Name | Message Type | +|---------------------------------|--------------------------| +| `shard_blob_{subnet_id}` | `SignedShardBlob` | +| `shard_blob_header` | `SignedShardBlobHeader` | +| `shard_blob_tx` | `SignedShardBlobHeader` | +| `shard_proposer_slashing` | `ShardProposerSlashing` | The [DAS network specification](./das-p2p.md) defines additional topics. -#### Shard block subnets +#### Shard blob subnets -Shard block subnets are used by builders to make their blobs available after selection by shard proposers. +Shard blob subnets are used by builders to make their blobs available after selection by shard proposers. -##### `shard_block_{subnet_id}` +##### `shard_blob_{subnet_id}` -Shard block data, in the form of a `SignedShardBlock` is published to the `shard_block_{subnet_id}` subnets. +Shard blob data, in the form of a `SignedShardBlob` is published to the `shard_blob_{subnet_id}` subnets. ```python -def compute_subnet_for_shard_block(state: BeaconState, slot: Slot, shard: Shard) -> uint64: +def compute_subnet_for_shard_blob(state: BeaconState, slot: Slot, shard: Shard) -> uint64: """ Compute the correct subnet for a shard blob publication. Note, this mimics compute_subnet_for_attestation(). @@ -69,75 +71,97 @@ def compute_subnet_for_shard_block(state: BeaconState, slot: Slot, shard: Shard) slots_since_epoch_start = Slot(slot % SLOTS_PER_EPOCH) committees_since_epoch_start = committees_per_slot * slots_since_epoch_start - return uint64((committees_since_epoch_start + committee_index) % SHARD_BLOCK_SUBNET_COUNT) + return uint64((committees_since_epoch_start + committee_index) % SHARD_BLOB_SUBNET_COUNT) ``` -The following validations MUST pass before forwarding the `signed_block` on the horizontal subnet or creating samples for it. +The following validations MUST pass before forwarding the `signed_blob`, +on the horizontal subnet or creating samples for it. Alias `blob = signed_blob.message`. -We define some aliases to the nested contents of `signed_block`: -```python -block: ShardBlock = signed_block.message -signed_blob: SignedShardBlob = block.signed_blob -blob: ShardBlob = signed_blob.message -``` - -- _[IGNORE]_ The `block` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- +- _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `blob.slot <= current_slot` (a client MAY queue future blobs for processing at the appropriate slot). -- _[IGNORE]_ The `blob` is new enough to be still be processed -- +- _[IGNORE]_ The `blob` is new enough to still be processed -- i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)` -- _[REJECT]_ The shard should have a committee at slot -- +- _[REJECT]_ The shard blob is for an active shard -- + i.e. `blob.shard < get_active_shard_count(state, compute_epoch_at_slot(blob.slot))` +- _[REJECT]_ The `blob.shard` MUST have a committee at the `blob.slot` -- i.e. validate that `compute_committee_index_from_shard(state, blob.slot, blob.shard)` doesn't raise an error - _[REJECT]_ The shard blob is for the correct subnet -- - i.e. 
`compute_subnet_for_shard_block(state, blob.slot, blob.shard) == subnet_id` -- _[IGNORE]_ The block is the first block with valid signature received for the `(block.proposer_index, blob.slot, blob.shard)` combination. + i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id` +- _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination. - _[REJECT]_ The blob is not too large, the data MUST NOT be larger than the SSZ list-limit, and a client MAY be more strict. - _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. -- _[REJECT]_ The block proposer signature, `signed_block.signature`, is valid with respect to the `proposer_index` pubkey. - _[REJECT]_ The blob builder exists and has sufficient balance to back the fee payment. -- _[REJECT]_ The blob builder signature, `signed_blob.signature`, is valid with respect to the `builder_index` pubkey. -- _[REJECT]_ The block is proposed by the expected `proposer_index` for the block's slot +- _[REJECT]_ The blob signature is valid for the aggregate of proposer and builder, `signed_blob.signature`, + i.e. `bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_blob.signature)`. +- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's `slot` and `shard`, in the context of the current shuffling (defined by `blob.body.beacon_block_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, - the block MAY be queued for later processing while proposers for the block's branch are calculated -- + the blob MAY be queued for later processing while proposers for the blob's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. #### Global topics -There are two additional global topics for Sharding. +There are three additional global topics for Sharding. -One is used to propagate shard block headers (`shard_block_header`) to all nodes on the network. -Another one is used to propagate shard proposer slashings (`shard_proposer_slashing`). +- `shard_blob_header`: co-signed headers, to be included on-chain, and signaling builders to publish full data. +- `shard_blob_tx`: builder-signed headers, also known as "data transaction". +- `shard_proposer_slashing`: slashings of duplicate shard proposals -##### `shard_block_header` +##### `shard_blob_header` -Shard header data, in the form of a `SignedShardBlockHeader` is published to the global `shard_block_header` subnet. -Shard block headers select shard blob bids by builders, and should be timely to ensure builders can publish the full shard block timely. +Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_blob_header` subnet. +Shard blob headers select shard blob bids by builders, +and should be timely to ensure builders can publish the full shard blob before subsequent attestations. -The following validations MUST pass before forwarding the `signed_block_header` (with inner `message` as `header`) on the network. +The following validations MUST pass before forwarding the `signed_blob_header` on the network. 
Alias `header = signed_blob_header.message`.
+
+- _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
+  i.e. validate that `header.slot <= current_slot`
   (a client MAY queue future headers for processing at the appropriate slot).
-- _[IGNORE]_ The header is new enough to be still be processed --
-  i.e. validate that `compute_epoch_at_slot(blob_header.slot) >= get_previous_epoch(state)`
-- _[IGNORE]_ The header is the first header with valid signature received for the `(block_header.proposer_index, blob_header.slot, blob_header.shard)` combination.
-- _[REJECT]_ The `shard` MUST have a committee at the `slot` --
-  i.e. validate that `compute_committee_index_from_shard(state, blob_header.slot, blob_header.shard)` doesn't raise an error
-- _[REJECT]_ The proposer signature, `signed_shard_block_header.signature`, is valid with respect to the `block_header.proposer_index` pubkey.
+- _[IGNORE]_ The header is new enough to still be processed --
+  i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)`
+- _[REJECT]_ The shard header is for an active shard --
+  i.e. `header.shard < get_active_shard_count(state, compute_epoch_at_slot(header.slot))`
+- _[REJECT]_ The `header.shard` MUST have a committee at the `header.slot` --
+  i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error
+- _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination.
+- _[REJECT]_ The blob builder exists and has sufficient balance to back the fee payment.
+- _[REJECT]_ The header signature is valid for the aggregate of proposer and builder, `signed_blob_header.signature`,
+  i.e. `bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_blob_header.signature)`.
+- _[REJECT]_ The header is proposed by the expected `proposer_index` for the blob's `header.slot` and `header.shard`
   in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`).
   If the `proposer_index` cannot immediately be verified against the expected shuffling,
-  the block MAY be queued for later processing while proposers for the block's branch are calculated --
+  the blob MAY be queued for later processing while proposers for the blob's branch are calculated --
   in such a case _do not_ `REJECT`, instead `IGNORE` this message.
 
+##### `shard_blob_tx`
+
+Shard data-transactions, in the form of a `SignedShardBlobHeader`, are published to the global `shard_blob_tx` topic.
+These shard blob headers are signed solely by the blob builder.
+
+The following validations MUST pass before forwarding the `signed_blob_header` on the network. Alias `header = signed_blob_header.message`.
+
+- _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
+  i.e. 
validate that `header.slot <= current_slot` + (a client MAY queue future headers for processing at the appropriate slot). +- _[IGNORE]_ The header is new enough to still be processed -- + i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)` +- _[REJECT]_ The shard header is for an active shard -- + i.e. `header.shard < get_active_shard_count(state, compute_epoch_at_slot(header.slot))` +- _[REJECT]_ The `header.shard` MUST have a committee at the `header.slot` -- + i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error +- _[IGNORE]_ The header is the first header with valid signature received for the `(header.builder_index, header.slot, header.shard)` combination. +- _[REJECT]_ The blob builder exists and has sufficient balance to back the fee payment. +- _[IGNORE]_ The header fee SHOULD be higher than previously seen headers for `(header.slot, header.shard)`, from any builder. + Propagating nodes MAY increase fee increments in case of spam. +- _[REJECT]_ The header signature is valid for ONLY the builder, `signed_blob_header.signature`, + i.e. `bls.Verify(builder_pubkey, blob_signing_root, signed_blob_header.signature)`. The signature is not an aggregate with the proposer. +- _[REJECT]_ The header is designated for proposal by the expected `proposer_index` for the blob's `header.slot` and `header.shard` + in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`). + If the `proposer_index` cannot immediately be verified against the expected shuffling, + the blob MAY be queued for later processing while proposers for the blob's branch are calculated -- + in such a case _do not_ `REJECT`, instead `IGNORE` this message. ##### `shard_proposer_slashing` @@ -145,6 +169,6 @@ Shard proposer slashings, in the form of `ShardProposerSlashing`, are published The following validations MUST pass before forwarding the `shard_proposer_slashing` on to the network. - _[IGNORE]_ The shard proposer slashing is the first valid shard proposer slashing received - for the proposer with index `proposer_slashing.signed_reference_1.message.proposer_index`. - The `slot` and `shard` are ignored, there are no per-shard slashings. + for the proposer with index `proposer_slashing.proposer_index`. + The `proposer_slashing.slot` and `proposer_slashing.shard` are ignored, there are no repeated or per-shard slashings. - _[REJECT]_ All of the conditions within `process_shard_proposer_slashing` pass validation. 
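
As an illustration of the `shard_blob_tx` anti-spam conditions above: the first-seen and rising-fee rules are deliberately left to client policy. The sketch below is one possible client-side cache, not spec machinery; `SeenShardTx`, `min_fee_increment`, and the `fee` argument are assumed names (the fee-payment fields of `ShardBlobBody` are still a TODO in the beacon-chain spec).

```python
from typing import Dict, Set, Tuple

Key = Tuple[int, int, int]  # (builder_index, slot, shard)


class SeenShardTx:
    """Hypothetical cache for first-seen data-transactions and best fee per (slot, shard)."""

    def __init__(self, min_fee_increment: int = 0):
        self.seen: Set[Key] = set()
        self.best_fee: Dict[Tuple[int, int], int] = {}
        # A node MAY raise this increment when it observes fee-bump spam.
        self.min_fee_increment = min_fee_increment

    def should_forward(self, builder_index: int, slot: int, shard: int, fee: int) -> bool:
        if (builder_index, slot, shard) in self.seen:
            return False  # IGNORE: not the first valid header for this combination
        best = self.best_fee.get((slot, shard))
        if best is not None and fee <= best + self.min_fee_increment:
            return False  # IGNORE: the fee SHOULD exceed previously seen headers
        self.seen.add((builder_index, slot, shard))
        self.best_fee[(slot, shard)] = fee
        return True
```

A real client would additionally bound both maps by slot, dropping entries once the header is no longer new enough to be processed, mirroring the epoch window in the conditions above.
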
From f791fe7d1c322f3e8f2121f6c7a1abd675652085 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 23 Jun 2021 23:33:46 +0200 Subject: [PATCH 38/75] implement review suggestions Co-authored-by: Anton Nashatyrev --- specs/sharding/beacon-chain.md | 89 ++++++++++++++++++---------------- 1 file changed, 47 insertions(+), 42 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 6378389a7..63b9457c4 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -24,9 +24,9 @@ - [Updated containers](#updated-containers) - [`AttestationData`](#attestationdata) - [`BeaconBlockBody`](#beaconblockbody) - - [`Builder`](#builder) - [`BeaconState`](#beaconstate) - [New containers](#new-containers) + - [`Builder`](#builder) - [`DataCommitment`](#datacommitment) - [ShardBlobBody](#shardblobbody) - [`ShardBlobBodySummary`](#shardblobbodysummary) @@ -77,8 +77,8 @@ using KZG10 commitments to commit to data to remove any need for fraud proofs (a - **Data**: A list of KZG points, to translate a byte string into - **Blob**: Data with commitments and meta-data, like a flattened bundle of L2 transactions. -- **Builder**: Builds blobs and bids for proposal slots with fee-paying blob-headers, responsible for availability. -- **Shard proposer**: Validator, taking bids for shard data opportunity, co-signs with builder to propose the blob. +- **Builder**: Independent actor that builds blobs and bids for proposal slots via fee-paying blob-headers, responsible for availability. +- **Shard proposer**: Validator taking bids from blob builders for shard data opportunity, co-signs with builder to propose the blob. ## Custom types @@ -129,7 +129,7 @@ The following values are (non-configurable) constants used throughout the specif | `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block | | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | | | `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (= 256) | Number of slots for which shard commitments and confirmation status is directly available in the state | -| `BUILDER_REGISTRY_LIMIT` | `uint64(2**40)` (= 1,099,511,627,776) | builders | +| `BLOB_BUILDER_REGISTRY_LIMIT` | `uint64(2**40)` (= 1,099,511,627,776) | shard blob builders | ### Shard blob samples @@ -174,7 +174,7 @@ class AttestationData(Container): # FFG vote source: Checkpoint target: Checkpoint - # Hash-tree-root of ShardBlock + # Hash-tree-root of ShardBlob shard_blob_root: Root # [New in Sharding] ``` @@ -183,16 +183,7 @@ class AttestationData(Container): ```python class BeaconBlockBody(merge.BeaconBlockBody): # [extends The Merge block body] shard_proposer_slashings: List[ShardProposerSlashing, MAX_SHARD_PROPOSER_SLASHINGS] - shard_headers: List[SignedShardBlockHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD] -``` - -### `Builder` - -```python -class Builder(Container): - pubkey: BLSPubkey - # TODO: fields for either an expiry mechanism (refunding execution account with remaining balance) - # and/or a builder-transaction mechanism. + shard_headers: List[SignedShardBlobHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD] ``` ### `BeaconState` @@ -203,9 +194,9 @@ class BeaconState(merge.BeaconState): previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # [New fields] - # Builder registry. 
- builders: List[Builder, BUILDER_REGISTRY_LIMIT] - builder_balances: List[Gwei, BUILDER_REGISTRY_LIMIT] + # Blob builder registry. + blob_builders: List[Builder, BLOB_BUILDER_REGISTRY_LIMIT] + blob_builder_balances: List[Gwei, BLOB_BUILDER_REGISTRY_LIMIT] # A ring buffer of the latest slots, with information per active shard. shard_buffer: Vector[List[ShardWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS] shard_gasprice: uint64 @@ -213,6 +204,15 @@ class BeaconState(merge.BeaconState): ## New containers +### `Builder` + +```python +class Builder(Container): + pubkey: BLSPubkey + # TODO: fields for either an expiry mechanism (refunding execution account with remaining balance) + # and/or a builder-transaction mechanism. +``` + ### `DataCommitment` ```python @@ -269,10 +269,10 @@ class ShardBlobBodySummary(Container): class ShardBlob(Container): slot: Slot shard: Shard - # Proposer of the shard-blob - proposer_index: ValidatorIndex # Builder of the data, pays data-fee to proposer builder_index: BuilderIndex + # Proposer of the shard-blob + proposer_index: ValidatorIndex # Blob contents body: ShardBlobBody ``` @@ -285,10 +285,10 @@ Header version of `ShardBlob`. class ShardBlobHeader(Container): slot: Slot shard: Shard - # Proposer of the shard-blob - proposer_index: ValidatorIndex # Builder of the data, pays data-fee to proposer builder_index: BuilderIndex + # Proposer of the shard-blob + proposer_index: ValidatorIndex # Blob contents, without the full data body_summary: ShardBlobBodySummary ``` @@ -322,7 +322,7 @@ class SignedShardBlobHeader(Container): class PendingShardHeader(Container): # KZG10 commitment to the data commitment: DataCommitment - # hash_tree_root of the ShardBlockHeader (stored so that attestations can be checked against it) + # hash_tree_root of the ShardBlobHeader (stored so that attestations can be checked against it) root: Root # Who voted for the header votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] @@ -340,10 +340,10 @@ Reference version of `ShardBlobHeader`, substituting the body for just a hash-tr class ShardBlobReference(Container): slot: Slot shard: Shard - # Proposer of the shard-blob - proposer_index: ValidatorIndex # Builder of the data builder_index: BuilderIndex + # Proposer of the shard-blob + proposer_index: ValidatorIndex # Blob hash-tree-root for slashing reference body_root: Root ``` @@ -643,8 +643,12 @@ def charge_builder(state: BeaconState, index: BuilderIndex, fee: Gwei) -> None: """ Decrease the builder balance at index ``index`` by ``fee``, with underflow check. 
""" - assert state.builder_balances[index] >= fee - state.builder_balances[index] -= fee + # TODO: apply stricter requirement to protect against fee-acceptance race conditions, e.g.: + # - balance per shard (or builders per shard) + # - balance / shard_count > fee + # TODO: also consider requirement to pay for base-fee of shard-data + assert state.blob_builder_balances[index] >= fee + state.blob_builder_balances[index] -= fee ``` ##### `process_shard_header` @@ -680,7 +684,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade # Verify builder and proposer aggregate signature blob_signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_BLOB)) - builder_pubkey = state.builders[header.builder_index].pubkey + builder_pubkey = state.blob_builders[header.builder_index].pubkey proposer_pubkey = state.validators[header.proposer_index].pubkey assert bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_header.signature) @@ -698,7 +702,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade charge_builder(state, header.builder_index, fee) # TODO: proposer is charged for confirmed headers (see charge_confirmed_shard_fees). # Need to align incentive, so proposer does not gain from including unconfirmed headers - increase_balance(state, blob_header.proposer_index, fee) + increase_balance(state, header.proposer_index, fee) # Initialize the pending header index = compute_committee_index_from_shard(state, slot, shard) @@ -725,28 +729,29 @@ The goal is to ensure that a proof can only be constructed if `deg(B) < l` (ther ```python def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: ShardProposerSlashing) -> None: - # Verify the headers are different - assert proposer_slashing.body_root_1 != proposer_slashing.body_root_2 - slot = proposer_slashing.slot shard = proposer_slashing.shard proposer_index = proposer_slashing.proposer_index + reference_1 = ShardBlobReference(slot=slot, shard=shard, + proposer_index=proposer_index, + builder_index=proposer_slashing.builder_index_1, + body_root=proposer_slashing.body_root_1) + reference_2 = ShardBlobReference(slot=slot, shard=shard, + proposer_index=proposer_index, + builder_index=proposer_slashing.builder_index_2, + body_root=proposer_slashing.body_root_2) + + # Verify the signed messages are different + assert reference_1 != reference_2 + # Verify the proposer is slashable proposer = state.validators[proposer_index] assert is_slashable_validator(proposer, get_current_epoch(state)) - reference_1 = ShardBlobReference(slot=slot, shard=shard, - proposer_index=proposer_index, - builder_index=proposer_slashing.builder_index_1, - body_root= proposer_slashing.body_root_1) - reference_2 = ShardBlobReference(slot=slot, shard=shard, - proposer_index=proposer_index, - builder_index=proposer_slashing.builder_index_1, - body_root= proposer_slashing.body_root_2) # The builders are not slashed, the proposer co-signed with them - builder_pubkey_1 = state.builders[proposer_slashing.builder_index_1].pubkey - builder_pubkey_2 = state.builders[proposer_slashing.builder_index_2].pubkey + builder_pubkey_1 = state.blob_builders[proposer_slashing.builder_index_1].pubkey + builder_pubkey_2 = state.blob_builders[proposer_slashing.builder_index_2].pubkey domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_at_slot(slot)) signing_root_1 = compute_signing_root(reference_1, domain) signing_root_2 = compute_signing_root(reference_2, domain) From 
From a7f58ef08acb81a88b279b449335faebc6c717b2 Mon Sep 17 00:00:00 2001
From: protolambda
Date: Wed, 14 Jul 2021 13:19:00 +0200
Subject: [PATCH 39/75] fix comment + handle missing pending headers

---
 specs/sharding/beacon-chain.md | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index 63b9457c4..2ab0f68cd 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -367,7 +367,7 @@ class ShardProposerSlashing(Container):
 
 ```python
 class ShardWork(Container):
-    # Upon confirmation the data is reduced to just the header.
+    # Upon confirmation the data is reduced to just the commitment.
     status: Union[  # See Shard Work Status enum
         None,  # SHARD_WORK_UNCONFIRMED
         DataCommitment,  # SHARD_WORK_CONFIRMED
@@ -597,7 +597,15 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N
     current_headers: Sequence[PendingShardHeader] = committee_work.status.value
 
     # Find the corresponding header, abort if it cannot be found
-    header_index = [header.root for header in current_headers].index(attestation.data.shard_blob_root)
+    header_index = len(current_headers)
+    for i, header in enumerate(current_headers):
+        if attestation.data.shard_blob_root == header.root:
+            header_index = i
+            break
+    # Attestations for an unknown header do not count towards shard confirmations, but can otherwise be valid.
+    if header_index == len(current_headers):
+        # TODO: Attestations may be re-included if headers are included late.
+        return
 
     pending_header: PendingShardHeader = current_headers[header_index]
     full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)

From 35df4b2d4f81b79dba5651614c5581f8bb400a90 Mon Sep 17 00:00:00 2001
From: Danny Ryan
Date: Fri, 23 Jul 2021 07:54:26 -0600
Subject: [PATCH 40/75] rephrase context enum for blocks_by requests

---
 specs/merge/p2p-interface.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/specs/merge/p2p-interface.md b/specs/merge/p2p-interface.md
index 4af3f8e10..712a17549 100644
--- a/specs/merge/p2p-interface.md
+++ b/specs/merge/p2p-interface.md
@@ -81,7 +81,7 @@ details on how to handle transitioning gossip topics for the Merge.
 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
 
 Request and Response remain unchanged.
-`MERGE_FORK_VERSION` is used as an additional `context` to specify the Merge block type.
+The Merge fork-digest is introduced to the `context` enum to specify the Merge block type.
 
 Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
@@ -98,7 +98,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
 
 Request and Response remain unchanged.
-`MERGE_FORK_VERSION` is used as an additional `context` to specify the Merge block type.
+The Merge fork-digest is introduced to the `context` enum to specify the Merge block type.
 
 Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
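To make the `context` dispatch in PATCH 39's sibling change concrete: each entry of the `context` enum is the 4-byte fork-digest of one fork, and clients decode each BlocksBy* v2 response chunk as the block type keyed by that digest. A minimal, self-contained sketch of such a lookup; note that `fork_digest` below is a simplified stand-in for the spec's `compute_fork_digest` (which hash-tree-roots a `ForkData` container), and the type names are illustrative placeholders:

```python
from hashlib import sha256
from typing import Dict

# Simplified stand-in for compute_fork_digest (illustrative only):
# the real function mixes fork_version and genesis_validators_root
# via hash_tree_root of a ForkData container.
def fork_digest(fork_version: bytes, genesis_validators_root: bytes) -> bytes:
    return sha256(fork_version + genesis_validators_root).digest()[:4]

GENESIS_FORK_VERSION = bytes.fromhex("00000000")
ALTAIR_FORK_VERSION = bytes.fromhex("01000000")
MERGE_FORK_VERSION = bytes.fromhex("02000000")

def build_context_table(genesis_validators_root: bytes) -> Dict[bytes, str]:
    # One `context` entry per fork: a response chunk is decoded as the
    # block type belonging to the digest it is prefixed with.
    return {
        fork_digest(GENESIS_FORK_VERSION, genesis_validators_root): "phase0.SignedBeaconBlock",
        fork_digest(ALTAIR_FORK_VERSION, genesis_validators_root): "altair.SignedBeaconBlock",
        fork_digest(MERGE_FORK_VERSION, genesis_validators_root): "merge.SignedBeaconBlock",
    }

root = b"\x00" * 32
table = build_context_table(root)
assert table[fork_digest(MERGE_FORK_VERSION, root)] == "merge.SignedBeaconBlock"
```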
From 37da2018a3ea860c3661b2d516d923e03a15aa7d Mon Sep 17 00:00:00 2001
From: Danny Ryan
Date: Fri, 23 Jul 2021 08:22:53 -0600
Subject: [PATCH 41/75] bump VERSION.txt to 1.1.0-beta.2

---
 tests/core/pyspec/eth2spec/VERSION.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt
index 854663a0e..fd5a6fed6 100644
--- a/tests/core/pyspec/eth2spec/VERSION.txt
+++ b/tests/core/pyspec/eth2spec/VERSION.txt
@@ -1 +1 @@
-1.1.0-beta.1
\ No newline at end of file
+1.1.0-beta.2
\ No newline at end of file

From 1a966d1e378dc719aa12f6eacae854f531eab9fe Mon Sep 17 00:00:00 2001
From: protolambda
Date: Mon, 26 Jul 2021 15:26:55 +0200
Subject: [PATCH 42/75] work in progress new sharding fee mechanism

---
 specs/sharding/beacon-chain.md | 297 +++++++++++++++++----------------
 1 file changed, 156 insertions(+), 141 deletions(-)

diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index 2ab0f68cd..40eabe548 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -15,8 +15,11 @@
   - [Misc](#misc)
   - [Domain types](#domain-types)
   - [Shard Work Status](#shard-work-status)
-- [Preset](#preset)
   - [Misc](#misc-1)
+  - [Participation flag indices](#participation-flag-indices)
+  - [Incentivization weights](#incentivization-weights)
+- [Preset](#preset)
+  - [Misc](#misc-2)
   - [Shard blob samples](#shard-blob-samples)
   - [Precomputed size verification points](#precomputed-size-verification-points)
   - [Gwei values](#gwei-values)
@@ -28,6 +31,7 @@
   - [New containers](#new-containers)
     - [`Builder`](#builder)
     - [`DataCommitment`](#datacommitment)
+    - [`AttestedDataCommitment`](#attesteddatacommitment)
     - [ShardBlobBody](#shardblobbody)
     - [`ShardBlobBodySummary`](#shardblobbodysummary)
     - [`ShardBlob`](#shardblob)
@@ -39,15 +43,15 @@
     - [`ShardProposerSlashing`](#shardproposerslashing)
     - [`ShardWork`](#shardwork)
   - [Helper functions](#helper-functions)
-    - [Misc](#misc-2)
+    - [Misc](#misc-3)
       - [`next_power_of_two`](#next_power_of_two)
       - [`compute_previous_slot`](#compute_previous_slot)
-      - [`compute_updated_gasprice`](#compute_updated_gasprice)
+      - [`compute_updated_sample_price`](#compute_updated_sample_price)
       - [`compute_committee_source_epoch`](#compute_committee_source_epoch)
+      - [`batch_apply_participation_flag`](#batch_apply_participation_flag)
     - [Beacon state accessors](#beacon-state-accessors)
       - [Updated `get_committee_count_per_slot`](#updated-get_committee_count_per_slot)
       - [`get_active_shard_count`](#get_active_shard_count)
-      - [`compute_proposer_index`](#compute_proposer_index)
      - [`get_shard_proposer_index`](#get_shard_proposer_index)
       - [`get_start_shard`](#get_start_shard)
       - [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
@@ -55,12 +59,10 @@
     - [Block processing](#block-processing)
       - [Operations](#operations)
         - [Extended Attestation processing](#extended-attestation-processing)
-        - [`charge_builder`](#charge_builder)
         - [`process_shard_header`](#process_shard_header)
         - [`process_shard_proposer_slashing`](#process_shard_proposer_slashing)
     - [Epoch transition](#epoch-transition)
       - [`process_pending_shard_confirmations`](#process_pending_shard_confirmations)
-      - [`charge_confirmed_shard_fees`](#charge_confirmed_shard_fees)
       - [`reset_pending_shard_work`](#reset_pending_shard_work)
 
@@ -118,6 +120,30 @@ The following values are (non-configurable) constants used throughout the specif
 | `SHARD_WORK_CONFIRMED` | `1` | Confirmed, reduced to just the commitment |
 | `SHARD_WORK_PENDING` | `2` | Pending, a list of competing headers |
 
+### Misc
+
+TODO: `PARTICIPATION_FLAG_WEIGHTS` backwards-compatibility is difficult, depends on usage.
+
+| Name | Value |
+| - | - |
+| `PARTICIPATION_FLAG_WEIGHTS` | `[TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT, TIMELY_SHARD_WEIGHT]` |
+
+### Participation flag indices
+
+| Name | Value |
+| - | - |
+| `TIMELY_SHARD_FLAG_INDEX` | `3` |
+
+### Incentivization weights
+
+TODO: determine weight for shard attestations
+
+| Name | Value |
+| - | - |
+| `TIMELY_SHARD_WEIGHT` | `uint64(8)` |
+
+TODO: `WEIGHT_DENOMINATOR` needs to be adjusted, but this breaks a lot of Altair code.
+
 ## Preset
 
 ### Misc
 
 | Name | Value | Notes |
 | - | - | - |
 | `MAX_SHARDS` | `uint64(2**10)` (= 1,024) | Theoretical max shard count (used to determine data structure sizes) |
-| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* |
+| `SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Sample price may decrease/increase by at most exp(1 / this value) *per epoch* |
 | `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block |
 | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | |
 | `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (= 256) | Number of slots for which shard commitments and confirmation status is directly available in the state |
@@ -150,8 +176,8 @@ The following values are (non-configurable) constants used throughout the specif
 
 | Name | Value | Unit | Description |
 | - | - | - | - |
-| `MAX_GASPRICE` | `Gwei(2**33)` (= 8,589,934,592) | Gwei | Max gasprice charged for a TARGET-sized shard block |
-| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | Min gasprice charged for a TARGET-sized shard block |
+| `MAX_SAMPLE_PRICE` | `Gwei(2**33)` (= 8,589,934,592) | Gwei | Max sample price charged for a TARGET-sized shard blob |
+| `MIN_SAMPLE_PRICE` | `Gwei(2**3)` (= 8) | Gwei | Min sample price charged for a TARGET-sized shard blob |
 
 ## Configuration
 
@@ -199,7 +225,7 @@ class BeaconState(merge.BeaconState):
     blob_builder_balances: List[Gwei, BLOB_BUILDER_REGISTRY_LIMIT]
     # A ring buffer of the latest slots, with information per active shard.
     shard_buffer: Vector[List[ShardWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS]
-    shard_gasprice: uint64
+    shard_sample_price: uint64
 ```
 
 ## New containers
 
@@ -223,6 +249,18 @@ class DataCommitment(Container):
     length: uint64
 ```
 
+### `AttestedDataCommitment`
+
+```python
+class AttestedDataCommitment(Container):
+    # KZG10 commitment to the data, and length
+    commitment: DataCommitment
+    # hash_tree_root of the ShardBlobHeader (stored so that attestations can be checked against it)
+    root: Root
+    # The proposer who included the shard-header
+    includer_index: ValidatorIndex
+```
+
 ### ShardBlobBody
 
 Unsigned shard data, bundled by a shard-builder.
 
@@ -238,7 +276,10 @@ class ShardBlobBody(Container):
     data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOB]
     # Latest block root of the Beacon Chain, before shard_blob.slot
     beacon_block_root: Root
-    # TODO: fee payment amount fields (EIP 1559 like)
+    # fee payment fields (EIP 1559 like)
+    # TODO: express in MWei instead?
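+    # Note: both caps below are per sample; process_shard_header scales them by the commitment length.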
+    max_priority_fee_per_sample: Gwei
+    max_fee_per_sample: Gwei
 ```
 
 ### `ShardBlobBodySummary`
 
@@ -258,7 +299,10 @@ class ShardBlobBodySummary(Container):
     data_root: Root
     # Latest block root of the Beacon Chain, before shard_blob.slot
     beacon_block_root: Root
-    # TODO: fee payment amount fields (EIP 1559 like)
+    # fee payment fields (EIP 1559 like)
+    # TODO: express in MWei instead?
+    max_priority_fee_per_sample: Gwei
+    max_fee_per_sample: Gwei
 ```
 
 ### `ShardBlob`
 
@@ -320,10 +364,8 @@ class SignedShardBlobHeader(Container):
 
 ```python
 class PendingShardHeader(Container):
-    # KZG10 commitment to the data
-    commitment: DataCommitment
-    # hash_tree_root of the ShardBlobHeader (stored so that attestations can be checked against it)
-    root: Root
+    # The commitment that is attested
+    attested: AttestedDataCommitment
     # Who voted for the header
     votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
     # Sum of effective balances of votes
@@ -370,7 +412,7 @@ class ShardWork(Container):
     # Upon confirmation the data is reduced to just the commitment.
     status: Union[  # See Shard Work Status enum
         None,  # SHARD_WORK_UNCONFIRMED
-        DataCommitment,  # SHARD_WORK_CONFIRMED
+        ConfirmedDataCommitment,  # SHARD_WORK_CONFIRMED
         List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD]  # SHARD_WORK_PENDING
     ]
 ```
@@ -396,18 +438,16 @@ def compute_previous_slot(slot: Slot) -> Slot:
         return Slot(0)
 ```
 
-#### `compute_updated_gasprice`
+#### `compute_updated_sample_price`
 
 ```python
-def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint64, adjustment_quotient: uint64) -> Gwei:
-    if shard_block_length > TARGET_SAMPLES_PER_BLOB:
-        delta = max(1, prev_gasprice * (shard_block_length - TARGET_SAMPLES_PER_BLOB)
-                       // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
-        return min(prev_gasprice + delta, MAX_GASPRICE)
+def compute_updated_sample_price(prev_price: Gwei, samples: uint64, adjustment_quotient: uint64) -> Gwei:
+    if samples > TARGET_SAMPLES_PER_BLOB:
+        delta = max(1, prev_price * (samples - TARGET_SAMPLES_PER_BLOB) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
+        return min(prev_price + delta, MAX_SAMPLE_PRICE)
     else:
-        delta = max(1, prev_gasprice * (TARGET_SAMPLES_PER_BLOB - shard_block_length)
-                       // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
-        return max(prev_gasprice, MIN_GASPRICE + delta) - delta
+        delta = max(1, prev_price * (TARGET_SAMPLES_PER_BLOB - samples) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
+        return max(prev_price, MIN_SAMPLE_PRICE + delta) - delta
 ```
 
 #### `compute_committee_source_epoch`
 
@@ -423,6 +463,20 @@ def compute_committee_source_epoch(epoch: Epoch, period: uint64) -> Epoch:
     return source_epoch
 ```
 
+#### `batch_apply_participation_flag`
+
+```python
+def batch_apply_participation_flag(state: BeaconState, bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE],
+                                   epoch: Epoch, full_committee: Sequence[ValidatorIndex], flag_index: int):
+    if epoch == get_current_epoch(state):
+        epoch_participation = state.current_epoch_participation
+    else:
+        epoch_participation = state.previous_epoch_participation
+    for bit, index in zip(bits, full_committee):
+        if bit:
+            epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
+```
+
 ### Beacon state accessors
 
 #### Updated `get_committee_count_per_slot`
 
@@ -449,34 +503,6 @@ def get_active_shard_count(state: BeaconState, epoch: Epoch) -> uint64:
     return INITIAL_ACTIVE_SHARDS
 ```
 
-#### `compute_proposer_index`
-
-Updated version to get a proposer index that will only allow proposers with a certain minimum balance,
-ensuring that the balance is always sufficient to cover gas costs.
-
-```python
-def compute_proposer_index(beacon_state: BeaconState,
-                           indices: Sequence[ValidatorIndex],
-                           seed: Bytes32,
-                           min_effective_balance: Gwei = Gwei(0)) -> ValidatorIndex:
-    """
-    Return from ``indices`` a random index sampled by effective balance.
-    """
-    assert len(indices) > 0
-    MAX_RANDOM_BYTE = 2**8 - 1
-    i = uint64(0)
-    total = uint64(len(indices))
-    while True:
-        candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
-        random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
-        effective_balance = beacon_state.validators[candidate_index].effective_balance
-        if effective_balance <= min_effective_balance:
-            continue
-        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
-            return candidate_index
-        i += 1
-```
-
 #### `get_shard_proposer_index`
 
 ```python
@@ -486,18 +512,8 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard
     """
     epoch = compute_epoch_at_slot(slot)
     seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_BLOB) + uint_to_bytes(slot) + uint_to_bytes(shard))
-
-    # Proposer must have sufficient balance to pay for worst case fee burn
-    EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = (
-        EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT
-        * HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT
-    )
-    min_effective_balance = (
-        beacon_state.shard_gasprice * MAX_SAMPLES_PER_BLOB // TARGET_SAMPLES_PER_BLOB
-        + EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION
-    )
     indices = get_active_validator_indices(state, epoch)
-    return compute_proposer_index(beacon_state, indices, seed, min_effective_balance)
+    return compute_proposer_index(beacon_state, indices, seed)
 ```
 
 #### `get_start_shard`
 
@@ -564,6 +580,12 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
     for_ops(body.shard_proposer_slashings, process_shard_proposer_slashing)
     # Limit is dynamic based on active shard count
     assert len(body.shard_headers) <= MAX_SHARD_HEADERS_PER_SHARD * get_active_shard_count(state, get_current_epoch(state))
+    # Included shard headers must be sorted by shard index, the base-fee is adjusted in sequence (capacity is staggered)
+    # Duplicates (same slot and shard) are allowed, although slashable, only the first affects capacity.
+    if len(body.shard_headers) > 0:
+        shard = 0
+        for i, header in body.shard_headers[1:]
+
     for_ops(body.shard_headers, process_shard_header)
 
     # New attestation processing
     for_ops(body.attestations, process_attestation)
@@ -576,22 +598,30 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
 
 ```python
 def process_attestation(state: BeaconState, attestation: Attestation) -> None:
     altair.process_attestation(state, attestation)
-    update_pending_shard_work(state, attestation)
+    process_attested_shard_work(state, attestation)
 ```
 
 ```python
-def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> None:
+def process_attested_shard_work(state: BeaconState, attestation: Attestation) -> None:
     attestation_shard = compute_shard_from_committee_index(
         state,
         attestation.data.slot,
         attestation.data.index,
     )
+    full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
+
     buffer_index = attestation.data.slot % SHARD_STATE_MEMORY_SLOTS
     committee_work = state.shard_buffer[buffer_index][attestation_shard]
 
     # Skip attestation vote accounting if the header is not pending
     if committee_work.status.selector != SHARD_WORK_PENDING:
-        # TODO In Altair: set participation bit flag, if attestation matches winning header.
+        # If the data was already confirmed, check if this matches, to apply the flag to the attesters.
+        if committee_work.status.selector == SHARD_WORK_CONFIRMED:
+            attested: AttestedDataCommitment = current_headers[header_index]
+            if attested.root == attestation.data.shard_blob_root:
+                batch_apply_participation_flag(state, attestation.aggregation_bits,
+                                               attestation.data.target.epoch,
+                                               full_committee, TIMELY_SHARD_FLAG_INDEX)
         return
 
     current_headers: Sequence[PendingShardHeader] = committee_work.status.value
 
     # Find the corresponding header, abort if it cannot be found
     header_index = len(current_headers)
     for i, header in enumerate(current_headers):
-        if attestation.data.shard_blob_root == header.root:
+        if attestation.data.shard_blob_root == header.attested.root:
             header_index = i
             break
+
     # Attestations for an unknown header do not count towards shard confirmations, but can otherwise be valid.
     if header_index == len(current_headers):
-        # TODO: Attestations may be re-included if headers are included late.
+        # Note: Attestations may be re-included if headers are included late.
         return
 
     pending_header: PendingShardHeader = current_headers[header_index]
-    full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
 
     # The weight may be outdated if it is not the initial weight, and from a previous epoch
     if pending_header.weight != 0 and compute_epoch_at_slot(pending_header.update_slot) < get_current_epoch(state):
@@ -629,8 +659,11 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N
 
     # Check if the PendingShardHeader is eligible for expedited confirmation, requiring 2/3 of balance attesting
     if pending_header.weight * 3 >= full_committee_balance * 2:
-        # TODO In Altair: set participation bit flag for voters of this early winning header
-        if pending_header.commitment == DataCommitment():
+        # participants of the winning header are remembered with participation flags
+        batch_apply_participation_flag(state, pending_header.votes, attestation.data.target.epoch,
+                                       full_committee, TIMELY_SHARD_FLAG_INDEX)
+
+        if pending_header.attested.commitment == DataCommitment():
             # The committee voted to not confirm anything
             state.shard_buffer[buffer_index][attestation_shard].status.change(
                 selector=SHARD_WORK_UNCONFIRMED,
@@ -639,26 +672,10 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N
         else:
             state.shard_buffer[buffer_index][attestation_shard].status.change(
                 selector=SHARD_WORK_CONFIRMED,
-                value=pending_header.commitment,
+                value=pending_header.attested,
             )
 ```
 
-
-#### `charge_builder`
-
-```python
-def charge_builder(state: BeaconState, index: BuilderIndex, fee: Gwei) -> None:
-    """
-    Decrease the builder balance at index ``index`` by ``fee``, with underflow check.
-    """
-    # TODO: apply stricter requirement to protect against fee-acceptance race conditions, e.g.:
-    #  - balance per shard (or builders per shard)
-    #  - balance / shard_count > fee
-    # TODO: also consider requirement to pay for base-fee of shard-data
-    assert state.blob_builder_balances[index] >= fee
-    state.blob_builder_balances[index] -= fee
-```
-
 ##### `process_shard_header`
 
 ```python
@@ -673,7 +690,16 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade
     # Verify that the header is within the processing time window
     assert header_epoch in [get_previous_epoch(state), get_current_epoch(state)]
     # Verify that the shard is active
-    assert shard < get_active_shard_count(state, header_epoch)
+    start_shard = get_start_shard(state, slot)
+    committees_per_slot = get_committee_count_per_slot(state, header_epoch)
+    end_shard = start_shard + committees_per_slot
+    shard_count = get_active_shard_count(state, header_epoch)
+    # Per slot, there may be max. shard_count committees.
+    # If there are shard_count * 2/3 per slot, then wrap around.
+    if end_shard >= shard_count:
+        assert not (end_shard - shard_count <= shard < start_shard)
+    else:
+        assert start_shard <= shard < end_shard
     # Verify that the block root matches,
     # to ensure the header will only be included in this specific Beacon Chain sub-tree.
     assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, slot - 1)
@@ -685,7 +711,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade
     # Check that this header is not yet in the pending list
     current_headers: List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] = committee_work.status.value
     header_root = hash_tree_root(header)
-    assert header_root not in [pending_header.root for pending_header in current_headers]
+    assert header_root not in [pending_header.attested.root for pending_header in current_headers]
 
     # Verify proposer matches
     assert header.proposer_index == get_shard_proposer_index(state, slot, shard)
@@ -705,20 +731,46 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade
         == bls.Pairing(body_summary.commitment.point, G2_SETUP[-body_summary.commitment.length])
     )
 
-    # Charge builder, with hard balance requirement
-    fee = Gwei(123)  # TODO EIP 1559 like fee? Burn some of it?
-    charge_builder(state, header.builder_index, fee)
-    # TODO: proposer is charged for confirmed headers (see charge_confirmed_shard_fees).
-    #  Need to align incentive, so proposer does not gain from including unconfirmed headers
-    increase_balance(state, header.proposer_index, fee)
+    # Charge EIP 1559 fee, builder pays for opportunity, and is responsible for later availability,
+    # or fail to publish at their own expense.
+    samples = blob_summary.commitment.length
+    # TODO: overflows, need bigger int type
+    max_fee = blob_summary.max_fee_per_sample * samples
+
+    # Builder must have sufficient balance, even if max_fee is not completely utilized
+    assert state.blob_builder_balances[header.builder_index] > max_fee
+
+    base_fee = state.shard_sample_price * samples
+    # Base fee must be paid
+    assert max_fee >= base_fee
+
+    # Remaining fee goes towards proposer for prioritizing, up to a maximum
+    max_priority_fee = blob_summary.max_priority_fee_per_sample * samples
+    priority_fee = min(max_fee - base_fee, max_priority_fee)
+
+    # Burn base fee, take priority fee
+    decrease_balance(state, header.builder_index, base_fee + priority_fee)
+    # Pay out priority fee
+    increase_balance(state, header.proposer_index, priority_fee)
+
+    # Track updated sample price
+    adjustment_quotient = (
+        get_active_shard_count(state, previous_epoch)
+        * SLOTS_PER_EPOCH * SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT
+    )
+    state.shard_sample_price = compute_updated_sample_price(
+        state.shard_sample_price, blob_summary.commitment.length, adjustment_quotient)
 
     # Initialize the pending header
     index = compute_committee_index_from_shard(state, slot, shard)
     committee_length = len(get_beacon_committee(state, slot, index))
     initial_votes = Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length)
     pending_header = PendingShardHeader(
-        commitment=blob_summary.commitment,
-        root=header_root,
+        attested=AttestedDataCommitment(
+            commitment=blob_summary.commitment,
+            root=header_root,
+            includer_index=get_beacon_proposer_index(state),
+        )
         votes=initial_votes,
         weight=0,
         update_slot=state.slot,
@@ -777,13 +829,12 @@ This epoch transition overrides the Merge epoch transition:
 def process_epoch(state: BeaconState) -> None:
     # Sharding pre-processing
     process_pending_shard_confirmations(state)
-    charge_confirmed_shard_fees(state)
     reset_pending_shard_work(state)
 
     # Base functionality
     process_justification_and_finalization(state)
     process_inactivity_updates(state)
-    process_rewards_and_penalties(state)
+    process_rewards_and_penalties(state)  # Note: modified, see new TIMELY_SHARD_FLAG_INDEX
     process_registry_updates(state)
     process_slashings(state)
     process_eth1_data_reset(state)
@@ -815,45 +866,10 @@ def process_pending_shard_confirmations(state: BeaconState) -> None:
             if committee_work.status.selector == SHARD_WORK_PENDING:
                 winning_header = max(committee_work.status.value, key=lambda header: header.weight)
                 # TODO In Altair: set participation bit flag of voters for winning header
-                if winning_header.commitment == DataCommitment():
+                if winning_header.attested.commitment == DataCommitment():
                     committee_work.status.change(selector=SHARD_WORK_UNCONFIRMED, value=None)
                 else:
-                    committee_work.status.change(selector=SHARD_WORK_CONFIRMED, value=winning_header.commitment)
-```
-
-#### `charge_confirmed_shard_fees`
-
-```python
-def charge_confirmed_shard_fees(state: BeaconState) -> None:
-    new_gasprice = state.shard_gasprice
-    previous_epoch = get_previous_epoch(state)
-    previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
-    adjustment_quotient = (
-        get_active_shard_count(state, previous_epoch)
-        * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT
-    )
-    # Iterate through confirmed shard-headers
-    for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
-        buffer_index = slot % SHARD_STATE_MEMORY_SLOTS
-        for shard_index in range(len(state.shard_buffer[buffer_index])):
-            committee_work = state.shard_buffer[buffer_index][shard_index]
-            if committee_work.status.selector == SHARD_WORK_CONFIRMED:
-                commitment: DataCommitment = committee_work.status.value
-                # Charge EIP 1559 fee
-                proposer = get_shard_proposer_index(state, slot, Shard(shard_index))
-                fee = (
-                    (state.shard_gasprice * commitment.length)
-                    // TARGET_SAMPLES_PER_BLOB
-                )
-                decrease_balance(state, proposer, fee)
-
-                # Track updated gas price
-                new_gasprice = compute_updated_gasprice(
-                    new_gasprice,
-                    commitment.length,
-                    adjustment_quotient,
-                )
-    state.shard_gasprice = new_gasprice
+                    committee_work.status.change(selector=SHARD_WORK_CONFIRMED, value=winning_header.attested)
```

 #### `reset_pending_shard_work`
 
@@ -881,8 +897,7 @@ def reset_pending_shard_work(state: BeaconState) -> None:
                 selector=SHARD_WORK_PENDING,
                 value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD](
                     PendingShardHeader(
-                        commitment=DataCommitment(),
-                        root=Root(),
+                        attested=AttestedDataCommitment()
                         votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length),
                         weight=0,
                         update_slot=slot,

From 9a1a30c3d469e1ab55234df28a8ee97b994d0180 Mon Sep 17 00:00:00 2001
From: Mikhail Kalinin
Date: Mon, 26 Jul 2021 19:30:50 +0600
Subject: [PATCH 43/75] Rebase Merge spec with London

---
 specs/merge/beacon-chain.md | 55 +++++++++++++++++++
 .../test/helpers/execution_payload.py | 5 +-
 .../pyspec/eth2spec/test/helpers/genesis.py | 1 +
 3 files changed, 60 insertions(+), 1 deletion(-)

diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md
index 01c54e017..1a9c933d6 100644
--- a/specs/merge/beacon-chain.md
+++ b/specs/merge/beacon-chain.md
@@ -59,6 +59,10 @@ This patch adds transaction execution to the beacon chain as part of the Merge f
 | `MAX_BYTES_PER_OPAQUE_TRANSACTION` | `uint64(2**20)` (= 1,048,576) |
 | `MAX_TRANSACTIONS_PER_PAYLOAD` | `uint64(2**14)` (= 16,384) |
 | `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) |
+| `GAS_LIMIT_DIVISOR` | `uint64(2**10)` (= 1,024) |
+| `MIN_GAS_LIMIT` | `uint64(5000)` (= 5,000) |
+| `BASE_FEE_MAX_CHANGE_DENOMINATOR` | `uint64(2**3)` (= 8) |
+| `ELASTICITY_MULTIPLIER` | `uint64(2**1)` (= 2) |
 
 ## Containers
 
@@ -141,6 +145,7 @@ class ExecutionPayload(Container):
     gas_limit: uint64
     gas_used: uint64
     timestamp: uint64
+    base_fee_per_gas: uint64  # base fee introduced in eip-1559
     # Extra payload fields
     block_hash: Hash32  # Hash of execution block
     transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
@@ -161,6 +166,7 @@ class ExecutionPayloadHeader(Container):
     gas_limit: uint64
     gas_used: uint64
     timestamp: uint64
+    base_fee_per_gas: uint64
     # Extra payload fields
     block_hash: Hash32  # Hash of execution block
     transactions_root: Root
@@ -239,6 +245,51 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
 
 ### Execution payload processing
 
+#### `is_valid_gas_limit`
+
+```python
+def is_valid_gas_limit(payload: ExecutionPayload, parent: ExecutionPayloadHeader) -> bool:
+    parent_gas_limit = parent.gas_limit
+
+    # check if the payload used too much gas
+    if payload.gas_used > payload.gas_limit:
+        return False
+
+    # check if the payload changed the gas limit too much
+    if payload.gas_limit >= parent_gas_limit + parent_gas_limit // GAS_LIMIT_DIVISOR:
+        return False
+    if payload.gas_limit <= parent_gas_limit - parent_gas_limit // GAS_LIMIT_DIVISOR:
+        return False
+
+    # check if the gas limit is at least the minimum gas limit
+    if payload.gas_limit < MIN_GAS_LIMIT:
+        return False
+
+    return True
+```
+
+#### `compute_base_fee_per_gas`
+
+```python
+def compute_base_fee_per_gas(payload: ExecutionPayload, parent: ExecutionPayloadHeader) -> uint64:
+    parent_gas_target = parent.gas_limit // ELASTICITY_MULTIPLIER
+    parent_base_fee_per_gas = parent.base_fee_per_gas
+    parent_gas_used = payload.gas_used
+
+    if parent_gas_used == parent_gas_target:
+        return parent_base_fee_per_gas
+    elif parent_gas_used > parent_gas_target:
+        gas_used_delta = parent_gas_used - parent_gas_target
+        base_fee_per_gas_delta = \
+            max(parent_base_fee_per_gas * gas_used_delta // parent_gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR, 1)
+        return parent_base_fee_per_gas + base_fee_per_gas_delta
+    else:
+        gas_used_delta = parent_gas_target - parent_gas_used
+        base_fee_per_gas_delta = \
+            parent_base_fee_per_gas * gas_used_delta // parent_gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR
+        return max(parent_base_fee_per_gas - base_fee_per_gas_delta, 0)
+```
+
 #### `process_execution_payload`
 
 *Note:* This function depends on `process_randao` function call as it retrieves the most recent randao mix from the `state`. Implementations that are considering parallel processing of execution payload with respect to beacon chain state transition function should work around this dependency.
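For intuition on the two helpers above, here is a self-contained recomputation of the EIP-1559 base-fee rule with plain integers; the denominator and elasticity constants are copied from the table in this patch, while the gas-limit and fee values are illustrative only:

```python
# Illustrative sketch of the London base-fee update ported in this patch,
# using plain ints instead of the spec's SSZ uint64 types.
BASE_FEE_MAX_CHANGE_DENOMINATOR = 8
ELASTICITY_MULTIPLIER = 2

def next_base_fee(parent_gas_limit: int, parent_gas_used: int, parent_base_fee: int) -> int:
    gas_target = parent_gas_limit // ELASTICITY_MULTIPLIER
    if parent_gas_used == gas_target:
        return parent_base_fee
    if parent_gas_used > gas_target:
        # Fee rises by at most 1/8 per block, and by at least 1 wei.
        delta = max(parent_base_fee * (parent_gas_used - gas_target)
                    // gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR, 1)
        return parent_base_fee + delta
    # Fee falls by at most 1/8 per block.
    delta = (parent_base_fee * (gas_target - parent_gas_used)
             // gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR)
    return max(parent_base_fee - delta, 0)

# A completely full parent block (gas_used == limit == 2 * target) raises the fee by 1/8:
assert next_base_fee(30_000_000, 30_000_000, 1_000_000_000) == 1_125_000_000
# An empty parent block lowers it by 1/8:
assert next_base_fee(30_000_000, 0, 1_000_000_000) == 875_000_000
# A parent exactly on target leaves it unchanged:
assert next_base_fee(30_000_000, 15_000_000, 1_000_000_000) == 1_000_000_000
```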
@@ -250,6 +301,8 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
     assert payload.parent_hash == state.latest_execution_payload_header.block_hash
     assert payload.block_number == state.latest_execution_payload_header.block_number + uint64(1)
     assert payload.random == get_randao_mix(state, get_current_epoch(state))
+    assert payload.base_fee_per_gas == compute_base_fee_per_gas(payload, state.latest_execution_payload_header)
+    assert is_valid_gas_limit(payload, state.latest_execution_payload_header)
     # Verify timestamp
     assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
     # Verify the execution payload is valid
@@ -266,6 +319,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
         gas_limit=payload.gas_limit,
         gas_used=payload.gas_used,
         timestamp=payload.timestamp,
+        base_fee_per_gas=payload.base_fee_per_gas,
         block_hash=payload.block_hash,
         transactions_root=hash_tree_root(payload.transactions),
     )
@@ -321,6 +375,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
     state.latest_execution_payload_header.block_hash = eth1_block_hash
     state.latest_execution_payload_header.timestamp = eth1_timestamp
     state.latest_execution_payload_header.random = eth1_block_hash
+    state.latest_execution_payload_header.gas_limit = MIN_GAS_LIMIT
 
     return state
 ```

diff --git a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
index c41a05079..5c7eb6de0 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
@@ -17,12 +17,14 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
         logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](),  # TODO: zeroed logs bloom for empty logs ok?
         block_number=latest.block_number + 1,
         random=randao_mix,
-        gas_limit=latest.gas_limit,  # retain same limit
+        gas_limit=max(latest.gas_limit, spec.MIN_GAS_LIMIT),
         gas_used=0,  # empty block, 0 gas
         timestamp=timestamp,
+        base_fee_per_gas=spec.uint64(0),
         block_hash=spec.Hash32(),
         transactions=empty_txs,
     )
+    payload.base_fee_per_gas = spec.compute_base_fee_per_gas(latest, payload)
     # TODO: real RLP + block hash logic would be nice, requires RLP and keccak256 dependency however.
     payload.block_hash = spec.Hash32(spec.hash(payload.hash_tree_root() + b"FAKE RLP HASH"))
@@ -41,6 +43,7 @@ def get_execution_payload_header(spec, execution_payload):
         gas_limit=execution_payload.gas_limit,
         gas_used=execution_payload.gas_used,
         timestamp=execution_payload.timestamp,
+        base_fee_per_gas=execution_payload.base_fee_per_gas,
         block_hash=execution_payload.block_hash,
         transactions_root=spec.hash_tree_root(execution_payload.transactions)
     )

diff --git a/tests/core/pyspec/eth2spec/test/helpers/genesis.py b/tests/core/pyspec/eth2spec/test/helpers/genesis.py
index a9eb59f67..fc14c0aef 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/genesis.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/genesis.py
@@ -78,5 +78,6 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
     # Initialize the execution payload header (with block number and genesis time set to 0)
     state.latest_execution_payload_header.block_hash = eth1_block_hash
     state.latest_execution_payload_header.random = eth1_block_hash
+    state.latest_execution_payload_header.gas_limit = spec.MIN_GAS_LIMIT
 
     return state

From 789e10ea7c890a909aeaaf7028a49aee9d795ff1 Mon Sep 17 00:00:00 2001
From: Mikhail Kalinin
Date: Mon, 26 Jul 2021 20:09:57 +0600
Subject: [PATCH 44/75] Update toc

---
 specs/merge/beacon-chain.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md
index 1a9c933d6..1a99a1227 100644
--- a/specs/merge/beacon-chain.md
+++ b/specs/merge/beacon-chain.md
@@ -31,6 +31,8 @@
     - [`on_payload`](#on_payload)
   - [Block processing](#block-processing)
     - [Execution payload processing](#execution-payload-processing)
+      - [`is_valid_gas_limit`](#is_valid_gas_limit)
+      - [`compute_base_fee_per_gas`](#compute_base_fee_per_gas)
       - [`process_execution_payload`](#process_execution_payload)
 - [Testing](#testing)

From 756eb90bfed131f7c63b0416f3b8b14efabbd6da Mon Sep 17 00:00:00 2001
From: protolambda
Date: Mon, 26 Jul 2021 16:27:19 +0200
Subject: [PATCH 45/75] consider per-slot sample target adjustment, to avoid
 racing and ordering problems

---
 specs/sharding/beacon-chain.md | 43 ++++++++++++++--------------------
 1 file changed, 17 insertions(+), 26 deletions(-)

diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index 40eabe548..80e2ec85c 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -441,7 +441,8 @@ def compute_previous_slot(slot: Slot) -> Slot:
 #### `compute_updated_sample_price`
 
 ```python
-def compute_updated_sample_price(prev_price: Gwei, samples: uint64, adjustment_quotient: uint64) -> Gwei:
+def compute_updated_sample_price(prev_price: Gwei, samples: uint64, active_shards: uint64) -> Gwei:
+    adjustment_quotient = active_shards * SLOTS_PER_EPOCH * SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT
     if samples > TARGET_SAMPLES_PER_BLOB:
         delta = max(1, prev_price * (samples - TARGET_SAMPLES_PER_BLOB) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
         return min(prev_price + delta, MAX_SAMPLE_PRICE)
     else:
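For intuition on the updated helper, which now derives the adjustment quotient from the active shard count: a self-contained sketch of its behavior with plain integers. The constants are taken from the preset tables in this series (64 initial shards, coefficient 8, target 1024 samples) plus the phase0 mainnet `SLOTS_PER_EPOCH = 32`; the starting price is illustrative:

```python
# Illustrative sketch of the per-shard sample-price updater (plain ints).
MIN_SAMPLE_PRICE = 8
MAX_SAMPLE_PRICE = 2**33
TARGET_SAMPLES_PER_BLOB = 2**10
SLOTS_PER_EPOCH = 32
SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT = 8

def updated_sample_price(prev_price: int, samples: int, active_shards: int) -> int:
    # Spreading the quotient over shards * slots * coefficient keeps the
    # per-epoch price movement bounded by roughly exp(+/- 1/coefficient).
    adjustment_quotient = active_shards * SLOTS_PER_EPOCH * SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT
    if samples > TARGET_SAMPLES_PER_BLOB:
        delta = max(1, prev_price * (samples - TARGET_SAMPLES_PER_BLOB)
                    // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
        return min(prev_price + delta, MAX_SAMPLE_PRICE)
    delta = max(1, prev_price * (TARGET_SAMPLES_PER_BLOB - samples)
                // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
    return max(prev_price, MIN_SAMPLE_PRICE + delta) - delta

price = 1_000_000
quotient = 64 * 32 * 8
# A maximally full blob (2x target) nudges the price up by ~1/quotient:
assert updated_sample_price(price, 2 * TARGET_SAMPLES_PER_BLOB, 64) == price + price // quotient
# An empty blob nudges it down by the same fraction:
assert updated_sample_price(price, 0, 64) == price - price // quotient
# The price never drops below MIN_SAMPLE_PRICE:
assert updated_sample_price(MIN_SAMPLE_PRICE, 0, 64) == MIN_SAMPLE_PRICE
```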
@@ -578,19 +579,19 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
     for_ops(body.attester_slashings, process_attester_slashing)
     # New shard proposer slashing processing
     for_ops(body.shard_proposer_slashings, process_shard_proposer_slashing)
-    # Limit is dynamic based on active shard count
+
+    # Limit is dynamic: based on active shard count
     assert len(body.shard_headers) <= MAX_SHARD_HEADERS_PER_SHARD * get_active_shard_count(state, get_current_epoch(state))
-    # Included shard headers must be sorted by shard index, the base-fee is adjusted in sequence (capacity is staggered)
-    # Duplicates (same slot and shard) are allowed, although slashable, only the first affects capacity.
-    if len(body.shard_headers) > 0:
-        shard = 0
-        for i, header in body.shard_headers[1:]
-
     for_ops(body.shard_headers, process_shard_header)
+
     # New attestation processing
     for_ops(body.attestations, process_attestation)
 
     for_ops(body.deposits, process_deposit)
     for_ops(body.voluntary_exits, process_voluntary_exit)
+
+    # TODO: to avoid parallel shards racing, and avoid inclusion-order problems,
+    # update the fee price per slot, instead of per header.
+    # state.shard_sample_price = compute_updated_sample_price(state.shard_sample_price, ?, shard_count)
 ```
 
 ##### Extended Attestation processing
@@ -689,17 +690,15 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade
     header_epoch = compute_epoch_at_slot(slot)
     # Verify that the header is within the processing time window
     assert header_epoch in [get_previous_epoch(state), get_current_epoch(state)]
-    # Verify that the shard is active
-    start_shard = get_start_shard(state, slot)
-    committees_per_slot = get_committee_count_per_slot(state, header_epoch)
-    end_shard = start_shard + committees_per_slot
+    # Verify that the shard is valid
     shard_count = get_active_shard_count(state, header_epoch)
-    # Per slot, there may be max. shard_count committees.
-    # If there are shard_count * 2/3 per slot, then wrap around.
-    if end_shard >= shard_count:
-        assert not (end_shard - shard_count <= shard < start_shard)
-    else:
-        assert start_shard <= shard < end_shard
+    assert shard < shard_count
+    # Verify that a committee is able to attest this (slot, shard)
+    start_shard = get_start_shard(state, slot)
+    committee_index = (shard_count + shard - start_shard) % shard_count
+    committees_per_slot = get_committee_count_per_slot(state, header_epoch)
+    assert committee_index <= committees_per_slot
+
     # Verify that the block root matches,
     # to ensure the header will only be included in this specific Beacon Chain sub-tree.
     assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, slot - 1)

From d47d2f92cc8fe630ba1f182634a0186bf76924d4 Mon Sep 17 00:00:00 2001
From: protolambda
Date: Tue, 27 Jul 2021 14:48:21 +0200
Subject: [PATCH 46/75] shard fees: implement review suggestions from
 @nashatyrev

---
 specs/sharding/beacon-chain.md | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index 80e2ec85c..93ac2278a 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -412,7 +412,7 @@ class ShardWork(Container):
     # Upon confirmation the data is reduced to just the commitment.
     status: Union[  # See Shard Work Status enum
         None,  # SHARD_WORK_UNCONFIRMED
-        ConfirmedDataCommitment,  # SHARD_WORK_CONFIRMED
+        AttestedDataCommitment,  # SHARD_WORK_CONFIRMED
         List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD]  # SHARD_WORK_PENDING
     ]
 ```
@@ -737,7 +737,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade
     max_fee = blob_summary.max_fee_per_sample * samples
 
     # Builder must have sufficient balance, even if max_fee is not completely utilized
-    assert state.blob_builder_balances[header.builder_index] > max_fee
+    assert state.blob_builder_balances[header.builder_index] >= max_fee
 
     base_fee = state.shard_sample_price * samples
     # Base fee must be paid
@@ -748,7 +748,8 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade
     priority_fee = min(max_fee - base_fee, max_priority_fee)
 
     # Burn base fee, take priority fee
-    decrease_balance(state, header.builder_index, base_fee + priority_fee)
+    # priority_fee <= max_fee - base_fee, thus priority_fee + base_fee <= max_fee, thus sufficient balance.
+    state.blob_builder_balances[header.builder_index] -= base_fee + priority_fee
     # Pay out priority fee
     increase_balance(state, header.proposer_index, priority_fee)

From 0daaafbc98e32925120fb4a22665286548e70988 Mon Sep 17 00:00:00 2001
From: protolambda
Date: Wed, 28 Jul 2021 22:00:23 +0200
Subject: [PATCH 47/75] fix union value retrieval, thanks @terencechain

---
 specs/sharding/beacon-chain.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index 93ac2278a..25c3423b7 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -618,7 +618,7 @@ def process_attested_shard_work(state: BeaconState, attestation: Attestation) ->
     if committee_work.status.selector != SHARD_WORK_PENDING:
         # If the data was already confirmed, check if this matches, to apply the flag to the attesters.
         if committee_work.status.selector == SHARD_WORK_CONFIRMED:
-            attested: AttestedDataCommitment = current_headers[header_index]
+            attested: AttestedDataCommitment = committee_work.status.value
             if attested.root == attestation.data.shard_blob_root:
                 batch_apply_participation_flag(state, attestation.aggregation_bits,
                                                attestation.data.target.epoch,

From c311712bcad2c795b71a34e8a81d47cb1b38f3ae Mon Sep 17 00:00:00 2001
From: Mikhail Kalinin
Date: Fri, 30 Jul 2021 12:36:02 +0600
Subject: [PATCH 48/75] Apply suggestions from code review

Co-authored-by: Hsiao-Wei Wang
---
 specs/merge/beacon-chain.md | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md
index 1a99a1227..08d357c3c 100644
--- a/specs/merge/beacon-chain.md
+++ b/specs/merge/beacon-chain.md
@@ -147,7 +147,7 @@ class ExecutionPayload(Container):
     gas_limit: uint64
     gas_used: uint64
     timestamp: uint64
-    base_fee_per_gas: uint64  # base fee introduced in eip-1559
+    base_fee_per_gas: uint64  # base fee introduced in EIP-1559
     # Extra payload fields
     block_hash: Hash32  # Hash of execution block
     transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
@@ -253,17 +253,17 @@ def is_valid_gas_limit(payload: ExecutionPayload, parent: ExecutionPayloadHeader
     parent_gas_limit = parent.gas_limit
 
-    # check if the payload used too much gas
+    # Check if the payload used too much gas
     if payload.gas_used > payload.gas_limit:
         return False
 
-    # check if the payload changed the gas limit too much
+    # Check if the payload changed the gas limit too much
     if payload.gas_limit >= parent_gas_limit + parent_gas_limit // GAS_LIMIT_DIVISOR:
         return False
     if payload.gas_limit <= parent_gas_limit - parent_gas_limit // GAS_LIMIT_DIVISOR:
         return False
 
-    # check if the gas limit is at least the minimum gas limit
+    # Check if the gas limit is at least the minimum gas limit
     if payload.gas_limit < MIN_GAS_LIMIT:
         return False
 
     return True
 ```
@@ -282,13 +282,16 @@ def compute_base_fee_per_gas(payload: ExecutionPayload, parent: ExecutionPayload
         return parent_base_fee_per_gas
     elif parent_gas_used > parent_gas_target:
         gas_used_delta = parent_gas_used - parent_gas_target
-        base_fee_per_gas_delta = \
-            max(parent_base_fee_per_gas * gas_used_delta // parent_gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR, 1)
+        base_fee_per_gas_delta = max(
+            parent_base_fee_per_gas * gas_used_delta // parent_gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR,
+            1,
+        )
         return parent_base_fee_per_gas + base_fee_per_gas_delta
     else:
         gas_used_delta = parent_gas_target - parent_gas_used
-        base_fee_per_gas_delta = \
+        base_fee_per_gas_delta = (
             parent_base_fee_per_gas * gas_used_delta // parent_gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR
+        )
         return max(parent_base_fee_per_gas - base_fee_per_gas_delta, 0)
 ```

From 5d5a9e392b0925460d2f189920745bddf84edd02 Mon Sep 17 00:00:00 2001
From: Mikhail Kalinin
Date: Fri, 30 Jul 2021 12:43:54 +0600
Subject: [PATCH 49/75] Rename GAS_LIMIT_DIVISOR to GAS_LIMIT_DENOMINATOR

---
 specs/merge/beacon-chain.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md
index 08d357c3c..03784ffa8 100644
--- a/specs/merge/beacon-chain.md
+++ b/specs/merge/beacon-chain.md
@@ -61,7 +61,7 @@ This patch adds transaction execution to the beacon chain as part of the Merge f
 | `MAX_BYTES_PER_OPAQUE_TRANSACTION` | `uint64(2**20)` (= 1,048,576) |
 | `MAX_TRANSACTIONS_PER_PAYLOAD` | `uint64(2**14)` (= 16,384) |
 | `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) |
-| `GAS_LIMIT_DIVISOR` | `uint64(2**10)` (= 1,024) |
+| `GAS_LIMIT_DENOMINATOR` | `uint64(2**10)` (= 1,024) |
 | `MIN_GAS_LIMIT` | `uint64(5000)` (= 5,000) |
 | `BASE_FEE_MAX_CHANGE_DENOMINATOR` | `uint64(2**3)` (= 8) |
 | `ELASTICITY_MULTIPLIER` | `uint64(2**1)` (= 2) |
@@ -258,9 +258,9 @@ def is_valid_gas_limit(payload: ExecutionPayload, parent: ExecutionPayloadHeader
         return False
 
     # Check if the payload changed the gas limit too much
-    if payload.gas_limit >= parent_gas_limit + parent_gas_limit // GAS_LIMIT_DIVISOR:
+    if payload.gas_limit >= parent_gas_limit + parent_gas_limit // GAS_LIMIT_DENOMINATOR:
         return False
-    if payload.gas_limit <= parent_gas_limit - parent_gas_limit // GAS_LIMIT_DIVISOR:
+    if payload.gas_limit <= parent_gas_limit - parent_gas_limit // GAS_LIMIT_DENOMINATOR:
         return False
 
     # Check if the gas limit is at least the minimum gas limit

From d58ffc7dfc69a11305444f8db23d0352920a3316 Mon Sep 17 00:00:00 2001
From: Mikhail Kalinin
Date: Fri, 30 Jul 2021 16:10:33 +0600
Subject: [PATCH 50/75] Add genesis settings section

---
 specs/merge/beacon-chain.md | 16 +++++++++++++++-
 .../eth2spec/test/helpers/execution_payload.py | 4 ++--
 .../core/pyspec/eth2spec/test/helpers/genesis.py | 3 ++-
 3 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md
index 03784ffa8..1341cc9cb 100644
--- a/specs/merge/beacon-chain.md
+++ b/specs/merge/beacon-chain.md
@@ -12,6 +12,8 @@
 - [Custom types](#custom-types)
 - [Constants](#constants)
   - [Execution](#execution)
+- [Configuration](#configuration)
+  - [Genesis settings](#genesis-settings)
 - [Containers](#containers)
   - [Extended containers](#extended-containers)
     - [`BeaconBlockBody`](#beaconblockbody)
@@ -66,6 +68,17 @@ This patch adds transaction execution to the beacon chain as part of the Merge f
 | `BASE_FEE_MAX_CHANGE_DENOMINATOR` | `uint64(2**3)` (= 8) |
 | `ELASTICITY_MULTIPLIER` | `uint64(2**1)` (= 2) |
 
+## Configuration
+
+### Genesis settings
+
+*Note*: These configuration settings do not apply to the mainnet and are utilized only by pure Merge testing.
+
+| Name | Value |
+| - | - |
+| `GENESIS_GAS_LIMIT` | `uint64(30000000)` (= 30,000,000) |
+| `GENESIS_BASE_FEE_PER_GAS` | `uint64(1000000000)` (= 1,000,000,000) |
+
 ## Containers
 
 ### Extended containers
@@ -380,7 +393,8 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
     state.latest_execution_payload_header.block_hash = eth1_block_hash
     state.latest_execution_payload_header.timestamp = eth1_timestamp
     state.latest_execution_payload_header.random = eth1_block_hash
-    state.latest_execution_payload_header.gas_limit = MIN_GAS_LIMIT
+    state.latest_execution_payload_header.gas_limit = GENESIS_GAS_LIMIT
+    state.latest_execution_payload_header.base_fee_per_gas = GENESIS_BASE_FEE_PER_GAS
 
     return state
 ```

diff --git a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
index 5c7eb6de0..ce653a986 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
@@ -17,14 +17,14 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
         logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](),  # TODO: zeroed logs bloom for empty logs ok?
         block_number=latest.block_number + 1,
         random=randao_mix,
-        gas_limit=max(latest.gas_limit, spec.MIN_GAS_LIMIT),
+        gas_limit=latest.gas_limit,  # retain same limit
         gas_used=0,  # empty block, 0 gas
         timestamp=timestamp,
         base_fee_per_gas=spec.uint64(0),
         block_hash=spec.Hash32(),
         transactions=empty_txs,
     )
-    payload.base_fee_per_gas = spec.compute_base_fee_per_gas(latest, payload)
+    payload.base_fee_per_gas = spec.compute_base_fee_per_gas(payload, latest)
     # TODO: real RLP + block hash logic would be nice, requires RLP and keccak256 dependency however.
     payload.block_hash = spec.Hash32(spec.hash(payload.hash_tree_root() + b"FAKE RLP HASH"))

diff --git a/tests/core/pyspec/eth2spec/test/helpers/genesis.py b/tests/core/pyspec/eth2spec/test/helpers/genesis.py
index fc14c0aef..0e9af4cff 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/genesis.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/genesis.py
@@ -78,6 +78,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
     # Initialize the execution payload header (with block number and genesis time set to 0)
     state.latest_execution_payload_header.block_hash = eth1_block_hash
     state.latest_execution_payload_header.random = eth1_block_hash
-    state.latest_execution_payload_header.gas_limit = spec.MIN_GAS_LIMIT
+    state.latest_execution_payload_header.gas_limit = spec.GENESIS_GAS_LIMIT
+    state.latest_execution_payload_header.base_fee_per_gas = spec.GENESIS_BASE_FEE_PER_GAS
 
     return state

From f1982d4fc3ff2f6320c7f47fcd96c302fc91e95a Mon Sep 17 00:00:00 2001
From: Mikhail Kalinin
Date: Fri, 30 Jul 2021 16:18:05 +0600
Subject: [PATCH 51/75] Replace underflow check with respective comment

---
 specs/merge/beacon-chain.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md
index 1341cc9cb..590e2bc4f 100644
--- a/specs/merge/beacon-chain.md
+++ b/specs/merge/beacon-chain.md
@@ -305,7 +305,7 @@ def compute_base_fee_per_gas(payload: ExecutionPayload, parent: ExecutionPayload
         base_fee_per_gas_delta = (
             parent_base_fee_per_gas * gas_used_delta // parent_gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR
         )
-        return max(parent_base_fee_per_gas - base_fee_per_gas_delta, 0)
+        return parent_base_fee_per_gas - base_fee_per_gas_delta  # This subtraction can't underflow

From ab78339350f9e2a66e5aa2dfe98a028393fb474b Mon Sep 17 00:00:00 2001
From: protolambda
Date: Fri, 30 Jul 2021 21:54:55 +0200
Subject: [PATCH 52/75] fix variable name of summary field

---
 specs/sharding/beacon-chain.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index 25c3423b7..13a58e088 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -732,9 +732,9 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade
     # Charge EIP 1559 fee, builder pays for opportunity, and is responsible for later availability,
     # or fail to publish at their own expense.
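     # Note: `samples` below is the blob's commitment length; all fee terms scale linearly with it.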
-    samples = blob_summary.commitment.length
+    samples = body_summary.commitment.length
     # TODO: overflows, need bigger int type
-    max_fee = blob_summary.max_fee_per_sample * samples
+    max_fee = body_summary.max_fee_per_sample * samples
 
     # Builder must have sufficient balance, even if max_fee is not completely utilized
     assert state.blob_builder_balances[header.builder_index] >= max_fee
@@ -744,7 +744,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade
     base_fee = state.shard_sample_price * samples
     # Base fee must be paid
     assert max_fee >= base_fee
 
     # Remaining fee goes towards proposer for prioritizing, up to a maximum
-    max_priority_fee = blob_summary.max_priority_fee_per_sample * samples
+    max_priority_fee = body_summary.max_priority_fee_per_sample * samples
     priority_fee = min(max_fee - base_fee, max_priority_fee)
 
     # Burn base fee, take priority fee
@@ -759,7 +759,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade
     initial_votes = Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length)
     pending_header = PendingShardHeader(
         attested=AttestedDataCommitment(
-            commitment=blob_summary.commitment,
+            commitment=body_summary.commitment,
             root=header_root,
             includer_index=get_beacon_proposer_index(state),
         )

From add5810d71ade5a80c65363336f1c1a43a0b4a2f Mon Sep 17 00:00:00 2001
From: protolambda
Date: Fri, 30 Jul 2021 22:06:04 +0200
Subject: [PATCH 53/75] remove unused pending attestation fields

---
 specs/sharding/beacon-chain.md | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index 13a58e088..94495ef46 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -216,10 +216,6 @@ class BeaconState(merge.BeaconState):
 
 ```python
 class BeaconState(merge.BeaconState):
-    # [Updated fields] (Warning: this changes with Altair, Sharding will rebase to use participation-flags)
-    previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
-    current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
-    # [New fields]
     # Blob builder registry.
    blob_builders: List[Builder, BLOB_BUILDER_REGISTRY_LIMIT]
    blob_builder_balances: List[Gwei, BLOB_BUILDER_REGISTRY_LIMIT]

From 2d17c8c3c442c5a3c8d4b1e47e1c335f7fcacd4f Mon Sep 17 00:00:00 2001
From: protolambda
Date: Fri, 30 Jul 2021 22:22:43 +0200
Subject: [PATCH 54/75] move back INITIAL_ACTIVE_SHARDS to preset, avoid
 changing mainnet config

---
 specs/sharding/beacon-chain.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index 94495ef46..e81e7e764 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -151,6 +151,7 @@ TODO: `WEIGHT_DENOMINATOR` needs to be adjusted, but this breaks a lot of Altair
 | Name | Value | Notes |
 | - | - | - |
 | `MAX_SHARDS` | `uint64(2**10)` (= 1,024) | Theoretical max shard count (used to determine data structure sizes) |
+| `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count |
 | `SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Sample price may decrease/increase by at most exp(1 / this value) *per epoch* |
 | `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block |
 | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | |
@@ -181,9 +182,8 @@

 ## Configuration

-| Name | Value | Notes |
-| - | - | - |
-| `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count |
+Note: some preset variables may become run-time configurable for testnets, but default to a preset while the spec is unstable.
+E.g. `INITIAL_ACTIVE_SHARDS`, `MAX_SAMPLES_PER_BLOB` and `TARGET_SAMPLES_PER_BLOB`.

 ## Updated containers

From 91968de423a8614e1fe20fac7b1b563c4123f7ca Mon Sep 17 00:00:00 2001
From: protolambda
Date: Fri, 30 Jul 2021 22:31:26 +0200
Subject: [PATCH 55/75] update sharding presets

---
 presets/mainnet/sharding.yaml | 20 +++++++++++---------
 presets/minimal/sharding.yaml | 17 ++++++++++-------
 2 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/presets/mainnet/sharding.yaml b/presets/mainnet/sharding.yaml
index 2b78855fc..120a716c9 100644
--- a/presets/mainnet/sharding.yaml
+++ b/presets/mainnet/sharding.yaml
@@ -1,22 +1,24 @@
 # Mainnet preset - Sharding

-# Beacon-chain
-# ---------------------------------------------------------------
 # Misc
+# ---------------------------------------------------------------
 # 2**10 (= 1,024)
 MAX_SHARDS: 1024
 # 2**6 (= 64)
 INITIAL_ACTIVE_SHARDS: 64
 # 2**3 (= 8)
-GASPRICE_ADJUSTMENT_COEFFICIENT: 8
+SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT: 8
 # 2**4 (= 16)
 MAX_SHARD_PROPOSER_SLASHINGS: 16
-
-# Shard block configs
-# ---------------------------------------------------------------
+#
 MAX_SHARD_HEADERS_PER_SHARD: 4
 # 2**8 (= 256)
 SHARD_STATE_MEMORY_SLOTS: 256
+# 2**40 (= 1,099,511,627,776)
+BLOB_BUILDER_REGISTRY_LIMIT: 1099511627776
+
+# Shard blob samples
+# ---------------------------------------------------------------
 # 2**11 (= 2,048)
 MAX_SAMPLES_PER_BLOCK: 2048
 # 2**10 (= 1,024)
 TARGET_SAMPLES_PER_BLOCK: 1024

 # Gwei values
 # ---------------------------------------------------------------
 # 2**33 (= 8,589,934,592) Gwei
-MAX_GASPRICE: 8589934592
+MAX_SAMPLE_PRICE: 8589934592
 # 2**3 (= 8) Gwei
-MIN_GASPRICE: 8
+MIN_SAMPLE_PRICE: 8
diff --git a/presets/minimal/sharding.yaml b/presets/minimal/sharding.yaml
index 10f79c96e..6b8d223b4 100644
--- a/presets/minimal/sharding.yaml
+++ b/presets/minimal/sharding.yaml
@@ -1,6 +1,6 @@
 # Minimal preset - Sharding

-# Beacon-chain
+# Misc
 # ---------------------------------------------------------------
 # Misc
 # [customized] reduced for testing
 MAX_SHARDS: 8
 # [customized] reduced for testing
 INITIAL_ACTIVE_SHARDS: 2
 # 2**3 (= 8)
-GASPRICE_ADJUSTMENT_COEFFICIENT: 8
+SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT: 8
 # [customized] reduced for testing
 MAX_SHARD_PROPOSER_SLASHINGS: 4
-
-# Shard block configs
-# ---------------------------------------------------------------
+#
 MAX_SHARD_HEADERS_PER_SHARD: 4
 # 2**8 (= 256)
 SHARD_STATE_MEMORY_SLOTS: 256
+# 2**40 (= 1,099,511,627,776)
+BLOB_BUILDER_REGISTRY_LIMIT: 1099511627776
+
+# Shard blob samples
+# ---------------------------------------------------------------
 # 2**11 (= 2,048)
 MAX_SAMPLES_PER_BLOCK: 2048
 # 2**10 (= 1,024)
 TARGET_SAMPLES_PER_BLOCK: 1024

 # Gwei values
 # ---------------------------------------------------------------
 # 2**33 (= 8,589,934,592) Gwei
-MAX_GASPRICE: 8589934592
+MAX_SAMPLE_PRICE: 8589934592
 # 2**3 (= 8) Gwei
-MIN_GASPRICE: 8
+MIN_SAMPLE_PRICE: 8

From 322f072703636b7b70a459fe6af80d1fd42a20c1 Mon Sep 17 00:00:00 2001
From: protolambda
Date: Sat, 31 Jul 2021 13:22:26 +0200
Subject: [PATCH 56/75] sharding: remove outdated comment, timely shard
 attesters are marked in attestation-processing, no need for epoch processing
 additions

---
 specs/sharding/beacon-chain.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md
index e81e7e764..f5b212c6e 100644
--- a/specs/sharding/beacon-chain.md
+++ b/specs/sharding/beacon-chain.md
@@ -853,7 +853,6 @@ def process_pending_shard_confirmations(state: BeaconState) -> None:
             committee_work = state.shard_buffer[buffer_index][shard_index]
             if committee_work.status.selector == SHARD_WORK_PENDING:
                 winning_header = max(committee_work.status.value, key=lambda header: header.weight)
-                # TODO In Altair: set participation bit flag of voters for winning header
                 if winning_header.attested.commitment == DataCommitment():
                     committee_work.status.change(selector=SHARD_WORK_UNCONFIRMED, value=None)
                 else:

From b262854bb1ee5850fb51b2682eb858a766194ecc Mon Sep 17 00:00:00 2001
From: Hsiao-Wei Wang
Date: Tue, 3 Aug 2021 21:46:11 +0800
Subject: [PATCH 57/75] Rename the `eth2_*` functions to `eth_`

---
 setup.py                     |  6 +++---
 specs/altair/beacon-chain.md |  4 ++--
 specs/altair/bls.md          | 12 ++++++------
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/setup.py b/setup.py
index 4c6969a26..ccc3afbe4 100644
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@ def floorlog2(x: int) -> uint64:


 OPTIMIZED_BLS_AGGREGATE_PUBKEYS = '''
-def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
+def eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
     return bls.AggregatePKs(pubkeys)
 '''

@@ -480,8 +480,8 @@ def get_generalized_index(ssz_class: Any, *path: Sequence[PyUnion[int, SSZVariab

     @classmethod
     def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:
-        if "eth2_aggregate_pubkeys" in functions:
-            functions["eth2_aggregate_pubkeys"] = OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip()
+        if "eth_aggregate_pubkeys" in functions:
+            functions["eth_aggregate_pubkeys"] = OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip()

         return super().implement_optimizations(functions)
 #
diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md
index a8d7fd8ef..7c4d20fe5 100644
--- a/specs/altair/beacon-chain.md
+++ b/specs/altair/beacon-chain.md
@@ -287,7 +287,7 @@ def get_next_sync_committee(state:
BeaconState) -> SyncCommittee: """ indices = get_next_sync_committee_indices(state) pubkeys = [state.validators[index].pubkey for index in indices] - aggregate_pubkey = eth2_aggregate_pubkeys(pubkeys) + aggregate_pubkey = eth_aggregate_pubkeys(pubkeys) return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey) ``` @@ -544,7 +544,7 @@ def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> previous_slot = max(state.slot, Slot(1)) - Slot(1) domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot)) signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain) - assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature) + assert eth_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature) # Compute participant and proposer rewards total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT diff --git a/specs/altair/bls.md b/specs/altair/bls.md index 529236056..32f55572b 100644 --- a/specs/altair/bls.md +++ b/specs/altair/bls.md @@ -9,8 +9,8 @@ - [Introduction](#introduction) - [Constants](#constants) - [Extensions](#extensions) - - [`eth2_aggregate_pubkeys`](#eth2_aggregate_pubkeys) - - [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify) + - [`eth_aggregate_pubkeys`](#eth_aggregate_pubkeys) + - [`eth_fast_aggregate_verify`](#eth_fast_aggregate_verify) @@ -29,14 +29,14 @@ Knowledge of the [phase 0 specification](../phase0/beacon-chain.md) is assumed, ## Extensions -### `eth2_aggregate_pubkeys` +### `eth_aggregate_pubkeys` An additional function `AggregatePKs` is defined to extend the [IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04) spec referenced in the phase 0 document. ```python -def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: +def eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: """ Return the aggregate public key for the public keys in ``pubkeys``. @@ -52,10 +52,10 @@ def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: return result ``` -### `eth2_fast_aggregate_verify` +### `eth_fast_aggregate_verify` ```python -def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool: +def eth_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool: """ Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty. 
""" From d8d068640011541ef99fe29cc6438c1c986168ac Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 4 Aug 2021 01:55:18 +0800 Subject: [PATCH 58/75] Add tests for the Altair BLS helpers --- tests/formats/bls/README.md | 2 + tests/formats/bls/eth2_aggregate_pubkeys.md | 19 +++ .../formats/bls/eth2_fast_aggregate_verify.md | 17 +++ tests/formats/bls/fast_aggregate_verify.md | 2 +- tests/generators/bls/main.py | 144 +++++++++++++++++- 5 files changed, 176 insertions(+), 8 deletions(-) create mode 100644 tests/formats/bls/eth2_aggregate_pubkeys.md create mode 100644 tests/formats/bls/eth2_fast_aggregate_verify.md diff --git a/tests/formats/bls/README.md b/tests/formats/bls/README.md index 65154ba1c..65018631a 100644 --- a/tests/formats/bls/README.md +++ b/tests/formats/bls/README.md @@ -7,6 +7,8 @@ The BLS test suite runner has the following handlers: - [`aggregate_verify`](./aggregate_verify.md) - [`aggregate`](./aggregate.md) +- [`eth2_aggregate_pubkeys`](./eth2_aggregate_pubkeys.md) +- [`eth2_fast_aggregate_verify`](./eth2_fast_aggregate_verify.md) - [`fast_aggregate_verify`](./fast_aggregate_verify.md) - [`sign`](./sign.md) - [`verify`](./verify.md) diff --git a/tests/formats/bls/eth2_aggregate_pubkeys.md b/tests/formats/bls/eth2_aggregate_pubkeys.md new file mode 100644 index 000000000..dd35b3166 --- /dev/null +++ b/tests/formats/bls/eth2_aggregate_pubkeys.md @@ -0,0 +1,19 @@ +# Test format: Ethereum-customized BLS pubkeys aggregation + +A BLS pubkeys aggregation combines a series of pubkeys into a single pubkey. + +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: List[BLS Pubkey] -- list of input BLS pubkeys +output: BLS Pubkey -- expected output, single BLS pubkeys or empty. +``` + +- `BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`. +- No output value if the input is invalid. + +## Condition + +The `eth2_aggregate_pubkeys` handler should aggregate the signatures in the `input`, and the result should match the expected `output`. diff --git a/tests/formats/bls/eth2_fast_aggregate_verify.md b/tests/formats/bls/eth2_fast_aggregate_verify.md new file mode 100644 index 000000000..ddc1b5208 --- /dev/null +++ b/tests/formats/bls/eth2_fast_aggregate_verify.md @@ -0,0 +1,17 @@ +# Test format: Ethereum-customized BLS fast aggregate verify + +Verify the signature against the given pubkeys and one message. + +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + pubkeys: List[bytes48] -- the pubkey + message: bytes32 -- the message + signature: bytes96 -- the signature to verify against pubkeys and message +output: bool -- VALID or INVALID +``` + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. diff --git a/tests/formats/bls/fast_aggregate_verify.md b/tests/formats/bls/fast_aggregate_verify.md index 7e3899a15..3366cbb79 100644 --- a/tests/formats/bls/fast_aggregate_verify.md +++ b/tests/formats/bls/fast_aggregate_verify.md @@ -1,4 +1,4 @@ -# Test format: BLS sign message +# Test format: BLS fast aggregate verify Verify the signature against the given pubkeys and one message. 
diff --git a/tests/generators/bls/main.py b/tests/generators/bls/main.py index 3ebaa1354..877324a2e 100644 --- a/tests/generators/bls/main.py +++ b/tests/generators/bls/main.py @@ -12,8 +12,10 @@ from eth_utils import ( import milagro_bls_binding as milagro_bls from eth2spec.utils import bls -from eth2spec.test.helpers.constants import PHASE0 +from eth2spec.test.helpers.constants import PHASE0, ALTAIR +from eth2spec.test.helpers.typing import SpecForkName from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing +from eth2spec.altair import spec def to_bytes(i): @@ -51,6 +53,7 @@ PRIVKEYS = [ ] PUBKEYS = [bls.SkToPk(privkey) for privkey in PRIVKEYS] +ZERO_PUBKEY = b'\x00' * 48 Z1_PUBKEY = b'\xc0' + b'\x00' * 47 NO_SIGNATURE = b'\x00' * 96 Z2_SIGNATURE = b'\xc0' + b'\x00' * 95 @@ -355,7 +358,130 @@ def case05_aggregate_verify(): } +def case06_eth2_aggregate_pubkeys(): + aggregate_pubkey = spec.eth2_aggregate_pubkeys(PUBKEYS) + assert aggregate_pubkey == milagro_bls._AggregatePKs(PUBKEYS) + yield f'eth2_aggregate_pubkeys_some_pubkeys', { + 'input': [encode_hex(pubkey) for pubkey in PUBKEYS], + 'output': encode_hex(aggregate_pubkey), + } + + # Invalid pubkeys -- len(pubkeys) == 0 + expect_exception(spec.eth2_aggregate_pubkeys, []) + expect_exception(milagro_bls._AggregatePKs, []) + yield f'eth2_aggregate_pubkeys_', { + 'input': [], + 'output': None, + } + + # Invalid pubkeys -- [ZERO_PUBKEY] + expect_exception(spec.eth2_aggregate_pubkeys, [ZERO_PUBKEY]) + expect_exception(milagro_bls._AggregatePKs, [ZERO_PUBKEY]) + yield f'eth2_aggregate_pubkeys_all_zero_pubkey', { + 'input': [encode_hex(ZERO_PUBKEY)], + 'output': None, + } + + # TODO: TBD + # Valid to aggregate G1 point at infinity + # aggregate_pubkey = spec.eth2_aggregate_pubkeys([Z1_PUBKEY]) + # assert aggregate_pubkey == milagro_bls._AggregatePKs([Z1_PUBKEY]) == Z1_PUBKEY + # yield f'eth2_aggregate_pubkeys_infinity_pubkey', { + # 'input': [encode_hex(Z1_PUBKEY)], + # 'output': encode_hex(aggregate_pubkey), + # } + + +def case07_eth2_fast_aggregate_verify(): + """ + Similar to `case04_fast_aggregate_verify` except for the empty case + """ + for i, message in enumerate(MESSAGES): + privkeys = PRIVKEYS[:i + 1] + sigs = [bls.Sign(privkey, message) for privkey in privkeys] + aggregate_signature = bls.Aggregate(sigs) + pubkeys = [bls.SkToPk(privkey) for privkey in privkeys] + pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys] + + # Valid signature + identifier = f'{pubkeys_serial}_{encode_hex(message)}' + assert spec.eth2_fast_aggregate_verify(pubkeys, message, aggregate_signature) + yield f'eth2_fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'pubkeys': pubkeys_serial, + 'message': encode_hex(message), + 'signature': encode_hex(aggregate_signature), + }, + 'output': True, + } + + # Invalid signature -- extra pubkey + pubkeys_extra = pubkeys + [bls.SkToPk(PRIVKEYS[-1])] + pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra] + identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}' + assert not spec.eth2_fast_aggregate_verify(pubkeys_extra, message, aggregate_signature) + yield f'eth_fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'pubkeys': pubkeys_extra_serial, + 'message': encode_hex(message), + 'signature': encode_hex(aggregate_signature), + }, + 'output': False, + } + + # Invalid signature -- tampered with signature + tampered_signature = aggregate_signature[:-4] + b'\xff\xff\xff\xff' + identifier = 
f'{pubkeys_serial}_{encode_hex(message)}' + assert not spec.eth2_fast_aggregate_verify(pubkeys, message, tampered_signature) + yield f'eth2_fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'pubkeys': pubkeys_serial, + 'message': encode_hex(message), + 'signature': encode_hex(tampered_signature), + }, + 'output': False, + } + + # NOTE: Unlike `FastAggregateVerify`, len(pubkeys) == 0 and signature == Z1_SIGNATURE is VALID + assert spec.eth2_fast_aggregate_verify([], message, Z2_SIGNATURE) + yield f'eth2_fast_aggregate_verify_na_pubkeys_and_infinity_signature', { + 'input': { + 'pubkeys': [], + 'message': encode_hex(message), + 'signature': encode_hex(Z2_SIGNATURE), + }, + 'output': True, + } + + # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... + assert not spec.eth2_fast_aggregate_verify([], message, NO_SIGNATURE) + yield f'eth2_fast_aggregate_verify_na_pubkeys_and_na_signature', { + 'input': { + 'pubkeys': [], + 'message': encode_hex(message), + 'signature': encode_hex(NO_SIGNATURE), + }, + 'output': False, + } + + # Invalid pubkeys and signature -- pubkeys contains point at infinity + pubkeys = PUBKEYS.copy() + pubkeys_with_infinity = pubkeys + [Z1_PUBKEY] + signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS] + aggregate_signature = bls.Aggregate(signatures) + assert not spec.eth2_fast_aggregate_verify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature) + yield f'eth2_fast_aggregate_verify_infinity_pubkey', { + 'input': { + 'pubkeys': [encode_hex(pubkey) for pubkey in pubkeys_with_infinity], + 'message': encode_hex(SAMPLE_MESSAGE), + 'signature': encode_hex(aggregate_signature), + }, + 'output': False, + } + + def create_provider(handler_name: str, + fork_name: SpecForkName, test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider: def prepare_fn() -> None: @@ -368,7 +494,7 @@ def create_provider(handler_name: str, print(data) (case_name, case_content) = data yield gen_typing.TestCase( - fork_name=PHASE0, + fork_name=fork_name, preset_name='general', runner_name='bls', handler_name=handler_name, @@ -383,9 +509,13 @@ def create_provider(handler_name: str, if __name__ == "__main__": bls.use_py_ecc() # Py-ecc is chosen instead of Milagro, since the code is better understood to be correct. 
gen_runner.run_generator("bls", [ - create_provider('sign', case01_sign), - create_provider('verify', case02_verify), - create_provider('aggregate', case03_aggregate), - create_provider('fast_aggregate_verify', case04_fast_aggregate_verify), - create_provider('aggregate_verify', case05_aggregate_verify), + # PHASE0 + create_provider('sign', PHASE0, case01_sign), + create_provider('verify', PHASE0, case02_verify), + create_provider('aggregate', PHASE0, case03_aggregate), + create_provider('fast_aggregate_verify', PHASE0, case04_fast_aggregate_verify), + create_provider('aggregate_verify', PHASE0, case05_aggregate_verify), + # ALTAIR + create_provider('eth2_aggregate_pubkeys', ALTAIR, case06_eth2_aggregate_pubkeys), + create_provider('eth2_fast_aggregate_verify', ALTAIR, case07_eth2_fast_aggregate_verify), ]) From 424f8387473ef6b98ceac01704174472c917c194 Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Wed, 4 Aug 2021 12:44:42 +0200 Subject: [PATCH 59/75] Update specs/sharding/beacon-chain.md Co-authored-by: Danny Ryan --- specs/sharding/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index f5b212c6e..49ece701f 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -182,7 +182,7 @@ TODO: `WEIGHT_DENOMINATOR` needs to be adjusted, but this breaks a lot of Altair ## Configuration -Note: some preset variables may become run-time configurable for testnets, but default to a preset while the spec is unstable. +Note: Some preset variables may become run-time configurable for testnets, but default to a preset while the spec is unstable. E.g. `INITIAL_ACTIVE_SHARDS`, `MAX_SAMPLES_PER_BLOB` and `TARGET_SAMPLES_PER_BLOB`. ## Updated containers From 43a1617ffa94a8483f9cda7abec12dce88a2bef3 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 4 Aug 2021 21:12:19 +0800 Subject: [PATCH 60/75] Ensure that the given PKs are valid PKs + fix typos --- specs/altair/bls.md | 4 +++ specs/phase0/beacon-chain.md | 1 + tests/core/pyspec/eth2spec/utils/bls.py | 7 ++++ tests/generators/bls/main.py | 48 ++++++++++++++----------- 4 files changed, 40 insertions(+), 20 deletions(-) diff --git a/specs/altair/bls.md b/specs/altair/bls.md index 529236056..e31da02ff 100644 --- a/specs/altair/bls.md +++ b/specs/altair/bls.md @@ -46,6 +46,10 @@ def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: Refer to the BLS signature draft standard for more information. """ assert len(pubkeys) > 0 + for pubkey in pubkeys: + # Ensure that the given inputs are valid pubkeys + assert bls.KeyValidate(pubkey) + result = copy(pubkeys[0]) for pubkey in pubkeys[1:]: result += pubkey diff --git a/specs/phase0/beacon-chain.md b/specs/phase0/beacon-chain.md index 0169e2725..5ab9b75fd 100644 --- a/specs/phase0/beacon-chain.md +++ b/specs/phase0/beacon-chain.md @@ -647,6 +647,7 @@ The [IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irt - `def Aggregate(signatures: Sequence[BLSSignature]) -> BLSSignature` - `def FastAggregateVerify(pubkeys: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool` - `def AggregateVerify(pubkeys: Sequence[BLSPubkey], messages: Sequence[Bytes], signature: BLSSignature) -> bool` +- `def KeyValidate(pubkey: BLSPubkey) -> bool` The above functions are accessed through the `bls` module, e.g. `bls.Verify`. 
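The `KeyValidate` requirement is the heart of this change: aggregation must refuse inputs that are not valid public keys. A small sketch of the two rejection classes, assuming py-ecc as the backing implementation (the generator selects it via `bls.use_py_ecc()` elsewhere in this series); the byte constants mirror the generator's:

```python
from py_ecc.bls import G2ProofOfPossession as py_ecc_bls

Z1_PUBKEY = b'\xc0' + b'\x00' * 47  # G1 point at infinity: well-formed encoding, but not a valid pubkey
ZERO_PUBKEY = b'\x00' * 48          # does not even decode to a curve point

# KeyValidate must reject both classes of bad input before any aggregation happens.
assert not py_ecc_bls.KeyValidate(Z1_PUBKEY)
assert not py_ecc_bls.KeyValidate(ZERO_PUBKEY)
```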
diff --git a/tests/core/pyspec/eth2spec/utils/bls.py b/tests/core/pyspec/eth2spec/utils/bls.py index dc4daca49..5bda0232f 100644 --- a/tests/core/pyspec/eth2spec/utils/bls.py +++ b/tests/core/pyspec/eth2spec/utils/bls.py @@ -95,6 +95,13 @@ def signature_to_G2(signature): @only_with_bls(alt_return=STUB_PUBKEY) def AggregatePKs(pubkeys): + if bls == py_ecc_bls: + for pubkey in pubkeys: + assert bls.KeyValidate(pubkey) + elif bls == milagro_bls: + # milagro_bls._AggregatePKs checks KeyValidate internally + pass + return bls._AggregatePKs(list(pubkeys)) diff --git a/tests/generators/bls/main.py b/tests/generators/bls/main.py index 877324a2e..467560e70 100644 --- a/tests/generators/bls/main.py +++ b/tests/generators/bls/main.py @@ -369,7 +369,7 @@ def case06_eth2_aggregate_pubkeys(): # Invalid pubkeys -- len(pubkeys) == 0 expect_exception(spec.eth2_aggregate_pubkeys, []) expect_exception(milagro_bls._AggregatePKs, []) - yield f'eth2_aggregate_pubkeys_', { + yield f'eth2_aggregate_pubkeys_empty_list', { 'input': [], 'output': None, } @@ -377,19 +377,27 @@ def case06_eth2_aggregate_pubkeys(): # Invalid pubkeys -- [ZERO_PUBKEY] expect_exception(spec.eth2_aggregate_pubkeys, [ZERO_PUBKEY]) expect_exception(milagro_bls._AggregatePKs, [ZERO_PUBKEY]) - yield f'eth2_aggregate_pubkeys_all_zero_pubkey', { + yield f'eth2_aggregate_pubkeys_na_pubkey', { 'input': [encode_hex(ZERO_PUBKEY)], 'output': None, } - # TODO: TBD - # Valid to aggregate G1 point at infinity - # aggregate_pubkey = spec.eth2_aggregate_pubkeys([Z1_PUBKEY]) - # assert aggregate_pubkey == milagro_bls._AggregatePKs([Z1_PUBKEY]) == Z1_PUBKEY - # yield f'eth2_aggregate_pubkeys_infinity_pubkey', { - # 'input': [encode_hex(Z1_PUBKEY)], - # 'output': encode_hex(aggregate_pubkey), - # } + # Invalid pubkeys -- G1 point at infinity + expect_exception(spec.eth2_aggregate_pubkeys, [Z1_PUBKEY]) + expect_exception(milagro_bls._AggregatePKs, [Z1_PUBKEY]) + yield f'eth2_aggregate_pubkeys_infinity_pubkey', { + 'input': [encode_hex(Z1_PUBKEY)], + 'output': None, + } + + # Invalid pubkeys -- b'\x40\x00\x00\x00....\x00' pubkey + x40_pubkey = b'\x40' + b'\00' * 47 + expect_exception(spec.eth2_aggregate_pubkeys, [x40_pubkey]) + expect_exception(milagro_bls._AggregatePKs, [x40_pubkey]) + yield f'eth2_aggregate_pubkeys_x40_pubkey', { + 'input': [encode_hex(x40_pubkey)], + 'output': None, + } def case07_eth2_fast_aggregate_verify(): @@ -420,7 +428,7 @@ def case07_eth2_fast_aggregate_verify(): pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra] identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}' assert not spec.eth2_fast_aggregate_verify(pubkeys_extra, message, aggregate_signature) - yield f'eth_fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + yield f'eth2_fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkeys': pubkeys_extra_serial, 'message': encode_hex(message), @@ -480,8 +488,8 @@ def case07_eth2_fast_aggregate_verify(): } -def create_provider(handler_name: str, - fork_name: SpecForkName, +def create_provider(fork_name: SpecForkName, + handler_name: str, test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider: def prepare_fn() -> None: @@ -510,12 +518,12 @@ if __name__ == "__main__": bls.use_py_ecc() # Py-ecc is chosen instead of Milagro, since the code is better understood to be correct. 
gen_runner.run_generator("bls", [ # PHASE0 - create_provider('sign', PHASE0, case01_sign), - create_provider('verify', PHASE0, case02_verify), - create_provider('aggregate', PHASE0, case03_aggregate), - create_provider('fast_aggregate_verify', PHASE0, case04_fast_aggregate_verify), - create_provider('aggregate_verify', PHASE0, case05_aggregate_verify), + create_provider(PHASE0, 'sign', case01_sign), + create_provider(PHASE0, 'verify', case02_verify), + create_provider(PHASE0, 'aggregate', case03_aggregate), + create_provider(PHASE0, 'fast_aggregate_verify', case04_fast_aggregate_verify), + create_provider(PHASE0, 'aggregate_verify', case05_aggregate_verify), # ALTAIR - create_provider('eth2_aggregate_pubkeys', ALTAIR, case06_eth2_aggregate_pubkeys), - create_provider('eth2_fast_aggregate_verify', ALTAIR, case07_eth2_fast_aggregate_verify), + create_provider(ALTAIR, 'eth2_aggregate_pubkeys', case06_eth2_aggregate_pubkeys), + create_provider(ALTAIR, 'eth2_fast_aggregate_verify', case07_eth2_fast_aggregate_verify), ]) From fc3e651817e1775deae5f8252d546974ab24bd87 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 4 Aug 2021 23:25:25 +0200 Subject: [PATCH 61/75] samples -> samples_length --- specs/sharding/beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 49ece701f..bbb4ae930 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -437,13 +437,13 @@ def compute_previous_slot(slot: Slot) -> Slot: #### `compute_updated_sample_price` ```python -def compute_updated_sample_price(prev_price: Gwei, samples: uint64, active_shards: uint64) -> Gwei: +def compute_updated_sample_price(prev_price: Gwei, samples_length: uint64, active_shards: uint64) -> Gwei: adjustment_quotient = active_shards * SLOTS_PER_EPOCH * SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT - if samples > TARGET_SAMPLES_PER_BLOB: - delta = max(1, prev_price * (samples - TARGET_SAMPLES_PER_BLOB) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient) + if samples_length > TARGET_SAMPLES_PER_BLOB: + delta = max(1, prev_price * (samples_length - TARGET_SAMPLES_PER_BLOB) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient) return min(prev_price + delta, MAX_SAMPLE_PRICE) else: - delta = max(1, prev_price * (TARGET_SAMPLES_PER_BLOB - samples) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient) + delta = max(1, prev_price * (TARGET_SAMPLES_PER_BLOB - samples_length) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient) return max(prev_price, MIN_SAMPLE_PRICE + delta) - delta ``` From 2ff143c719eb8f6a310d9cb0a851ea4bfc28d001 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Wed, 4 Aug 2021 16:28:42 -0700 Subject: [PATCH 62/75] Add test for exited validators during inactivity leak --- .../test_process_inactivity_updates.py | 55 +++++++++++++++++++ .../eth2spec/test/helpers/voluntary_exits.py | 19 +++++++ 2 files changed, 74 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py index f7d2fa9c8..5a334f28a 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py @@ -3,10 +3,15 @@ from random import Random from eth2spec.test.context import spec_state_test, with_altair_and_later from eth2spec.test.helpers.inactivity_scores import 
randomize_inactivity_scores, zero_inactivity_scores from eth2spec.test.helpers.state import ( + next_epoch, next_epoch_via_block, set_full_participation, set_empty_participation, ) +from eth2spec.test.helpers.voluntary_exits import ( + exit_validators, + get_exited_validators +) from eth2spec.test.helpers.epoch_processing import ( run_epoch_processing_with ) @@ -266,3 +271,53 @@ def test_some_slashed_full_random_leaking(spec, state): # Check still in leak assert spec.is_in_inactivity_leak(state) + + +@with_altair_and_later +@spec_state_test +@leaking() +def test_some_exited_full_random_leaking(spec, state): + rng = Random(1102233) + + exit_count = 3 + + # randomize ahead of time to check exited validators do not have + # mutations applied to their inactivity scores + randomize_inactivity_scores(spec, state, rng=rng) + + assert not get_exited_validators(spec, state) + exited_indices = exit_validators(spec, state, exit_count, rng=rng) + assert not get_exited_validators(spec, state) + + # advance the state to effect the exits + target_epoch = max(state.validators[index].exit_epoch for index in exited_indices) + # validators that have exited in the previous epoch or earlier will not + # have their inactivity scores modified, the test advances the state past this point + # to confirm this invariant: + previous_epoch = spec.get_previous_epoch(state) + for _ in range(target_epoch - previous_epoch): + next_epoch(spec, state) + assert len(get_exited_validators(spec, state)) == exit_count + + previous_scores = state.inactivity_scores.copy() + + yield from run_inactivity_scores_test( + spec, state, + randomize_previous_epoch_participation, rng=rng, + ) + + # ensure exited validators have their score "frozen" at exit + # but otherwise there was a change + some_changed = False + for index in range(len(state.validators)): + if index in exited_indices: + assert previous_scores[index] == state.inactivity_scores[index] + else: + previous_score = previous_scores[index] + current_score = state.inactivity_scores[index] + if previous_score != current_score: + some_changed = True + assert some_changed + + # Check still in leak + assert spec.is_in_inactivity_leak(state) diff --git a/tests/core/pyspec/eth2spec/test/helpers/voluntary_exits.py b/tests/core/pyspec/eth2spec/test/helpers/voluntary_exits.py index 28232cc23..73d4598b3 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/voluntary_exits.py +++ b/tests/core/pyspec/eth2spec/test/helpers/voluntary_exits.py @@ -1,3 +1,4 @@ +from random import Random from eth2spec.utils import bls from eth2spec.test.helpers.keys import privkeys @@ -23,3 +24,21 @@ def sign_voluntary_exit(spec, state, voluntary_exit, privkey): message=voluntary_exit, signature=bls.Sign(privkey, signing_root) ) + + +# +# Helpers for applying effects of a voluntary exit +# +def get_exited_validators(spec, state): + current_epoch = spec.get_current_epoch(state) + return [index for (index, validator) in enumerate(state.validators) if validator.exit_epoch <= current_epoch] + + +def exit_validators(spec, state, validator_count, rng=None): + if rng is None: + rng = Random(1337) + + indices = rng.sample(range(len(state.validators)), validator_count) + for index in indices: + spec.initiate_validator_exit(state, index) + return indices From a8383be878dc69540903b5a9eee8a555b79880e9 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 5 Aug 2021 11:12:36 +0800 Subject: [PATCH 63/75] Apply suggestions from code review Co-authored-by: Alex Stokes --- tests/formats/bls/eth2_aggregate_pubkeys.md | 4 ++-- 
 tests/generators/bls/main.py                  |  2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/formats/bls/eth2_aggregate_pubkeys.md b/tests/formats/bls/eth2_aggregate_pubkeys.md
index dd35b3166..b07d9bd06 100644
--- a/tests/formats/bls/eth2_aggregate_pubkeys.md
+++ b/tests/formats/bls/eth2_aggregate_pubkeys.md
@@ -1,6 +1,6 @@
-# Test format: Ethereum-customized BLS pubkeys aggregation
+# Test format: Ethereum-customized BLS pubkey aggregation

-A BLS pubkeys aggregation combines a series of pubkeys into a single pubkey.
+A BLS pubkey aggregation combines a series of pubkeys into a single pubkey.

 ## Test case format

diff --git a/tests/generators/bls/main.py b/tests/generators/bls/main.py
index 467560e70..42754a581 100644
--- a/tests/generators/bls/main.py
+++ b/tests/generators/bls/main.py
@@ -450,7 +450,7 @@ def case07_eth2_fast_aggregate_verify():
         'output': False,
     }

-    # NOTE: Unlike `FastAggregateVerify`, len(pubkeys) == 0 and signature == Z1_SIGNATURE is VALID
+    # NOTE: Unlike `FastAggregateVerify`, len(pubkeys) == 0 and signature == Z2_SIGNATURE is VALID
     assert spec.eth2_fast_aggregate_verify([], message, Z2_SIGNATURE)
     yield f'eth2_fast_aggregate_verify_na_pubkeys_and_infinity_signature', {
         'input': {

From 3b86bd340f493dcfdbc8d3b9c9d93f630e4736ac Mon Sep 17 00:00:00 2001
From: Hsiao-Wei Wang
Date: Thu, 5 Aug 2021 11:20:49 +0800
Subject: [PATCH 64/75] Rename eth2_* to eth_*

---
 tests/formats/bls/README.md                   |  4 +-
 ...te_pubkeys.md => eth_aggregate_pubkeys.md} |  2 +-
 ...verify.md => eth_fast_aggregate_verify.md} |  0
 tests/generators/bls/main.py                  | 52 +++++++++----------
 4 files changed, 29 insertions(+), 29 deletions(-)
 rename tests/formats/bls/{eth2_aggregate_pubkeys.md => eth_aggregate_pubkeys.md} (78%)
 rename tests/formats/bls/{eth2_fast_aggregate_verify.md => eth_fast_aggregate_verify.md} (100%)

diff --git a/tests/formats/bls/README.md b/tests/formats/bls/README.md
index 65018631a..77a9654a8 100644
--- a/tests/formats/bls/README.md
+++ b/tests/formats/bls/README.md
@@ -7,8 +7,8 @@ The BLS test suite runner has the following handlers:

 - [`aggregate_verify`](./aggregate_verify.md)
 - [`aggregate`](./aggregate.md)
-- [`eth2_aggregate_pubkeys`](./eth2_aggregate_pubkeys.md)
-- [`eth2_fast_aggregate_verify`](./eth2_fast_aggregate_verify.md)
+- [`eth_aggregate_pubkeys`](./eth_aggregate_pubkeys.md)
+- [`eth_fast_aggregate_verify`](./eth_fast_aggregate_verify.md)
 - [`fast_aggregate_verify`](./fast_aggregate_verify.md)
 - [`sign`](./sign.md)
 - [`verify`](./verify.md)
diff --git a/tests/formats/bls/eth2_aggregate_pubkeys.md b/tests/formats/bls/eth_aggregate_pubkeys.md
similarity index 78%
rename from tests/formats/bls/eth2_aggregate_pubkeys.md
rename to tests/formats/bls/eth_aggregate_pubkeys.md
index b07d9bd06..86d0e3cd0 100644
--- a/tests/formats/bls/eth2_aggregate_pubkeys.md
+++ b/tests/formats/bls/eth_aggregate_pubkeys.md
@@ -16,4 +16,4 @@ output: BLS Pubkey -- expected output, single BLS pubkey or empty.

 ## Condition

-The `eth2_aggregate_pubkeys` handler should aggregate the pubkeys in the `input`, and the result should match the expected `output`.
+The `eth_aggregate_pubkeys` handler should aggregate the pubkeys in the `input`, and the result should match the expected `output`.
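The rename above is mechanical, but the semantics the handler checks are worth keeping in mind: aggregating a single pubkey is the identity operation, and point addition commutes, so input order does not matter. A quick sketch under py-ecc, with toy secret keys that are for illustration only:

```python
from py_ecc.bls import G2ProofOfPossession as py_ecc_bls

privkeys = [1, 2, 3]  # toy secrets, never derive real keys like this
pubkeys = [py_ecc_bls.SkToPk(sk) for sk in privkeys]

# A singleton aggregate is the pubkey itself...
assert py_ecc_bls._AggregatePKs(pubkeys[:1]) == pubkeys[0]
# ...and aggregation is order-independent.
assert py_ecc_bls._AggregatePKs(pubkeys) == py_ecc_bls._AggregatePKs(list(reversed(pubkeys)))
```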
diff --git a/tests/formats/bls/eth2_fast_aggregate_verify.md b/tests/formats/bls/eth_fast_aggregate_verify.md similarity index 100% rename from tests/formats/bls/eth2_fast_aggregate_verify.md rename to tests/formats/bls/eth_fast_aggregate_verify.md diff --git a/tests/generators/bls/main.py b/tests/generators/bls/main.py index 42754a581..0d6f4942e 100644 --- a/tests/generators/bls/main.py +++ b/tests/generators/bls/main.py @@ -358,49 +358,49 @@ def case05_aggregate_verify(): } -def case06_eth2_aggregate_pubkeys(): - aggregate_pubkey = spec.eth2_aggregate_pubkeys(PUBKEYS) +def case06_eth_aggregate_pubkeys(): + aggregate_pubkey = spec.eth_aggregate_pubkeys(PUBKEYS) assert aggregate_pubkey == milagro_bls._AggregatePKs(PUBKEYS) - yield f'eth2_aggregate_pubkeys_some_pubkeys', { + yield f'eth_aggregate_pubkeys_some_pubkeys', { 'input': [encode_hex(pubkey) for pubkey in PUBKEYS], 'output': encode_hex(aggregate_pubkey), } # Invalid pubkeys -- len(pubkeys) == 0 - expect_exception(spec.eth2_aggregate_pubkeys, []) + expect_exception(spec.eth_aggregate_pubkeys, []) expect_exception(milagro_bls._AggregatePKs, []) - yield f'eth2_aggregate_pubkeys_empty_list', { + yield f'eth_aggregate_pubkeys_empty_list', { 'input': [], 'output': None, } # Invalid pubkeys -- [ZERO_PUBKEY] - expect_exception(spec.eth2_aggregate_pubkeys, [ZERO_PUBKEY]) + expect_exception(spec.eth_aggregate_pubkeys, [ZERO_PUBKEY]) expect_exception(milagro_bls._AggregatePKs, [ZERO_PUBKEY]) - yield f'eth2_aggregate_pubkeys_na_pubkey', { + yield f'eth_aggregate_pubkeys_na_pubkey', { 'input': [encode_hex(ZERO_PUBKEY)], 'output': None, } # Invalid pubkeys -- G1 point at infinity - expect_exception(spec.eth2_aggregate_pubkeys, [Z1_PUBKEY]) + expect_exception(spec.eth_aggregate_pubkeys, [Z1_PUBKEY]) expect_exception(milagro_bls._AggregatePKs, [Z1_PUBKEY]) - yield f'eth2_aggregate_pubkeys_infinity_pubkey', { + yield f'eth_aggregate_pubkeys_infinity_pubkey', { 'input': [encode_hex(Z1_PUBKEY)], 'output': None, } # Invalid pubkeys -- b'\x40\x00\x00\x00....\x00' pubkey x40_pubkey = b'\x40' + b'\00' * 47 - expect_exception(spec.eth2_aggregate_pubkeys, [x40_pubkey]) + expect_exception(spec.eth_aggregate_pubkeys, [x40_pubkey]) expect_exception(milagro_bls._AggregatePKs, [x40_pubkey]) - yield f'eth2_aggregate_pubkeys_x40_pubkey', { + yield f'eth_aggregate_pubkeys_x40_pubkey', { 'input': [encode_hex(x40_pubkey)], 'output': None, } -def case07_eth2_fast_aggregate_verify(): +def case07_eth_fast_aggregate_verify(): """ Similar to `case04_fast_aggregate_verify` except for the empty case """ @@ -413,8 +413,8 @@ def case07_eth2_fast_aggregate_verify(): # Valid signature identifier = f'{pubkeys_serial}_{encode_hex(message)}' - assert spec.eth2_fast_aggregate_verify(pubkeys, message, aggregate_signature) - yield f'eth2_fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + assert spec.eth_fast_aggregate_verify(pubkeys, message, aggregate_signature) + yield f'eth_fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkeys': pubkeys_serial, 'message': encode_hex(message), @@ -427,8 +427,8 @@ def case07_eth2_fast_aggregate_verify(): pubkeys_extra = pubkeys + [bls.SkToPk(PRIVKEYS[-1])] pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra] identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}' - assert not spec.eth2_fast_aggregate_verify(pubkeys_extra, message, aggregate_signature) - yield f'eth2_fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 
assert not spec.eth_fast_aggregate_verify(pubkeys_extra, message, aggregate_signature) + yield f'eth_fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkeys': pubkeys_extra_serial, 'message': encode_hex(message), @@ -440,8 +440,8 @@ def case07_eth2_fast_aggregate_verify(): # Invalid signature -- tampered with signature tampered_signature = aggregate_signature[:-4] + b'\xff\xff\xff\xff' identifier = f'{pubkeys_serial}_{encode_hex(message)}' - assert not spec.eth2_fast_aggregate_verify(pubkeys, message, tampered_signature) - yield f'eth2_fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + assert not spec.eth_fast_aggregate_verify(pubkeys, message, tampered_signature) + yield f'eth_fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'pubkeys': pubkeys_serial, 'message': encode_hex(message), @@ -451,8 +451,8 @@ def case07_eth2_fast_aggregate_verify(): } # NOTE: Unlike `FastAggregateVerify`, len(pubkeys) == 0 and signature == Z2_SIGNATURE is VALID - assert spec.eth2_fast_aggregate_verify([], message, Z2_SIGNATURE) - yield f'eth2_fast_aggregate_verify_na_pubkeys_and_infinity_signature', { + assert spec.eth_fast_aggregate_verify([], message, Z2_SIGNATURE) + yield f'eth_fast_aggregate_verify_na_pubkeys_and_infinity_signature', { 'input': { 'pubkeys': [], 'message': encode_hex(message), @@ -462,8 +462,8 @@ def case07_eth2_fast_aggregate_verify(): } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... - assert not spec.eth2_fast_aggregate_verify([], message, NO_SIGNATURE) - yield f'eth2_fast_aggregate_verify_na_pubkeys_and_na_signature', { + assert not spec.eth_fast_aggregate_verify([], message, NO_SIGNATURE) + yield f'eth_fast_aggregate_verify_na_pubkeys_and_na_signature', { 'input': { 'pubkeys': [], 'message': encode_hex(message), @@ -477,8 +477,8 @@ def case07_eth2_fast_aggregate_verify(): pubkeys_with_infinity = pubkeys + [Z1_PUBKEY] signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS] aggregate_signature = bls.Aggregate(signatures) - assert not spec.eth2_fast_aggregate_verify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature) - yield f'eth2_fast_aggregate_verify_infinity_pubkey', { + assert not spec.eth_fast_aggregate_verify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature) + yield f'eth_fast_aggregate_verify_infinity_pubkey', { 'input': { 'pubkeys': [encode_hex(pubkey) for pubkey in pubkeys_with_infinity], 'message': encode_hex(SAMPLE_MESSAGE), @@ -524,6 +524,6 @@ if __name__ == "__main__": create_provider(PHASE0, 'fast_aggregate_verify', case04_fast_aggregate_verify), create_provider(PHASE0, 'aggregate_verify', case05_aggregate_verify), # ALTAIR - create_provider(ALTAIR, 'eth2_aggregate_pubkeys', case06_eth2_aggregate_pubkeys), - create_provider(ALTAIR, 'eth2_fast_aggregate_verify', case07_eth2_fast_aggregate_verify), + create_provider(ALTAIR, 'eth_aggregate_pubkeys', case06_eth_aggregate_pubkeys), + create_provider(ALTAIR, 'eth_fast_aggregate_verify', case07_eth_fast_aggregate_verify), ]) From 93af122b2db71b9d33f6e5fc963273a6adb8c211 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 5 Aug 2021 12:09:30 +0800 Subject: [PATCH 65/75] PR feedback from @ralexstokes and add single pubkey aggregate tests --- specs/altair/bls.md | 5 +- .../test_process_attester_slashing.py | 8 +- tests/core/pyspec/eth2spec/utils/bls.py | 8 +- tests/generators/bls/main.py | 90 +++++++++++-------- 4 files 
changed, 61 insertions(+), 50 deletions(-) diff --git a/specs/altair/bls.md b/specs/altair/bls.md index a09c6b3e3..06b0313a9 100644 --- a/specs/altair/bls.md +++ b/specs/altair/bls.md @@ -46,9 +46,8 @@ def eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: Refer to the BLS signature draft standard for more information. """ assert len(pubkeys) > 0 - for pubkey in pubkeys: - # Ensure that the given inputs are valid pubkeys - assert bls.KeyValidate(pubkey) + # Ensure that the given inputs are valid pubkeys + assert all(bls.KeyValidate(pubkey) for pubkey in pubkeys) result = copy(pubkeys[0]) for pubkey in pubkeys[1:]: diff --git a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attester_slashing.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attester_slashing.py index b620a7342..13d64e03b 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attester_slashing.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attester_slashing.py @@ -306,7 +306,7 @@ def test_att1_empty_indices(spec, state): attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) attester_slashing.attestation_1.attesting_indices = [] - attester_slashing.attestation_1.signature = spec.bls.Z2_SIGNATURE + attester_slashing.attestation_1.signature = spec.bls.G2_POINT_AT_INFINITY yield from run_attester_slashing_processing(spec, state, attester_slashing, False) @@ -318,7 +318,7 @@ def test_att2_empty_indices(spec, state): attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False) attester_slashing.attestation_2.attesting_indices = [] - attester_slashing.attestation_2.signature = spec.bls.Z2_SIGNATURE + attester_slashing.attestation_2.signature = spec.bls.G2_POINT_AT_INFINITY yield from run_attester_slashing_processing(spec, state, attester_slashing, False) @@ -330,10 +330,10 @@ def test_all_empty_indices(spec, state): attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False) attester_slashing.attestation_1.attesting_indices = [] - attester_slashing.attestation_1.signature = spec.bls.Z2_SIGNATURE + attester_slashing.attestation_1.signature = spec.bls.G2_POINT_AT_INFINITY attester_slashing.attestation_2.attesting_indices = [] - attester_slashing.attestation_2.signature = spec.bls.Z2_SIGNATURE + attester_slashing.attestation_2.signature = spec.bls.G2_POINT_AT_INFINITY yield from run_attester_slashing_processing(spec, state, attester_slashing, False) diff --git a/tests/core/pyspec/eth2spec/utils/bls.py b/tests/core/pyspec/eth2spec/utils/bls.py index 5bda0232f..9211e0ff0 100644 --- a/tests/core/pyspec/eth2spec/utils/bls.py +++ b/tests/core/pyspec/eth2spec/utils/bls.py @@ -10,9 +10,8 @@ bls = py_ecc_bls STUB_SIGNATURE = b'\x11' * 96 STUB_PUBKEY = b'\x22' * 48 -Z1_PUBKEY = b'\xc0' + b'\x00' * 47 -Z2_SIGNATURE = b'\xc0' + b'\x00' * 95 -STUB_COORDINATES = _signature_to_G2(Z2_SIGNATURE) +G2_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 95 +STUB_COORDINATES = _signature_to_G2(G2_POINT_AT_INFINITY) def use_milagro(): @@ -96,8 +95,7 @@ def signature_to_G2(signature): @only_with_bls(alt_return=STUB_PUBKEY) def AggregatePKs(pubkeys): if bls == py_ecc_bls: - for pubkey in pubkeys: - assert bls.KeyValidate(pubkey) + assert all(bls.KeyValidate(pubkey) for pubkey in pubkeys) elif bls == milagro_bls: # milagro_bls._AggregatePKs checks KeyValidate internally pass diff --git a/tests/generators/bls/main.py b/tests/generators/bls/main.py index 
0d6f4942e..75468b162 100644 --- a/tests/generators/bls/main.py +++ b/tests/generators/bls/main.py @@ -54,9 +54,11 @@ PRIVKEYS = [ PUBKEYS = [bls.SkToPk(privkey) for privkey in PRIVKEYS] ZERO_PUBKEY = b'\x00' * 48 -Z1_PUBKEY = b'\xc0' + b'\x00' * 47 -NO_SIGNATURE = b'\x00' * 96 -Z2_SIGNATURE = b'\xc0' + b'\x00' * 95 +G1_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 47 + +ZERO_SIGNATURE = b'\x00' * 96 +G2_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 95 + ZERO_PRIVKEY = 0 ZERO_PRIVKEY_BYTES = b'\x00' * 32 @@ -149,13 +151,13 @@ def case02_verify(): } # Invalid pubkey and signature with the point at infinity - assert not bls.Verify(Z1_PUBKEY, SAMPLE_MESSAGE, Z2_SIGNATURE) - assert not milagro_bls.Verify(Z1_PUBKEY, SAMPLE_MESSAGE, Z2_SIGNATURE) + assert not bls.Verify(G1_POINT_AT_INFINITY, SAMPLE_MESSAGE, G2_POINT_AT_INFINITY) + assert not milagro_bls.Verify(G1_POINT_AT_INFINITY, SAMPLE_MESSAGE, G2_POINT_AT_INFINITY) yield f'verify_infinity_pubkey_and_infinity_signature', { 'input': { - 'pubkey': encode_hex(Z1_PUBKEY), + 'pubkey': encode_hex(G1_POINT_AT_INFINITY), 'message': encode_hex(SAMPLE_MESSAGE), - 'signature': encode_hex(Z2_SIGNATURE), + 'signature': encode_hex(G2_POINT_AT_INFINITY), }, 'output': False, } @@ -181,10 +183,10 @@ def case03_aggregate(): } # Valid to aggregate G2 point at infinity - aggregate_sig = bls.Aggregate([Z2_SIGNATURE]) - assert aggregate_sig == milagro_bls.Aggregate([Z2_SIGNATURE]) == Z2_SIGNATURE + aggregate_sig = bls.Aggregate([G2_POINT_AT_INFINITY]) + assert aggregate_sig == milagro_bls.Aggregate([G2_POINT_AT_INFINITY]) == G2_POINT_AT_INFINITY yield f'aggregate_infinity_signature', { - 'input': [encode_hex(Z2_SIGNATURE)], + 'input': [encode_hex(G2_POINT_AT_INFINITY)], 'output': encode_hex(aggregate_sig), } @@ -240,32 +242,32 @@ def case04_fast_aggregate_verify(): } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE - assert not bls.FastAggregateVerify([], message, Z2_SIGNATURE) - assert not milagro_bls.FastAggregateVerify([], message, Z2_SIGNATURE) + assert not bls.FastAggregateVerify([], message, G2_POINT_AT_INFINITY) + assert not milagro_bls.FastAggregateVerify([], message, G2_POINT_AT_INFINITY) yield f'fast_aggregate_verify_na_pubkeys_and_infinity_signature', { 'input': { 'pubkeys': [], 'message': encode_hex(message), - 'signature': encode_hex(Z2_SIGNATURE), + 'signature': encode_hex(G2_POINT_AT_INFINITY), }, 'output': False, } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... 
- assert not bls.FastAggregateVerify([], message, NO_SIGNATURE) - assert not milagro_bls.FastAggregateVerify([], message, NO_SIGNATURE) - yield f'fast_aggregate_verify_na_pubkeys_and_na_signature', { + assert not bls.FastAggregateVerify([], message, ZERO_SIGNATURE) + assert not milagro_bls.FastAggregateVerify([], message, ZERO_SIGNATURE) + yield f'fast_aggregate_verify_na_pubkeys_and_zero_signature', { 'input': { 'pubkeys': [], 'message': encode_hex(message), - 'signature': encode_hex(NO_SIGNATURE), + 'signature': encode_hex(ZERO_SIGNATURE), }, 'output': False, } # Invalid pubkeys and signature -- pubkeys contains point at infinity pubkeys = PUBKEYS.copy() - pubkeys_with_infinity = pubkeys + [Z1_PUBKEY] + pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY] signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS] aggregate_signature = bls.Aggregate(signatures) assert not bls.FastAggregateVerify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature) @@ -320,31 +322,31 @@ def case05_aggregate_verify(): } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE - assert not bls.AggregateVerify([], [], Z2_SIGNATURE) - assert not milagro_bls.AggregateVerify([], [], Z2_SIGNATURE) + assert not bls.AggregateVerify([], [], G2_POINT_AT_INFINITY) + assert not milagro_bls.AggregateVerify([], [], G2_POINT_AT_INFINITY) yield f'aggregate_verify_na_pubkeys_and_infinity_signature', { 'input': { 'pubkeys': [], 'messages': [], - 'signature': encode_hex(Z2_SIGNATURE), + 'signature': encode_hex(G2_POINT_AT_INFINITY), }, 'output': False, } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... - assert not bls.AggregateVerify([], [], NO_SIGNATURE) - assert not milagro_bls.AggregateVerify([], [], NO_SIGNATURE) - yield f'aggregate_verify_na_pubkeys_and_na_signature', { + assert not bls.AggregateVerify([], [], ZERO_SIGNATURE) + assert not milagro_bls.AggregateVerify([], [], ZERO_SIGNATURE) + yield f'aggregate_verify_na_pubkeys_and_zero_signature', { 'input': { 'pubkeys': [], 'messages': [], - 'signature': encode_hex(NO_SIGNATURE), + 'signature': encode_hex(ZERO_SIGNATURE), }, 'output': False, } # Invalid pubkeys and signature -- pubkeys contains point at infinity - pubkeys_with_infinity = pubkeys + [Z1_PUBKEY] + pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY] messages_with_sample = messages + [SAMPLE_MESSAGE] assert not bls.AggregateVerify(pubkeys_with_infinity, messages_with_sample, aggregate_signature) assert not milagro_bls.AggregateVerify(pubkeys_with_infinity, messages_with_sample, aggregate_signature) @@ -359,9 +361,21 @@ def case05_aggregate_verify(): def case06_eth_aggregate_pubkeys(): + for pubkey in PUBKEYS: + encoded_pubkey = encode_hex(pubkey) + aggregate_pubkey = spec.eth_aggregate_pubkeys([pubkey]) + # Should be unchanged + assert aggregate_pubkey == milagro_bls._AggregatePKs([pubkey]) == pubkey + # Valid pubkey + yield f'eth_aggregate_pubkeys_valid_{(hash(bytes(encoded_pubkey, "utf-8"))[:8]).hex()}', { + 'input': [encode_hex(pubkey)], + 'output': encode_hex(aggregate_pubkey), + } + + # Valid pubkeys aggregate_pubkey = spec.eth_aggregate_pubkeys(PUBKEYS) assert aggregate_pubkey == milagro_bls._AggregatePKs(PUBKEYS) - yield f'eth_aggregate_pubkeys_some_pubkeys', { + yield f'eth_aggregate_pubkeys_valid_pubkeys', { 'input': [encode_hex(pubkey) for pubkey in PUBKEYS], 'output': encode_hex(aggregate_pubkey), } @@ -377,16 +391,16 @@ def case06_eth_aggregate_pubkeys(): # Invalid pubkeys -- [ZERO_PUBKEY] 
expect_exception(spec.eth_aggregate_pubkeys, [ZERO_PUBKEY]) expect_exception(milagro_bls._AggregatePKs, [ZERO_PUBKEY]) - yield f'eth_aggregate_pubkeys_na_pubkey', { + yield f'eth_aggregate_pubkeys_zero_pubkey', { 'input': [encode_hex(ZERO_PUBKEY)], 'output': None, } # Invalid pubkeys -- G1 point at infinity - expect_exception(spec.eth_aggregate_pubkeys, [Z1_PUBKEY]) - expect_exception(milagro_bls._AggregatePKs, [Z1_PUBKEY]) + expect_exception(spec.eth_aggregate_pubkeys, [G1_POINT_AT_INFINITY]) + expect_exception(milagro_bls._AggregatePKs, [G1_POINT_AT_INFINITY]) yield f'eth_aggregate_pubkeys_infinity_pubkey', { - 'input': [encode_hex(Z1_PUBKEY)], + 'input': [encode_hex(G1_POINT_AT_INFINITY)], 'output': None, } @@ -450,31 +464,31 @@ def case07_eth_fast_aggregate_verify(): 'output': False, } - # NOTE: Unlike `FastAggregateVerify`, len(pubkeys) == 0 and signature == Z2_SIGNATURE is VALID - assert spec.eth_fast_aggregate_verify([], message, Z2_SIGNATURE) + # NOTE: Unlike `FastAggregateVerify`, len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY is VALID + assert spec.eth_fast_aggregate_verify([], message, G2_POINT_AT_INFINITY) yield f'eth_fast_aggregate_verify_na_pubkeys_and_infinity_signature', { 'input': { 'pubkeys': [], 'message': encode_hex(message), - 'signature': encode_hex(Z2_SIGNATURE), + 'signature': encode_hex(G2_POINT_AT_INFINITY), }, 'output': True, } # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00... - assert not spec.eth_fast_aggregate_verify([], message, NO_SIGNATURE) - yield f'eth_fast_aggregate_verify_na_pubkeys_and_na_signature', { + assert not spec.eth_fast_aggregate_verify([], message, ZERO_SIGNATURE) + yield f'eth_fast_aggregate_verify_na_pubkeys_and_zero_signature', { 'input': { 'pubkeys': [], 'message': encode_hex(message), - 'signature': encode_hex(NO_SIGNATURE), + 'signature': encode_hex(ZERO_SIGNATURE), }, 'output': False, } # Invalid pubkeys and signature -- pubkeys contains point at infinity pubkeys = PUBKEYS.copy() - pubkeys_with_infinity = pubkeys + [Z1_PUBKEY] + pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY] signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS] aggregate_signature = bls.Aggregate(signatures) assert not spec.eth_fast_aggregate_verify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature) From 6f56e33f00647bfc75cc752a1d62d8bee4b295af Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Thu, 5 Aug 2021 16:54:22 -0700 Subject: [PATCH 66/75] Update tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py Co-authored-by: Danny Ryan --- .../altair/epoch_processing/test_process_inactivity_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py index 5a334f28a..30359f822 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py @@ -285,7 +285,7 @@ def test_some_exited_full_random_leaking(spec, state): # mutations applied to their inactivity scores randomize_inactivity_scores(spec, state, rng=rng) - assert not get_exited_validators(spec, state) + assert not any(get_exited_validators(spec, state)) exited_indices = exit_validators(spec, state, exit_count, rng=rng) assert not get_exited_validators(spec, state) From 
170d7dc023e77cbdb60a119e768a364c565960f7 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Thu, 5 Aug 2021 16:54:29 -0700 Subject: [PATCH 67/75] Update tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py Co-authored-by: Danny Ryan --- .../altair/epoch_processing/test_process_inactivity_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py index 30359f822..9bc0f4841 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py @@ -287,7 +287,7 @@ def test_some_exited_full_random_leaking(spec, state): assert not any(get_exited_validators(spec, state)) exited_indices = exit_validators(spec, state, exit_count, rng=rng) - assert not get_exited_validators(spec, state) + assert not any(get_exited_validators(spec, state)) # advance the state to effect the exits target_epoch = max(state.validators[index].exit_epoch for index in exited_indices) From ad4445fa9e0609c8d449c7a880e44179a1421a8e Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 6 Aug 2021 16:39:35 +0800 Subject: [PATCH 68/75] Apply PR feedback from Danny and clean up the BLS test format docs --- tests/formats/bls/aggregate.md | 2 ++ tests/formats/bls/aggregate_verify.md | 13 ++++++++++--- tests/formats/bls/eth_aggregate_pubkeys.md | 2 +- tests/formats/bls/eth_fast_aggregate_verify.md | 13 ++++++++++--- tests/formats/bls/fast_aggregate_verify.md | 13 ++++++++++--- 5 files changed, 33 insertions(+), 10 deletions(-) diff --git a/tests/formats/bls/aggregate.md b/tests/formats/bls/aggregate.md index af8444540..81ce85fe6 100644 --- a/tests/formats/bls/aggregate.md +++ b/tests/formats/bls/aggregate.md @@ -14,6 +14,8 @@ output: BLS Signature -- expected output, single BLS signature or empty. - `BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`. - No output value if the input is invalid. +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + ## Condition The `aggregate` handler should aggregate the signatures in the `input`, and the result should match the expected `output`. diff --git a/tests/formats/bls/aggregate_verify.md b/tests/formats/bls/aggregate_verify.md index 3985de9f4..9b251af46 100644 --- a/tests/formats/bls/aggregate_verify.md +++ b/tests/formats/bls/aggregate_verify.md @@ -8,10 +8,17 @@ The test data is declared in a `data.yaml` file: ```yaml input: - pubkeys: List[bytes48] -- the pubkeys + pubkeys: List[BLS Pubkey] -- the pubkeys messages: List[bytes32] -- the messages - signature: bytes96 -- the signature to verify against pubkeys and messages -output: bool -- VALID or INVALID + signature: BLS Signature -- the signature to verify against pubkeys and messages +output: bool -- true (VALID) or false (INVALID) ``` +- `BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`. +- `BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`. + All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. 
+ +## Condition + +The `aggregate_verify` handler should verify the signature with pubkeys and messages in the `input`, and the result should match the expected `output`. diff --git a/tests/formats/bls/eth_aggregate_pubkeys.md b/tests/formats/bls/eth_aggregate_pubkeys.md index 86d0e3cd0..4f66adec2 100644 --- a/tests/formats/bls/eth_aggregate_pubkeys.md +++ b/tests/formats/bls/eth_aggregate_pubkeys.md @@ -8,7 +8,7 @@ The test data is declared in a `data.yaml` file: ```yaml input: List[BLS Pubkey] -- list of input BLS pubkeys -output: BLS Pubkey -- expected output, single BLS pubkeys or empty. +output: BLS Pubkey -- expected output, single BLS pubkey or empty. ``` - `BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`. diff --git a/tests/formats/bls/eth_fast_aggregate_verify.md b/tests/formats/bls/eth_fast_aggregate_verify.md index ddc1b5208..83b5484e0 100644 --- a/tests/formats/bls/eth_fast_aggregate_verify.md +++ b/tests/formats/bls/eth_fast_aggregate_verify.md @@ -8,10 +8,17 @@ The test data is declared in a `data.yaml` file: ```yaml input: - pubkeys: List[bytes48] -- the pubkey + pubkeys: List[BLS Pubkey] -- list of input BLS pubkeys message: bytes32 -- the message - signature: bytes96 -- the signature to verify against pubkeys and message -output: bool -- VALID or INVALID + signature: BLS Signature -- the signature to verify against pubkeys and message +output: bool -- true (VALID) or false (INVALID) ``` +- `BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`. +- `BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`. + All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `eth_fast_aggregate_verify` handler should verify the signature with pubkeys and message in the `input`, and the result should match the expected `output`. diff --git a/tests/formats/bls/fast_aggregate_verify.md b/tests/formats/bls/fast_aggregate_verify.md index 3366cbb79..38ea29bb5 100644 --- a/tests/formats/bls/fast_aggregate_verify.md +++ b/tests/formats/bls/fast_aggregate_verify.md @@ -8,10 +8,17 @@ The test data is declared in a `data.yaml` file: ```yaml input: - pubkeys: List[bytes48] -- the pubkey + pubkeys: List[BLS Pubkey] -- list of input BLS pubkeys message: bytes32 -- the message - signature: bytes96 -- the signature to verify against pubkeys and message -output: bool -- VALID or INVALID + signature: BLS Signature -- the signature to verify against pubkeys and message +output: bool -- true (VALID) or false (INVALID) ``` +- `BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`. +- `BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`. + All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `fast_aggregate_verify` handler should verify the signature with pubkeys and message in the `input`, and the result should match the expected `output`.
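To make the verify-handler formats concrete, the sketch below checks a single `fast_aggregate_verify` case end to end. This is a minimal illustration, assuming a py_ecc-backed verifier with the `FastAggregateVerify(pubkeys, message, signature)` call shape; the `data.yaml` path and the helper names here are hypothetical, not part of the test format.

```python
import yaml
from py_ecc.bls import G2ProofOfPossession as bls_pop


def decode_hex(value: str) -> bytes:
    # All byte(s) fields are hex strings prefixed with `0x`
    assert value.startswith('0x')
    return bytes.fromhex(value[2:])


def check_fast_aggregate_verify_case(path: str) -> None:
    # `path` points at one test case's data.yaml (hypothetical layout)
    with open(path) as f:
        case = yaml.safe_load(f)
    pubkeys = [decode_hex(pk) for pk in case['input']['pubkeys']]
    message = decode_hex(case['input']['message'])
    signature = decode_hex(case['input']['signature'])
    # Verify handlers always produce a boolean output (true == VALID);
    # there is no "no output" case, unlike aggregate/eth_aggregate_pubkeys.
    assert bls_pop.FastAggregateVerify(pubkeys, message, signature) == case['output']
```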
From 600f55ba7ff34a95756ba202537c46666a643e9d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 9 Aug 2021 17:50:56 -0600 Subject: [PATCH 69/75] add basic execution-layer p2p beacon_block validations --- specs/merge/p2p-interface.md | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/specs/merge/p2p-interface.md b/specs/merge/p2p-interface.md index 712a17549..c79d14b05 100644 --- a/specs/merge/p2p-interface.md +++ b/specs/merge/p2p-interface.md @@ -61,12 +61,32 @@ The Merge changes the type of the global beacon block topic. ##### `beacon_block` -The existing specification for this topic does not change from prior upgrades, -but the type of the payload does change to the (modified) `SignedBeaconBlock` found in the Merge. -This type changes due to the addition of `execution_payload` to the inner `BeaconBlockBody`. - +The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in the Merge. +Specifically, this type changes with the addition of `execution_payload` to the inner `BeaconBlockBody`. See the Merge [state transition document](./beacon-chain.md#beaconblockbody) for further details. +In addition to the gossip validations for this topic from prior specifications, +the following validations MUST pass before forwarding the `signed_beacon_block` on the network. +Alias `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`. +- If the merge is complete with respect to the head state -- i.e. `is_merge_complete(state)` -- + then validate the following: + - _[REJECT]_ The block's execution payload must be non-empty -- + i.e. `execution_payload != ExecutionPayload()`. +- If execution is enabled for the block -- i.e. `is_execution_enabled(state, block.body)` -- + then validate the following: + - _[REJECT]_ The block's execution payload timestamp is correct with respect to the slot + -- i.e. `execution_payload.timestamp == compute_time_at_slot(state, block.slot)`. + - _[REJECT]_ Gas used is less than or equal to the gas limit -- + i.e. `execution_payload.gas_used <= execution_payload.gas_limit`. + - _[REJECT]_ The execution payload block hash is not equal to the parent hash -- + i.e. `execution_payload.block_hash != execution_payload.parent_hash`. + - _[REJECT]_ The execution payload transaction list data is within expected size limits -- + the data MUST NOT be larger than the SSZ list-limit, + and a client MAY be more strict. + +*Note*: Additional [gossip validations](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#block-encoding-and-validity) +(see block "data validity" conditions) that rely more heavily on execution-layer state and logic are currently under consideration.
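Read together, the added conditions amount to a predicate roughly like the following sketch. This is a minimal illustration, assuming a `spec` module exposing the helpers named above (`is_merge_complete`, `is_execution_enabled`, `compute_time_at_slot`) and a `state` advanced to the block's slot; the pre-Merge validations and gossip plumbing are elided.

```python
def is_valid_merge_block_gossip(spec, state, signed_beacon_block) -> bool:
    block = signed_beacon_block.message
    execution_payload = block.body.execution_payload

    if spec.is_merge_complete(state):
        # [REJECT] Payload must be non-empty once the merge is complete
        if execution_payload == spec.ExecutionPayload():
            return False

    if spec.is_execution_enabled(state, block.body):
        # [REJECT] Payload timestamp must match the block's slot
        if execution_payload.timestamp != spec.compute_time_at_slot(state, block.slot):
            return False
        # [REJECT] Gas used must not exceed the gas limit
        if execution_payload.gas_used > execution_payload.gas_limit:
            return False
        # [REJECT] Block hash must differ from the parent hash
        if execution_payload.block_hash == execution_payload.parent_hash:
            return False
        # (The transaction list size bound is typically enforced by SSZ
        # decoding against the list-limit and is omitted here.)

    return True
```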
+ ### Transitioning the gossip See gossip transition details found in the [Altair document](../altair/p2p) for From ef71a4af1d35c0882a4dd584267b1d4d285976a3 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 10 Aug 2021 17:15:07 +0600 Subject: [PATCH 70/75] Polishing as per code review --- specs/merge/beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 590e2bc4f..8837c0ea3 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -13,7 +13,7 @@ - [Constants](#constants) - [Execution](#execution) - [Configuration](#configuration) - - [Genesis settings](#genesis-settings) + - [Genesis testing settings](#genesis-testing-settings) - [Containers](#containers) - [Extended containers](#extended-containers) - [`BeaconBlockBody`](#beaconblockbody) @@ -70,7 +70,7 @@ This patch adds transaction execution to the beacon chain as part of the Merge f ## Configuration -### Genesis settings +### Genesis testing settings *Note*: These configuration settings do not apply to the mainnet and are utilized only by pure Merge testing. @@ -314,7 +314,7 @@ def compute_base_fee_per_gas(payload: ExecutionPayload, parent: ExecutionPayload ```python def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None: - # Verify consistency of the parent hash, block number and random + # Verify consistency of the parent hash, block number, random, base fee per gas and gas limit if is_merge_complete(state): assert payload.parent_hash == state.latest_execution_payload_header.block_hash assert payload.block_number == state.latest_execution_payload_header.block_number + uint64(1) From d005fee67df0cdc9bed448ac44d6912c19f8dcbb Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Tue, 10 Aug 2021 13:48:26 +0200 Subject: [PATCH 71/75] sharding p2p code review fixes Co-authored-by: Danny Ryan --- specs/sharding/p2p-interface.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 4eb3f6f2e..e5394abc2 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -89,10 +89,10 @@ on the horizontal subnet or creating samples for it. Alias `blob = signed_blob.m - _[REJECT]_ The shard blob is for the correct subnet -- i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id` - _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination. -- _[REJECT]_ The blob is not too large, the data MUST NOT be larger than the SSZ list-limit, and a client MAY be more strict. +- _[REJECT]_ The blob is not too large -- the data MUST NOT be larger than the SSZ list-limit, and a client MAY apply stricter bounds. - _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. -- _[REJECT]_ The blob builder exists and has sufficient balance to back the fee payment. -- _[REJECT]_ The blob signature is valid for the aggregate of proposer and builder, `signed_blob.signature`, +- _[REJECT]_ The blob builder defined by `blob.builder_index` exists and has sufficient balance to back the fee payment. +- _[REJECT]_ The blob signature, `signed_blob.signature`, is valid for the aggregate of proposer and builder -- i.e. 
`bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_blob.signature)`. - _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's `slot` and `shard`, in the context of the current shuffling (defined by `blob.body.beacon_block_root`/`slot`). @@ -104,17 +104,17 @@ There are three additional global topics for Sharding. -- `shard_blob_header`: co-signed headers, to be included on-chain, and signaling builders to publish full data. +- `shard_blob_header`: co-signed headers to be included on-chain and to serve as a signal to the builder to publish full data. - `shard_blob_tx`: builder-signed headers, also known as "data transaction". -- `shard_proposer_slashing`: slashings of duplicate shard proposals +- `shard_proposer_slashing`: slashings of duplicate shard proposals. ##### `shard_blob_header` Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_blob_header` subnet. -Shard blob headers select shard blob bids by builders, +Shard blob headers select shard blob bids by builders and should be timely to ensure builders can publish the full shard blob before subsequent attestations. -The following validations MUST pass before forwarding the `signed_blob_header` on the network. Alias `header = signed_blob_header.message` +The following validations MUST pass before forwarding the `signed_blob_header` on the network. Alias `header = signed_blob_header.message`. - _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `header.slot <= current_slot` (a client MAY queue future headers for processing at the appropriate slot). - _[IGNORE]_ The header is new enough to still be processed -- i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)` - _[REJECT]_ The shard header is for an active shard -- i.e. `header.shard < get_active_shard_count(state, compute_epoch_at_slot(header.slot))` - _[REJECT]_ The `header.shard` MUST have a committee at the `header.slot` -- i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error - _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination. -- _[REJECT]_ The blob builder exists and has sufficient balance to back the fee payment. +- _[REJECT]_ The blob builder defined by `header.builder_index` exists and has sufficient balance to back the fee payment. -- _[REJECT]_ The header signature is valid for the aggregate of proposer and builder, `signed_blob_header.signature`, +- _[REJECT]_ The header signature, `signed_blob_header.signature`, is valid for the aggregate of proposer and builder -- i.e. `bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_blob_header.signature)`. - _[REJECT]_ The header is proposed by the expected `proposer_index` for the blob's `header.slot` and `header.shard` in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`). @@ -137,12 +137,12 @@ ##### `shard_blob_tx` -Shard data-transactions, in the form of a `SignedShardBlobHeader` is published to the global `shard_blob_tx` subnet. +Shard data-transactions in the form of a `SignedShardBlobHeader` are published to the global `shard_blob_tx` subnet. These shard blob headers are signed solely by the blob-builder. -The following validations MUST pass before forwarding the `signed_blob_header` on the network.
Alias `header = signed_blob_header.message` +The following validations MUST pass before forwarding the `signed_blob_header` on the network. Alias `header = signed_blob_header.message`. -- _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- +- _[IGNORE]_ The header is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `header.slot <= current_slot` (a client MAY queue future headers for processing at the appropriate slot). - _[IGNORE]_ The header is new enough to still be processed -- i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)` - _[REJECT]_ The shard header is for an active shard -- i.e. `header.shard < get_active_shard_count(state, compute_epoch_at_slot(header.slot))` - _[REJECT]_ The `header.shard` MUST have a committee at the `header.slot` -- i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error - _[IGNORE]_ The header is the first header with valid signature received for the `(header.builder_index, header.slot, header.shard)` combination. -- _[REJECT]_ The blob builder exists and has sufficient balance to back the fee payment. +- _[REJECT]_ The blob builder, defined by `header.builder_index`, exists and has sufficient balance to back the fee payment. - _[IGNORE]_ The header fee SHOULD be higher than previously seen headers for `(header.slot, header.shard)`, from any builder. Propagating nodes MAY increase fee increments in case of spam. -- _[REJECT]_ The header signature is valid for ONLY the builder, `signed_blob_header.signature`, +- _[REJECT]_ The header signature, `signed_blob_header.signature`, is valid for ONLY the builder -- i.e. `bls.Verify(builder_pubkey, blob_signing_root, signed_blob_header.signature)`. The signature is not an aggregate with the proposer. - _[REJECT]_ The header is designated for proposal by the expected `proposer_index` for the blob's `header.slot` and `header.shard` in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`). From da893c123e933ace3663f0306e19fb79d85b7d8f Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 10 Aug 2021 23:18:59 +0200 Subject: [PATCH 72/75] update p2p shard blob/header/tx propagation windows --- specs/sharding/p2p-interface.md | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index e5394abc2..93ff1e26d 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -36,6 +36,9 @@ The adjustments and additions for Shards are outlined in this document. | Name | Value | Description | | ---- | ----- | ----------- | | `SHARD_BLOB_SUBNET_COUNT` | `64` | The number of `shard_blob_{subnet_id}` subnets used in the gossipsub protocol. | +| `SHARD_TX_PROPAGATION_GRACE_SLOTS` | `4` | The number of slots for a late transaction to propagate. | +| `SHARD_TX_PROPAGATION_BUFFER_SLOTS` | `8` | The number of slots for an early transaction to propagate. | + ## Gossip domain @@ -77,9 +80,9 @@ def compute_subnet_for_shard_blob(state: BeaconState, slot: Slot, shard: Shard) The following validations MUST pass before forwarding the `signed_blob`, on the horizontal subnet or creating samples for it. Alias `blob = signed_blob.message`. -- _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. validate that `blob.slot <= current_slot` - (a client MAY queue future blobs for processing at the appropriate slot).
+- _[IGNORE]_ The `blob` is published at most 1 slot early (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- + i.e. validate that `blob.slot <= current_slot + 1` + (a client MAY queue future blobs for propagation at the appropriate slot). - _[IGNORE]_ The `blob` is new enough to still be processed -- i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)` - _[REJECT]_ The shard blob is for an active shard -- @@ -116,15 +119,15 @@ and should be timely to ensure builders can publish the full shard blob before s The following validations MUST pass before forwarding the `signed_blob_header` on the network. Alias `header = signed_blob_header.message`. -- _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. validate that `header.slot <= current_slot` - (a client MAY queue future headers for processing at the appropriate slot). +- _[IGNORE]_ The `header` is published at most 1 slot early (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- + i.e. validate that `header.slot <= current_slot + 1` + (a client MAY queue future headers for propagation at the appropriate slot). - _[IGNORE]_ The header is new enough to still be processed -- i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)` - _[REJECT]_ The shard header is for an active shard -- i.e. `header.shard < get_active_shard_count(state, compute_epoch_at_slot(header.slot))` - _[REJECT]_ The `header.shard` MUST have a committee at the `header.slot` -- - i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error + i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error. - _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination. - _[REJECT]_ The blob builder defined by `header.builder_index` exists and has sufficient balance to back the fee payment. - _[REJECT]_ The header signature, `signed_blob_header.signature`, is valid for the aggregate of proposer and builder -- i.e. `bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_blob_header.signature)`. - _[REJECT]_ The header is proposed by the expected `proposer_index` for the blob's `header.slot` and `header.shard` in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`). @@ -142,15 +145,15 @@ These shard blob headers are signed solely by the blob-builder. The following validations MUST pass before forwarding the `signed_blob_header` on the network. Alias `header = signed_blob_header.message`. -- _[IGNORE]_ The header is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. validate that `header.slot <= current_slot` - (a client MAY queue future headers for processing at the appropriate slot). -- _[IGNORE]_ The header is new enough to still be processed -- - i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)` +- _[IGNORE]_ The header is not propagating more than `SHARD_TX_PROPAGATION_BUFFER_SLOTS` slots ahead of time -- + i.e. validate that `header.slot <= current_slot + SHARD_TX_PROPAGATION_BUFFER_SLOTS`. +- _[IGNORE]_ The header is not propagating more than `SHARD_TX_PROPAGATION_GRACE_SLOTS` slots late -- + i.e. validate that `header.slot + SHARD_TX_PROPAGATION_GRACE_SLOTS >= current_slot`. - _[REJECT]_ The shard header is for an active shard -- i.e. `header.shard < get_active_shard_count(state, compute_epoch_at_slot(header.slot))` - _[REJECT]_ The `header.shard` MUST have a committee at the `header.slot` -- i.e.
validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error. +- _[IGNORE]_ The header is not stale -- i.e. the corresponding shard proposer has not already selected a header for `(header.slot, header.shard)`. - _[IGNORE]_ The header is the first header with valid signature received for the `(header.builder_index, header.slot, header.shard)` combination. - _[REJECT]_ The blob builder, defined by `header.builder_index`, exists and has sufficient balance to back the fee payment. - _[IGNORE]_ The header fee SHOULD be higher than previously seen headers for `(header.slot, header.shard)`, from any builder. Propagating nodes MAY increase fee increments in case of spam. -- _[REJECT]_ The header signature, `signed_blob_header.signature`, is valid for ONLY the builder -- i.e. `bls.Verify(builder_pubkey, blob_signing_root, signed_blob_header.signature)`. The signature is not an aggregate with the proposer. - _[REJECT]_ The header is designated for proposal by the expected `proposer_index` for the blob's `header.slot` and `header.shard` in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`). From 68db644ae97199faad5b1f3b183f40acd386bb3a Mon Sep 17 00:00:00 2001 From: Anton Nashatyrev Date: Wed, 4 Aug 2021 15:05:21 +0300 Subject: [PATCH 73/75] Rename DataCommitment.length field to samples_count and fix degree_proof validation --- specs/sharding/beacon-chain.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index bbb4ae930..9269d1461 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -242,7 +242,7 @@ class DataCommitment(Container): # KZG10 commitment to the data point: BLSCommitment # Length of the data in samples - length: uint64 + samples_count: uint64 ``` ### `AttestedDataCommitment` @@ -266,7 +266,7 @@ Unique, signing different bodies as shard proposer for the same `(slot, shard)` class ShardBlobBody(Container): # The actual data commitment commitment: DataCommitment - # Proof that the degree < commitment.length + # Proof that the degree < commitment.samples_count * POINTS_PER_SAMPLE degree_proof: BLSCommitment # The actual data. Should match the commitment and degree proof. data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOB] @@ -289,7 +289,7 @@ to avoid an extra network roundtrip between proposer and builder, to include the class ShardBlobBodySummary(Container): # The actual data commitment commitment: DataCommitment - # Proof that the degree < commitment.length + # Proof that the degree < commitment.samples_count * POINTS_PER_SAMPLE degree_proof: BLSCommitment # Hash-tree-root as summary of the data field data_root: Root @@ -719,16 +719,17 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade # Verify the length by verifying the degree. body_summary = header.body_summary - if body_summary.commitment.length == 0: + points_count = body_summary.commitment.samples_count * POINTS_PER_SAMPLE + if points_count == 0: assert body_summary.degree_proof == G1_SETUP[0] assert ( bls.Pairing(body_summary.degree_proof, G2_SETUP[0]) - == bls.Pairing(body_summary.commitment.point, G2_SETUP[-body_summary.commitment.length]) + == bls.Pairing(body_summary.commitment.point, G2_SETUP[-points_count]) ) # Charge EIP 1559 fee, builder pays for opportunity, and is responsible for later availability, # or fail to publish at their own expense.
- samples = body_summary.commitment.length + samples = body_summary.commitment.samples_count # TODO: overflows, need bigger int type max_fee = body_summary.max_fee_per_sample * samples From 301157c0273b9046d2c86760064769972e18cf98 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Fri, 13 Aug 2021 14:21:37 +0600 Subject: [PATCH 74/75] Change base_fee_per_gas type to Bytes32 --- specs/merge/beacon-chain.md | 35 ++----------------- .../test/helpers/execution_payload.py | 3 +- 2 files changed, 4 insertions(+), 34 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 8837c0ea3..0d9f03079 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -34,7 +34,6 @@ - [Block processing](#block-processing) - [Execution payload processing](#execution-payload-processing) - [`is_valid_gas_limit`](#is_valid_gas_limit) - - [`compute_base_fee_per_gas`](#compute_base_fee_per_gas) - [`process_execution_payload`](#process_execution_payload) - [Testing](#testing) @@ -65,8 +64,6 @@ This patch adds transaction execution to the beacon chain as part of the Merge f | `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) | | `GAS_LIMIT_DENOMINATOR` | `uint64(2**10)` (= 1,024) | | `MIN_GAS_LIMIT` | `uint64(5000)` (= 5,000) | -| `BASE_FEE_MAX_CHANGE_DENOMINATOR` | `uint64(2**3)` (= 8) | -| `ELASTICITY_MULTIPLIER` | `uint64(2**1)` (= 2) | ## Configuration @@ -77,7 +74,7 @@ This patch adds transaction execution to the beacon chain as part of the Merge f | Name | Value | | - | - | | `GENESIS_GAS_LIMIT` | `uint64(30000000)` (= 30,000,000) | -| `GENESIS_BASE_FEE_PER_GAS` | `uint64(1000000000)` (= 1,000,000,000) | +| `GENESIS_BASE_FEE_PER_GAS` | `Bytes32('0x000000000000000000000000000000000000000000000000000000003b9aca00')` (= 1,000,000,000) | ## Containers @@ -160,7 +157,7 @@ class ExecutionPayload(Container): gas_limit: uint64 gas_used: uint64 timestamp: uint64 - base_fee_per_gas: uint64 # base fee introduced in EIP-1559 + base_fee_per_gas: Bytes32 # base fee introduced in EIP-1559 # Extra payload fields block_hash: Hash32 # Hash of execution block transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] @@ -181,7 +178,7 @@ class ExecutionPayloadHeader(Container): gas_limit: uint64 gas_used: uint64 timestamp: uint64 - base_fee_per_gas: uint64 + base_fee_per_gas: Bytes32 # Extra payload fields block_hash: Hash32 # Hash of execution block transactions_root: Root @@ -283,31 +280,6 @@ def is_valid_gas_limit(payload: ExecutionPayload, parent: ExecutionPayloadHeader return True ``` -#### `compute_base_fee_per_gas` - -```python -def compute_base_fee_per_gas(payload: ExecutionPayload, parent: ExecutionPayloadHeader) -> uint64: - parent_gas_target = parent.gas_limit // ELASTICITY_MULTIPLIER - parent_base_fee_per_gas = parent.base_fee_per_gas - parent_gas_used = payload.gas_used - - if parent_gas_used == parent_gas_target: - return parent_base_fee_per_gas - elif parent_gas_used > parent_gas_target: - gas_used_delta = parent_gas_used - parent_gas_target - base_fee_per_gas_delta = max( - parent_base_fee_per_gas * gas_used_delta // parent_gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR, - 1, - ) - return parent_base_fee_per_gas + base_fee_per_gas_delta - else: - gas_used_delta = parent_gas_target - parent_gas_used - base_fee_per_gas_delta = ( - parent_base_fee_per_gas * gas_used_delta // parent_gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR - ) - return parent_base_fee_per_gas - base_fee_per_gas_delta # This subtraction can't underflow -``` - #### `process_execution_payload` *Note:* 
This function depends on the `process_randao` function call as it retrieves the most recent randao mix from the `state`. Implementations that are considering parallel processing of execution payload with respect to beacon chain state transition function should work around this dependency. @@ -319,7 +291,6 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe assert payload.parent_hash == state.latest_execution_payload_header.block_hash assert payload.block_number == state.latest_execution_payload_header.block_number + uint64(1) assert payload.random == get_randao_mix(state, get_current_epoch(state)) - assert payload.base_fee_per_gas == compute_base_fee_per_gas(payload, state.latest_execution_payload_header) assert is_valid_gas_limit(payload, state.latest_execution_payload_header) # Verify timestamp assert payload.timestamp == compute_timestamp_at_slot(state, state.slot) diff --git a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py index ce653a986..43be965a5 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py +++ b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py @@ -20,11 +20,10 @@ def build_empty_execution_payload(spec, state, randao_mix=None): gas_limit=latest.gas_limit, # retain same limit gas_used=0, # empty block, 0 gas timestamp=timestamp, - base_fee_per_gas=spec.uint64(0), + base_fee_per_gas=latest.base_fee_per_gas, # retain same base_fee block_hash=spec.Hash32(), transactions=empty_txs, ) - payload.base_fee_per_gas = spec.compute_base_fee_per_gas(payload, latest) # TODO: real RLP + block hash logic would be nice, requires RLP and keccak256 dependency however. payload.block_hash = spec.Hash32(spec.hash(payload.hash_tree_root() + b"FAKE RLP HASH")) From e3cad13497f10fef5a1c16446d116e074e72e252 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 16 Aug 2021 18:35:11 +0600 Subject: [PATCH 75/75] Make base_fee_per_gas little-endian --- specs/merge/beacon-chain.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 0d9f03079..5defa6bcb 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -74,7 +74,7 @@ This patch adds transaction execution to the beacon chain as part of the Merge f | Name | Value | | - | - | | `GENESIS_GAS_LIMIT` | `uint64(30000000)` (= 30,000,000) | -| `GENESIS_BASE_FEE_PER_GAS` | `Bytes32('0x000000000000000000000000000000000000000000000000000000003b9aca00')` (= 1,000,000,000) | +| `GENESIS_BASE_FEE_PER_GAS` | `Bytes32('0x00ca9a3b00000000000000000000000000000000000000000000000000000000')` (= 1,000,000,000) | ## Containers @@ -144,6 +144,8 @@ class BeaconState(Container): #### `ExecutionPayload` +*Note*: The `base_fee_per_gas` field is serialized in little-endian byte order. + ```python class ExecutionPayload(Container): # Execution block header fields @@ -157,7 +159,7 @@ class ExecutionPayload(Container): gas_limit: uint64 gas_used: uint64 timestamp: uint64 - base_fee_per_gas: Bytes32 # base fee introduced in EIP-1559 + base_fee_per_gas: Bytes32 # base fee introduced in EIP-1559, little-endian serialized # Extra payload fields block_hash: Hash32 # Hash of execution block transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
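As a sanity check, the little-endian `Bytes32` constant above and the big-endian one it replaces decode to the same 1,000,000,000 wei base fee; a few lines of plain Python (illustrative only) make the two byte orders explicit.

```python
# Both encodings of GENESIS_BASE_FEE_PER_GAS represent 10**9 (0x3b9aca00).
value = 10**9
assert value.to_bytes(32, 'little').hex() == '00ca9a3b' + '00' * 28
assert value.to_bytes(32, 'big').hex() == '00' * 28 + '3b9aca00'
```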