Merge pull request #3161 from ethereum/dev

release v1.3.0-alpha.2
Danny Ryan 2022-12-14 12:33:22 -07:00 committed by GitHub
commit faa9fea420
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
60 changed files with 1183 additions and 720 deletions


@ -105,12 +105,12 @@ install_test:
# Testing against `minimal` config by default
test: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov=eth2spec.eip4844.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
# Testing against `minimal` config by default
find_test: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov=eth2spec.eip4844.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
citest: pyspec
mkdir -p $(TEST_REPORT_DIR);
@ -142,8 +142,8 @@ codespell:
lint: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
&& pylint --disable=all --enable unused-argument ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella \
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella
&& pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/eip4844 \
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.eip4844
lint_generators: pyspec
. venv/bin/activate; cd $(TEST_GENERATORS_DIR); \
@ -202,8 +202,8 @@ gen_kzg_setups:
if ! test -d venv; then python3 -m venv venv; fi; \
. venv/bin/activate; \
pip3 install -r requirements.txt; \
python3 ./gen_kzg_trusted_setups.py --secret=1337 --length=4 --output-dir ${CURRENT_DIR}/presets/minimal/trusted_setups; \
python3 ./gen_kzg_trusted_setups.py --secret=1337 --length=4096 --output-dir ${CURRENT_DIR}/presets/mainnet/trusted_setups
python3 ./gen_kzg_trusted_setups.py --secret=1337 --g1-length=4 --g2-length=65 --output-dir ${CURRENT_DIR}/presets/minimal/trusted_setups; \
python3 ./gen_kzg_trusted_setups.py --secret=1337 --g1-length=4096 --g2-length=65 --output-dir ${CURRENT_DIR}/presets/mainnet/trusted_setups
# For any generator, build it using the run_generator function.
# (creation of output dir is a dependency)


@ -24,7 +24,7 @@ Features are researched and developed in parallel, and then consolidated into se
### In-development Specifications
| Code Name or Topic | Specs | Notes |
| - | - | - |
| Capella (tentative) | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Validator additions](specs/capella/validator.md)</li></ul></ul> |
| Capella (tentative) | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Validator additions](specs/capella/validator.md)</li><li>[P2P networking](specs/capella/p2p-interface.md)</li></ul></ul> |
| EIP4844 (tentative) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/eip4844/beacon-chain.md)</li><li>[EIP-4844 fork](specs/eip4844/fork.md)</li><li>[Polynomial commitments](specs/eip4844/polynomial-commitments.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/eip4844/validator.md)</li><li>[P2P networking](specs/eip4844/p2p-interface.md)</li></ul></ul> |
| Sharding (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/sharding/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[P2P networking](specs/sharding/p2p-interface.md)</li></ul></ul> |
| Custody Game (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/custody_game/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/custody_game/validator.md)</li></ul></ul> | Dependent on sharding |


@ -11,3 +11,8 @@ warn_unused_configs = True
warn_redundant_casts = True
ignore_missing_imports = True
# pylint
[MESSAGES CONTROL]
disable = all
enable = unused-argument


@ -10,3 +10,8 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16
# ---------------------------------------------------------------
# 2**4 (= 16) withdrawals
MAX_WITHDRAWALS_PER_PAYLOAD: 16
# Withdrawals processing
# ---------------------------------------------------------------
# 2**14 (= 16384) validators
MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384


@ -4,5 +4,5 @@
# ---------------------------------------------------------------
# `uint64(4096)`
FIELD_ELEMENTS_PER_BLOB: 4096
# `uint64(2**4)` (= 16)
MAX_BLOBS_PER_BLOCK: 16
# `uint64(2**2)` (= 4)
MAX_BLOBS_PER_BLOCK: 4

File diff suppressed because one or more lines are too long


@ -10,3 +10,8 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16
# ---------------------------------------------------------------
# [customized] 2**2 (= 4)
MAX_WITHDRAWALS_PER_PAYLOAD: 4
# Withdrawals processing
# ---------------------------------------------------------------
# [customized] 2**4 (= 16) validators
MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16


@ -4,5 +4,5 @@
# ---------------------------------------------------------------
# [customized]
FIELD_ELEMENTS_PER_BLOB: 4
# `uint64(2**4)` (= 16)
MAX_BLOBS_PER_BLOCK: 16
# `uint64(2**2)` (= 4)
MAX_BLOBS_PER_BLOCK: 4

File diff suppressed because one or more lines are too long


@ -18,11 +18,18 @@ if __name__ == '__main__':
help='the secret of trusted setup',
)
parser.add_argument(
"--length",
dest="length",
"--g1-length",
dest="g1_length",
type=int,
required=True,
help='the length of trusted setup',
help='the length of G1 trusted setup',
)
parser.add_argument(
"--g2-length",
dest="g2_length",
type=int,
required=True,
help='the length of G2 trusted setup',
)
parser.add_argument(
"-o",
@ -33,4 +40,4 @@ if __name__ == '__main__':
)
args = parser.parse_args()
dump_kzg_trusted_setup_files(args.secret, args.length, args.output_dir)
dump_kzg_trusted_setup_files(args.secret, args.g1_length, args.g2_length, args.output_dir)
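For context, a KZG trusted setup of the requested lengths is conventionally the consecutive powers of the secret in G1 and G2 ("powers of tau"). A minimal sketch using py_ecc, illustrative only: the script's exact output format may differ, and `mock_trusted_setup` is a hypothetical name.

```python
from py_ecc.optimized_bls12_381 import G1, G2, curve_order, multiply


def mock_trusted_setup(secret: int, g1_length: int, g2_length: int):
    # Powers of the secret in G1 and G2; exponents reduced mod the group order.
    setup_g1 = [multiply(G1, pow(secret, i, curve_order)) for i in range(g1_length)]
    setup_g2 = [multiply(G2, pow(secret, i, curve_order)) for i in range(g2_length)]
    return setup_g1, setup_g2
```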


@ -588,6 +588,7 @@ class NoopExecutionEngine(ExecutionEngine):
pass
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload:
# pylint: disable=unused-argument
raise NotImplementedError("no default block production")
@ -643,12 +644,14 @@ T = TypeVar('T') # For generic function
def no_op(fn): # type: ignore
# pylint: disable=unused-argument
def wrapper(*args, **kw): # type: ignore
return None
return wrapper
def get_empty_list_result(fn): # type: ignore
# pylint: disable=unused-argument
def wrapper(*args, **kw): # type: ignore
return []
return wrapper
@ -663,7 +666,8 @@ get_expected_withdrawals = get_empty_list_result(get_expected_withdrawals)
# End
#
def retrieve_blobs_sidecar(slot: Slot, beacon_block_root: Root) -> Optional[BlobsSidecar]:
def retrieve_blobs_sidecar(slot: Slot, beacon_block_root: Root) -> PyUnion[BlobsSidecar, str]:
# pylint: disable=unused-argument
return "TEST"'''
@classmethod
@ -682,8 +686,8 @@ spec_builders = {
}
def is_spec_defined_type(value: str) -> bool:
return value.startswith(('ByteList', 'Union', 'Vector', 'List'))
def is_byte_vector(value: str) -> bool:
return value.startswith(('ByteVector'))
def objects_to_spec(preset_name: str,
@ -696,17 +700,8 @@ def objects_to_spec(preset_name: str,
new_type_definitions = (
'\n\n'.join(
[
f"class {key}({value}):\n pass\n"
f"class {key}({value}):\n pass\n" if not is_byte_vector(value) else f"class {key}({value}): # type: ignore\n pass\n"
for key, value in spec_object.custom_types.items()
if not is_spec_defined_type(value)
]
)
+ ('\n\n' if len([key for key, value in spec_object.custom_types.items() if is_spec_defined_type(value)]) > 0 else '')
+ '\n\n'.join(
[
f"{key} = {value}\n"
for key, value in spec_object.custom_types.items()
if is_spec_defined_type(value)
]
)
)
@ -1178,6 +1173,7 @@ setup(
"py_ecc==6.0.0",
"milagro_bls_binding==1.9.0",
"remerkleable==0.1.25",
"trie==2.0.2",
RUAMEL_YAML_VERSION,
"lru-dict==1.1.8",
MARKO_VERSION,


@ -523,6 +523,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
if bls.Verify(pubkey, signing_root, deposit.data.signature):
state.validators.append(get_validator_from_deposit(deposit))
state.balances.append(amount)
# [New in Altair]
state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000))
state.current_epoch_participation.append(ParticipationFlags(0b0000_0000))
state.inactivity_scores.append(uint64(0))


@ -40,10 +40,19 @@ Full nodes are expected to derive light client data from historic blocks and sta
### `create_light_client_bootstrap`
To form a `LightClientBootstrap`, the following objects are needed:
- `state`: the post state of any post-Altair block
- `block`: the corresponding block
```python
def create_light_client_bootstrap(state: BeaconState) -> LightClientBootstrap:
def create_light_client_bootstrap(state: BeaconState,
block: SignedBeaconBlock) -> LightClientBootstrap:
assert compute_epoch_at_slot(state.slot) >= ALTAIR_FORK_EPOCH
assert state.slot == state.latest_block_header.slot
header = state.latest_block_header.copy()
header.state_root = hash_tree_root(state)
assert hash_tree_root(header) == hash_tree_root(block.message)
return LightClientBootstrap(
header=BeaconBlockHeader(
@ -54,7 +63,7 @@ def create_light_client_bootstrap(state: BeaconState) -> LightClientBootstrap:
body_root=state.latest_block_header.body_root,
),
current_sync_committee=state.current_sync_committee,
current_sync_committee_branch=compute_merkle_proof_for_state(state, CURRENT_SYNC_COMMITTEE_INDEX)
current_sync_committee_branch=compute_merkle_proof_for_state(state, CURRENT_SYNC_COMMITTEE_INDEX),
)
```
@ -69,13 +78,15 @@ Blocks are considered to be epoch boundary blocks if their block root can occur
To form a `LightClientUpdate`, the following historical states and blocks are needed:
- `state`: the post state of any block with a post-Altair parent block
- `block`: the corresponding block
- `attested_state`: the post state of the block referred to by `block.parent_root`
- `attested_state`: the post state of `attested_block`
- `attested_block`: the block referred to by `block.parent_root`
- `finalized_block`: the block referred to by `attested_state.finalized_checkpoint.root`, if locally available (may be unavailable, e.g., when using checkpoint sync, or if it was pruned locally)
```python
def create_light_client_update(state: BeaconState,
block: SignedBeaconBlock,
attested_state: BeaconState,
attested_block: SignedBeaconBlock,
finalized_block: Optional[SignedBeaconBlock]) -> LightClientUpdate:
assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
@ -89,8 +100,8 @@ def create_light_client_update(state: BeaconState,
assert attested_state.slot == attested_state.latest_block_header.slot
attested_header = attested_state.latest_block_header.copy()
attested_header.state_root = hash_tree_root(attested_state)
assert hash_tree_root(attested_header) == block.message.parent_root
update_attested_period = compute_sync_committee_period_at_slot(attested_header.slot)
assert hash_tree_root(attested_header) == hash_tree_root(attested_block.message) == block.message.parent_root
update_attested_period = compute_sync_committee_period_at_slot(attested_block.message.slot)
# `next_sync_committee` is only useful if the message is signed by the current sync committee
if update_attested_period == update_signature_period:


@ -138,7 +138,7 @@ Requests the `LightClientBootstrap` structure corresponding to a given post-Alta
The request MUST be encoded as an SSZ-field.
Peers SHOULD provide results as defined in [`create_light_client_bootstrap`](./full-node.md#create_light_client_bootstrap). To fulfill a request, the requested block's post state needs to be known.
Peers SHOULD provide results as defined in [`create_light_client_bootstrap`](./full-node.md#create_light_client_bootstrap). To fulfill a request, the requested block and its post state need to be known.
When a `LightClientBootstrap` instance cannot be produced for a given block root, peers SHOULD respond with error code `3: ResourceUnavailable`.
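A minimal serving sketch, assuming hypothetical `get_block_by_root` / `get_post_state_by_block_root` accessors on the node's storage (these names are not part of the spec):

```python
def serve_light_client_bootstrap(store, block_root: Root) -> LightClientBootstrap:
    # Hypothetical storage accessors; any backend that can return the block and its post state works.
    block = store.get_block_by_root(block_root)              # SignedBeaconBlock
    state = store.get_post_state_by_block_root(block_root)   # post BeaconState of that block
    if block is None or state is None:
        # Maps to response error code `3: ResourceUnavailable`
        raise LookupError("LightClientBootstrap unavailable for this block root")
    return create_light_client_bootstrap(state, block)
```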


@ -8,11 +8,11 @@
- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
- [Domain types](#domain-types)
- [Preset](#preset)
- [Max operations per block](#max-operations-per-block)
- [Execution](#execution)
- [Withdrawals processing](#withdrawals-processing)
- [Containers](#containers)
- [New containers](#new-containers)
- [`Withdrawal`](#withdrawal)
@ -58,8 +58,6 @@ We define the following Python custom types for type hinting and readability:
| - | - | - |
| `WithdrawalIndex` | `uint64` | an index of a `Withdrawal` |
## Constants
### Domain types
| Name | Value |
@ -80,6 +78,12 @@ We define the following Python custom types for type hinting and readability:
| - | - | - |
| `MAX_WITHDRAWALS_PER_PAYLOAD` | `uint64(2**4)` (= 16) | Maximum amount of withdrawals allowed in each payload |
### Withdrawals processing
| Name | Value |
| - | - |
| `MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP` | `uint64(2**14)` (= 16,384) |
## Containers
### New containers
@ -288,7 +292,8 @@ def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]:
withdrawal_index = state.next_withdrawal_index
validator_index = state.next_withdrawal_validator_index
withdrawals: List[Withdrawal] = []
for _ in range(len(state.validators)):
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
for _ in range(bound):
validator = state.validators[validator_index]
balance = state.balances[validator_index]
if is_fully_withdrawable_validator(validator, balance, epoch):
@ -323,10 +328,21 @@ def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
assert withdrawal == expected_withdrawal
decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
if len(expected_withdrawals) > 0:
# Update the next withdrawal index if this block contained withdrawals
if len(expected_withdrawals) != 0:
latest_withdrawal = expected_withdrawals[-1]
state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
next_validator_index = ValidatorIndex((latest_withdrawal.validator_index + 1) % len(state.validators))
# Update the next validator index to start the next withdrawal sweep
if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
# Next sweep starts after the latest withdrawal's validator index
next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
state.next_withdrawal_validator_index = next_validator_index
else:
# Advance sweep by the max length of the sweep if there was not a full set of withdrawals
next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
next_validator_index = ValidatorIndex(next_index % len(state.validators))
state.next_withdrawal_validator_index = next_validator_index
```


@ -112,10 +112,10 @@ Up to `MAX_BLS_TO_EXECUTION_CHANGES`, [`BLSToExecutionChange`](./beacon-chain.md
## Enabling validator withdrawals
Validator balances are fully or partially withdrawn via an automatic process.
Validator balances are withdrawn periodically via an automatic process. For exited validators, the full balance is withdrawn. For active validators, the balance in excess of `MAX_EFFECTIVE_BALANCE` is withdrawn.
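A minimal sketch of the two cases, assuming the Capella beacon-chain helpers `is_fully_withdrawable_validator` and `is_partially_withdrawable_validator` (the latter is not shown in this diff):

```python
def withdrawal_amount_for(spec, state, validator_index, epoch):
    # Illustrative only: mirrors the distinction described above.
    validator = state.validators[validator_index]
    balance = state.balances[validator_index]
    if spec.is_fully_withdrawable_validator(validator, balance, epoch):
        return balance  # exited validator: withdraw the full balance
    if spec.is_partially_withdrawable_validator(validator, balance):
        return balance - spec.MAX_EFFECTIVE_BALANCE  # active validator: withdraw the excess
    return 0  # not withdrawable in this sweep
```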
For validators, there is one prerequisite for this automated process:
withdrawal credentials pointing to an execution layer address, i.e. having an `ETH1_ADDRESS_WITHDRAWAL_PREFIX`.
There is one prerequisite for this automated process:
the validator's withdrawal credentials pointing to an execution layer address, i.e. having an `ETH1_ADDRESS_WITHDRAWAL_PREFIX`.
If a validator has a `BLS_WITHDRAWAL_PREFIX` withdrawal credential prefix, to participate in withdrawals the validator must
create a one-time message to change their withdrawal credential from the version authenticated with a BLS key to the


@ -55,7 +55,7 @@ This upgrade adds blobs to the beacon chain as part of EIP-4844. This is an exte
| Name | Value |
| - | - |
| `BLOB_TX_TYPE` | `uint8(0x05)` |
| `VERSIONED_HASH_VERSION_KZG` | `Bytes1(0x01)` |
| `VERSIONED_HASH_VERSION_KZG` | `Bytes1('0x01')` |
## Preset
@ -63,7 +63,7 @@ This upgrade adds blobs to the beacon chain as part of EIP-4844. This is an exte
| Name | Value |
| - | - |
| `MAX_BLOBS_PER_BLOCK` | `uint64(2**4)` (= 16) |
| `MAX_BLOBS_PER_BLOCK` | `uint64(2**2)` (= 4) |
## Configuration
@ -168,17 +168,19 @@ The implementation of `is_data_available` is meant to change with later sharding
Initially, it requires every verifying actor to retrieve the matching `BlobsSidecar`,
and validate the sidecar with `validate_blobs_sidecar`.
Without the sidecar the block may be processed further optimistically,
but MUST NOT be considered valid until a valid `BlobsSidecar` has been downloaded.
The block MUST NOT be considered valid until a valid `BlobsSidecar` has been downloaded.
```python
def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
# `retrieve_blobs_sidecar` is implementation dependent, raises an exception if not available.
sidecar = retrieve_blobs_sidecar(slot, beacon_block_root)
if sidecar == "TEST":
return True # For testing; remove once we have a way to inject `BlobsSidecar` into tests
validate_blobs_sidecar(slot, beacon_block_root, blob_kzg_commitments, sidecar)
# For testing, `retrieve_blobs_sidecar` returns "TEST".
# TODO: Remove it once we have a way to inject `BlobsSidecar` into tests.
if isinstance(sidecar, str):
return True
validate_blobs_sidecar(slot, beacon_block_root, blob_kzg_commitments, sidecar)
return True
```
@ -216,7 +218,7 @@ def tx_peek_blob_versioned_hashes(opaque_tx: Transaction) -> Sequence[VersionedH
```python
def verify_kzg_commitments_against_transactions(transactions: Sequence[Transaction],
kzg_commitments: Sequence[KZGCommitment]) -> bool:
all_versioned_hashes = []
all_versioned_hashes: List[VersionedHash] = []
for tx in transactions:
if tx[0] == BLOB_TX_TYPE:
all_versioned_hashes += tx_peek_blob_versioned_hashes(tx)
@ -239,7 +241,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
process_sync_aggregate(state, block.body.sync_aggregate)
process_blob_kzg_commitments(state, block.body) # [New in EIP-4844]
# New in EIP-4844, note: Can sync optimistically without this condition, see note on `is_data_available`
# New in EIP-4844
assert is_data_available(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments)
```
@ -283,7 +285,8 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
#### Blob KZG commitments
```python
def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody):
def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> None:
# pylint: disable=unused-argument
assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments)
```
@ -344,8 +347,9 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
```
### Disabling Withdrawals
During testing we avoid Capella-specific updates to the state transition. We do this by replacing the following functions with a no-op implementation:
- `process_withdrawals`
- `process_bls_to_execution_change`
The `get_expected_withdrawals` function is also modified to return an empty withdrawals list. As such, the PayloadAttributes used to update forkchoice does not contain withdrawals.
The `get_expected_withdrawals` function is also modified to return an empty withdrawals list. As such, the `PayloadAttributes` used to update forkchoice does not contain withdrawals.
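For reference, this matches the decorator-based overrides in the spec builder earlier in this diff; a minimal sketch of the testing-only wiring:

```python
def no_op(fn):  # type: ignore
    def wrapper(*args, **kw):  # type: ignore
        return None
    return wrapper


def get_empty_list_result(fn):  # type: ignore
    def wrapper(*args, **kw):  # type: ignore
        return []
    return wrapper


# Disable Capella-specific state-transition logic during EIP-4844 testing.
process_withdrawals = no_op(process_withdrawals)
process_bls_to_execution_change = no_op(process_bls_to_execution_change)
get_expected_withdrawals = get_empty_list_result(get_expected_withdrawals)
```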


@ -58,7 +58,7 @@ def compute_fork_version(epoch: Epoch) -> Version:
### Fork trigger
TBD. This fork is defined for testing purposes; the EIP may be combined with other consensus-layer upgrades.
For now we assume the condition will be triggered at epoch `EIP4844_FORK_EPOCH`.
For now, we assume the condition will be triggered at epoch `EIP4844_FORK_EPOCH`.
Note that for the pure EIP-4844 networks, we don't apply `upgrade_to_eip4844` since it starts with EIP-4844 version logic.


@ -61,7 +61,7 @@ class SignedBeaconBlockAndBlobsSidecar(Container):
## The gossip domain: gossipsub
Some gossip meshes are upgraded in the fork of EIP4844 to support upgraded types.
Some gossip meshes are upgraded in the fork of EIP-4844 to support upgraded types.
### Topics and messages
@ -78,10 +78,9 @@ The new topics along with the type of the `data` field of a gossipsub message ar
| - | - |
| `beacon_block_and_blobs_sidecar` | `SignedBeaconBlockAndBlobsSidecar` (new) |
#### Global topics
EIP4844 introduces a new global topic for beacon block and blobs-sidecars.
EIP-4844 introduces a new global topic for beacon block and blobs-sidecars.
##### `beacon_block`
@ -97,16 +96,18 @@ This topic is used to propagate new signed and coupled beacon blocks and blobs s
In addition to the gossip validations for the `beacon_block` topic from prior specifications, the following validations MUST pass before forwarding the `signed_beacon_block_and_blobs_sidecar` on the network (see the consolidated sketch after this list).
Alias `signed_beacon_block = signed_beacon_block_and_blobs_sidecar.beacon_block`, `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`.
- _[REJECT]_ The KZG commitments of the blobs are all correctly encoded compressed BLS G1 Points.
- _[REJECT]_ The KZG commitments of the blobs are all correctly encoded compressed BLS G1 points
-- i.e. `all(bls.KeyValidate(commitment) for commitment in block.body.blob_kzg_commitments)`
- _[REJECT]_ The KZG commitments correspond to the versioned hashes in the transactions list.
- _[REJECT]_ The KZG commitments correspond to the versioned hashes in the transactions list
-- i.e. `verify_kzg_commitments_against_transactions(block.body.execution_payload.transactions, block.body.blob_kzg_commitments)`
Alias `sidecar = signed_beacon_block_and_blobs_sidecar.blobs_sidecar`.
- _[IGNORE]_ the `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `sidecar.beacon_block_slot == block.slot`.
- _[IGNORE]_ the `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
-- i.e. `sidecar.beacon_block_slot == block.slot`.
- _[REJECT]_ the `sidecar.blobs` are all well formatted, i.e. each `BLSFieldElement` is in the valid range (`x < BLS_MODULUS`).
- _[REJECT]_ The KZG proof is a correctly encoded compressed BLS G1 Point -- i.e. `bls.KeyValidate(blobs_sidecar.kzg_aggregated_proof)`
- _[REJECT]_ The KZG commitments in the block are valid against the provided blobs sidecar.
- _[REJECT]_ The KZG proof is a correctly encoded compressed BLS G1 point
-- i.e. `bls.KeyValidate(blobs_sidecar.kzg_aggregated_proof)`
- _[REJECT]_ The KZG commitments in the block are valid against the provided blobs sidecar
-- i.e. `validate_blobs_sidecar(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments, sidecar)`
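The checks above can be read as a single validation routine; a minimal sketch reusing the spec helpers named in the list (error handling and the IGNORE-vs-REJECT distinction are omitted):

```python
def validate_signed_beacon_block_and_blobs_sidecar_gossip(
        signed_beacon_block_and_blobs_sidecar: SignedBeaconBlockAndBlobsSidecar) -> None:
    # Illustrative sketch only; a real implementation distinguishes IGNORE from REJECT outcomes.
    block = signed_beacon_block_and_blobs_sidecar.beacon_block.message
    sidecar = signed_beacon_block_and_blobs_sidecar.blobs_sidecar

    # [REJECT] KZG commitments are correctly encoded compressed BLS G1 points
    assert all(bls.KeyValidate(commitment) for commitment in block.body.blob_kzg_commitments)
    # [REJECT] KZG commitments correspond to the versioned hashes in the transactions list
    assert verify_kzg_commitments_against_transactions(
        block.body.execution_payload.transactions, block.body.blob_kzg_commitments)
    # [IGNORE] the sidecar is for the block's slot
    assert sidecar.beacon_block_slot == block.slot
    # [REJECT] blobs are well formatted; `blob_to_polynomial` asserts each field element < BLS_MODULUS
    for blob in sidecar.blobs:
        blob_to_polynomial(blob)
    # [REJECT] the aggregated KZG proof is a correctly encoded compressed BLS G1 point
    assert bls.KeyValidate(sidecar.kzg_aggregated_proof)
    # [REJECT] the commitments in the block are valid against the provided blobs sidecar
    validate_blobs_sidecar(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments, sidecar)
```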
### Transitioning the gossip
@ -148,7 +149,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
[1]: # (eth2spec: skip)
| `fork_version` | Chunk SSZ type |
| ------------------------ | -------------------------- |
|--------------------------|-------------------------------|
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
@ -230,7 +231,7 @@ Clients MUST keep a record of signed blobs sidecars seen on the epoch range
where `current_epoch` is defined by the current wall-clock time,
and clients MUST support serving requests of blocks on this range.
Peers that are unable to reply to block requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
Peers that are unable to reply to blobs sidecars requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
epoch range SHOULD respond with error code `3: ResourceUnavailable`.
Such peers that are unable to successfully reply to this range of requests MAY get descored
or disconnected at any time.
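For illustration, the serving window implied above can be computed as follows. This assumes the full spec's range `[max(current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, EIP4844_FORK_EPOCH), current_epoch]`, which is not shown in this hunk:

```python
def blobs_sidecars_serving_window(current_epoch: Epoch) -> Tuple[Epoch, Epoch]:
    # Earliest and latest epochs for which a peer is expected to serve blobs sidecars.
    # Plain-int arithmetic avoids uint64 underflow near genesis.
    start_epoch = max(int(current_epoch) - int(MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS),
                      int(EIP4844_FORK_EPOCH))
    return Epoch(start_epoch), current_epoch
```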


@ -19,9 +19,10 @@
- [`reverse_bits`](#reverse_bits)
- [`bit_reversal_permutation`](#bit_reversal_permutation)
- [BLS12-381 helpers](#bls12-381-helpers)
- [`hash_to_bls_field`](#hash_to_bls_field)
- [`bytes_to_bls_field`](#bytes_to_bls_field)
- [`blob_to_polynomial`](#blob_to_polynomial)
- [`hash_to_bls_field`](#hash_to_bls_field)
- [`compute_challenges`](#compute_challenges)
- [`bls_modular_inverse`](#bls_modular_inverse)
- [`div`](#div)
- [`g1_lincomb`](#g1_lincomb)
@ -41,7 +42,6 @@
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
## Introduction
This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the EIP-4844 specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations.
@ -89,8 +89,9 @@ but reusing the `mainnet` settings in public networks is a critical security req
| Name | Value |
| - | - |
| `KZG_SETUP_G2_LENGTH` | `65` |
| `KZG_SETUP_G1` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
| `KZG_SETUP_G2` | `Vector[G2Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
| `KZG_SETUP_G2` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]`, contents TBD |
| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
## Helper functions
@ -138,14 +139,29 @@ def bit_reversal_permutation(sequence: Sequence[T]) -> Sequence[T]:
### BLS12-381 helpers
#### `hash_to_bls_field`
```python
def hash_to_bls_field(data: bytes) -> BLSFieldElement:
"""
Hash ``data`` and convert the output to a BLS scalar field element.
The output is not uniform over the BLS field.
"""
hashed_data = hash(data)
return BLSFieldElement(int.from_bytes(hashed_data, ENDIANNESS) % BLS_MODULUS)
```
#### `bytes_to_bls_field`
```python
def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement:
"""
Convert 32-byte value to a BLS field scalar. The output is not uniform over the BLS field.
Convert 32-byte value to a BLS scalar field element.
This function does not accept inputs greater than the BLS modulus.
"""
return int.from_bytes(b, ENDIANNESS) % BLS_MODULUS
field_element = int.from_bytes(b, ENDIANNESS)
assert field_element < BLS_MODULUS
return BLSFieldElement(field_element)
```
#### `blob_to_polynomial`
@ -157,37 +173,49 @@ def blob_to_polynomial(blob: Blob) -> Polynomial:
"""
polynomial = Polynomial()
for i in range(FIELD_ELEMENTS_PER_BLOB):
value = int.from_bytes(blob[i * BYTES_PER_FIELD_ELEMENT: (i + 1) * BYTES_PER_FIELD_ELEMENT], ENDIANNESS)
assert value < BLS_MODULUS
value = bytes_to_bls_field(blob[i * BYTES_PER_FIELD_ELEMENT: (i + 1) * BYTES_PER_FIELD_ELEMENT])
polynomial[i] = value
return polynomial
```
#### `hash_to_bls_field`
#### `compute_challenges`
```python
def hash_to_bls_field(polys: Sequence[Polynomial],
comms: Sequence[KZGCommitment]) -> BLSFieldElement:
def compute_challenges(polynomials: Sequence[Polynomial],
commitments: Sequence[KZGCommitment]) -> Tuple[Sequence[BLSFieldElement], BLSFieldElement]:
"""
Compute 32-byte hash of serialized polynomials and commitments concatenated.
This hash is then converted to a BLS field element, where the result is not uniform over the BLS field.
Return the BLS field element.
Return the Fiat-Shamir challenges required by the rest of the protocol.
The Fiat-Shamir logic works as per the following pseudocode:
hashed_data = hash(DOMAIN_SEPARATOR, polynomials, commitments)
r = hash(hashed_data, 0)
r_powers = [1, r, r**2, r**3, ...]
eval_challenge = hash(hashed_data, 1)
Then return `r_powers` and `eval_challenge` after converting them to BLS field elements.
The resulting field elements are not uniform over the BLS field.
"""
# Append the number of polynomials and the degree of each polynomial as a domain separator
num_polys = int.to_bytes(len(polys), 8, ENDIANNESS)
num_polynomials = int.to_bytes(len(polynomials), 8, ENDIANNESS)
degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS)
data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly + num_polys
data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly + num_polynomials
# Append each polynomial which is composed by field elements
for poly in polys:
for poly in polynomials:
for field_element in poly:
data += int.to_bytes(field_element, BYTES_PER_FIELD_ELEMENT, ENDIANNESS)
# Append serialized G1 points
for commitment in comms:
for commitment in commitments:
data += commitment
return bytes_to_bls_field(hash(data))
# Transcript has been prepared: time to create the challenges
hashed_data = hash(data)
r = hash_to_bls_field(hashed_data + b'\x00')
r_powers = compute_powers(r, len(commitments))
eval_challenge = hash_to_bls_field(hashed_data + b'\x01')
return r_powers, eval_challenge
```
#### `bls_modular_inverse`
@ -198,7 +226,7 @@ def bls_modular_inverse(x: BLSFieldElement) -> BLSFieldElement:
Compute the modular inverse of x
i.e. return y such that x * y % BLS_MODULUS == 1 and return 0 for x == 0
"""
return pow(x, -1, BLS_MODULUS) if x != 0 else 0
return BLSFieldElement(pow(x, -1, BLS_MODULUS)) if x != 0 else BLSFieldElement(0)
```
#### `div`
@ -208,7 +236,7 @@ def div(x: BLSFieldElement, y: BLSFieldElement) -> BLSFieldElement:
"""
Divide two field elements: ``x`` by ``y``.
"""
return (int(x) * int(bls_modular_inverse(y))) % BLS_MODULUS
return BLSFieldElement((int(x) * int(bls_modular_inverse(y))) % BLS_MODULUS)
```
#### `g1_lincomb`
@ -234,11 +262,12 @@ def poly_lincomb(polys: Sequence[Polynomial],
Given a list of ``polynomials``, interpret it as a 2D matrix and compute the linear combination
of each column with `scalars`: return the resulting polynomials.
"""
result = [0] * len(polys[0])
assert len(polys) == len(scalars)
result = [0] * FIELD_ELEMENTS_PER_BLOB
for v, s in zip(polys, scalars):
for i, x in enumerate(v):
result[i] = (result[i] + int(s) * int(x)) % BLS_MODULUS
return [BLSFieldElement(x) for x in result]
return Polynomial([BLSFieldElement(x) for x in result])
```
#### `compute_powers`
@ -246,7 +275,7 @@ def poly_lincomb(polys: Sequence[Polynomial],
```python
def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]:
"""
Return ``x`` to power of [0, n-1].
Return ``x`` to power of [0, n-1], if n > 0. When n==0, an empty array is returned.
"""
current_power = 1
powers = []
@ -256,6 +285,7 @@ def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]:
return powers
```
### Polynomials
#### `evaluate_polynomial_in_evaluation_form`
@ -264,13 +294,13 @@ def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]:
def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial,
z: BLSFieldElement) -> BLSFieldElement:
"""
Evaluate a polynomial (in evaluation form) at an arbitrary point ``z``.
Evaluate a polynomial (in evaluation form) at an arbitrary point ``z`` that is not in the domain.
Uses the barycentric formula:
f(z) = (z**WIDTH - 1) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i])
"""
width = len(polynomial)
assert width == FIELD_ELEMENTS_PER_BLOB
inverse_width = bls_modular_inverse(width)
inverse_width = bls_modular_inverse(BLSFieldElement(width))
# Make sure we won't divide by zero during division
assert z not in ROOTS_OF_UNITY
@ -279,9 +309,11 @@ def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial,
result = 0
for i in range(width):
result += div(int(polynomial[i]) * int(roots_of_unity_brp[i]), (int(z) - int(roots_of_unity_brp[i])))
result = result * (pow(z, width, BLS_MODULUS) - 1) * inverse_width % BLS_MODULUS
return result
a = BLSFieldElement(int(polynomial[i]) * int(roots_of_unity_brp[i]) % BLS_MODULUS)
b = BLSFieldElement((int(BLS_MODULUS) + int(z) - int(roots_of_unity_brp[i])) % BLS_MODULUS)
result += int(div(a, b) % BLS_MODULUS)
result = result * int(pow(z, width, BLS_MODULUS) - 1) * int(inverse_width)
return BLSFieldElement(result % BLS_MODULUS)
```
### KZG
@ -341,17 +373,13 @@ def compute_kzg_proof(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof:
Compute KZG proof at point `z` with `polynomial` being in evaluation form
Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z)
"""
# To avoid SSZ overflow/underflow, convert element into int
polynomial = [int(i) for i in polynomial]
z = int(z)
y = evaluate_polynomial_in_evaluation_form(polynomial, z)
polynomial_shifted = [(p - int(y)) % BLS_MODULUS for p in polynomial]
polynomial_shifted = [BLSFieldElement((int(p) - int(y)) % BLS_MODULUS) for p in polynomial]
# Make sure we won't divide by zero during division
assert z not in ROOTS_OF_UNITY
denominator_poly = [(int(x) - z) % BLS_MODULUS for x in bit_reversal_permutation(ROOTS_OF_UNITY)]
denominator_poly = [BLSFieldElement((int(x) - int(z)) % BLS_MODULUS)
for x in bit_reversal_permutation(ROOTS_OF_UNITY)]
# Calculate quotient polynomial by doing point-by-point division
quotient_polynomial = [div(a, b) for a, b in zip(polynomial_shifted, denominator_poly)]
@ -367,17 +395,18 @@ def compute_aggregated_poly_and_commitment(
"""
Return (1) the aggregated polynomial, (2) the aggregated KZG commitment,
and (3) the polynomial evaluation random challenge.
This function should also work with blobs == [] and kzg_commitments == []
"""
assert len(blobs) == len(kzg_commitments)
# Convert blobs to polynomials
polynomials = [blob_to_polynomial(blob) for blob in blobs]
# Generate random linear combination challenges
r = hash_to_bls_field(polynomials, kzg_commitments)
r_powers = compute_powers(r, len(kzg_commitments))
evaluation_challenge = int(r_powers[-1]) * r % BLS_MODULUS
# Generate random linear combination and evaluation challenges
r_powers, evaluation_challenge = compute_challenges(polynomials, kzg_commitments)
# Create aggregated polynomial in evaluation form
aggregated_poly = Polynomial(poly_lincomb(polynomials, r_powers))
aggregated_poly = poly_lincomb(polynomials, r_powers)
# Compute commitment to aggregated polynomial
aggregated_poly_commitment = KZGCommitment(g1_lincomb(kzg_commitments, r_powers))
@ -390,6 +419,7 @@ def compute_aggregated_poly_and_commitment(
```python
def compute_aggregate_kzg_proof(blobs: Sequence[Blob]) -> KZGProof:
"""
Given a list of blobs, return the aggregated KZG proof that is used to verify them against their commitments.
Public method.
"""
commitments = [blob_to_kzg_commitment(blob) for blob in blobs]
@ -405,8 +435,10 @@ def compute_aggregate_kzg_proof(blobs: Sequence[Blob]) -> KZGProof:
```python
def verify_aggregate_kzg_proof(blobs: Sequence[Blob],
expected_kzg_commitments: Sequence[KZGCommitment],
kzg_aggregated_proof: KZGCommitment) -> bool:
kzg_aggregated_proof: KZGProof) -> bool:
"""
Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments.
Public method.
"""
aggregated_poly, aggregated_poly_commitment, evaluation_challenge = compute_aggregated_poly_and_commitment(


@ -32,7 +32,7 @@ This document represents the changes to be made in the code of an "honest valida
This document is an extension of the [Capella -- Honest Validator](../capella/validator.md) guide.
All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.
All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of EIP4844](./beacon-chain.md) are requisite for this document and used throughout.
All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of EIP-4844](./beacon-chain.md) are requisite for this document and used throughout.
Please see related Beacon Chain doc before continuing and use them as a reference throughout.
## Helpers
@ -46,6 +46,7 @@ Implementers may also retrieve blobs individually per transaction.
```python
def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment]]:
# pylint: disable=unused-argument
...
```


@ -745,6 +745,8 @@ For example, if slot 4 were empty in the previous example, the returned array wo
`step` is deprecated and must be set to 1. Clients may respond with a single block if a larger step is returned during the deprecation transition period.
`/eth2/beacon_chain/req/beacon_blocks_by_range/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period.
`BeaconBlocksByRange` is primarily used to sync historical blocks.
The request MUST be encoded as an SSZ-container.
@ -831,6 +833,8 @@ Clients MUST support requesting blocks since the latest finalized epoch.
Clients MUST respond with at least one block, if they have it.
Clients MAY limit the number of blocks in the response.
`/eth2/beacon_chain/req/beacon_blocks_by_root/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period.
#### Ping
**Protocol ID:** `/eth2/beacon_chain/req/ping/1/`


@ -1 +1 @@
1.3.0-alpha.1
1.3.0-alpha.2


@ -136,18 +136,22 @@ def test_invalid_signature_extra_participant(spec, state):
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
def is_duplicate_sync_committee(committee_indices):
dup = {v for v in committee_indices if committee_indices.count(v) > 1}
return len(dup) > 0
@with_altair_and_later
@with_presets([MINIMAL], reason="to create nonduplicate committee")
@spec_state_test
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
committee_indices = compute_committee_indices(state)
committee_size = len(committee_indices)
committee_bits = [True] * committee_size
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
# Preconditions of this test case
assert active_validator_count > spec.SYNC_COMMITTEE_SIZE
assert committee_size == len(set(committee_indices))
assert not is_duplicate_sync_committee(committee_indices)
committee_size = len(committee_indices)
committee_bits = [True] * committee_size
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@ -157,13 +161,12 @@ def test_sync_committee_rewards_nonduplicate_committee(spec, state):
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_no_participation(spec, state):
committee_indices = compute_committee_indices(state)
committee_size = len(committee_indices)
committee_bits = [False] * committee_size
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
# Preconditions of this test case
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
assert committee_size > len(set(committee_indices))
assert is_duplicate_sync_committee(committee_indices)
committee_size = len(committee_indices)
committee_bits = [False] * committee_size
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@ -173,14 +176,13 @@ def test_sync_committee_rewards_duplicate_committee_no_participation(spec, state
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_half_participation(spec, state):
committee_indices = compute_committee_indices(state)
# Preconditions of this test case
assert is_duplicate_sync_committee(committee_indices)
committee_size = len(committee_indices)
committee_bits = [True] * (committee_size // 2) + [False] * (committee_size // 2)
assert len(committee_bits) == committee_size
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
# Preconditions of this test case
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
assert committee_size > len(set(committee_indices))
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@ -190,17 +192,115 @@ def test_sync_committee_rewards_duplicate_committee_half_participation(spec, sta
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_full_participation(spec, state):
committee_indices = compute_committee_indices(state)
committee_size = len(committee_indices)
committee_bits = [True] * committee_size
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
# Preconditions of this test case
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
assert committee_size > len(set(committee_indices))
assert is_duplicate_sync_committee(committee_indices)
committee_size = len(committee_indices)
committee_bits = [True] * committee_size
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
def _run_sync_committee_selected_twice(
spec, state,
pre_balance, participate_first_position, participate_second_position,
skip_reward_validation=False):
committee_indices = compute_committee_indices(state)
# Preconditions of this test case
assert is_duplicate_sync_committee(committee_indices)
committee_size = len(committee_indices)
committee_bits = [False] * committee_size
# Find duplicate indices that get selected twice
dup = {v for v in committee_indices if committee_indices.count(v) == 2}
assert len(dup) > 0
validator_index = dup.pop()
positions = [i for i, v in enumerate(committee_indices) if v == validator_index]
committee_bits[positions[0]] = participate_first_position
committee_bits[positions[1]] = participate_second_position
# Set validator's balance
state.balances[validator_index] = pre_balance
state.validators[validator_index].effective_balance = min(
pre_balance - pre_balance % spec.EFFECTIVE_BALANCE_INCREMENT,
spec.MAX_EFFECTIVE_BALANCE,
)
yield from run_successful_sync_committee_test(
spec, state, committee_indices, committee_bits,
skip_reward_validation=skip_reward_validation)
return validator_index
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_zero_balance_only_participate_first_one(spec, state):
validator_index = yield from _run_sync_committee_selected_twice(
spec,
state,
pre_balance=0,
participate_first_position=True,
participate_second_position=False,
)
# The validator gets reward first (balance > 0) and then gets the same amount of penalty (balance == 0)
assert state.balances[validator_index] == 0
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_zero_balance_only_participate_second_one(spec, state):
# Skip `validate_sync_committee_rewards` because it doesn't handle the balance computation order
# inside the for loop
validator_index = yield from _run_sync_committee_selected_twice(
spec,
state,
pre_balance=0,
participate_first_position=False,
participate_second_position=True,
skip_reward_validation=True,
)
# The validator gets penalty first (balance is still 0) and then gets reward (balance > 0)
assert state.balances[validator_index] > 0
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_max_effective_balance_only_participate_first_one(spec, state):
validator_index = yield from _run_sync_committee_selected_twice(
spec,
state,
pre_balance=spec.MAX_EFFECTIVE_BALANCE,
participate_first_position=True,
participate_second_position=False,
)
assert state.balances[validator_index] == spec.MAX_EFFECTIVE_BALANCE
@with_altair_and_later
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_max_effective_balance_only_participate_second_one(spec, state):
validator_index = yield from _run_sync_committee_selected_twice(
spec,
state,
pre_balance=spec.MAX_EFFECTIVE_BALANCE,
participate_first_position=False,
participate_second_position=True,
)
assert state.balances[validator_index] == spec.MAX_EFFECTIVE_BALANCE
@with_altair_and_later
@spec_state_test
@always_bls


@ -0,0 +1,41 @@
from eth2spec.test.context import (
spec_state_test,
always_bls,
with_phases,
with_altair_and_later,
)
from eth2spec.test.helpers.constants import (
ALTAIR,
)
from eth2spec.test.helpers.deposits import (
run_deposit_processing_with_specific_fork_version,
)
@with_phases([ALTAIR])
@spec_state_test
@always_bls
def test_effective_deposit_with_previous_fork_version(spec, state):
assert state.fork.previous_version != state.fork.current_version
# It's only effective in Altair because the default `fork_version` of `compute_domain` is `GENESIS_FORK_VERSION`.
# Therefore it's just a normal `DepositMessage`.
yield from run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version=state.fork.previous_version,
)
@with_altair_and_later
@spec_state_test
@always_bls
def test_ineffective_deposit_with_current_fork_version(spec, state):
yield from run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version=state.fork.current_version,
effective=False,
)


@ -110,7 +110,7 @@ def test_sync_committees_progress_misc_balances_not_genesis(spec, state):
@spec_state_test
@always_bls
@with_presets([MINIMAL], reason="too slow")
def test_sync_committees_no_progress_not_boundary(spec, state):
def test_sync_committees_no_progress_not_at_period_boundary(spec, state):
assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
slot_not_at_period_boundary = state.slot + spec.SLOTS_PER_EPOCH
transition_to(spec, state, slot_not_at_period_boundary)


@ -35,7 +35,7 @@ def setup_test(spec, state):
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1)
trusted_block = state_transition_with_full_block(spec, state, True, True)
trusted_block_root = trusted_block.message.hash_tree_root()
bootstrap = spec.create_light_client_bootstrap(state)
bootstrap = spec.create_light_client_bootstrap(state, trusted_block)
yield "trusted_block_root", "meta", "0x" + trusted_block_root.hex()
yield "bootstrap", bootstrap
test.store = spec.initialize_light_client_store(trusted_block_root, bootstrap)
@ -85,9 +85,9 @@ def emit_force_update(test, spec, state):
})
def emit_update(test, spec, state, block, attested_state, finalized_block, with_next_sync_committee=True):
update = spec.create_light_client_update(state, block, attested_state, finalized_block)
if not with_next_sync_committee:
def emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=True):
update = spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block)
if not with_next:
update.next_sync_committee = spec.SyncCommittee()
update.next_sync_committee_branch = \
[spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
@ -135,11 +135,12 @@ def test_light_client_sync(spec, state):
next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
finalized_block = state_transition_with_full_block(spec, state, True, True)
finalized_state = state.copy()
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
attested_block = state_transition_with_full_block(spec, state, True, True)
attested_state = state.copy()
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
assert test.store.finalized_header.slot == finalized_state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
assert test.store.best_valid_update is None
@ -160,11 +161,12 @@ def test_light_client_sync(spec, state):
next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
finalized_block = state_transition_with_full_block(spec, state, True, True)
finalized_state = state.copy()
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
attested_block = state_transition_with_full_block(spec, state, True, True)
attested_state = state.copy()
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
assert test.store.finalized_header.slot == finalized_state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
assert test.store.best_valid_update is None
@ -184,12 +186,13 @@ def test_light_client_sync(spec, state):
next_slots(spec, state, spec.SLOTS_PER_EPOCH - 2)
finalized_block = state_transition_with_full_block(spec, state, True, True)
finalized_state = state.copy()
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
attested_block = state_transition_with_full_block(spec, state, True, True)
attested_state = state.copy()
transition_to(spec, state, compute_start_slot_at_next_sync_committee_period(spec, state))
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
assert test.store.finalized_header.slot == finalized_state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
assert test.store.best_valid_update is None
@ -206,10 +209,11 @@ def test_light_client_sync(spec, state):
# sync committee
# period boundary
# ```
attested_block = block.copy()
attested_state = state.copy()
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
update = yield from emit_update(test, spec, state, block, attested_state, finalized_block=None)
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block=None)
assert test.store.finalized_header.slot == finalized_state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
assert test.store.best_valid_update == update
@ -226,11 +230,12 @@ def test_light_client_sync(spec, state):
# sync committee
# period boundary
# ```
attested_block = block.copy()
attested_state = state.copy()
store_state = attested_state.copy()
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
update = yield from emit_update(test, spec, state, block, attested_state, finalized_block)
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
assert test.store.finalized_header.slot == finalized_state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
assert test.store.best_valid_update == update
@ -247,6 +252,7 @@ def test_light_client_sync(spec, state):
# sync committee `--- store.finalized_header
# period boundary
# ```
attested_block = block.copy()
attested_state = state.copy()
next_slots(spec, state, spec.UPDATE_TIMEOUT - 1)
yield from emit_force_update(test, spec, state)
@ -268,7 +274,7 @@ def test_light_client_sync(spec, state):
# ```
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
update = yield from emit_update(test, spec, state, block, attested_state, finalized_block=None)
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block=None)
assert test.store.finalized_header.slot == store_state.slot
assert test.store.next_sync_committee == store_state.next_sync_committee
assert test.store.best_valid_update == update
@ -285,10 +291,11 @@ def test_light_client_sync(spec, state):
# sync committee sync committee
# period boundary period boundary
# ```
attested_block = block.copy()
attested_state = state.copy()
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
update = yield from emit_update(test, spec, state, block, attested_state, finalized_block)
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
assert test.store.finalized_header.slot == store_state.slot
assert test.store.next_sync_committee == store_state.next_sync_committee
assert test.store.best_valid_update == update
@ -314,11 +321,12 @@ def test_light_client_sync(spec, state):
next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
finalized_block = state_transition_with_full_block(spec, state, True, True)
finalized_state = state.copy()
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
attested_block = state_transition_with_full_block(spec, state, True, True)
attested_state = state.copy()
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
assert test.store.finalized_header.slot == finalized_state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
assert test.store.best_valid_update is None
@ -336,7 +344,8 @@ def test_supply_sync_committee_from_past_update(spec, state):
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1)
finalized_block = state_transition_with_full_block(spec, state, True, True)
finalized_state = state.copy()
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
attested_block = state_transition_with_full_block(spec, state, True, True)
attested_state = state.copy()
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
@ -347,7 +356,7 @@ def test_supply_sync_committee_from_past_update(spec, state):
assert not spec.is_next_sync_committee_known(test.store)
# Apply `LightClientUpdate` from the past, populating `store.next_sync_committee`
yield from emit_update(test, spec, past_state, block, attested_state, finalized_block)
yield from emit_update(test, spec, past_state, block, attested_state, attested_block, finalized_block)
assert test.store.finalized_header.slot == state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
assert test.store.best_valid_update is None
@ -368,11 +377,12 @@ def test_advance_finality_without_sync_committee(spec, state):
next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
finalized_block = state_transition_with_full_block(spec, state, True, True)
finalized_state = state.copy()
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
attested_block = state_transition_with_full_block(spec, state, True, True)
attested_state = state.copy()
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
assert test.store.finalized_header.slot == finalized_state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
assert test.store.best_valid_update is None
@@ -386,11 +396,12 @@ def test_advance_finality_without_sync_committee(spec, state):
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
justified_block = state_transition_with_full_block(spec, state, True, True)
justified_state = state.copy()
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH, True, True)
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
attested_block = state_transition_with_full_block(spec, state, True, True)
attested_state = state.copy()
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
yield from emit_update(test, spec, state, block, attested_state, finalized_block, with_next_sync_committee=False)
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=False)
assert test.store.finalized_header.slot == finalized_state.slot
assert not spec.is_next_sync_committee_known(test.store)
assert test.store.best_valid_update is None
@@ -400,27 +411,28 @@ def test_advance_finality_without_sync_committee(spec, state):
past_state = finalized_state
finalized_block = justified_block
finalized_state = justified_state
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 2, True, True)
attested_block = state_transition_with_full_block(spec, state, True, True)
attested_state = state.copy()
sync_aggregate, _ = get_sync_aggregate(spec, state)
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
# Apply `LightClientUpdate` without `finalized_header` nor `next_sync_committee`
update = yield from emit_update(test, spec, state, block, attested_state, None, with_next_sync_committee=False)
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, None, with_next=False)
assert test.store.finalized_header.slot == past_state.slot
assert not spec.is_next_sync_committee_known(test.store)
assert test.store.best_valid_update == update
assert test.store.optimistic_header.slot == attested_state.slot
# Apply `LightClientUpdate` with `finalized_header` but no `next_sync_committee`
yield from emit_update(test, spec, state, block, attested_state, finalized_block, with_next_sync_committee=False)
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=False)
assert test.store.finalized_header.slot == finalized_state.slot
assert not spec.is_next_sync_committee_known(test.store)
assert test.store.best_valid_update is None
assert test.store.optimistic_header.slot == attested_state.slot
# Apply full `LightClientUpdate`, supplying `next_sync_committee`
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
assert test.store.finalized_header.slot == finalized_state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
assert test.store.best_valid_update is None
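The tests above all share the same setup shape after this change: advance to one slot before the attestation target, build the attested block explicitly, then have the sync committee sign the following block. A minimal sketch of that recurring pattern, assuming it sits alongside the helpers already used above (next_slots_with_attestations, state_transition_with_full_block, get_sync_aggregate); build_attested_section is a hypothetical name:

def build_attested_section(spec, state, slot_count):
    # Advance one slot short of the target so the attested block is created explicitly.
    _, _, state = next_slots_with_attestations(spec, state, slot_count - 1, True, True)
    attested_block = state_transition_with_full_block(spec, state, True, True)
    attested_state = state.copy()
    # The sync committee then signs the block that follows the attested one.
    sync_aggregate, _ = get_sync_aggregate(spec, state)
    signature_block = state_transition_with_full_block(
        spec, state, True, True, sync_aggregate=sync_aggregate)
    return attested_block, attested_state, signature_block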

View File

@@ -18,13 +18,13 @@ from eth2spec.test.helpers.state import (
from math import floor
def create_update(spec, test, with_next_sync_committee, with_finality, participation_rate):
def create_update(spec, test, with_next, with_finality, participation_rate):
attested_state, attested_block, finalized_block = test
num_participants = floor(spec.SYNC_COMMITTEE_SIZE * participation_rate)
attested_header = signed_block_to_header(spec, attested_block)
if with_next_sync_committee:
if with_next:
next_sync_committee = attested_state.next_sync_committee
next_sync_committee_branch = spec.compute_merkle_proof_for_state(attested_state, spec.NEXT_SYNC_COMMITTEE_INDEX)
else:
@@ -84,76 +84,76 @@ def test_update_ranking(spec, state):
# Create updates (in descending order of quality)
updates = [
# Updates with sync committee finality
create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),
create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=1.0),
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=1.0),
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.8),
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.8),
# Updates without sync committee finality
create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),
create_update(spec, att, with_next=1, with_finality=1, participation_rate=1.0),
create_update(spec, att, with_next=1, with_finality=1, participation_rate=0.8),
# Updates without indication of any finality
create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
create_update(spec, att, with_next=1, with_finality=0, participation_rate=1.0),
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=1.0),
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=1.0),
create_update(spec, att, with_next=1, with_finality=0, participation_rate=0.8),
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.8),
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.8),
# Updates with sync committee finality but no `next_sync_committee`
create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=1.0),
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=1.0),
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=1.0),
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.8),
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.8),
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.8),
# Updates without sync committee finality and also no `next_sync_committee`
create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
create_update(spec, att, with_next=0, with_finality=1, participation_rate=1.0),
create_update(spec, att, with_next=0, with_finality=1, participation_rate=0.8),
# Updates without indication of any finality nor `next_sync_committee`
create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=1.0),
create_update(spec, att, with_next=0, with_finality=0, participation_rate=1.0),
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=1.0),
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=1.0),
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.8),
create_update(spec, att, with_next=0, with_finality=0, participation_rate=0.8),
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.8),
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.8),
# Updates with low sync committee participation
create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.4),
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.4),
create_update(spec, att, with_next=1, with_finality=1, participation_rate=0.4),
create_update(spec, att, with_next=1, with_finality=0, participation_rate=0.4),
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.4),
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.4),
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.4),
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.4),
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.4),
create_update(spec, att, with_next=0, with_finality=1, participation_rate=0.4),
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.4),
create_update(spec, att, with_next=0, with_finality=0, participation_rate=0.4),
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.4),
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.4),
# Updates with very low sync committee participation
create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.2),
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.2),
create_update(spec, att, with_next=1, with_finality=1, participation_rate=0.2),
create_update(spec, att, with_next=1, with_finality=0, participation_rate=0.2),
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.2),
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.2),
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.2),
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.2),
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.2),
create_update(spec, att, with_next=0, with_finality=1, participation_rate=0.2),
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.2),
create_update(spec, att, with_next=0, with_finality=0, participation_rate=0.2),
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.2),
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.2),
]
yield "updates", updates

View File

@@ -51,40 +51,40 @@ def run_sync_committee_sanity_test(spec, state, fraction_full=1.0, rng=Random(45
@with_altair_and_later
@spec_state_test
def test_full_sync_committee_committee(spec, state):
def test_sync_committee_committee__full(spec, state):
next_epoch(spec, state)
yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
@with_altair_and_later
@spec_state_test
def test_half_sync_committee_committee(spec, state):
def test_sync_committee_committee__half(spec, state):
next_epoch(spec, state)
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5, rng=Random(1212))
@with_altair_and_later
@spec_state_test
def test_empty_sync_committee_committee(spec, state):
def test_sync_committee_committee__empty(spec, state):
next_epoch(spec, state)
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
@with_altair_and_later
@spec_state_test
def test_full_sync_committee_committee_genesis(spec, state):
def test_sync_committee_committee_genesis__full(spec, state):
yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
@with_altair_and_later
@spec_state_test
def test_half_sync_committee_committee_genesis(spec, state):
def test_sync_committee_committee_genesis__half(spec, state):
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5, rng=Random(2323))
@with_altair_and_later
@spec_state_test
def test_empty_sync_committee_committee_genesis(spec, state):
def test_sync_committee_committee_genesis__empty(spec, state):
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)

View File

@@ -4,10 +4,11 @@ from eth2spec.test.context import (
with_all_phases,
with_phases,
)
from eth2spec.test.helpers.constants import ALTAIR
from eth2spec.test.helpers.forks import (
is_post_capella, is_post_eip4844,
from eth2spec.test.helpers.constants import (
PHASE0, ALTAIR,
ALL_PHASES,
)
from eth2spec.test.helpers.forks import is_post_fork
@with_phases([ALTAIR])
@@ -30,29 +31,28 @@ def test_config_override(spec, state):
@with_all_phases
@spec_state_test_with_matching_config
def test_override_config_fork_epoch(spec, state):
if state.fork.current_version == spec.config.GENESIS_FORK_VERSION:
return
# Fork schedule must be consistent with state fork
epoch = spec.get_current_epoch(state)
if is_post_fork(spec.fork, ALTAIR):
assert state.fork.current_version == spec.compute_fork_version(epoch)
else:
assert state.fork.current_version == spec.config.GENESIS_FORK_VERSION
assert spec.config.ALTAIR_FORK_EPOCH == spec.GENESIS_EPOCH
if state.fork.current_version == spec.config.ALTAIR_FORK_VERSION:
return
# Identify state fork
state_fork = None
for fork in [fork for fork in ALL_PHASES if is_post_fork(spec.fork, fork)]:
if fork == PHASE0:
fork_version_field = 'GENESIS_FORK_VERSION'
else:
fork_version_field = fork.upper() + '_FORK_VERSION'
if state.fork.current_version == getattr(spec.config, fork_version_field):
state_fork = fork
break
assert state_fork is not None
assert spec.config.BELLATRIX_FORK_EPOCH == spec.GENESIS_EPOCH
if state.fork.current_version == spec.config.BELLATRIX_FORK_VERSION:
return
if is_post_capella(spec):
assert spec.config.CAPELLA_FORK_EPOCH == spec.GENESIS_EPOCH
if state.fork.current_version == spec.config.CAPELLA_FORK_VERSION:
return
if is_post_eip4844(spec):
assert spec.config.EIP4844_FORK_EPOCH == spec.GENESIS_EPOCH
if state.fork.current_version == spec.config.EIP4844_FORK_VERSION:
return
assert spec.config.SHARDING_FORK_EPOCH == spec.GENESIS_EPOCH
if state.fork.current_version == spec.config.SHARDING_FORK_VERSION:
return
assert False # Fork is missing
# Check that all prior forks have already been triggered
for fork in [fork for fork in ALL_PHASES if is_post_fork(state_fork, fork)]:
if fork == PHASE0:
continue
fork_epoch_field = fork.upper() + '_FORK_EPOCH'
assert getattr(spec.config, fork_epoch_field) <= epoch
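The loop relies purely on a naming convention to go from a fork label to the config attributes it inspects. A small illustration of the derived names (labels as used in ALL_PHASES; PHASE0 is special-cased to GENESIS_FORK_VERSION above):

for fork in ['altair', 'bellatrix', 'capella', 'eip4844']:
    # e.g. 'ALTAIR_FORK_VERSION' / 'ALTAIR_FORK_EPOCH'
    print(fork.upper() + '_FORK_VERSION', fork.upper() + '_FORK_EPOCH')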

View File

@@ -4,55 +4,23 @@ from eth2spec.test.context import (
with_bellatrix_and_later,
)
from eth2spec.test.helpers.deposits import (
deposit_from_context,
run_deposit_processing,
run_deposit_processing_with_specific_fork_version,
)
from eth2spec.test.helpers.keys import (
privkeys,
pubkeys,
)
from eth2spec.utils import bls
def _run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version,
valid,
effective):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
pubkey = pubkeys[validator_index]
privkey = privkeys[validator_index]
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
deposit_message = spec.DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=fork_version)
deposit_data = spec.DepositData(
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
)
deposit, root, _ = deposit_from_context(spec, [deposit_data], 0)
state.eth1_deposit_index = 0
state.eth1_data.deposit_root = root
state.eth1_data.deposit_count = 1
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=valid, effective=effective)
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_deposit_with_previous_fork_version__valid_ineffective(spec, state):
def test_ineffective_deposit_with_previous_fork_version(spec, state):
# Since deposits are valid across forks, the domain is always set with `GENESIS_FORK_VERSION`.
# It's an ineffective deposit because it fails at BLS sig verification.
# NOTE: it was effective in Altair.
assert state.fork.previous_version != state.fork.current_version
yield from _run_deposit_processing_with_specific_fork_version(
yield from run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version=state.fork.previous_version,
valid=True,
effective=False,
)
@@ -60,26 +28,11 @@ def test_deposit_with_previous_fork_version__valid_ineffective(spec, state):
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_deposit_with_genesis_fork_version__valid_effective(spec, state):
def test_effective_deposit_with_genesis_fork_version(spec, state):
assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)
yield from _run_deposit_processing_with_specific_fork_version(
yield from run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version=spec.config.GENESIS_FORK_VERSION,
valid=True,
effective=True,
)
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_deposit_with_bad_fork_version__valid_ineffective(spec, state):
yield from _run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version=spec.Version('0xAaBbCcDd'),
valid=True,
effective=False,
)
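The comment above points at why the previous-fork-version deposit is ineffective rather than invalid: deposit verification always recomputes the domain from GENESIS_FORK_VERSION, so only signatures produced over that domain pass, and a failing signature merely skips validator creation. A minimal sketch of the verification side, reusing the names from the helper shown above (pubkey, deposit_message, deposit):

# Fork-agnostic domain: deposits stay verifiable across every fork.
domain = spec.compute_domain(spec.DOMAIN_DEPOSIT, fork_version=spec.config.GENESIS_FORK_VERSION)
signing_root = spec.compute_signing_root(deposit_message, domain)
# A mismatching fork_version at signing time makes this check fail,
# which marks the deposit ineffective instead of rejecting the block.
assert bls.Verify(pubkey, signing_root, deposit.data.signature)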

View File

@@ -3,6 +3,7 @@ from random import Random
from eth2spec.test.helpers.execution_payload import (
build_empty_execution_payload,
build_randomized_execution_payload,
compute_el_block_hash,
get_execution_payload_header,
build_state_with_incomplete_transition,
build_state_with_complete_transition,
@@ -104,14 +105,14 @@ def run_bad_execution_test(spec, state):
@with_bellatrix_and_later
@spec_state_test
def test_bad_execution_first_payload(spec, state):
def test_invalid_bad_execution_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_bad_execution_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_bad_execution_regular_payload(spec, state):
def test_invalid_bad_execution_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_bad_execution_test(spec, state)
@@ -124,18 +125,20 @@ def test_bad_parent_hash_first_payload(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = b'\x55' * 32
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=True)
yield from run_execution_payload_processing(spec, state, execution_payload)
@with_bellatrix_and_later
@spec_state_test
def test_bad_parent_hash_regular_payload(spec, state):
def test_invalid_bad_parent_hash_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = spec.Hash32()
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@@ -145,20 +148,21 @@ def run_bad_prev_randao_test(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.prev_randao = b'\x42' * 32
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_bellatrix_and_later
@spec_state_test
def test_bad_prev_randao_first_payload(spec, state):
def test_invalid_bad_prev_randao_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_bad_prev_randao_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_bad_pre_randao_regular_payload(spec, state):
def test_invalid_bad_pre_randao_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_bad_prev_randao_test(spec, state)
@@ -170,20 +174,21 @@ def run_bad_everything_test(spec, state):
execution_payload.parent_hash = spec.Hash32()
execution_payload.prev_randao = spec.Bytes32()
execution_payload.timestamp = 0
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_bellatrix_and_later
@spec_state_test
def test_bad_everything_first_payload(spec, state):
def test_invalid_bad_everything_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_bad_everything_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_bad_everything_regular_payload(spec, state):
def test_invalid_bad_everything_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_bad_everything_test(spec, state)
@@ -198,34 +203,35 @@ def run_bad_timestamp_test(spec, state, is_future):
else:
timestamp = execution_payload.timestamp - 1
execution_payload.timestamp = timestamp
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_bellatrix_and_later
@spec_state_test
def test_future_timestamp_first_payload(spec, state):
def test_invalid_future_timestamp_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_bad_timestamp_test(spec, state, is_future=True)
@with_bellatrix_and_later
@spec_state_test
def test_future_timestamp_regular_payload(spec, state):
def test_invalid_future_timestamp_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_bad_timestamp_test(spec, state, is_future=True)
@with_bellatrix_and_later
@spec_state_test
def test_past_timestamp_first_payload(spec, state):
def test_invalid_past_timestamp_first_payload(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_bad_timestamp_test(spec, state, is_future=False)
@with_bellatrix_and_later
@spec_state_test
def test_past_timestamp_regular_payload(spec, state):
def test_invalid_past_timestamp_regular_payload(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_bad_timestamp_test(spec, state, is_future=False)
@@ -235,6 +241,7 @@ def run_non_empty_extra_data_test(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.extra_data = b'\x45' * 12
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload)
assert state.latest_execution_payload_header.extra_data == execution_payload.extra_data
@@ -263,6 +270,7 @@ def run_non_empty_transactions_test(spec, state):
spec.Transaction(b'\x99' * 128)
for _ in range(num_transactions)
]
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload)
assert state.latest_execution_payload_header.transactions_root == execution_payload.transactions.hash_tree_root()
@@ -288,6 +296,7 @@ def run_zero_length_transaction_test(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.transactions = [spec.Transaction(b'')]
assert len(execution_payload.transactions[0]) == 0
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_execution_payload_processing(spec, state, execution_payload)
assert state.latest_execution_payload_header.transactions_root == execution_payload.transactions.hash_tree_root()
@@ -320,27 +329,27 @@ def run_randomized_non_validated_execution_fields_test(spec, state, execution_va
@with_bellatrix_and_later
@spec_state_test
def test_randomized_non_validated_execution_fields_first_payload__valid(spec, state):
def test_randomized_non_validated_execution_fields_first_payload__execution_valid(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_randomized_non_validated_execution_fields_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_randomized_non_validated_execution_fields_regular_payload__valid(spec, state):
def test_randomized_non_validated_execution_fields_regular_payload__execution_valid(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_randomized_non_validated_execution_fields_test(spec, state)
@with_bellatrix_and_later
@spec_state_test
def test_randomized_non_validated_execution_fields_first_payload__invalid(spec, state):
def test_invalid_randomized_non_validated_execution_fields_first_payload__execution_invalid(spec, state):
state = build_state_with_incomplete_transition(spec, state)
yield from run_randomized_non_validated_execution_fields_test(spec, state, execution_valid=False)
@with_bellatrix_and_later
@spec_state_test
def test_randomized_non_validated_execution_fields_regular_payload__invalid(spec, state):
def test_invalid_randomized_non_validated_execution_fields_regular_payload__execution_invalid(spec, state):
state = build_state_with_complete_transition(spec, state)
yield from run_randomized_non_validated_execution_fields_test(spec, state, execution_valid=False)
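Each mutation in this file is now followed by refreshing block_hash with compute_el_block_hash, so the processing call fails (or passes) because of the field under test rather than because of a stale hash. If this repetition grows further it could be folded into a tiny helper; a hypothetical sketch (with_recomputed_block_hash is not an existing helper):

def with_recomputed_block_hash(spec, execution_payload, **mutations):
    # Apply the field overrides under test, then keep the hash consistent.
    for field, value in mutations.items():
        setattr(execution_payload, field, value)
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
    return execution_payload

# usage sketch:
# payload = with_recomputed_block_hash(
#     spec, build_empty_execution_payload(spec, state), extra_data=b'\x45' * 12)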

View File

@@ -18,7 +18,7 @@ def _run_voluntary_exit_processing_test(
state,
fork_version,
is_before_fork_epoch,
valid):
valid=True):
# create a fork
next_epoch(spec, state)
state.fork.epoch = spec.get_current_epoch(state)
@@ -50,7 +50,7 @@ def _run_voluntary_exit_processing_test(
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_current_fork_version_is_before_fork_epoch__invalid(spec, state):
def test_invalid_voluntary_exit_with_current_fork_version_is_before_fork_epoch(spec, state):
yield from _run_voluntary_exit_processing_test(
spec,
state,
@@ -63,20 +63,19 @@ def test_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch__valid(
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch__valid(spec, state):
def test_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch(spec, state):
yield from _run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.current_version,
is_before_fork_epoch=False,
valid=True,
)
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch__valid(spec, state):
def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch(spec, state):
assert state.fork.previous_version != state.fork.current_version
yield from _run_voluntary_exit_processing_test(
@@ -84,14 +83,13 @@ def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch__valid(s
state,
fork_version=state.fork.previous_version,
is_before_fork_epoch=True,
valid=True,
)
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch__invalid(spec, state):
def test_invalid_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch(spec, state):
assert state.fork.previous_version != state.fork.current_version
yield from _run_voluntary_exit_processing_test(
@@ -106,7 +104,7 @@ def test_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch__inv
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_genesis_fork_version_is_before_fork_epoch__invalid(spec, state):
def test_invalid_voluntary_exit_with_genesis_fork_version_is_before_fork_epoch(spec, state):
assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)
yield from _run_voluntary_exit_processing_test(
@@ -121,7 +119,7 @@ def test_voluntary_exit_with_genesis_fork_version_is_before_fork_epoch__invalid(
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_genesis_fork_version_not_is_before_fork_epoch__invalid(spec, state):
def test_invalid_voluntary_exit_with_genesis_fork_version_not_is_before_fork_epoch(spec, state):
assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)
yield from _run_voluntary_exit_processing_test(
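Read together, the renames encode one rule: a voluntary exit verifies only when it is signed over the fork version in force at the epoch the exit targets, and the genesis version is never acceptable here (unlike deposits). A condensed view of the cases exercised above, as a sketch mirroring the test names rather than restating the spec:

exit_signature_cases = [
    # (fork_version used to sign, exit epoch before the fork?, expected outcome)
    ('current_version',       True,  'invalid'),
    ('current_version',       False, 'valid'),
    ('previous_version',      True,  'valid'),
    ('previous_version',      False, 'invalid'),
    ('GENESIS_FORK_VERSION',  True,  'invalid'),
    ('GENESIS_FORK_VERSION',  False, 'invalid'),
]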

View File

@@ -4,6 +4,9 @@ from eth2spec.test.context import spec_state_test, with_phases, BELLATRIX
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.fork_choice import (
get_genesis_forkchoice_store_and_block,
on_tick_and_append_step,
@@ -72,6 +75,7 @@ def test_all_valid(spec, state):
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, merge_block=True)
# valid
@@ -103,6 +107,7 @@ def test_block_lookup_failed(spec, state):
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True,
block_not_found=True)
@@ -136,6 +141,7 @@ def test_too_early_for_merge(spec, state):
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True)
@@ -168,6 +174,7 @@ def test_too_late_for_merge(spec, state):
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True)

View File

@@ -8,6 +8,9 @@ from eth2spec.test.helpers.attestations import (
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.fork_choice import (
get_genesis_forkchoice_store_and_block,
on_tick_and_append_step,
@@ -33,6 +36,7 @@ def test_from_syncing_to_invalid(spec, state):
fc_store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
op_store = get_optimistic_store(spec, state, anchor_block)
mega_store = MegaStore(spec, fc_store, op_store)
block_hashes = {}
yield 'anchor_state', state
yield 'anchor_block', anchor_block
@@ -46,7 +50,7 @@ def test_from_syncing_to_invalid(spec, state):
# Block 0
block_0 = build_empty_block_for_next_slot(spec, state)
block_0.body.execution_payload.block_hash = spec.hash(bytes('block_0', 'UTF-8'))
block_hashes['block_0'] = block_0.body.execution_payload.block_hash
signed_block = state_transition_and_sign_block(spec, state, block_0)
yield from add_optimistic_block(spec, mega_store, signed_block, test_steps, status=PayloadStatusV1Status.VALID)
assert spec.get_head(mega_store.fc_store) == mega_store.opt_store.head_block_root
@@ -57,10 +61,11 @@ def test_from_syncing_to_invalid(spec, state):
signed_blocks_a = []
for i in range(3):
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.block_hash = spec.hash(bytes(f'chain_a_{i}', 'UTF-8'))
block.body.execution_payload.parent_hash = (
spec.hash(bytes(f'chain_a_{i - 1}', 'UTF-8')) if i != 0 else block_0.body.execution_payload.block_hash
block_hashes[f'chain_a_{i - 1}'] if i != 0 else block_hashes['block_0']
)
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
block_hashes[f'chain_a_{i}'] = block.body.execution_payload.block_hash
signed_block = state_transition_and_sign_block(spec, state, block)
yield from add_optimistic_block(spec, mega_store, signed_block, test_steps, status=PayloadStatusV1Status.VALID)
@@ -72,10 +77,12 @@ def test_from_syncing_to_invalid(spec, state):
state = state_0.copy()
for i in range(3):
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.block_hash = spec.hash(bytes(f'chain_b_{i}', 'UTF-8'))
block.body.execution_payload.parent_hash = (
spec.hash(bytes(f'chain_b_{i - 1}', 'UTF-8')) if i != 0 else block_0.body.execution_payload.block_hash
block_hashes[f'chain_b_{i - 1}'] if i != 0 else block_hashes['block_0']
)
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
block_hashes[f'chain_b_{i}'] = block.body.execution_payload.block_hash
signed_block = state_transition_with_full_block(spec, state, True, True, block=block)
signed_blocks_b.append(signed_block.copy())
yield from add_optimistic_block(spec, mega_store, signed_block, test_steps,
@@ -84,8 +91,10 @@ def test_from_syncing_to_invalid(spec, state):
# Now add block 4 to chain `b` with INVALID
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.block_hash = spec.hash(bytes('chain_b_3', 'UTF-8'))
block.body.execution_payload.parent_hash = signed_blocks_b[-1].message.body.execution_payload.block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
block_hashes['chain_b_3'] = block.body.execution_payload.block_hash
signed_block = state_transition_and_sign_block(spec, state, block)
payload_status = PayloadStatusV1(
status=PayloadStatusV1Status.INVALID,

View File

@@ -3,6 +3,9 @@ from eth2spec.utils.ssz.ssz_typing import uint256, Bytes32
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.pow_block import (
prepare_random_pow_chain,
)
@@ -57,6 +60,7 @@ def test_validate_merge_block_success(spec, state):
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
run_validate_merge_block(spec, pow_chain, block)
@@ -77,6 +81,7 @@ def test_validate_merge_block_fail_parent_block_lookup(spec, state):
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
run_validate_merge_block(spec, pow_chain, block, valid=False)
@@ -88,6 +93,7 @@ def test_validate_merge_block_fail_after_terminal(spec, state):
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY + uint256(1)
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
run_validate_merge_block(spec, pow_chain, block, valid=False)
@@ -104,6 +110,7 @@ def test_validate_merge_block_tbh_override_success(spec, state):
pow_chain.head().block_hash = TERMINAL_BLOCK_HASH
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
run_validate_merge_block(spec, pow_chain, block)
@@ -119,6 +126,7 @@ def test_validate_merge_block_fail_parent_hash_is_not_tbh(spec, state):
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
run_validate_merge_block(spec, pow_chain, block, valid=False)
@@ -135,6 +143,7 @@ def test_validate_merge_block_terminal_block_hash_fail_activation_not_reached(sp
pow_chain.head().block_hash = TERMINAL_BLOCK_HASH
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
run_validate_merge_block(spec, pow_chain, block, valid=False)
@@ -150,4 +159,5 @@ def test_validate_merge_block_fail_activation_not_reached_parent_hash_is_not_tbh
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
run_validate_merge_block(spec, pow_chain, block, valid=False)
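As in the payload tests, the added block_hash lines only keep the payload self-consistent; what these cases actually probe is the terminal-PoW condition. A minimal sketch of that condition as described in the Bellatrix spec (written here as a standalone helper for illustration):

def is_valid_terminal_pow_block_sketch(spec, block, parent) -> bool:
    # The merge block's PoW parent must cross TERMINAL_TOTAL_DIFFICULTY
    # while its own parent is still below it.
    reached = block.total_difficulty >= spec.config.TERMINAL_TOTAL_DIFFICULTY
    parent_below = parent.total_difficulty < spec.config.TERMINAL_TOTAL_DIFFICULTY
    return reached and parent_below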

View File

@@ -131,7 +131,7 @@ def test_success_withdrawable(spec, state):
@with_phases([CAPELLA])
@spec_state_test
def test_fail_val_index_out_of_range(spec, state):
def test_invalid_val_index_out_of_range(spec, state):
# Create for one validator beyond the validator list length
signed_address_change = get_signed_address_change(spec, state, validator_index=len(state.validators))
@@ -140,7 +140,7 @@ def test_fail_val_index_out_of_range(spec, state):
@with_phases([CAPELLA])
@spec_state_test
def test_fail_already_0x01(spec, state):
def test_invalid_already_0x01(spec, state):
# Create for a validator that already has 0x01 withdrawal credentials
validator_index = len(state.validators) // 2
validator = state.validators[validator_index]
@@ -152,7 +152,7 @@ def test_fail_already_0x01(spec, state):
@with_phases([CAPELLA])
@spec_state_test
def test_fail_incorrect_from_bls_pubkey(spec, state):
def test_invalid_incorrect_from_bls_pubkey(spec, state):
# Create an address change with an incorrect `from_bls_pubkey`
validator_index = 2
signed_address_change = get_signed_address_change(
@@ -167,7 +167,7 @@ def test_fail_incorrect_from_bls_pubkey(spec, state):
@with_phases([CAPELLA])
@spec_state_test
@always_bls
def test_fail_bad_signature(spec, state):
def test_invalid_bad_signature(spec, state):
signed_address_change = get_signed_address_change(spec, state)
# Mutate signature
signed_address_change.signature = spec.BLSSignature(b'\x42' * 96)

View File

@@ -6,9 +6,10 @@ from eth2spec.test.context import (
with_presets,
with_phases,
)
from eth2spec.test.helpers.constants import MINIMAL, CAPELLA
from eth2spec.test.helpers.constants import MAINNET, MINIMAL, CAPELLA
from eth2spec.test.helpers.execution_payload import (
build_empty_execution_payload,
compute_el_block_hash,
)
from eth2spec.test.helpers.random import (
randomize_state,
@@ -33,8 +34,13 @@ def verify_post_state(state, spec, expected_withdrawals,
expected_withdrawals_validator_indices = [withdrawal.validator_index for withdrawal in expected_withdrawals]
assert state.next_withdrawal_index == expected_withdrawals[-1].index + 1
if len(expected_withdrawals) == spec.MAX_WITHDRAWALS_PER_PAYLOAD:
# NOTE: ideally we would also check in the case with
# fewer than maximum withdrawals but that requires the pre-state info
next_withdrawal_validator_index = (expected_withdrawals_validator_indices[-1] + 1) % len(state.validators)
assert state.next_withdrawal_validator_index == next_withdrawal_validator_index
for index in fully_withdrawable_indices:
if index in expected_withdrawals_validator_indices:
assert state.balances[index] == 0
@@ -75,9 +81,13 @@ def run_withdrawals_processing(spec, state, execution_payload, num_expected_with
yield 'post', state
if len(expected_withdrawals) == 0:
assert state == pre_state
elif len(expected_withdrawals) < spec.MAX_WITHDRAWALS_PER_PAYLOAD:
assert len(spec.get_expected_withdrawals(state)) == 0
next_withdrawal_validator_index = (
pre_state.next_withdrawal_validator_index + spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
)
assert state.next_withdrawal_validator_index == next_withdrawal_validator_index % len(state.validators)
elif len(expected_withdrawals) <= spec.MAX_WITHDRAWALS_PER_PAYLOAD:
bound = min(spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP, spec.MAX_WITHDRAWALS_PER_PAYLOAD)
assert len(spec.get_expected_withdrawals(state)) <= bound
elif len(expected_withdrawals) > spec.MAX_WITHDRAWALS_PER_PAYLOAD:
raise ValueError('len(expected_withdrawals) should not be greater than MAX_WITHDRAWALS_PER_PAYLOAD')
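The new branch reflects the Capella sweep bound: when the sweep yields fewer than MAX_WITHDRAWALS_PER_PAYLOAD withdrawals, it must have scanned exactly MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP validators, so the cursor advances by that amount modulo the registry size (with a full payload, verify_post_state above instead places the cursor right after the last withdrawn validator). A small worked example with illustrative numbers:

MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP = 16   # illustrative value, not a real preset constant
num_validators = 20
cursor = 10                                  # pre_state.next_withdrawal_validator_index
# Fewer than MAX_WITHDRAWALS_PER_PAYLOAD withdrawals found -> the full sweep bound was scanned.
cursor = (cursor + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) % num_validators
assert cursor == 6                           # (10 + 16) % 20, wrapping past the registry end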
@@ -154,10 +164,14 @@ def test_success_max_per_slot(spec, state):
@with_phases([CAPELLA])
@with_presets([MAINNET], reason="too few validators with minimal config")
@spec_state_test
def test_success_all_fully_withdrawable(spec, state):
def test_success_all_fully_withdrawable_in_one_sweep(spec, state):
assert len(state.validators) <= spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
withdrawal_count = len(state.validators)
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
spec, state, num_full_withdrawals=len(state.validators))
spec, state, num_full_withdrawals=withdrawal_count)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -169,10 +183,52 @@ def test_success_all_fully_withdrawable(spec, state):
@with_phases([CAPELLA])
@with_presets([MINIMAL], reason="too many validators with mainnet config")
@spec_state_test
def test_success_all_fully_withdrawable(spec, state):
assert len(state.validators) > spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
withdrawal_count = spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
spec, state, num_full_withdrawals=withdrawal_count)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
yield from run_withdrawals_processing(
spec, state, execution_payload,
fully_withdrawable_indices=fully_withdrawable_indices,
partial_withdrawals_indices=partial_withdrawals_indices)
@with_phases([CAPELLA])
@with_presets([MAINNET], reason="too few validators with minimal config")
@spec_state_test
def test_success_all_partially_withdrawable_in_one_sweep(spec, state):
assert len(state.validators) <= spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
withdrawal_count = len(state.validators)
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
spec, state, num_partial_withdrawals=withdrawal_count)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
yield from run_withdrawals_processing(
spec, state, execution_payload,
fully_withdrawable_indices=fully_withdrawable_indices,
partial_withdrawals_indices=partial_withdrawals_indices)
@with_phases([CAPELLA])
@with_presets([MINIMAL], reason="too many validators with mainnet config")
@spec_state_test
def test_success_all_partially_withdrawable(spec, state):
assert len(state.validators) > spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
withdrawal_count = spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
spec, state, num_partial_withdrawals=len(state.validators))
spec, state, num_partial_withdrawals=withdrawal_count)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
@@ -189,7 +245,7 @@ def test_success_all_partially_withdrawable(spec, state):
@with_phases([CAPELLA])
@spec_state_test
def test_fail_non_withdrawable_non_empty_withdrawals(spec, state):
def test_invalid_non_withdrawable_non_empty_withdrawals(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
withdrawal = spec.Withdrawal(
@@ -199,115 +255,125 @@ def test_fail_non_withdrawable_non_empty_withdrawals(spec, state):
amount=420,
)
execution_payload.withdrawals.append(withdrawal)
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_one_expected_full_withdrawal_and_none_in_withdrawals(spec, state):
def test_invalid_one_expected_full_withdrawal_and_none_in_withdrawals(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = []
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_one_expected_partial_withdrawal_and_none_in_withdrawals(spec, state):
def test_invalid_one_expected_partial_withdrawal_and_none_in_withdrawals(spec, state):
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = []
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_one_expected_full_withdrawal_and_duplicate_in_withdrawals(spec, state):
def test_invalid_one_expected_full_withdrawal_and_duplicate_in_withdrawals(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=2)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals.append(execution_payload.withdrawals[0].copy())
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_two_expected_partial_withdrawal_and_duplicate_in_withdrawals(spec, state):
def test_invalid_two_expected_partial_withdrawal_and_duplicate_in_withdrawals(spec, state):
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=2)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals.append(execution_payload.withdrawals[0].copy())
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_max_per_slot_full_withdrawals_and_one_less_in_withdrawals(spec, state):
def test_invalid_max_per_slot_full_withdrawals_and_one_less_in_withdrawals(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_max_per_slot_partial_withdrawals_and_one_less_in_withdrawals(spec, state):
def test_invalid_max_per_slot_partial_withdrawals_and_one_less_in_withdrawals(spec, state):
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_a_lot_fully_withdrawable_too_few_in_withdrawals(spec, state):
def test_invalid_a_lot_fully_withdrawable_too_few_in_withdrawals(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_a_lot_partially_withdrawable_too_few_in_withdrawals(spec, state):
def test_invalid_a_lot_partially_withdrawable_too_few_in_withdrawals(spec, state):
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_a_lot_mixed_withdrawable_in_queue_too_few_in_withdrawals(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4,
num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
def test_invalid_a_lot_mixed_withdrawable_in_queue_too_few_in_withdrawals(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD,
num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -318,67 +384,72 @@ def test_fail_a_lot_mixed_withdrawable_in_queue_too_few_in_withdrawals(spec, sta
@with_phases([CAPELLA])
@spec_state_test
def test_fail_incorrect_withdrawal_index(spec, state):
def test_invalid_incorrect_withdrawal_index(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals[0].index += 1
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_incorrect_address_full(spec, state):
def test_invalid_incorrect_address_full(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals[0].address = b'\xff' * 20
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_incorrect_address_partial(spec, state):
def test_invalid_incorrect_address_partial(spec, state):
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals[0].address = b'\xff' * 20
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_incorrect_amount_full(spec, state):
def test_invalid_incorrect_amount_full(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals[0].amount += 1
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_incorrect_amount_partial(spec, state):
def test_invalid_incorrect_amount_partial(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals[0].amount += 1
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_one_of_many_incorrectly_full(spec, state):
def test_invalid_one_of_many_incorrectly_full(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
next_slot(spec, state)
@ -390,13 +461,14 @@ def test_fail_one_of_many_incorrectly_full(spec, state):
withdrawal.index += 1
withdrawal.address = b'\x99' * 20
withdrawal.amount += 4000000
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_one_of_many_incorrectly_partial(spec, state):
def test_invalid_one_of_many_incorrectly_partial(spec, state):
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
next_slot(spec, state)
@ -408,13 +480,14 @@ def test_fail_one_of_many_incorrectly_partial(spec, state):
withdrawal.index += 1
withdrawal.address = b'\x99' * 20
withdrawal.amount += 4000000
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_many_incorrectly_full(spec, state):
def test_invalid_many_incorrectly_full(spec, state):
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
next_slot(spec, state)
@ -426,13 +499,14 @@ def test_fail_many_incorrectly_full(spec, state):
withdrawal.address = i.to_bytes(20, 'big')
else:
withdrawal.amount += 1
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@with_phases([CAPELLA])
@spec_state_test
def test_fail_many_incorrectly_partial(spec, state):
def test_invalid_many_incorrectly_partial(spec, state):
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
next_slot(spec, state)
@ -444,6 +518,7 @@ def test_fail_many_incorrectly_partial(spec, state):
withdrawal.address = i.to_bytes(20, 'big')
else:
withdrawal.amount += 1
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@ -624,7 +699,7 @@ def test_success_excess_balance_but_no_max_effective_balance(spec, state):
@with_phases([CAPELLA])
@spec_state_test
def test_success_one_partial_withdrawable_not_yet_active(spec, state):
validator_index = len(state.validators) // 2
validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
state.validators[validator_index].activation_epoch += 4
set_validator_partially_withdrawable(spec, state, validator_index)
@ -638,7 +713,7 @@ def test_success_one_partial_withdrawable_not_yet_active(spec, state):
@with_phases([CAPELLA])
@spec_state_test
def test_success_one_partial_withdrawable_in_exit_queue(spec, state):
validator_index = len(state.validators) // 2
validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
state.validators[validator_index].exit_epoch = spec.get_current_epoch(state) + 1
set_validator_partially_withdrawable(spec, state, validator_index)
@ -653,7 +728,7 @@ def test_success_one_partial_withdrawable_in_exit_queue(spec, state):
@with_phases([CAPELLA])
@spec_state_test
def test_success_one_partial_withdrawable_exited(spec, state):
validator_index = len(state.validators) // 2
validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
state.validators[validator_index].exit_epoch = spec.get_current_epoch(state)
set_validator_partially_withdrawable(spec, state, validator_index)
@ -667,7 +742,7 @@ def test_success_one_partial_withdrawable_exited(spec, state):
@with_phases([CAPELLA])
@spec_state_test
def test_success_one_partial_withdrawable_active_and_slashed(spec, state):
validator_index = len(state.validators) // 2
validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
state.validators[validator_index].slashed = True
set_validator_partially_withdrawable(spec, state, validator_index)
@ -681,7 +756,7 @@ def test_success_one_partial_withdrawable_active_and_slashed(spec, state):
@with_phases([CAPELLA])
@spec_state_test
def test_success_one_partial_withdrawable_exited_and_slashed(spec, state):
validator_index = len(state.validators) // 2
validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
state.validators[validator_index].slashed = True
state.validators[validator_index].exit_epoch = spec.get_current_epoch(state)
set_validator_partially_withdrawable(spec, state, validator_index)


@ -194,8 +194,8 @@ def test_many_partial_withdrawals_in_epoch_transition(spec, state):
def _perform_valid_withdrawal(spec, state):
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4,
num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 2,
num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 2)
next_slot(spec, state)
pre_next_withdrawal_index = state.next_withdrawal_index
@ -240,7 +240,7 @@ def test_withdrawal_success_two_blocks(spec, state):
@with_phases([CAPELLA])
@spec_state_test
def test_withdrawal_fail_second_block_payload_isnt_compatible(spec, state):
def test_invalid_withdrawal_fail_second_block_payload_isnt_compatible(spec, state):
_perform_valid_withdrawal(spec, state)
# Block 2


@ -11,7 +11,7 @@ from eth2spec.utils import bls
from .exceptions import SkippedTest
from .helpers.constants import (
PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844, SHARDING,
PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844,
MINIMAL, MAINNET,
ALL_PHASES,
ALL_FORK_UPGRADES,
@ -258,6 +258,12 @@ def dump_skipping_message(reason: str) -> None:
raise SkippedTest(message)
def description(case_description: str):
def entry(fn):
return with_meta_tags({'description': case_description})(fn)
return entry
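A minimal usage sketch for the new `description` decorator (the test name and description string below are hypothetical; the decorator just attaches a 'description' meta tag via with_meta_tags, so it is placed closest to the test function so its yields are collected by the spec_test/vector_test wrapping):
# Hypothetical example, not part of this diff:
@with_phases([CAPELLA])
@spec_state_test
@description("checks that a partial withdrawal is swept exactly once")
def test_example_partial_withdrawal_described(spec, state):
    yield 'pre', state
    # ... exercise the state transition under test ...
    yield 'post', state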
def spec_test(fn):
# The BLS switch must be wrapped by vector_test so that the yielded BLS-switch
# data is fully consumed before the BLS setting is restored.
@ -267,7 +273,7 @@ def spec_test(fn):
return vector_test()(bls_switch(fn))
# shorthand for decorating @spectest() @with_state @single_phase
# shorthand for decorating @spec_test @with_state @single_phase
def spec_state_test(fn):
return spec_test(with_state(single_phase(fn)))
@ -291,30 +297,15 @@ def _check_current_version(spec, state, version_name):
def config_fork_epoch_overrides(spec, state):
overrides = {}
if state.fork.current_version == spec.config.GENESIS_FORK_VERSION:
pass
elif _check_current_version(spec, state, ALTAIR):
overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
elif _check_current_version(spec, state, BELLATRIX):
overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
elif _check_current_version(spec, state, CAPELLA):
overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
overrides['CAPELLA_FORK_EPOCH'] = spec.GENESIS_EPOCH
elif _check_current_version(spec, state, EIP4844):
overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
overrides['CAPELLA_FORK_EPOCH'] = spec.GENESIS_EPOCH
overrides['EIP4844_FORK_EPOCH'] = spec.GENESIS_EPOCH
elif _check_current_version(spec, state, SHARDING):
overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
overrides['CAPELLA_FORK_EPOCH'] = spec.GENESIS_EPOCH
overrides['SHARDING_FORK_EPOCH'] = spec.GENESIS_EPOCH
else:
assert False # Fork is missing
return {}
for fork in ALL_PHASES:
if fork != PHASE0 and _check_current_version(spec, state, fork):
overrides = {}
for f in ALL_PHASES:
if f != PHASE0 and is_post_fork(fork, f):
overrides[f.upper() + '_FORK_EPOCH'] = spec.GENESIS_EPOCH
return overrides
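For illustration, a hedged sketch of what the rewritten helper is expected to return for, e.g., a state whose current fork is Capella (assuming the usual fork ordering Altair, Bellatrix, Capella and that fork names upper-case to their config keys):
# Illustrative only; the real values come straight from the loop above.
overrides = config_fork_epoch_overrides(spec, state)
assert overrides == {
    'ALTAIR_FORK_EPOCH': spec.GENESIS_EPOCH,
    'BELLATRIX_FORK_EPOCH': spec.GENESIS_EPOCH,
    'CAPELLA_FORK_EPOCH': spec.GENESIS_EPOCH,
}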


@ -8,6 +8,9 @@ from eth2spec.test.context import (
spec_state_test,
with_eip4844_and_later,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.sharding import (
get_sample_opaque_tx,
)
@ -22,6 +25,7 @@ def test_one_blob(spec, state):
opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
@ -30,13 +34,14 @@ def test_one_blob(spec, state):
@with_eip4844_and_later
@spec_state_test
def test_multiple_blobs(spec, state):
def test_max_blobs(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=5)
opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]


@ -8,6 +8,9 @@ from eth2spec.test.context import (
spec_state_test,
with_eip4844_and_later,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.sharding import (
get_sample_opaque_tx,
)
@ -18,6 +21,7 @@ def _run_validate_blobs_sidecar_test(spec, state, blob_count):
opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
state_transition_and_sign_block(spec, state, block)
blobs_sidecar = spec.get_blobs_sidecar(block, blobs)
@ -25,6 +29,12 @@ def _run_validate_blobs_sidecar_test(spec, state, blob_count):
spec.validate_blobs_sidecar(block.slot, block.hash_tree_root(), expected_commitments, blobs_sidecar)
@with_eip4844_and_later
@spec_state_test
def test_validate_blobs_sidecar_zero_blobs(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=0)
@with_eip4844_and_later
@spec_state_test
def test_validate_blobs_sidecar_one_blob(spec, state):
@ -39,5 +49,5 @@ def test_validate_blobs_sidecar_two_blobs(spec, state):
@with_eip4844_and_later
@spec_state_test
def test_validate_blobs_sidecar_ten_blobs(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=10)
def test_validate_blobs_sidecar_max_blobs(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)


@ -208,25 +208,50 @@ def run_deposit_processing(spec, state, deposit, validator_index, valid=True, ef
if not effective or not bls.KeyValidate(deposit.data.pubkey):
assert len(state.validators) == pre_validator_count
assert len(state.balances) == pre_validator_count
if validator_index < pre_validator_count:
if is_top_up:
assert get_balance(state, validator_index) == pre_balance
else:
if validator_index < pre_validator_count:
# top-up
if is_top_up:
# Top-ups do not change effective balance
assert state.validators[validator_index].effective_balance == pre_effective_balance
assert len(state.validators) == pre_validator_count
assert len(state.balances) == pre_validator_count
else:
# new validator
assert len(state.validators) == pre_validator_count + 1
assert len(state.balances) == pre_validator_count + 1
assert get_balance(state, validator_index) == pre_balance + deposit.data.amount
if is_top_up:
# Top-ups do not change effective balance
assert state.validators[validator_index].effective_balance == pre_effective_balance
else:
effective_balance = min(spec.MAX_EFFECTIVE_BALANCE, deposit.data.amount)
effective_balance -= effective_balance % spec.EFFECTIVE_BALANCE_INCREMENT
assert state.validators[validator_index].effective_balance == effective_balance
assert get_balance(state, validator_index) == pre_balance + deposit.data.amount
assert state.eth1_deposit_index == state.eth1_data.deposit_count
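A quick worked example of the effective-balance clamp asserted above, using the mainnet values MAX_EFFECTIVE_BALANCE = 32 ETH and EFFECTIVE_BALANCE_INCREMENT = 1 ETH (both in Gwei):
# New-validator deposit of 33.5 ETH: capped at the 32 ETH maximum.
amount = 33_500_000_000
effective_balance = min(32_000_000_000, amount)          # 32_000_000_000
effective_balance -= effective_balance % 1_000_000_000   # unchanged
assert effective_balance == 32_000_000_000
# New-validator deposit of 17.3 ETH: rounded down to a whole increment.
amount = 17_300_000_000
effective_balance = min(32_000_000_000, amount)          # 17_300_000_000
effective_balance -= effective_balance % 1_000_000_000   # 17_000_000_000
assert effective_balance == 17_000_000_000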
def run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version,
valid=True,
effective=True):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
pubkey = pubkeys[validator_index]
privkey = privkeys[validator_index]
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
deposit_message = spec.DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=fork_version)
deposit_data = spec.DepositData(
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
)
deposit, root, _ = deposit_from_context(spec, [deposit_data], 0)
state.eth1_deposit_index = 0
state.eth1_data.deposit_root = root
state.eth1_data.deposit_count = 1
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=valid, effective=effective)
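A hedged usage sketch for the new helper (the fork version literal is arbitrary and purely illustrative): because deposit signatures are always verified against the genesis fork version, signing over any other version should leave the deposit ineffective rather than making the block invalid.
# Hypothetical call from inside a generator-style spec test:
yield from run_deposit_processing_with_specific_fork_version(
    spec,
    state,
    fork_version=spec.Version(b'\x01\x02\x03\x04'),  # arbitrary example value
    effective=False,
)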


@ -1,68 +1,10 @@
from eth_hash.auto import keccak
from trie import HexaryTrie
from rlp import encode
from rlp.sedes import big_endian_int, Binary, List
from eth2spec.debug.random_value import get_random_bytes_list
from eth2spec.test.helpers.forks import is_post_capella
def build_empty_execution_payload(spec, state, randao_mix=None):
"""
Assuming a pre-state of the same slot, build a valid ExecutionPayload without any transactions.
"""
latest = state.latest_execution_payload_header
timestamp = spec.compute_timestamp_at_slot(state, state.slot)
empty_txs = spec.List[spec.Transaction, spec.MAX_TRANSACTIONS_PER_PAYLOAD]()
if randao_mix is None:
randao_mix = spec.get_randao_mix(state, spec.get_current_epoch(state))
payload = spec.ExecutionPayload(
parent_hash=latest.block_hash,
fee_recipient=spec.ExecutionAddress(),
state_root=latest.state_root, # no changes to the state
receipts_root=b"no receipts here" + b"\x00" * 16, # TODO: root of empty MPT may be better.
logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](), # TODO: zeroed logs bloom for empty logs ok?
block_number=latest.block_number + 1,
prev_randao=randao_mix,
gas_limit=latest.gas_limit, # retain same limit
gas_used=0, # empty block, 0 gas
timestamp=timestamp,
extra_data=spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](),
base_fee_per_gas=latest.base_fee_per_gas, # retain same base_fee
block_hash=spec.Hash32(),
transactions=empty_txs,
)
if is_post_capella(spec):
payload.withdrawals = spec.get_expected_withdrawals(state)
# TODO: real RLP + block hash logic would be nice, requires RLP and keccak256 dependency however.
payload.block_hash = spec.Hash32(spec.hash(payload.hash_tree_root() + b"FAKE RLP HASH"))
return payload
def build_randomized_execution_payload(spec, state, rng):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.fee_recipient = spec.ExecutionAddress(get_random_bytes_list(rng, 20))
execution_payload.state_root = spec.Bytes32(get_random_bytes_list(rng, 32))
execution_payload.receipts_root = spec.Bytes32(get_random_bytes_list(rng, 32))
execution_payload.logs_bloom = spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](
get_random_bytes_list(rng, spec.BYTES_PER_LOGS_BLOOM)
)
execution_payload.block_number = rng.randint(0, 10e10)
execution_payload.gas_limit = rng.randint(0, 10e10)
execution_payload.gas_used = rng.randint(0, 10e10)
extra_data_length = rng.randint(0, spec.MAX_EXTRA_DATA_BYTES)
execution_payload.extra_data = spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](
get_random_bytes_list(rng, extra_data_length)
)
execution_payload.base_fee_per_gas = rng.randint(0, 2**256 - 1)
execution_payload.block_hash = spec.Hash32(get_random_bytes_list(rng, 32))
num_transactions = rng.randint(0, 100)
execution_payload.transactions = [
spec.Transaction(get_random_bytes_list(rng, rng.randint(0, 1000)))
for _ in range(num_transactions)
]
return execution_payload
from eth2spec.test.helpers.forks import is_post_capella, is_post_eip4844
def get_execution_payload_header(spec, execution_payload):
@ -87,6 +29,174 @@ def get_execution_payload_header(spec, execution_payload):
return payload_header
# https://eips.ethereum.org/EIPS/eip-2718
def compute_trie_root_from_indexed_data(data):
"""
Computes the root hash of `patriciaTrie(rlp(Index) => Data)` for a data array.
"""
t = HexaryTrie(db={})
for i, obj in enumerate(data):
k = encode(i, big_endian_int)
t.set(k, obj)
return t.root_hash
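A small sanity sketch for the trie helper above: with no entries, the root is the canonical empty-trie root keccak256(rlp(b'')), the same constant hard-coded as the empty transactions/withdrawals trie root in the genesis header helper further down.
assert compute_trie_root_from_indexed_data([]) == bytes.fromhex(
    "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
)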
# https://eips.ethereum.org/EIPS/eip-4895
# https://eips.ethereum.org/EIPS/eip-4844
def compute_el_header_block_hash(spec,
payload_header,
transactions_trie_root,
withdrawals_trie_root=None):
"""
Computes the RLP execution block hash described by an `ExecutionPayloadHeader`.
"""
execution_payload_header_rlp = [
# parent_hash
(Binary(32, 32), payload_header.parent_hash),
# ommers_hash
(Binary(32, 32), bytes.fromhex("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
# coinbase
(Binary(20, 20), payload_header.fee_recipient),
# state_root
(Binary(32, 32), payload_header.state_root),
# txs_root
(Binary(32, 32), transactions_trie_root),
# receipts_root
(Binary(32, 32), payload_header.receipts_root),
# logs_bloom
(Binary(256, 256), payload_header.logs_bloom),
# difficulty
(big_endian_int, 0),
# number
(big_endian_int, payload_header.block_number),
# gas_limit
(big_endian_int, payload_header.gas_limit),
# gas_used
(big_endian_int, payload_header.gas_used),
# timestamp
(big_endian_int, payload_header.timestamp),
# extradata
(Binary(0, 32), payload_header.extra_data),
# prev_randao
(Binary(32, 32), payload_header.prev_randao),
# nonce
(Binary(8, 8), bytes.fromhex("0000000000000000")),
# base_fee_per_gas
(big_endian_int, payload_header.base_fee_per_gas),
]
if is_post_capella(spec):
# withdrawals_root
execution_payload_header_rlp.append((Binary(32, 32), withdrawals_trie_root))
if is_post_eip4844(spec):
# excess_data_gas
execution_payload_header_rlp.append((big_endian_int, payload_header.excess_data_gas))
sedes = List([schema for schema, _ in execution_payload_header_rlp])
values = [value for _, value in execution_payload_header_rlp]
encoded = encode(values, sedes)
return spec.Hash32(keccak(encoded))
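Two of the hard-coded constants above are well-known empty-structure hashes; a short sketch, reusing the keccak and rlp imports already in this module, makes that explicit:
# keccak256(rlp([])) -- the empty ommers list, used for ommers_hash above.
assert keccak(encode([])).hex() == \
    "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
# keccak256(rlp(b'')) -- the empty-trie root, used for empty tx/withdrawal tries.
assert keccak(encode(b"")).hex() == \
    "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"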
# https://eips.ethereum.org/EIPS/eip-4895
def get_withdrawal_rlp(spec, withdrawal):
withdrawal_rlp = [
# index
(big_endian_int, withdrawal.index),
# validator_index
(big_endian_int, withdrawal.validator_index),
# address
(Binary(20, 20), withdrawal.address),
# amount
(big_endian_int, spec.uint256(withdrawal.amount) * (10**9)),
]
sedes = List([schema for schema, _ in withdrawal_rlp])
values = [value for _, value in withdrawal_rlp]
return encode(values, sedes)
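The amount field above is scaled from the beacon chain's Gwei denomination by 10**9 before RLP encoding, as this helper assumes for the EL-side withdrawal object; a minimal round-trip sketch under that assumption (the `withdrawal` variable stands for any beacon-chain Withdrawal):
from rlp import decode
# Decode one encoded withdrawal and undo the scaling applied above.
index, validator_index, address, scaled_amount = decode(get_withdrawal_rlp(spec, withdrawal))
assert int.from_bytes(scaled_amount, 'big') == int(withdrawal.amount) * 10**9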
def compute_el_block_hash(spec, payload):
transactions_trie_root = compute_trie_root_from_indexed_data(payload.transactions)
if is_post_capella(spec):
withdrawals_encoded = [get_withdrawal_rlp(spec, withdrawal) for withdrawal in payload.withdrawals]
withdrawals_trie_root = compute_trie_root_from_indexed_data(withdrawals_encoded)
else:
withdrawals_trie_root = None
payload_header = get_execution_payload_header(spec, payload)
return compute_el_header_block_hash(
spec,
payload_header,
transactions_trie_root,
withdrawals_trie_root,
)
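Usage pattern, as seen throughout the withdrawal and blob tests above: whenever a test mutates payload fields, it recomputes the EL block hash afterwards so that block_hash stays consistent with the mutated payload contents.
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]  # deliberate mutation
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)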
def build_empty_execution_payload(spec, state, randao_mix=None):
"""
Assuming a pre-state of the same slot, build a valid ExecutionPayload without any transactions.
"""
latest = state.latest_execution_payload_header
timestamp = spec.compute_timestamp_at_slot(state, state.slot)
empty_txs = spec.List[spec.Transaction, spec.MAX_TRANSACTIONS_PER_PAYLOAD]()
if randao_mix is None:
randao_mix = spec.get_randao_mix(state, spec.get_current_epoch(state))
payload = spec.ExecutionPayload(
parent_hash=latest.block_hash,
fee_recipient=spec.ExecutionAddress(),
state_root=latest.state_root, # no changes to the state
receipts_root=spec.Bytes32(bytes.fromhex("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](), # TODO: zeroed logs bloom for empty logs ok?
block_number=latest.block_number + 1,
prev_randao=randao_mix,
gas_limit=latest.gas_limit, # retain same limit
gas_used=0, # empty block, 0 gas
timestamp=timestamp,
extra_data=spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](),
base_fee_per_gas=latest.base_fee_per_gas, # retain same base_fee
transactions=empty_txs,
)
if is_post_capella(spec):
payload.withdrawals = spec.get_expected_withdrawals(state)
payload.block_hash = compute_el_block_hash(spec, payload)
return payload
def build_randomized_execution_payload(spec, state, rng):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.fee_recipient = spec.ExecutionAddress(get_random_bytes_list(rng, 20))
execution_payload.state_root = spec.Bytes32(get_random_bytes_list(rng, 32))
execution_payload.receipts_root = spec.Bytes32(get_random_bytes_list(rng, 32))
execution_payload.logs_bloom = spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](
get_random_bytes_list(rng, spec.BYTES_PER_LOGS_BLOOM)
)
execution_payload.block_number = rng.randint(0, 10e10)
execution_payload.gas_limit = rng.randint(0, 10e10)
execution_payload.gas_used = rng.randint(0, 10e10)
extra_data_length = rng.randint(0, spec.MAX_EXTRA_DATA_BYTES)
execution_payload.extra_data = spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](
get_random_bytes_list(rng, extra_data_length)
)
execution_payload.base_fee_per_gas = rng.randint(0, 2**256 - 1)
num_transactions = rng.randint(0, 100)
execution_payload.transactions = [
spec.Transaction(get_random_bytes_list(rng, rng.randint(0, 1000)))
for _ in range(num_transactions)
]
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
return execution_payload
def build_state_with_incomplete_transition(spec, state):
state = build_state_with_execution_payload_header(spec, state, spec.ExecutionPayloadHeader())
assert not spec.is_merge_transition_complete(state)


@ -165,6 +165,9 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=
elif post_spec.fork == CAPELLA:
assert state.fork.previous_version == post_spec.config.BELLATRIX_FORK_VERSION
assert state.fork.current_version == post_spec.config.CAPELLA_FORK_VERSION
elif post_spec.fork == EIP4844:
assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION
assert state.fork.current_version == post_spec.config.EIP4844_FORK_VERSION
if with_block:
return state, _state_transition_and_sign_block_at_slot(post_spec, state, operation_dict=operation_dict)


@ -1,8 +1,11 @@
from eth2spec.test.helpers.constants import (
ALTAIR, BELLATRIX, CAPELLA, EIP4844,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_header_block_hash,
)
from eth2spec.test.helpers.forks import (
is_post_altair, is_post_bellatrix,
is_post_altair, is_post_bellatrix, is_post_capella,
)
from eth2spec.test.helpers.keys import pubkeys
@ -29,7 +32,7 @@ def get_sample_genesis_execution_payload_header(spec,
eth1_block_hash=None):
if eth1_block_hash is None:
eth1_block_hash = b'\x55' * 32
return spec.ExecutionPayloadHeader(
payload_header = spec.ExecutionPayloadHeader(
parent_hash=b'\x30' * 32,
fee_recipient=b'\x42' * 20,
state_root=b'\x20' * 32,
@ -43,6 +46,21 @@ def get_sample_genesis_execution_payload_header(spec,
transactions_root=spec.Root(b'\x56' * 32),
)
transactions_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
if is_post_capella(spec):
withdrawals_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
else:
withdrawals_trie_root = None
payload_header.block_hash = compute_el_header_block_hash(
spec,
payload_header,
transactions_trie_root,
withdrawals_trie_root,
)
return payload_header
def create_genesis_state(spec, validator_balances, activation_threshold):
deposit_root = b'\x42' * 32
@ -60,7 +78,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
previous_version = spec.config.BELLATRIX_FORK_VERSION
current_version = spec.config.CAPELLA_FORK_VERSION
elif spec.fork == EIP4844:
previous_version = spec.config.BELLATRIX_FORK_VERSION
previous_version = spec.config.CAPELLA_FORK_VERSION
current_version = spec.config.EIP4844_FORK_VERSION
state = spec.BeaconState(


@ -107,10 +107,11 @@ def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indic
committee_bits,
)
assert post_state.balances[index] == pre_state.balances[index] + reward - penalty
balance = pre_state.balances[index] + reward
assert post_state.balances[index] == (0 if balance < penalty else balance - penalty)
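The changed assertion above makes the expected balance saturate at zero when the penalty exceeds the reward-adjusted balance; a tiny sketch of the intended arithmetic:
def expected_post_balance(pre_balance, reward, penalty):
    # Mirrors the assertion above: balances never underflow below zero.
    balance = pre_balance + reward
    return 0 if balance < penalty else balance - penalty
assert expected_post_balance(32, 3, 5) == 30
assert expected_post_balance(1, 0, 5) == 0  # penalty larger than the balance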
def run_sync_committee_processing(spec, state, block, expect_exception=False):
def run_sync_committee_processing(spec, state, block, expect_exception=False, skip_reward_validation=False):
"""
Processes everything up to the sync committee work, then runs the sync committee work in isolation, and
produces a pre-state and post-state (None if exception) specifically for sync-committee processing changes.
@ -131,6 +132,7 @@ def run_sync_committee_processing(spec, state, block, expect_exception=False):
else:
committee_indices = compute_committee_indices(state, state.current_sync_committee)
committee_bits = block.body.sync_aggregate.sync_committee_bits
if not skip_reward_validation:
validate_sync_committee_rewards(
spec,
pre_state,
@ -156,6 +158,6 @@ def _build_block_for_next_slot_with_sync_participation(spec, state, committee_in
return block
def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits):
def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits, skip_reward_validation=False):
block = _build_block_for_next_slot_with_sync_participation(spec, state, committee_indices, committee_bits)
yield from run_sync_committee_processing(spec, state, block)
yield from run_sync_committee_processing(spec, state, block, skip_reward_validation=skip_reward_validation)


@ -36,9 +36,10 @@ def set_validator_partially_withdrawable(spec, state, index, excess_balance=1000
def prepare_expected_withdrawals(spec, state,
num_full_withdrawals=0, num_partial_withdrawals=0, rng=random.Random(5566)):
assert num_full_withdrawals + num_partial_withdrawals <= len(state.validators)
all_validator_indices = list(range(len(state.validators)))
sampled_indices = rng.sample(all_validator_indices, num_full_withdrawals + num_partial_withdrawals)
bound = min(len(state.validators), spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
assert num_full_withdrawals + num_partial_withdrawals <= bound
eligible_validator_indices = list(range(bound))
sampled_indices = rng.sample(eligible_validator_indices, num_full_withdrawals + num_partial_withdrawals)
fully_withdrawable_indices = rng.sample(sampled_indices, num_full_withdrawals)
partial_withdrawals_indices = list(set(sampled_indices).difference(set(fully_withdrawable_indices)))
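The new bound reflects how the Capella withdrawals sweep works: get_expected_withdrawals only visits up to MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP validators per payload, starting at state.next_withdrawal_validator_index (zero in these fixtures), so any sampled validator outside that window would never be swept. An illustrative check under that assumption:
# Sketch only: every sampled index is reachable by a single sweep from index 0.
assert all(index < bound for index in sampled_indices)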


@ -23,7 +23,7 @@ from eth2spec.utils.ssz.ssz_typing import Bitlist
@with_all_phases
@spec_state_test
def test_success(spec, state):
def test_one_basic_attestation(spec, state):
attestation = get_valid_attestation(spec, state, signed=True)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
@ -34,7 +34,7 @@ def test_success(spec, state):
@spec_test
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@single_phase
def test_success_multi_proposer_index_iterations(spec, state):
def test_multi_proposer_index_iterations(spec, state):
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2)
attestation = get_valid_attestation(spec, state, signed=True)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
@ -44,7 +44,7 @@ def test_success_multi_proposer_index_iterations(spec, state):
@with_all_phases
@spec_state_test
def test_success_previous_epoch(spec, state):
def test_previous_epoch(spec, state):
attestation = get_valid_attestation(spec, state, signed=True)
next_epoch_via_block(spec, state)
@ -58,55 +58,55 @@ def test_invalid_attestation_signature(spec, state):
attestation = get_valid_attestation(spec, state)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_empty_participants_zeroes_sig(spec, state):
def test_invalid_empty_participants_zeroes_sig(spec, state):
attestation = get_valid_attestation(spec, state, filter_participant_set=lambda comm: []) # 0 participants
attestation.signature = spec.BLSSignature(b'\x00' * 96)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_empty_participants_seemingly_valid_sig(spec, state):
def test_invalid_empty_participants_seemingly_valid_sig(spec, state):
attestation = get_valid_attestation(spec, state, filter_participant_set=lambda comm: []) # 0 participants
# Special BLS value, valid for zero pubkeys on some (but not all) BLS implementations.
attestation.signature = spec.BLSSignature(b'\xc0' + b'\x00' * 95)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_before_inclusion_delay(spec, state):
def test_invalid_before_inclusion_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=True)
# do not increment slot to allow for inclusion delay
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_after_epoch_slots(spec, state):
def test_invalid_after_epoch_slots(spec, state):
attestation = get_valid_attestation(spec, state, signed=True)
# increment past latest inclusion slot
transition_to_slot_via_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH + 1)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_old_source_epoch(spec, state):
def test_invalid_old_source_epoch(spec, state):
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 5)
state.finalized_checkpoint.epoch = 2
state.previous_justified_checkpoint.epoch = 3
@ -121,19 +121,19 @@ def test_old_source_epoch(spec, state):
sign_attestation(spec, state, attestation)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_wrong_index_for_committee_signature(spec, state):
def test_invalid_wrong_index_for_committee_signature(spec, state):
attestation = get_valid_attestation(spec, state)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
attestation.data.index += 1
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
def reduce_state_committee_count_from_max(spec, state):
@ -148,7 +148,7 @@ def reduce_state_committee_count_from_max(spec, state):
@with_all_phases
@spec_state_test
@never_bls
def test_wrong_index_for_slot_0(spec, state):
def test_invalid_wrong_index_for_slot_0(spec, state):
reduce_state_committee_count_from_max(spec, state)
attestation = get_valid_attestation(spec, state)
@ -157,13 +157,13 @@ def test_wrong_index_for_slot_0(spec, state):
# Invalid index: current committees per slot is less than the max
attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT - 1
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
@never_bls
def test_wrong_index_for_slot_1(spec, state):
def test_invalid_wrong_index_for_slot_1(spec, state):
reduce_state_committee_count_from_max(spec, state)
current_epoch = spec.get_current_epoch(state)
@ -175,7 +175,7 @@ def test_wrong_index_for_slot_1(spec, state):
# Invalid index: off by one
attestation.data.index = committee_count
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@ -188,12 +188,12 @@ def test_invalid_index(spec, state):
# Invalid index: off by one (with respect to valid range) on purpose
attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_mismatched_target_and_slot(spec, state):
def test_invalid_mismatched_target_and_slot(spec, state):
next_epoch_via_block(spec, state)
next_epoch_via_block(spec, state)
@ -202,24 +202,24 @@ def test_mismatched_target_and_slot(spec, state):
sign_attestation(spec, state, attestation)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_old_target_epoch(spec, state):
def test_invalid_old_target_epoch(spec, state):
assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2
attestation = get_valid_attestation(spec, state, signed=True)
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2) # target epoch will be too old to handle
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_future_target_epoch(spec, state):
def test_invalid_future_target_epoch(spec, state):
assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2
attestation = get_valid_attestation(spec, state)
@ -236,12 +236,12 @@ def test_future_target_epoch(spec, state):
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_new_source_epoch(spec, state):
def test_invalid_new_source_epoch(spec, state):
attestation = get_valid_attestation(spec, state)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
@ -249,12 +249,12 @@ def test_new_source_epoch(spec, state):
sign_attestation(spec, state, attestation)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_source_root_is_target_root(spec, state):
def test_invalid_source_root_is_target_root(spec, state):
attestation = get_valid_attestation(spec, state)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
@ -262,7 +262,7 @@ def test_source_root_is_target_root(spec, state):
sign_attestation(spec, state, attestation)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@ -289,7 +289,7 @@ def test_invalid_current_source_root(spec, state):
sign_attestation(spec, state, attestation)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@ -315,12 +315,12 @@ def test_invalid_previous_source_root(spec, state):
sign_attestation(spec, state, attestation)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_bad_source_root(spec, state):
def test_invalid_bad_source_root(spec, state):
attestation = get_valid_attestation(spec, state)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
@ -328,24 +328,24 @@ def test_bad_source_root(spec, state):
sign_attestation(spec, state, attestation)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_too_many_aggregation_bits(spec, state):
def test_invalid_too_many_aggregation_bits(spec, state):
attestation = get_valid_attestation(spec, state, signed=True)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
# one too many bits
attestation.aggregation_bits.append(0b0)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
@with_all_phases
@spec_state_test
def test_too_few_aggregation_bits(spec, state):
def test_invalid_too_few_aggregation_bits(spec, state):
attestation = get_valid_attestation(spec, state)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
@ -357,7 +357,7 @@ def test_too_few_aggregation_bits(spec, state):
# one too few bits
attestation.aggregation_bits = attestation.aggregation_bits[:-1]
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
#
@ -366,7 +366,7 @@ def test_too_few_aggregation_bits(spec, state):
@with_all_phases
@spec_state_test
def test_correct_min_inclusion_delay(spec, state):
def test_correct_attestation_included_at_min_inclusion_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=True)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
@ -375,7 +375,7 @@ def test_correct_min_inclusion_delay(spec, state):
@with_all_phases
@spec_state_test
def test_correct_sqrt_epoch_delay(spec, state):
def test_correct_attestation_included_at_sqrt_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=True)
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
@ -384,7 +384,7 @@ def test_correct_sqrt_epoch_delay(spec, state):
@with_all_phases
@spec_state_test
def test_correct_epoch_delay(spec, state):
def test_correct_attestation_included_at_one_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=True)
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
@ -393,13 +393,13 @@ def test_correct_epoch_delay(spec, state):
@with_all_phases
@spec_state_test
def test_correct_after_epoch_delay(spec, state):
def test_invalid_correct_attestation_included_after_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=True)
# increment past latest inclusion slot
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
#
@ -408,7 +408,7 @@ def test_correct_after_epoch_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_head_min_inclusion_delay(spec, state):
def test_incorrect_head_included_at_min_inclusion_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
@ -420,7 +420,7 @@ def test_incorrect_head_min_inclusion_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_head_sqrt_epoch_delay(spec, state):
def test_incorrect_head_included_at_sqrt_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
@ -432,7 +432,7 @@ def test_incorrect_head_sqrt_epoch_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_head_epoch_delay(spec, state):
def test_incorrect_head_included_at_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
@ -444,7 +444,7 @@ def test_incorrect_head_epoch_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_head_after_epoch_delay(spec, state):
def test_invalid_incorrect_head_included_after_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
# increment past latest inclusion slot
@ -453,7 +453,7 @@ def test_incorrect_head_after_epoch_delay(spec, state):
attestation.data.beacon_block_root = b'\x42' * 32
sign_attestation(spec, state, attestation)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
#
@ -475,7 +475,7 @@ def test_incorrect_head_and_target_min_inclusion_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state):
def test_incorrect_head_and_target_included_at_sqrt_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
@ -488,7 +488,7 @@ def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_head_and_target_epoch_delay(spec, state):
def test_incorrect_head_and_target_included_at_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
@ -501,7 +501,7 @@ def test_incorrect_head_and_target_epoch_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_head_and_target_after_epoch_delay(spec, state):
def test_invalid_incorrect_head_and_target_included_after_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
# increment past latest inclusion slot
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
@ -510,7 +510,7 @@ def test_incorrect_head_and_target_after_epoch_delay(spec, state):
attestation.data.target.root = b'\x42' * 32
sign_attestation(spec, state, attestation)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)
#
@ -519,7 +519,7 @@ def test_incorrect_head_and_target_after_epoch_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_target_min_inclusion_delay(spec, state):
def test_incorrect_target_included_at_min_inclusion_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
@ -531,7 +531,7 @@ def test_incorrect_target_min_inclusion_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_target_sqrt_epoch_delay(spec, state):
def test_incorrect_target_included_at_sqrt_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
@ -543,7 +543,7 @@ def test_incorrect_target_sqrt_epoch_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_target_epoch_delay(spec, state):
def test_incorrect_target_included_at_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
@ -555,7 +555,7 @@ def test_incorrect_target_epoch_delay(spec, state):
@with_all_phases
@spec_state_test
def test_incorrect_target_after_epoch_delay(spec, state):
def test_invalid_incorrect_target_included_after_epoch_delay(spec, state):
attestation = get_valid_attestation(spec, state, signed=False)
# increment past latest inclusion slot
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
@ -563,4 +563,4 @@ def test_incorrect_target_after_epoch_delay(spec, state):
attestation.data.target.root = b'\x42' * 32
sign_attestation(spec, state, attestation)
yield from run_attestation_processing(spec, state, attestation, False)
yield from run_attestation_processing(spec, state, attestation, valid=False)


@ -91,7 +91,7 @@ def run_attester_slashing_processing(spec, state, attester_slashing, valid=True)
@with_all_phases
@spec_state_test
def test_success_double(spec, state):
def test_basic_double(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@ -99,7 +99,7 @@ def test_success_double(spec, state):
@with_all_phases
@spec_state_test
def test_success_surround(spec, state):
def test_basic_surround(spec, state):
next_epoch_via_block(spec, state)
state.current_justified_checkpoint.epoch += 1
@ -119,7 +119,7 @@ def test_success_surround(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_success_already_exited_recent(spec, state):
def test_already_exited_recent(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
for index in slashed_indices:
@ -131,7 +131,7 @@ def test_success_already_exited_recent(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_success_proposer_index_slashed(spec, state):
def test_proposer_index_slashed(spec, state):
# Transition past genesis slot because the genesis slot generally doesn't have a proposer
next_epoch_via_block(spec, state)
@ -147,7 +147,7 @@ def test_success_proposer_index_slashed(spec, state):
@with_all_phases
@spec_state_test
def test_success_attestation_from_future(spec, state):
def test_attestation_from_future(spec, state):
# Transition state to future to enable generation of a "future" attestation
future_state = state.copy()
next_epoch_via_block(spec, future_state)
@ -165,7 +165,7 @@ def test_success_attestation_from_future(spec, state):
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_success_low_balances(spec, state):
def test_low_balances(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@ -175,7 +175,7 @@ def test_success_low_balances(spec, state):
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_success_misc_balances(spec, state):
def test_misc_balances(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
yield from run_attester_slashing_processing(spec, state, attester_slashing)
@ -185,7 +185,7 @@ def test_success_misc_balances(spec, state):
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@single_phase
def test_success_with_effective_balance_disparity(spec, state):
def test_with_effective_balance_disparity(spec, state):
# Jitter balances to be different from effective balances
rng = Random(12345)
for i in range(len(state.balances)):
@ -200,7 +200,7 @@ def test_success_with_effective_balance_disparity(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_success_already_exited_long_ago(spec, state):
def test_already_exited_long_ago(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
for index in slashed_indices:
@ -213,30 +213,30 @@ def test_success_already_exited_long_ago(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1(spec, state):
def test_invalid_incorrect_sig_1(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_2(spec, state):
def test_invalid_incorrect_sig_2(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1_and_2(spec, state):
def test_invalid_incorrect_sig_1_and_2(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_same_data(spec, state):
def test_invalid_same_data(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
indexed_att_1 = attester_slashing.attestation_1
@ -244,12 +244,12 @@ def test_same_data(spec, state):
indexed_att_1.data = att_2_data
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_no_double_or_surround(spec, state):
def test_invalid_no_double_or_surround(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
att_1_data = get_attestation_1_data(spec, attester_slashing)
@ -257,12 +257,12 @@ def test_no_double_or_surround(spec, state):
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_participants_already_slashed(spec, state):
def test_invalid_participants_already_slashed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
# set all indices to slashed
@ -270,63 +270,63 @@ def test_participants_already_slashed(spec, state):
for index in validator_indices:
state.validators[index].slashed = True
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_high_index(spec, state):
def test_invalid_att1_high_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
indices.append(spec.ValidatorIndex(len(state.validators))) # off by 1
attester_slashing.attestation_1.attesting_indices = indices
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_high_index(spec, state):
def test_invalid_att2_high_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_2)
indices.append(spec.ValidatorIndex(len(state.validators))) # off by 1
attester_slashing.attestation_2.attesting_indices = indices
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_empty_indices(spec, state):
def test_invalid_att1_empty_indices(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
attester_slashing.attestation_1.attesting_indices = []
attester_slashing.attestation_1.signature = spec.bls.G2_POINT_AT_INFINITY
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_empty_indices(spec, state):
def test_invalid_att2_empty_indices(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
attester_slashing.attestation_2.attesting_indices = []
attester_slashing.attestation_2.signature = spec.bls.G2_POINT_AT_INFINITY
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_all_empty_indices(spec, state):
def test_invalid_all_empty_indices(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False)
attester_slashing.attestation_1.attesting_indices = []
@ -335,13 +335,13 @@ def test_all_empty_indices(spec, state):
attester_slashing.attestation_2.attesting_indices = []
attester_slashing.attestation_2.signature = spec.bls.G2_POINT_AT_INFINITY
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_bad_extra_index(spec, state):
def test_invalid_att1_bad_extra_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
@ -351,13 +351,13 @@ def test_att1_bad_extra_index(spec, state):
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
# see if the bad extra index is spotted, and slashing is aborted.
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_bad_replaced_index(spec, state):
def test_invalid_att1_bad_replaced_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = attester_slashing.attestation_1.attesting_indices
@ -367,13 +367,13 @@ def test_att1_bad_replaced_index(spec, state):
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
# see if the bad replaced index is spotted, and slashing is aborted.
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_bad_extra_index(spec, state):
def test_invalid_att2_bad_extra_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = attester_slashing.attestation_2.attesting_indices
@ -383,13 +383,13 @@ def test_att2_bad_extra_index(spec, state):
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
# see if the bad extra index is spotted, and slashing is aborted.
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_bad_replaced_index(spec, state):
def test_invalid_att2_bad_replaced_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = attester_slashing.attestation_2.attesting_indices
@ -399,13 +399,13 @@ def test_att2_bad_replaced_index(spec, state):
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
# see if the bad replaced index is spotted, and slashing is aborted.
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_duplicate_index_normal_signed(spec, state):
def test_invalid_att1_duplicate_index_normal_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
indices = list(attester_slashing.attestation_1.attesting_indices)
@ -419,13 +419,13 @@ def test_att1_duplicate_index_normal_signed(spec, state):
attester_slashing.attestation_1.attesting_indices = sorted(indices)
# it will just appear normal, unless the double index is spotted
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_duplicate_index_normal_signed(spec, state):
def test_invalid_att2_duplicate_index_normal_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
indices = list(attester_slashing.attestation_2.attesting_indices)
@ -439,13 +439,13 @@ def test_att2_duplicate_index_normal_signed(spec, state):
attester_slashing.attestation_2.attesting_indices = sorted(indices)
# it will just appear normal, unless the double index is spotted
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att1_duplicate_index_double_signed(spec, state):
def test_invalid_att1_duplicate_index_double_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
indices = list(attester_slashing.attestation_1.attesting_indices)
@ -454,13 +454,13 @@ def test_att1_duplicate_index_double_signed(spec, state):
attester_slashing.attestation_1.attesting_indices = sorted(indices)
sign_indexed_attestation(spec, state, attester_slashing.attestation_1) # will have one attester signing it double
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_att2_duplicate_index_double_signed(spec, state):
def test_invalid_att2_duplicate_index_double_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
indices = list(attester_slashing.attestation_2.attesting_indices)
@ -469,12 +469,12 @@ def test_att2_duplicate_index_double_signed(spec, state):
attester_slashing.attestation_2.attesting_indices = sorted(indices)
sign_indexed_attestation(spec, state, attester_slashing.attestation_2) # will have one attester signing it double
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_unsorted_att_1(spec, state):
def test_invalid_unsorted_att_1(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
indices = attester_slashing.attestation_1.attesting_indices
@ -482,12 +482,12 @@ def test_unsorted_att_1(spec, state):
indices[1], indices[2] = indices[2], indices[1] # unsort second and third index
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_unsorted_att_2(spec, state):
def test_invalid_unsorted_att_2(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
indices = attester_slashing.attestation_2.attesting_indices
@ -495,4 +495,4 @@ def test_unsorted_att_2(spec, state):
indices[1], indices[2] = indices[2], indices[1] # unsort second and third index
sign_indexed_attestation(spec, state, attester_slashing.attestation_2)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)

View File

@ -34,7 +34,7 @@ def run_block_header_processing(spec, state, block, prepare_state=True, valid=Tr
@with_all_phases
@spec_state_test
def test_success_block_header(spec, state):
def test_basic_block_header(spec, state):
block = build_empty_block_for_next_slot(spec, state)
yield from run_block_header_processing(spec, state, block)
@ -87,7 +87,7 @@ def test_invalid_multiple_blocks_single_slot(spec, state):
@with_all_phases
@spec_state_test
def test_proposer_slashed(spec, state):
def test_invalid_proposer_slashed(spec, state):
# use stub state to get proposer index of next slot
stub_state = deepcopy(state)
next_slot(spec, stub_state)

View File

@ -1,13 +1,12 @@
from eth2spec.test.context import spec_state_test, always_bls, with_all_phases
from eth2spec.test.helpers.deposits import (
build_deposit,
deposit_from_context,
prepare_state_and_deposit,
run_deposit_processing,
run_deposit_processing_with_specific_fork_version,
sign_deposit_data,
)
from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.utils import bls
@with_all_phases
@ -92,56 +91,29 @@ def test_new_deposit_non_versioned_withdrawal_credentials(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_other_version(spec, state):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
pubkey = pubkeys[validator_index]
privkey = privkeys[validator_index]
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
# Go through the effort of manually signing, not something normally done. This sig domain will be invalid.
deposit_message = spec.DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=spec.Version('0xaabbccdd'))
deposit_data = spec.DepositData(
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
)
deposit, root, _ = deposit_from_context(spec, [deposit_data], 0)
state.eth1_deposit_index = 0
state.eth1_data.deposit_root = root
state.eth1_data.deposit_count = 1
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=False)
@with_all_phases
@spec_state_test
@always_bls
def test_valid_sig_but_forked_state(spec, state):
def test_correct_sig_but_forked_state(spec, state):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
# deposits will always be valid, regardless of the current fork
state.fork.current_version = spec.Version('0x1234abcd')
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True)
yield from run_deposit_processing(spec, state, deposit, validator_index)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_new_deposit(spec, state):
def test_incorrect_sig_new_deposit(spec, state):
# fresh deposit = next validator index = validator appended to registry
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit = prepare_state_and_deposit(spec, state, validator_index, amount)
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=False)
yield from run_deposit_processing(spec, state, deposit, validator_index, effective=False)
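The deposit helpers distinguish two outcomes: `valid` controls whether `process_deposit` is allowed to raise at all (e.g. a bad Merkle proof), while `effective` controls whether the processed deposit actually credits a validator (a fresh deposit with a bad signature is processed without error but has no effect). The renamed tests drop arguments that match the defaults. A minimal sketch of that two-flag pattern, with assumed checks rather than the repository's exact ones:

```python
# Minimal sketch of the valid/effective split in run_deposit_processing.
# The checks below are illustrative assumptions, not the exact helper.
def run_deposit_processing_sketch(spec, state, deposit, validator_index, valid=True, effective=True):
    pre_validator_count = len(state.validators)

    yield 'pre', state
    yield 'deposit', deposit

    if not valid:
        # processing itself must fail (e.g. an invalid Merkle proof)
        raised = False
        try:
            spec.process_deposit(state, deposit)
        except (AssertionError, IndexError):
            raised = True
        assert raised
        yield 'post', None
        return

    spec.process_deposit(state, deposit)
    yield 'post', state

    if effective:
        assert state.balances[validator_index] > 0
    else:
        # e.g. bad signature on a fresh deposit: processed, but no validator is added
        assert len(state.validators) == pre_validator_count
```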
@with_all_phases
@spec_state_test
def test_success_top_up__max_effective_balance(spec, state):
def test_top_up__max_effective_balance(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
@ -157,7 +129,7 @@ def test_success_top_up__max_effective_balance(spec, state):
@with_all_phases
@spec_state_test
def test_success_top_up__less_effective_balance(spec, state):
def test_top_up__less_effective_balance(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
@ -176,7 +148,7 @@ def test_success_top_up__less_effective_balance(spec, state):
@with_all_phases
@spec_state_test
def test_success_top_up__zero_balance(spec, state):
def test_top_up__zero_balance(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
@ -196,18 +168,18 @@ def test_success_top_up__zero_balance(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_top_up(spec, state):
def test_incorrect_sig_top_up(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit = prepare_state_and_deposit(spec, state, validator_index, amount)
# invalid signatures, in top-ups, are allowed!
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True)
yield from run_deposit_processing(spec, state, deposit, validator_index)
@with_all_phases
@spec_state_test
def test_invalid_withdrawal_credentials_top_up(spec, state):
def test_incorrect_withdrawal_credentials_top_up(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(b"junk")[1:]
@ -220,12 +192,12 @@ def test_invalid_withdrawal_credentials_top_up(spec, state):
)
# inconsistent withdrawal credentials, in top-ups, are allowed!
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True)
yield from run_deposit_processing(spec, state, deposit, validator_index)
@with_all_phases
@spec_state_test
def test_wrong_deposit_for_deposit_count(spec, state):
def test_invalid_wrong_deposit_for_deposit_count(spec, state):
deposit_data_leaves = [spec.DepositData() for _ in range(len(state.validators))]
# build root for deposit_1
@ -266,7 +238,7 @@ def test_wrong_deposit_for_deposit_count(spec, state):
@with_all_phases
@spec_state_test
def test_bad_merkle_proof(spec, state):
def test_invalid_bad_merkle_proof(spec, state):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit = prepare_state_and_deposit(spec, state, validator_index, amount)
@ -307,3 +279,15 @@ def test_key_validate_invalid_decompression(spec, state):
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, pubkey=pubkey, signed=True)
yield from run_deposit_processing(spec, state, deposit, validator_index)
@with_all_phases
@spec_state_test
@always_bls
def test_ineffective_deposit_with_bad_fork_version(spec, state):
yield from run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version=spec.Version('0xAaBbCcDd'),
effective=False,
)
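The new `test_ineffective_deposit_with_bad_fork_version` replaces the hand-rolled construction removed above with a shared helper. Based on that removed code, the helper presumably signs the deposit message over a domain computed from the supplied `fork_version`; since deposit signatures only verify against the genesis fork version, the deposit is processed but ineffective. A hedged reconstruction (internals may differ from the actual helper):

```python
# Hedged reconstruction of run_deposit_processing_with_specific_fork_version,
# pieced together from the manual-signing test removed above; details may differ.
from eth2spec.test.helpers.deposits import deposit_from_context, run_deposit_processing
from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.utils import bls

def run_deposit_processing_with_specific_fork_version_sketch(spec, state, fork_version, effective=True):
    validator_index = len(state.validators)
    amount = spec.MAX_EFFECTIVE_BALANCE
    pubkey = pubkeys[validator_index]
    privkey = privkeys[validator_index]
    withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]

    # Sign over a domain derived from the given fork version; deposits are only
    # verified against the genesis fork version, so other versions make the signature fail.
    deposit_message = spec.DepositMessage(
        pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
    domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=fork_version)
    deposit_data = spec.DepositData(
        pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
        signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain)),
    )

    deposit, root, _ = deposit_from_context(spec, [deposit_data], 0)
    state.eth1_deposit_index = 0
    state.eth1_data.deposit_root = root
    state.eth1_data.deposit_count = 1

    yield from run_deposit_processing(spec, state, deposit, validator_index, effective=effective)
```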

View File

@ -34,7 +34,7 @@ def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True)
@with_all_phases
@spec_state_test
def test_success(spec, state):
def test_basic(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing)
@ -42,7 +42,7 @@ def test_success(spec, state):
@with_all_phases
@spec_state_test
def test_success_slashed_and_proposer_index_the_same(spec, state):
def test_slashed_and_proposer_index_the_same(spec, state):
# Get proposer for next slot
block = build_empty_block_for_next_slot(spec, state)
proposer_index = block.proposer_index
@ -57,7 +57,7 @@ def test_success_slashed_and_proposer_index_the_same(spec, state):
@with_all_phases
@spec_state_test
def test_success_block_header_from_future(spec, state):
def test_block_header_from_future(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, slot=state.slot + 5, signed_1=True, signed_2=True)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing)
@ -66,31 +66,31 @@ def test_success_block_header_from_future(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1(spec, state):
def test_invalid_incorrect_sig_1(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=True)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_2(spec, state):
def test_invalid_incorrect_sig_2(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1_and_2(spec, state):
def test_invalid_incorrect_sig_1_and_2(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1_and_2_swap(spec, state):
def test_invalid_incorrect_sig_1_and_2_swap(spec, state):
# Get valid signatures for the slashings
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
@ -98,18 +98,18 @@ def test_invalid_sig_1_and_2_swap(spec, state):
signature_1 = proposer_slashing.signed_header_1.signature
proposer_slashing.signed_header_1.signature = proposer_slashing.signed_header_2.signature
proposer_slashing.signed_header_2.signature = signature_1
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_invalid_proposer_index(spec, state):
def test_invalid_incorrect_proposer_index(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# Index just too high (by 1)
proposer_slashing.signed_header_1.message.proposer_index = len(state.validators)
proposer_slashing.signed_header_2.message.proposer_index = len(state.validators)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@ -125,12 +125,12 @@ def test_invalid_different_proposer_indices(spec, state):
header_2.proposer_index = active_indices[0]
proposer_slashing.signed_header_2 = sign_block_header(spec, state, header_2, privkeys[header_2.proposer_index])
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_epochs_are_different(spec, state):
def test_invalid_slots_of_different_epochs(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)
# set slots to be in different epochs
@ -139,23 +139,23 @@ def test_epochs_are_different(spec, state):
header_2.slot += spec.SLOTS_PER_EPOCH
proposer_slashing.signed_header_2 = sign_block_header(spec, state, header_2, privkeys[proposer_index])
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_headers_are_same_sigs_are_same(spec, state):
def test_invalid_headers_are_same_sigs_are_same(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)
# set headers to be the same
proposer_slashing.signed_header_2 = proposer_slashing.signed_header_1.copy()
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_headers_are_same_sigs_are_different(spec, state):
def test_invalid_headers_are_same_sigs_are_different(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)
# set headers to be the same
@ -165,36 +165,36 @@ def test_headers_are_same_sigs_are_different(spec, state):
assert proposer_slashing.signed_header_1.signature != proposer_slashing.signed_header_2.signature
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_proposer_is_not_activated(spec, state):
def test_invalid_proposer_is_not_activated(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# set proposer to be not active yet
proposer_index = proposer_slashing.signed_header_1.message.proposer_index
state.validators[proposer_index].activation_epoch = spec.get_current_epoch(state) + 1
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_proposer_is_slashed(spec, state):
def test_invalid_proposer_is_slashed(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# set proposer to slashed
proposer_index = proposer_slashing.signed_header_1.message.proposer_index
state.validators[proposer_index].slashed = True
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)
@with_all_phases
@spec_state_test
def test_proposer_is_withdrawn(spec, state):
def test_invalid_proposer_is_withdrawn(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# move 1 epoch into future, to allow for past withdrawable epoch
@ -204,4 +204,4 @@ def test_proposer_is_withdrawn(spec, state):
proposer_index = proposer_slashing.signed_header_1.message.proposer_index
state.validators[proposer_index].withdrawable_epoch = current_epoch - 1
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)

View File

@ -14,7 +14,7 @@ from eth2spec.test.helpers.voluntary_exits import (
@with_all_phases
@spec_state_test
def test_success(spec, state):
def test_basic(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
@ -33,7 +33,7 @@ def test_success(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_signature(spec, state):
def test_invalid_incorrect_signature(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
@ -46,7 +46,7 @@ def test_invalid_signature(spec, state):
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, 12345)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)
def run_test_success_exit_queue(spec, state):
@ -134,7 +134,7 @@ def test_default_exit_epoch_subsequent_exit(spec, state):
@with_all_phases
@spec_state_test
def test_validator_exit_in_future(spec, state):
def test_invalid_validator_exit_in_future(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
@ -148,12 +148,12 @@ def test_validator_exit_in_future(spec, state):
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)
@with_all_phases
@spec_state_test
def test_validator_invalid_validator_index(spec, state):
def test_invalid_validator_incorrect_validator_index(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
@ -167,12 +167,12 @@ def test_validator_invalid_validator_index(spec, state):
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)
@with_all_phases
@spec_state_test
def test_validator_not_active(spec, state):
def test_invalid_validator_not_active(spec, state):
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
@ -182,12 +182,12 @@ def test_validator_not_active(spec, state):
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)
@with_all_phases
@spec_state_test
def test_validator_already_exited(spec, state):
def test_invalid_validator_already_exited(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow the validator to exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
@ -201,12 +201,12 @@ def test_validator_already_exited(spec, state):
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)
@with_all_phases
@spec_state_test
def test_validator_not_active_long_enough(spec, state):
def test_invalid_validator_not_active_long_enough(spec, state):
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
@ -219,4 +219,4 @@ def test_validator_not_active_long_enough(spec, state):
spec.config.SHARD_COMMITTEE_PERIOD
)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)

View File

@ -47,20 +47,20 @@ def run_is_valid_genesis_state(spec, state, valid=True):
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
def test_is_valid_genesis_state_true(spec):
def test_full_genesis_deposits(spec):
if is_post_altair(spec):
yield 'description', 'meta', get_post_altair_description(spec)
state = create_valid_beacon_state(spec)
yield from run_is_valid_genesis_state(spec, state, valid=True)
yield from run_is_valid_genesis_state(spec, state)
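As the hunk header shows, `run_is_valid_genesis_state(spec, state, valid=True)` takes the expected verdict as a keyword, so the `valid=True` arguments removed here were redundant. The helper presumably just checks `spec.is_valid_genesis_state` against that verdict, roughly:

```python
# Rough sketch (assumed body) of the run_is_valid_genesis_state helper whose
# signature appears in the hunk header above.
def run_is_valid_genesis_state_sketch(spec, state, valid=True):
    yield 'genesis', state
    assert spec.is_valid_genesis_state(state) == valid
```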
@with_all_phases
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
def test_is_valid_genesis_state_false_invalid_timestamp(spec):
def test_invalid_invalid_timestamp(spec):
if is_post_altair(spec):
yield 'description', 'meta', get_post_altair_description(spec)
@ -74,21 +74,21 @@ def test_is_valid_genesis_state_false_invalid_timestamp(spec):
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
def test_is_valid_genesis_state_true_more_balance(spec):
def test_extra_balance(spec):
if is_post_altair(spec):
yield 'description', 'meta', get_post_altair_description(spec)
state = create_valid_beacon_state(spec)
state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE + 1
yield from run_is_valid_genesis_state(spec, state, valid=True)
yield from run_is_valid_genesis_state(spec, state)
@with_all_phases
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
def test_is_valid_genesis_state_true_one_more_validator(spec):
def test_one_more_validator(spec):
if is_post_altair(spec):
yield 'description', 'meta', get_post_altair_description(spec)
@ -104,14 +104,14 @@ def test_is_valid_genesis_state_true_one_more_validator(spec):
eth1_timestamp = spec.config.MIN_GENESIS_TIME
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
yield from run_is_valid_genesis_state(spec, state, valid=True)
yield from run_is_valid_genesis_state(spec, state)
@with_all_phases
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
def test_is_valid_genesis_state_false_not_enough_validator(spec):
def test_invalid_not_enough_validator_count(spec):
if is_post_altair(spec):
yield 'description', 'meta', get_post_altair_description(spec)

View File

@ -43,7 +43,7 @@ from eth2spec.test.context import (
@with_all_phases
@spec_state_test
def test_prev_slot_block_transition(spec, state):
def test_invalid_prev_slot_block_transition(spec, state):
# Go to clean slot
spec.process_slots(state, state.slot + 1)
# Make a block for it
@ -64,7 +64,7 @@ def test_prev_slot_block_transition(spec, state):
@with_all_phases
@spec_state_test
def test_same_slot_block_transition(spec, state):
def test_invalid_same_slot_block_transition(spec, state):
# Same slot on top of pre-state, but move out of slot 0 first.
spec.process_slots(state, state.slot + 1)
@ -161,7 +161,7 @@ def process_and_sign_block_without_header_validations(spec, state, block):
@with_phases([PHASE0])
@spec_state_test
def test_proposal_for_genesis_slot(spec, state):
def test_invalid_proposal_for_genesis_slot(spec, state):
assert state.slot == spec.GENESIS_SLOT
yield 'pre', state
@ -184,7 +184,7 @@ def test_proposal_for_genesis_slot(spec, state):
@with_all_phases
@spec_state_test
def test_parent_from_same_slot(spec, state):
def test_invalid_parent_from_same_slot(spec, state):
yield 'pre', state
parent_block = build_empty_block_for_next_slot(spec, state)
@ -211,7 +211,7 @@ def test_parent_from_same_slot(spec, state):
@with_all_phases
@spec_state_test
def test_invalid_state_root(spec, state):
def test_invalid_incorrect_state_root(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
@ -227,7 +227,7 @@ def test_invalid_state_root(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_zero_block_sig(spec, state):
def test_invalid_all_zeroed_sig(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
@ -241,7 +241,7 @@ def test_zero_block_sig(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_block_sig(spec, state):
def test_invalid_incorrect_block_sig(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
@ -260,7 +260,7 @@ def test_invalid_block_sig(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_proposer_index_sig_from_expected_proposer(spec, state):
def test_invalid_incorrect_proposer_index_sig_from_expected_proposer(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
@ -282,7 +282,7 @@ def test_invalid_proposer_index_sig_from_expected_proposer(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_proposer_index_sig_from_proposer_index(spec, state):
def test_invalid_incorrect_proposer_index_sig_from_proposer_index(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
@ -707,7 +707,7 @@ def test_high_proposer_index(spec, state):
@with_all_phases
@spec_state_test
def test_expected_deposit_in_block(spec, state):
def test_invalid_only_increase_deposit_count(spec, state):
# Make the state expect a deposit, then don't provide it.
state.eth1_data.deposit_count += 1
yield 'pre', state

View File

@ -1,5 +1,8 @@
from eth2spec.test.helpers.state import get_state_root
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.context import (
spec_state_test,
with_all_phases,
)
@with_all_phases

View File

@ -7,6 +7,9 @@ import warnings
from random import Random
from typing import Callable
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.multi_operations import (
build_random_block_from_state_for_next_slot,
get_random_bls_to_execution_changes,
@ -234,6 +237,7 @@ def random_block_eip4844(spec, state, signed_blocks, scenario_state, rng=Random(
# TODO: more commitments. blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK]
opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=1)
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
block.body.blob_kzg_commitments = blob_kzg_commitments
return block
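Replacing the payload's `transactions` with the blob-carrying `opaque_tx` leaves the previously set `block_hash` stale, so the helper re-derives it from the updated payload before attaching the KZG commitments. The stand-in below only illustrates that dependency; it is not the repository's `compute_el_block_hash`:

```python
# Illustrative stand-in only: the point is that the block hash is a function of the
# payload contents, so it must be recomputed after the transactions are swapped out.
def compute_el_block_hash_stand_in(spec, payload):
    return spec.Hash32(spec.hash_tree_root(payload))
```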

View File

@ -93,11 +93,11 @@ def get_lagrange(setup: Sequence[Optimized_Point3D]) -> Tuple[bytes]:
return tuple(bls.G1_to_bytes48(multiply(fft_output[-i], inv_length)) for i in range(len(fft_output)))
def dump_kzg_trusted_setup_files(secret: int, length: int, output_dir: str) -> None:
setup_g1 = generate_setup(bls.G1, secret, length)
setup_g2 = generate_setup(bls.G2, secret, length)
def dump_kzg_trusted_setup_files(secret: int, g1_length: int, g2_length: int, output_dir: str) -> None:
setup_g1 = generate_setup(bls.G1, secret, g1_length)
setup_g2 = generate_setup(bls.G2, secret, g2_length)
setup_g1_lagrange = get_lagrange(setup_g1)
roots_of_unity = compute_roots_of_unity(length)
roots_of_unity = compute_roots_of_unity(g1_length)
serialized_setup_g1 = [encode_hex(bls.G1_to_bytes48(p)) for p in setup_g1]
serialized_setup_g2 = [encode_hex(bls.G2_to_bytes96(p)) for p in setup_g2]
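The generator now sizes the G1 and G2 halves of the setup independently (previously one `length` covered both), and the roots of unity follow the G1 length, which is the side tied to the committed polynomial's size. A toy sketch of the monomial-form generation this implies; the secret, lengths, and py_ecc import path are illustrative assumptions, and no real setup is produced this way:

```python
# Toy sketch of powers-of-the-secret setup generation with independent G1/G2 lengths.
# Secret and lengths are made-up illustration values; the import path is an assumption.
from py_ecc.optimized_bls12_381 import G1, G2, curve_order, multiply

def generate_setup_sketch(generator, secret, length):
    # [g, g^s, g^(s^2), ..., g^(s^(length-1))]
    return tuple(multiply(generator, pow(secret, i, curve_order)) for i in range(length))

setup_g1 = generate_setup_sketch(G1, 12345, 8)  # G1 side: one point per polynomial coefficient
setup_g2 = generate_setup_sketch(G2, 12345, 2)  # G2 side: sized on its own
```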

View File

@ -11,10 +11,15 @@ if __name__ == "__main__":
'proposer_slashing',
'voluntary_exit',
]}
_new_altair_mods = {'sync_aggregate': [
_new_altair_mods = {
**{'sync_aggregate': [
'eth2spec.test.altair.block_processing.sync_aggregate.test_process_' + key
for key in ['sync_aggregate', 'sync_aggregate_random']
]},
**{key: 'eth2spec.test.altair.block_processing.test_process_' + key for key in [
'deposit',
]}
}
altair_mods = combine_mods(_new_altair_mods, phase_0_mods)
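The Altair entry now layers a new `deposit` block-processing module on top of the modules inherited from phase0 via `combine_mods`. Presumably that helper merges the fork-specific dict over the inherited one, extending any key both define; a rough illustration (the real helper in the generator tooling may differ):

```python
# Rough illustration of the layering combine_mods presumably performs; the real
# helper in the generator tooling may behave differently in the details.
def combine_mods_sketch(new_mods, previous_mods):
    merged = dict(previous_mods)
    for key, value in new_mods.items():
        if key not in merged:
            merged[key] = value
            continue
        # a fork can extend an inherited key: normalize both sides to lists and concatenate
        existing = merged[key] if isinstance(merged[key], list) else [merged[key]]
        merged[key] = existing + (value if isinstance(value, list) else [value])
    return merged
```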
_new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.block_processing.test_process_' + key for key in [