commit 579da6d2dc

@@ -1,6 +1,4 @@
# Mainnet preset
# Note: the intention of this file (for now) is to illustrate what a mainnet configuration could look like.
# Some of these constants may still change before the launch of Phase 0.

CONFIG_NAME: "mainnet"

@@ -20,16 +18,14 @@ CHURN_LIMIT_QUOTIENT: 65536
SHUFFLE_ROUND_COUNT: 90
# `2**14` (= 16,384)
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
# Jan 3, 2020
MIN_GENESIS_TIME: 1578009600
# Dec 1, 2020, 12pm UTC
MIN_GENESIS_TIME: 1606824000
# 4
HYSTERESIS_QUOTIENT: 4
# 1 (minus 0.25)
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
# 5 (plus 1.25)
HYSTERESIS_UPWARD_MULTIPLIER: 5
# 3
PROPORTIONAL_SLASHING_MULTIPLIER: 3


# Fork Choice

@@ -40,8 +36,8 @@ SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8

# Validator
# ---------------------------------------------------------------
# 2**10 (= 1,024)
ETH1_FOLLOW_DISTANCE: 1024
# 2**11 (= 2,048)
ETH1_FOLLOW_DISTANCE: 2048
# 2**4 (= 16)
TARGET_AGGREGATORS_PER_COMMITTEE: 16
# 2**0 (= 1)

@@ -58,7 +54,7 @@ SECONDS_PER_ETH1_BLOCK: 14
DEPOSIT_CHAIN_ID: 1
DEPOSIT_NETWORK_ID: 1
# **TBD**
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa


# Gwei values

@@ -82,8 +78,8 @@ BLS_WITHDRAWAL_PREFIX: 0x00

# Time parameters
# ---------------------------------------------------------------
# 172800 seconds (2 days)
GENESIS_DELAY: 172800
# 604800 seconds (7 days)
GENESIS_DELAY: 604800
# 12 seconds
SECONDS_PER_SLOT: 12
# 2**0 (= 1) slots 12 seconds

@@ -94,8 +90,8 @@ SLOTS_PER_EPOCH: 32
MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs 25.6 minutes
MAX_SEED_LOOKAHEAD: 4
# 2**5 (= 32) epochs ~3.4 hours
EPOCHS_PER_ETH1_VOTING_PERIOD: 32
# 2**6 (= 64) epochs ~6.8 hours
EPOCHS_PER_ETH1_VOTING_PERIOD: 64
# 2**13 (= 8,192) slots ~13 hours
SLOTS_PER_HISTORICAL_ROOT: 8192
# 2**8 (= 256) epochs ~27 hours

@@ -126,10 +122,12 @@ BASE_REWARD_FACTOR: 64
WHISTLEBLOWER_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**24 (= 16,777,216)
INACTIVITY_PENALTY_QUOTIENT: 16777216
# 2**5 (= 32)
MIN_SLASHING_PENALTY_QUOTIENT: 32
# 2**26 (= 67,108,864)
INACTIVITY_PENALTY_QUOTIENT: 67108864
# 2**7 (= 128) (lower safety margin at Phase 0 genesis)
MIN_SLASHING_PENALTY_QUOTIENT: 128
# 1 (lower safety margin at Phase 0 genesis)
PROPORTIONAL_SLASHING_MULTIPLIER: 1


# Max operations per block
@@ -27,8 +27,6 @@ HYSTERESIS_QUOTIENT: 4
HYSTERESIS_DOWNWARD_MULTIPLIER: 1
# 5 (plus 1.25)
HYSTERESIS_UPWARD_MULTIPLIER: 5
# 3
PROPORTIONAL_SLASHING_MULTIPLIER: 3


# Fork Choice

@@ -56,7 +54,7 @@ SECONDS_PER_ETH1_BLOCK: 14
# Ethereum Goerli testnet
DEPOSIT_CHAIN_ID: 5
DEPOSIT_NETWORK_ID: 5
# **TBD**
# Configured on a per testnet basis
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890


@@ -125,10 +123,12 @@ BASE_REWARD_FACTOR: 64
WHISTLEBLOWER_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**24 (= 16,777,216)
INACTIVITY_PENALTY_QUOTIENT: 16777216
# 2**5 (= 32)
MIN_SLASHING_PENALTY_QUOTIENT: 32
# [customized] 2**25 (= 33,554,432)
INACTIVITY_PENALTY_QUOTIENT: 33554432
# [customized] 2**6 (= 64)
MIN_SLASHING_PENALTY_QUOTIENT: 64
# [customized] 2 (lower safety margin than Phase 0 genesis but different than mainnet config for testing)
PROPORTIONAL_SLASHING_MULTIPLIER: 2


# Max operations per block
setup.py

@@ -536,8 +536,8 @@ setup(
        "eth-utils>=1.3.0,<2",
        "eth-typing>=2.1.0,<3.0.0",
        "pycryptodome==3.9.4",
        "py_ecc==4.0.0",
        "milagro_bls_binding==1.3.0",
        "py_ecc==5.0.0",
        "milagro_bls_binding==1.5.0",
        "dataclasses==0.6",
        "remerkleable==0.1.17",
        "ruamel.yaml==0.16.5",
@@ -1,7 +1,5 @@
# Ethereum 2.0 Phase 0 -- The Beacon Chain

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->

@@ -177,13 +175,13 @@ The following values are (non-configurable) constants used throughout the specif

## Configuration

*Note*: The default mainnet configuration values are included here for spec-design purposes. The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs) directory. These configurations are updated for releases and may be out of sync during `dev` changes.
*Note*: The default mainnet configuration values are included here for illustrative purposes. The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs) directory.

### Misc

| Name | Value |
| - | - |
| `ETH1_FOLLOW_DISTANCE` | `uint64(2**10)` (= 1,024) |
| `ETH1_FOLLOW_DISTANCE` | `uint64(2**11)` (= 2,048) |
| `MAX_COMMITTEES_PER_SLOT` | `uint64(2**6)` (= 64) |
| `TARGET_COMMITTEE_SIZE` | `uint64(2**7)` (= 128) |
| `MAX_VALIDATORS_PER_COMMITTEE` | `uint64(2**11)` (= 2,048) |

@@ -191,11 +189,10 @@ The following values are (non-configurable) constants used throughout the specif
| `CHURN_LIMIT_QUOTIENT` | `uint64(2**16)` (= 65,536) |
| `SHUFFLE_ROUND_COUNT` | `uint64(90)` |
| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `uint64(2**14)` (= 16,384) |
| `MIN_GENESIS_TIME` | `uint64(1578009600)` (Jan 3, 2020) |
| `MIN_GENESIS_TIME` | `uint64(1606824000)` (Dec 1, 2020, 12pm UTC) |
| `HYSTERESIS_QUOTIENT` | `uint64(4)` |
| `HYSTERESIS_DOWNWARD_MULTIPLIER` | `uint64(1)` |
| `HYSTERESIS_UPWARD_MULTIPLIER` | `uint64(5)` |
| `PROPORTIONAL_SLASHING_MULTIPLIER` | `uint64(3)` |

- For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](http://web.archive.org/web/20190504131341/https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)
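The hysteresis constants above control when a validator's effective balance moves. A rough standalone sketch of that update rule (the Gwei constants `EFFECTIVE_BALANCE_INCREMENT` and `MAX_EFFECTIVE_BALANCE` are not shown in this diff; the mainnet values are assumed here):

```python
# Hedged sketch of the effective-balance hysteresis rule, with assumed mainnet Gwei values.
EFFECTIVE_BALANCE_INCREMENT = 10**9        # 1 ETH in Gwei (assumed)
MAX_EFFECTIVE_BALANCE = 32 * 10**9         # 32 ETH in Gwei (assumed)
HYSTERESIS_QUOTIENT = 4
HYSTERESIS_DOWNWARD_MULTIPLIER = 1
HYSTERESIS_UPWARD_MULTIPLIER = 5

def updated_effective_balance(balance: int, effective_balance: int) -> int:
    hysteresis_increment = EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT  # 0.25 ETH
    downward = hysteresis_increment * HYSTERESIS_DOWNWARD_MULTIPLIER           # 0.25 ETH
    upward = hysteresis_increment * HYSTERESIS_UPWARD_MULTIPLIER               # 1.25 ETH
    if balance + downward < effective_balance or effective_balance + upward < balance:
        return min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
    return effective_balance

# A 32 ETH effective balance only drops once the actual balance falls below 31.75 ETH.
assert updated_effective_balance(31_800_000_000, 32_000_000_000) == 32_000_000_000
assert updated_effective_balance(31_700_000_000, 32_000_000_000) == 31_000_000_000
```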
@@ -219,7 +216,7 @@ The following values are (non-configurable) constants used throughout the specif

| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `GENESIS_DELAY` | `uint64(172800)` | seconds | 2 days |
| `GENESIS_DELAY` | `uint64(604800)` | seconds | 7 days |
| `SECONDS_PER_SLOT` | `uint64(12)` | seconds | 12 seconds |
| `SECONDS_PER_ETH1_BLOCK` | `uint64(14)` | seconds | 14 seconds |
| `MIN_ATTESTATION_INCLUSION_DELAY` | `uint64(2**0)` (= 1) | slots | 12 seconds |

@@ -227,7 +224,7 @@ The following values are (non-configurable) constants used throughout the specif
| `MIN_SEED_LOOKAHEAD` | `uint64(2**0)` (= 1) | epochs | 6.4 minutes |
| `MAX_SEED_LOOKAHEAD` | `uint64(2**2)` (= 4) | epochs | 25.6 minutes |
| `MIN_EPOCHS_TO_INACTIVITY_PENALTY` | `uint64(2**2)` (= 4) | epochs | 25.6 minutes |
| `EPOCHS_PER_ETH1_VOTING_PERIOD` | `uint64(2**5)` (= 32) | epochs | ~3.4 hours |
| `EPOCHS_PER_ETH1_VOTING_PERIOD` | `uint64(2**6)` (= 64) | epochs | ~6.8 hours |
| `SLOTS_PER_HISTORICAL_ROOT` | `uint64(2**13)` (= 8,192) | slots | ~27 hours |
| `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `uint64(2**8)` (= 256) | epochs | ~27 hours |
| `SHARD_COMMITTEE_PERIOD` | `uint64(2**8)` (= 256) | epochs | ~27 hours |

@@ -248,10 +245,13 @@ The following values are (non-configurable) constants used throughout the specif
| `BASE_REWARD_FACTOR` | `uint64(2**6)` (= 64) |
| `WHISTLEBLOWER_REWARD_QUOTIENT` | `uint64(2**9)` (= 512) |
| `PROPOSER_REWARD_QUOTIENT` | `uint64(2**3)` (= 8) |
| `INACTIVITY_PENALTY_QUOTIENT` | `uint64(2**24)` (= 16,777,216) |
| `MIN_SLASHING_PENALTY_QUOTIENT` | `uint64(2**5)` (= 32) |
| `INACTIVITY_PENALTY_QUOTIENT` | `uint64(2**26)` (= 67,108,864) |
| `MIN_SLASHING_PENALTY_QUOTIENT` | `uint64(2**7)` (= 128) |
| `PROPORTIONAL_SLASHING_MULTIPLIER` | `uint64(1)` |

- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12` epochs (about 18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**13` epochs (about 36 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. Note this value will be upgraded to `2**24` after Phase 0 mainnet stabilizes to provide a faster recovery in the event of an inactivity leak.

- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stabilizes, this value will be upgraded to `3` to provide the maximal minimum accountable safety margin.
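A quick numerical check of the retained-balance approximation above (a standalone sketch, not spec code):

```python
import math

INACTIVITY_PENALTY_QUOTIENT = 2**26   # value at Phase 0 genesis, per the table above
INVERSE_SQRT_E_DROP_TIME = 2**13      # epochs, so that the quotient equals DROP_TIME**2
                                      # 2**13 epochs * 32 slots * 12 s ~= 36 days

def retained(n: int) -> float:
    """Approximate balance fraction an offline validator keeps after n epochs of leaking."""
    return (1 - 1 / INACTIVITY_PENALTY_QUOTIENT) ** (n**2 / 2)

print(retained(INVERSE_SQRT_E_DROP_TIME))  # ~0.6065
print(1 / math.sqrt(math.e))               # ~0.6065, i.e. 1/sqrt(e)
```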
### Max operations per block

@@ -603,7 +603,7 @@ def bytes_to_uint64(data: bytes) -> uint64:

#### BLS Signatures

Eth2 makes use of BLS signatures as specified in the [IETF draft BLS specification draft-irtf-cfrg-bls-signature-03](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-03). Specifically, eth2 uses the `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` ciphersuite which implements the following interfaces:
Eth2 makes use of BLS signatures as specified in the [IETF draft BLS specification draft-irtf-cfrg-bls-signature-04](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04). Specifically, eth2 uses the `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` ciphersuite which implements the following interfaces:

- `def Sign(SK: int, message: Bytes) -> BLSSignature`
- `def Verify(PK: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool`
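For reference, the `py_ecc==5.0.0` dependency bumped in `setup.py` above ships an implementation of this ciphersuite; a minimal usage sketch (the `G2ProofOfPossession` module path and the toy key below are assumptions, not part of this diff):

```python
from py_ecc.bls import G2ProofOfPossession as bls  # BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_

privkey = 42                      # toy secret key; real keys come from proper key derivation
pubkey = bls.SkToPk(privkey)
message = b"\x12" * 32

signature = bls.Sign(privkey, message)
assert bls.Verify(pubkey, message, signature)

# Aggregation, as used for attestation aggregates:
agg = bls.Aggregate([signature, bls.Sign(privkey, message)])
assert bls.FastAggregateVerify([pubkey, pubkey], message, agg)
```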
|
@ -1189,8 +1189,6 @@ def is_valid_genesis_state(state: BeaconState) -> bool:
|
|||
return True
|
||||
```
|
||||
|
||||
*Note*: The `is_valid_genesis_state` function (including `MIN_GENESIS_TIME` and `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT`) is a placeholder for testing. It has yet to be finalized by the community, and can be updated as necessary.
|
||||
|
||||
### Genesis block
|
||||
|
||||
Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`.
|
||||
|
@ -1200,7 +1198,7 @@ Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`.
|
|||
The post-state corresponding to a pre-state `state` and a signed block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid.
|
||||
|
||||
```python
|
||||
def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> BeaconState:
|
||||
def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None:
|
||||
block = signed_block.message
|
||||
# Process slots (including those with no blocks) since block
|
||||
process_slots(state, block.slot)
|
||||
|
@ -1212,8 +1210,6 @@ def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, valida
|
|||
# Verify state root
|
||||
if validate_result:
|
||||
assert block.state_root == hash_tree_root(state)
|
||||
# Return post-state
|
||||
return state
|
||||
```
|
||||
|
||||
```python
|
||||
|
|
|
@@ -1,7 +1,5 @@
# Ethereum 2.0 Phase 0 -- Deposit Contract

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->

@@ -43,7 +41,7 @@ These configurations are updated for releases and may be out of sync during `dev
| - | - |
| `DEPOSIT_CHAIN_ID` | `1` |
| `DEPOSIT_NETWORK_ID` | `1` |
| `DEPOSIT_CONTRACT_ADDRESS` | **TBD** |
| `DEPOSIT_CONTRACT_ADDRESS` | `0x00000000219ab540356cBB839Cbe05303d7705Fa` |

## Ethereum 1.0 deposit contract
@@ -1,7 +1,5 @@
# Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->

@@ -226,11 +224,10 @@ def get_head(store: Store) -> Root:
    blocks = get_filtered_block_tree(store)
    # Execute the LMD-GHOST fork choice
    head = store.justified_checkpoint.root
    justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
    while True:
        children = [
            root for root in blocks.keys()
            if blocks[root].parent_root == head and blocks[root].slot > justified_slot
            if blocks[root].parent_root == head
        ]
        if len(children) == 0:
            return head

@@ -355,7 +352,8 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root

    # Check the block is valid and compute the post-state
    state = state_transition(pre_state, signed_block, True)
    state = pre_state.copy()
    state_transition(state, signed_block, True)
    # Add new block to the store
    store.blocks[hash_tree_root(block)] = block
    # Add new state for this block to the store
@@ -54,8 +54,6 @@ It consists of four main sections:
- [ENR structure](#enr-structure)
- [Attestation subnet bitfield](#attestation-subnet-bitfield)
- [`eth2` field](#eth2-field)
- [General capabilities](#general-capabilities)
- [Topic advertisement](#topic-advertisement)
- [Design decision rationale](#design-decision-rationale)
- [Transport](#transport-1)
- [Why are we defining specific transports?](#why-are-we-defining-specific-transports)

@@ -78,6 +76,7 @@ It consists of four main sections:
- [How do we upgrade gossip channels (e.g. changes in encoding, compression)?](#how-do-we-upgrade-gossip-channels-eg-changes-in-encoding-compression)
- [Why must all clients use the same gossip topic instead of one negotiated between each peer pair?](#why-must-all-clients-use-the-same-gossip-topic-instead-of-one-negotiated-between-each-peer-pair)
- [Why are the topics strings and not hashes?](#why-are-the-topics-strings-and-not-hashes)
- [Why are we using the `StrictNoSign` signature policy?](#why-are-we-using-the-strictnosign-signature-policy)
- [Why are we overriding the default libp2p pubsub `message-id`?](#why-are-we-overriding-the-default-libp2p-pubsub-message-id)
- [Why are these specific gossip parameters chosen?](#why-are-these-specific-gossip-parameters-chosen)
- [Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets?](#why-is-there-maximum_gossip_clock_disparity-when-validating-slot-ranges-of-messages-in-gossip-subnets)

@@ -178,6 +177,9 @@ This section outlines constants that are used in this spec.
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |
| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. |
| `MAXIMUM_GOSSIP_CLOCK_DISPARITY` | `500ms` | The maximum milliseconds of clock disparity assumed between honest nodes. |
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
| `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |

## MetaData

@@ -209,12 +211,10 @@ including the [gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsu

**Gossipsub Parameters**

*Note*: Parameters listed here are subject to a large-scale network feasibility study.

The following gossipsub [parameters](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md#parameters) will be used:

- `D` (topic stable mesh target count): 6
- `D_low` (topic stable mesh low watermark): 5
- `D` (topic stable mesh target count): 8
- `D_low` (topic stable mesh low watermark): 6
- `D_high` (topic stable mesh high watermark): 12
- `D_lazy` (gossip target): 6
- `heartbeat_interval` (frequency of heartbeat, seconds): 0.7

@@ -223,6 +223,11 @@ The following gossipsub [parameters](https://github.com/libp2p/specs/blob/master
- `mcache_gossip` (number of windows to gossip about): 3
- `seen_ttl` (number of heartbeat intervals to retain message IDs): 550

*Note*: Gossipsub v1.1 introduces a number of
[additional parameters](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#overview-of-new-parameters)
for peer scoring and other attack mitigations.
These are currently under investigation and will be spec'd and released to mainnet when they are ready.

### Topics and messages

Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages).

@@ -243,11 +248,22 @@ Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/
Clients MUST reject (fail validation) messages that are over this size limit.
Likewise, clients MUST NOT emit or propagate messages larger than this limit.

The `message-id` of a gossipsub message MUST be the first 8 bytes of the SHA-256 hash of the message data, i.e.:
The optional `from` (1), `seqno` (3), `signature` (5) and `key` (6) protobuf fields are omitted from the message,
since messages are identified by content, anonymous, and signed where necessary in the application layer.
Starting from Gossipsub v1.1, clients MUST enforce this by applying the `StrictNoSign`
[signature policy](https://github.com/libp2p/specs/blob/master/pubsub/README.md#signature-policy-options).

```python
message-id: SHA256(message.data)[0:8]
```
The `message-id` of a gossipsub message MUST be the following 20 byte value computed from the message data:
* If `message.data` has a valid snappy decompression, set `message-id` to the first 20 bytes of the `SHA256` hash of
  the concatenation of `MESSAGE_DOMAIN_VALID_SNAPPY` with the snappy decompressed message data,
  i.e. `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + snappy_decompress(message.data))[:20]`.
* Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of
  the concatenation of `MESSAGE_DOMAIN_INVALID_SNAPPY` with the raw message data,
  i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data)[:20]`.

*Note*: The above logic handles two exceptional cases:
(1) multiple snappy `data` can decompress to the same value,
and (2) some message `data` can fail to snappy decompress altogether.
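A standalone sketch of the new `message-id` rule (it assumes the `python-snappy` package for decompression; not part of this diff):

```python
import hashlib
import snappy  # assumed dependency: python-snappy

MESSAGE_DOMAIN_INVALID_SNAPPY = bytes.fromhex("00000000")
MESSAGE_DOMAIN_VALID_SNAPPY = bytes.fromhex("01000000")

def compute_message_id(message_data: bytes) -> bytes:
    """Return the 20-byte gossipsub message-id for raw (snappy-compressed) message data."""
    try:
        decompressed = snappy.decompress(message_data)
        return hashlib.sha256(MESSAGE_DOMAIN_VALID_SNAPPY + decompressed).digest()[:20]
    except Exception:
        # Payloads that fail snappy decompression still get a domain-separated id.
        return hashlib.sha256(MESSAGE_DOMAIN_INVALID_SNAPPY + message_data).digest()[:20]
```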
The payload is carried in the `data` field of a gossipsub message, and varies depending on the topic:

@@ -317,6 +333,8 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
- _[IGNORE]_ `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
  i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot`
  (a client MAY queue future aggregates for processing at the appropriate slot).
- _[REJECT]_ The aggregate attestation's epoch matches its target -- i.e. `aggregate.data.target.epoch ==
  compute_epoch_at_slot(aggregate.data.slot)`
- _[IGNORE]_ The valid aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen
  (via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally).
- _[IGNORE]_ The `aggregate` is the first valid aggregate received for the aggregator

@@ -734,7 +752,7 @@ and MUST support serving requests of blocks up to their own `head_block_root`.

Clients MUST respond with at least the first block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOCKS` blocks.

The following blocks, where they exist, MUST be send in consecutive order.
The following blocks, where they exist, MUST be sent in consecutive order.

Clients MAY limit the number of blocks in the response.

@@ -846,13 +864,11 @@ The response MUST consist of a single `response_chunk`.

## The discovery domain: discv5

Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery.
Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) (Protocol version v5.1) is used for peer discovery.

`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only.
`discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context.

:warning: Under construction. :warning:

### Integration into libp2p stacks

`discv5` SHOULD be integrated into the client’s libp2p stack by implementing an adaptor

@@ -935,19 +951,6 @@ Clients MAY connect to peers with the same `fork_digest` but a different `next_f
Unless `ENRForkID` is manually updated to matching prior to the earlier `next_fork_epoch` of the two clients,
these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`.

#### General capabilities

ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner.
The concrete solution is currently undefined.
Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability.

### Topic advertisement

discv5's topic advertisement feature is not expected to be ready for mainnet launch of Phase 0.

Once this feature is built out and stable, we expect to use topic advertisement as a rendezvous facility for peers on shards.
Until then, the ENR [attestation subnet bitfield](#attestation-subnet-bitfield) will be used for discovery of peers on particular subnets.

# Design decision rationale

## Transport

@@ -1164,10 +1167,16 @@ since the domain is finite anyway, and calculating a digest's preimage would be
Furthermore, the Eth2 topic names are shorter than their digest equivalents (assuming SHA-256 hash),
so hashing topics would bloat messages unnecessarily.

### Why are we using the `StrictNoSign` signature policy?

The policy omits the `from` (1), `seqno` (3), `signature` (5) and `key` (6) fields. These fields would:
- Expose origin of sender (`from`), type of sender (based on `seqno`)
- Add extra unused data to the gossip, since message IDs are based on `data`, not on the `from` and `seqno`.
- Introduce more message validation than necessary, e.g. no `signature`.

### Why are we overriding the default libp2p pubsub `message-id`?

For our current purposes, there is no need to address messages based on source peer,
and it seems likely we might even override the message `from` to obfuscate the peer.
For our current purposes, there is no need to address messages based on source peer, or track a message `seqno`.
By overriding the default `message-id` to use content-addressing we can filter unnecessary duplicates before hitting the application layer.

Some examples of where messages could be duplicated:

@@ -1210,8 +1219,6 @@ For minimum and maximum allowable slot broadcast times,
Although messages can at times be eagerly gossiped to the network,
the node's fork choice prevents integration of these messages into the actual consensus until the _actual local start_ of the designated slot.

The value of this constant is currently a placeholder and will be tuned based on data observed in testnets.

### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets?

Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel.

@@ -1408,7 +1415,7 @@ discv5 supports self-certified, flexible peer records (ENRs) and topic-based adv
On the other hand, libp2p Kademlia DHT is a fully-fledged DHT protocol/implementations
with content routing and storage capabilities, both of which are irrelevant in this context.

We assume that Eth 1.0 nodes will evolve to support discv5.
Eth 1.0 nodes will evolve to support discv5.
By sharing the discovery network between Eth 1.0 and 2.0,
we benefit from the additive effect on network size that enhances resilience and resistance against certain attacks,
to which smaller networks are more vulnerable.

@@ -1444,7 +1451,7 @@ ENRs contain `fork_digest` which utilizes the `genesis_validators_root` for a cl
so prior to knowing genesis, we cannot use `fork_digest` to cleanly find peers on our intended chain.
Once genesis data is known, we can then form ENRs and safely find peers.

When using an eth1 deposit contract for deposits, `fork_digest` will be known `GENESIS_DELAY` (48 hours in mainnet configuration) before `genesis_time`,
When using an eth1 deposit contract for deposits, `fork_digest` will be known `GENESIS_DELAY` (7 days in mainnet configuration) before `genesis_time`,
providing ample time to find peers and form initial connections and gossip subnets prior to genesis.

## Compression/Encoding
@@ -1,6 +1,6 @@
# Ethereum 2.0 Phase 0 -- Honest Validator

**Notice**: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.
This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.

## Table of contents

@@ -65,6 +65,7 @@
- [How to avoid slashing](#how-to-avoid-slashing)
- [Proposer slashing](#proposer-slashing)
- [Attester slashing](#attester-slashing)
- [Protection best practices](#protection-best-practices)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

@@ -129,7 +130,7 @@ To submit a deposit:

### Process deposit

Deposits cannot be processed into the beacon chain until the Eth1 block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth1 blocks (~4 hours) plus `EPOCHS_PER_ETH1_VOTING_PERIOD` epochs (~3.4 hours). Once the requisite Eth1 data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validators` within an epoch or two. The validator is then in a queue to be activated.
Deposits cannot be processed into the beacon chain until the Eth1 block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth1 blocks (~8 hours) plus `EPOCHS_PER_ETH1_VOTING_PERIOD` epochs (~6.8 hours). Once the requisite Eth1 data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validators` within an epoch or two. The validator is then in a queue to be activated.
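The updated follow distance and voting period work out as follows (a back-of-the-envelope check using the mainnet values shown earlier in this diff):

```python
SECONDS_PER_ETH1_BLOCK = 14
ETH1_FOLLOW_DISTANCE = 2**11          # 2,048 Eth1 blocks
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32
EPOCHS_PER_ETH1_VOTING_PERIOD = 2**6  # 64 epochs

follow_hours = ETH1_FOLLOW_DISTANCE * SECONDS_PER_ETH1_BLOCK / 3600
voting_hours = EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT / 3600
print(round(follow_hours, 1), round(voting_hours, 1))  # 8.0 6.8
```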
### Validator index

@@ -289,7 +290,7 @@ class Eth1Block(Container):

Let `get_eth1_data(block: Eth1Block) -> Eth1Data` be the function that returns the Eth1 data for a given Eth1 block.

An honest block proposer sets `block.body.eth1_data = get_eth1_vote(state)` where:
An honest block proposer sets `block.body.eth1_data = get_eth1_vote(state, eth1_chain)` where:

```python
def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64:

@@ -358,6 +359,10 @@ The `proof` for each deposit must be constructed against the deposit root contai

Up to `MAX_VOLUNTARY_EXITS`, [`VoluntaryExit`](./beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](./beacon-chain.md#voluntary-exits).

*Note*: If a slashing for a validator is included in the same block as a
voluntary exit, the voluntary exit will fail and cause the block to be invalid
due to the slashing being processed first. Implementers must take heed of this
operation interaction when packing blocks.

#### Packaging into a `SignedBeaconBlock`

@@ -372,7 +377,7 @@ It is useful to be able to run a state transition function (working on a copy of
def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root:
    temp_state: BeaconState = state.copy()
    signed_block = SignedBeaconBlock(message=block)
    temp_state = state_transition(temp_state, signed_block, validate_result=False)
    state_transition(temp_state, signed_block, validate_result=False)
    return hash_tree_root(temp_state)
```

@@ -604,3 +609,13 @@ Specifically, when signing an `Attestation`, a validator should perform the foll
2. Generate and broadcast attestation.

If the software crashes at some point within this routine, then when the validator comes back online, the hard disk has the record of the *potentially* signed/broadcast attestation and can effectively avoid slashing.

## Protection best practices

A validator client should be considered standalone and should consider the beacon node as untrusted. This means that the validator client should protect:

1) Private keys -- private keys should be protected from being exported accidentally or by an attacker.
2) Slashing -- before a validator client signs a message it should validate the data, check it against a local slashing database (do not sign a slashable attestation or block) and update its internal slashing database with the newly signed object.
3) Recovered validator -- Recovering a validator from a private key will result in an empty local slashing db. Best practice is to import (from a trusted source) that validator's attestation history. See [EIP 3076](https://github.com/ethereum/EIPs/pull/3076/files) for a standard slashing interchange format.
4) Far future signing requests -- A validator client can be requested to sign a far into the future attestation, resulting in a valid non-slashable request. If the validator client signs this message, it will result in it blocking itself from attesting any other attestation until the beacon-chain reaches that far into the future epoch. This will result in an inactivity leak and potential ejection due to low balance.
A validator client should prevent itself from signing such requests by: a) keeping a local time clock if possible and following best practices to stop time server attacks and b) refusing to sign, by default, any message that has a large (>6h) gap from the current slashing protection database indicating a time "jump" or a long offline event. The administrator can manually override this protection to restart the validator after a genuine long offline event.
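A minimal sketch of the local slashing-protection check described in point 2 above (illustrative only; the record fields are assumptions here, and EIP 3076 defines the standard interchange format):

```python
from dataclasses import dataclass
from typing import List

@dataclass
class AttestationRecord:
    source_epoch: int
    target_epoch: int

def is_safe_to_sign(history: List[AttestationRecord], source_epoch: int, target_epoch: int) -> bool:
    """Reject attestations that would be double votes or surround votes against local history."""
    for prev in history:
        if prev.target_epoch == target_epoch:                                  # double vote
            return False
        if prev.source_epoch < source_epoch and target_epoch < prev.target_epoch:  # is surrounded
            return False
        if source_epoch < prev.source_epoch and prev.target_epoch < target_epoch:  # surrounds
            return False
    return True
```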
@@ -1,7 +1,5 @@
# Ethereum 2.0 Phase 0 -- Weak Subjectivity Guide

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->

@@ -69,9 +67,9 @@ def compute_weak_subjectivity_period(state: BeaconState) -> uint64:
    weak_subjectivity_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    validator_count = len(get_active_validator_indices(state, get_current_epoch(state)))
    if validator_count >= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT:
        weak_subjectivity_period += SAFETY_DECAY * CHURN_LIMIT_QUOTIENT / (2 * 100)
        weak_subjectivity_period += SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
    else:
        weak_subjectivity_period += SAFETY_DECAY * validator_count / (2 * 100 * MIN_PER_EPOCH_CHURN_LIMIT)
        weak_subjectivity_period += SAFETY_DECAY * validator_count // (2 * 100 * MIN_PER_EPOCH_CHURN_LIMIT)
    return weak_subjectivity_period
```
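With integer division, the large-validator-set branch works out as below (a rough worked example; `SAFETY_DECAY = 10`, i.e. a 10% allowed decay, is assumed since its value is not shown in this diff):

```python
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8   # 256 epochs
CHURN_LIMIT_QUOTIENT = 2**16
SAFETY_DECAY = 10  # assumed value, expressed in percent

# Branch taken when the active validator count is at least MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT:
ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY + SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
print(ws_period)  # 3532 epochs, roughly 15.7 days at 6.4 minutes per epoch
```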
@@ -6,7 +6,7 @@

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**

- [Introduction](#introduction)
- [Custom types](#custom-types)
@@ -1,28 +1,20 @@
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*

- [Ethereum 2.0 Phase 1 -- From Phase 0 to Phase 1](#ethereum-20-phase-1----from-phase-0-to-phase-1)
- [Table of contents](#table-of-contents)
- [Introduction](#introduction)
- [Configuration](#configuration)
- [Fork to Phase 1](#fork-to-phase-1)
- [Fork trigger](#fork-trigger)
- [Upgrading the state](#upgrading-the-state)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

# Ethereum 2.0 Phase 1 -- From Phase 0 to Phase 1

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

TODO

<!-- /TOC -->
- [Introduction](#introduction)
- [Configuration](#configuration)
- [Fork to Phase 1](#fork-to-phase-1)
- [Fork trigger](#fork-trigger)
- [Upgrading the state](#upgrading-the-state)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

## Introduction
@@ -6,7 +6,7 @@

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*

- [Introduction](#introduction)
- [Helper functions](#helper-functions)

@@ -72,14 +72,13 @@ The post-state corresponding to a pre-state `shard_state` and a signed block `si
def shard_state_transition(shard_state: ShardState,
                           signed_block: SignedShardBlock,
                           beacon_parent_state: BeaconState,
                           validate_result: bool = True) -> ShardState:
                           validate_result: bool = True) -> None:
    assert verify_shard_block_message(beacon_parent_state, shard_state, signed_block.message)

    if validate_result:
        assert verify_shard_block_signature(beacon_parent_state, signed_block)

    process_shard_block(shard_state, signed_block.message)
    return shard_state
```

```python
@@ -1,7 +1,5 @@
# SimpleSerialize (SSZ)

**Notice**: This document is a work-in-progress describing typing, serialization, and Merkleization of Eth2 objects.

## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->

@@ -252,7 +250,7 @@ We similarly define "summary types" and "expansion types". For example, [`Beacon
| Language | Project | Maintainer | Implementation |
|-|-|-|-|
| Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) |
| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz](https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz) |
| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/consensus/ssz](https://github.com/sigp/lighthouse/tree/master/consensus/ssz) |
| Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) |
| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/utils/ssz](https://github.com/paritytech/shasper/tree/master/utils/ssz) |
| TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz) |
@@ -1 +1 @@
0.12.3
1.0.0
@@ -1,8 +1,11 @@
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation, sign_indexed_attestation


def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
    attestation_1 = get_valid_attestation(spec, state, signed=signed_1)
def get_valid_attester_slashing(spec, state, slot=None, signed_1=False, signed_2=False, filter_participant_set=None):
    attestation_1 = get_valid_attestation(
        spec, state,
        slot=slot, signed=signed_1, filter_participant_set=filter_participant_set
    )

    attestation_2 = attestation_1.copy()
    attestation_2.data.target.root = b'\x01' * 32

@@ -16,14 +19,17 @@ def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
    )


def get_valid_attester_slashing_by_indices(spec, state, indices_1, indices_2=None, signed_1=False, signed_2=False):
def get_valid_attester_slashing_by_indices(spec, state,
                                           indices_1, indices_2=None,
                                           slot=None,
                                           signed_1=False, signed_2=False):
    if indices_2 is None:
        indices_2 = indices_1

    assert indices_1 == sorted(indices_1)
    assert indices_2 == sorted(indices_2)

    attester_slashing = get_valid_attester_slashing(spec, state)
    attester_slashing = get_valid_attester_slashing(spec, state, slot=slot)

    attester_slashing.attestation_1.attesting_indices = indices_1
    attester_slashing.attestation_2.attesting_indices = indices_2
@ -1,3 +1,5 @@
|
|||
from random import Random
|
||||
|
||||
from eth2spec.test.helpers.keys import pubkeys, privkeys
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_proof
|
||||
|
@ -54,7 +56,10 @@ def deposit_from_context(spec, deposit_data_list, index):
|
|||
deposit_data = deposit_data_list[index]
|
||||
root = hash_tree_root(List[spec.DepositData, 2**spec.DEPOSIT_CONTRACT_TREE_DEPTH](*deposit_data_list))
|
||||
tree = calc_merkle_tree_from_leaves(tuple([d.hash_tree_root() for d in deposit_data_list]))
|
||||
proof = list(get_merkle_proof(tree, item_index=index, tree_len=32)) + [(index + 1).to_bytes(32, 'little')]
|
||||
proof = (
|
||||
list(get_merkle_proof(tree, item_index=index, tree_len=32))
|
||||
+ [len(deposit_data_list).to_bytes(32, 'little')]
|
||||
)
|
||||
leaf = deposit_data.hash_tree_root()
|
||||
assert spec.is_valid_merkle_branch(leaf, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH + 1, index, root)
|
||||
deposit = spec.Deposit(proof=proof, data=deposit_data)
|
||||
|
@ -62,29 +67,69 @@ def deposit_from_context(spec, deposit_data_list, index):
|
|||
return deposit, root, deposit_data_list
|
||||
|
||||
|
||||
def prepare_genesis_deposits(spec, genesis_validator_count, amount, signed=False, deposit_data_list=None):
|
||||
def prepare_full_genesis_deposits(spec,
|
||||
amount,
|
||||
deposit_count,
|
||||
min_pubkey_index=0,
|
||||
signed=False,
|
||||
deposit_data_list=None):
|
||||
if deposit_data_list is None:
|
||||
deposit_data_list = []
|
||||
genesis_deposits = []
|
||||
for validator_index in range(genesis_validator_count):
|
||||
pubkey = pubkeys[validator_index]
|
||||
privkey = privkeys[validator_index]
|
||||
for pubkey_index in range(min_pubkey_index, min_pubkey_index + deposit_count):
|
||||
pubkey = pubkeys[pubkey_index]
|
||||
privkey = privkeys[pubkey_index]
|
||||
# insecurely use pubkey as withdrawal key if no credentials provided
|
||||
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
|
||||
deposit, root, deposit_data_list = build_deposit(
|
||||
spec,
|
||||
deposit_data_list,
|
||||
pubkey,
|
||||
privkey,
|
||||
amount,
|
||||
withdrawal_credentials,
|
||||
signed,
|
||||
deposit_data_list=deposit_data_list,
|
||||
pubkey=pubkey,
|
||||
privkey=privkey,
|
||||
amount=amount,
|
||||
withdrawal_credentials=withdrawal_credentials,
|
||||
signed=signed,
|
||||
)
|
||||
genesis_deposits.append(deposit)
|
||||
|
||||
return genesis_deposits, root, deposit_data_list
|
||||
|
||||
|
||||
def prepare_random_genesis_deposits(spec,
|
||||
deposit_count,
|
||||
max_pubkey_index,
|
||||
min_pubkey_index=0,
|
||||
max_amount=None,
|
||||
min_amount=None,
|
||||
deposit_data_list=None,
|
||||
rng=Random(3131)):
|
||||
if max_amount is None:
|
||||
max_amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
if min_amount is None:
|
||||
min_amount = spec.MIN_DEPOSIT_AMOUNT
|
||||
if deposit_data_list is None:
|
||||
deposit_data_list = []
|
||||
deposits = []
|
||||
for _ in range(deposit_count):
|
||||
pubkey_index = rng.randint(min_pubkey_index, max_pubkey_index)
|
||||
pubkey = pubkeys[pubkey_index]
|
||||
privkey = privkeys[pubkey_index]
|
||||
amount = rng.randint(min_amount, max_amount)
|
||||
random_byte = bytes([rng.randint(0, 255)])
|
||||
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(random_byte)[1:]
|
||||
deposit, root, deposit_data_list = build_deposit(
|
||||
spec,
|
||||
deposit_data_list=deposit_data_list,
|
||||
pubkey=pubkey,
|
||||
privkey=privkey,
|
||||
amount=amount,
|
||||
withdrawal_credentials=withdrawal_credentials,
|
||||
signed=True,
|
||||
)
|
||||
deposits.append(deposit)
|
||||
return deposits, root, deposit_data_list
|
||||
|
||||
|
||||
def prepare_state_and_deposit(spec, state, validator_index, amount, withdrawal_credentials=None, signed=False):
|
||||
"""
|
||||
Prepare the state for the deposit, and create a deposit for the given validator, depositing the given amount.
|
||||
|
|
|
@ -0,0 +1,155 @@
|
|||
from random import Random
|
||||
|
||||
from eth2spec.test.helpers.keys import privkeys, pubkeys
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
|
||||
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing_by_indices
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation
|
||||
from eth2spec.test.helpers.deposits import build_deposit, deposit_from_context
|
||||
from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits
|
||||
|
||||
|
||||
def run_slash_and_exit(spec, state, slash_index, exit_index, valid=True):
|
||||
"""
|
||||
Helper function to run a test that slashes and exits two validators
|
||||
"""
|
||||
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
|
||||
proposer_slashing = get_valid_proposer_slashing(
|
||||
spec, state, slashed_index=slash_index, signed_1=True, signed_2=True)
|
||||
signed_exit = prepare_signed_exits(spec, state, [exit_index])[0]
|
||||
|
||||
block.body.proposer_slashings.append(proposer_slashing)
|
||||
block.body.voluntary_exits.append(signed_exit)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=(not valid))
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
|
||||
if not valid:
|
||||
yield 'post', None
|
||||
return
|
||||
|
||||
yield 'post', state
|
||||
|
||||
|
||||
def get_random_proposer_slashings(spec, state, rng):
|
||||
num_slashings = rng.randrange(spec.MAX_PROPOSER_SLASHINGS)
|
||||
indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state)).copy()
|
||||
slashings = [
|
||||
get_valid_proposer_slashing(
|
||||
spec, state,
|
||||
slashed_index=indices.pop(rng.randrange(len(indices))), signed_1=True, signed_2=True,
|
||||
)
|
||||
for _ in range(num_slashings)
|
||||
]
|
||||
return slashings
|
||||
|
||||
|
||||
def get_random_attester_slashings(spec, state, rng):
|
||||
num_slashings = rng.randrange(spec.MAX_ATTESTER_SLASHINGS)
|
||||
indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state)).copy()
|
||||
slot_range = list(range(state.slot - spec.SLOTS_PER_HISTORICAL_ROOT + 1, state.slot))
|
||||
slashings = [
|
||||
get_valid_attester_slashing_by_indices(
|
||||
spec, state,
|
||||
sorted([indices.pop(rng.randrange(len(indices))) for _ in range(rng.randrange(1, 4))]),
|
||||
slot=slot_range.pop(rng.randrange(len(slot_range))),
|
||||
signed_1=True, signed_2=True,
|
||||
)
|
||||
for _ in range(num_slashings)
|
||||
]
|
||||
return slashings
|
||||
|
||||
|
||||
def get_random_attestations(spec, state, rng):
|
||||
num_attestations = rng.randrange(spec.MAX_ATTESTATIONS)
|
||||
|
||||
attestations = [
|
||||
get_valid_attestation(
|
||||
spec, state,
|
||||
slot=rng.randrange(state.slot - spec.SLOTS_PER_EPOCH + 1, state.slot),
|
||||
signed=True,
|
||||
)
|
||||
for _ in range(num_attestations)
|
||||
]
|
||||
return attestations
|
||||
|
||||
|
||||
def prepare_state_and_get_random_deposits(spec, state, rng):
|
||||
num_deposits = rng.randrange(spec.MAX_DEPOSITS)
|
||||
|
||||
deposit_data_leaves = [spec.DepositData() for _ in range(len(state.validators))]
|
||||
deposits = []
|
||||
|
||||
# First build deposit data leaves
|
||||
for i in range(num_deposits):
|
||||
index = len(state.validators) + i
|
||||
_, root, deposit_data_leaves = build_deposit(
|
||||
spec,
|
||||
deposit_data_leaves,
|
||||
pubkeys[index],
|
||||
privkeys[index],
|
||||
spec.MAX_EFFECTIVE_BALANCE,
|
||||
withdrawal_credentials=b'\x00' * 32,
|
||||
signed=True,
|
||||
)
|
||||
|
||||
state.eth1_data.deposit_root = root
|
||||
state.eth1_data.deposit_count += num_deposits
|
||||
|
||||
# Then for that context, build deposits/proofs
|
||||
for i in range(num_deposits):
|
||||
index = len(state.validators) + i
|
||||
deposit, _, _ = deposit_from_context(spec, deposit_data_leaves, index)
|
||||
deposits.append(deposit)
|
||||
|
||||
return deposits
|
||||
|
||||
|
||||
def get_random_voluntary_exits(spec, state, to_be_slashed_indices, rng):
|
||||
num_exits = rng.randrange(spec.MAX_VOLUNTARY_EXITS)
|
||||
indices = set(spec.get_active_validator_indices(state, spec.get_current_epoch(state)).copy())
|
||||
eligible_indices = indices - to_be_slashed_indices
|
||||
exit_indices = [eligible_indices.pop() for _ in range(num_exits)]
|
||||
return prepare_signed_exits(spec, state, exit_indices)
|
||||
|
||||
|
||||
def run_test_full_random_operations(spec, state, rng=Random(2080)):
|
||||
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||
|
||||
# prepare state for deposits before building block
|
||||
deposits = prepare_state_and_get_random_deposits(spec, state, rng)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.proposer_slashings = get_random_proposer_slashings(spec, state, rng)
|
||||
block.body.attester_slashings = get_random_attester_slashings(spec, state, rng)
|
||||
block.body.attestations = get_random_attestations(spec, state, rng)
|
||||
block.body.deposits = deposits
|
||||
|
||||
# cannot include to be slashed indices as exits
|
||||
slashed_indices = set([
|
||||
slashing.signed_header_1.message.proposer_index
|
||||
for slashing in block.body.proposer_slashings
|
||||
])
|
||||
for attester_slashing in block.body.attester_slashings:
|
||||
slashed_indices = slashed_indices.union(attester_slashing.attestation_1.attesting_indices)
|
||||
slashed_indices = slashed_indices.union(attester_slashing.attestation_2.attesting_indices)
|
||||
block.body.voluntary_exits = get_random_voluntary_exits(spec, state, slashed_indices, rng)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'pre', state
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
|
@@ -1,4 +1,19 @@
from eth2spec.utils import bls
from eth2spec.test.helpers.keys import privkeys


def prepare_signed_exits(spec, state, indices):
    domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT)

    def create_signed_exit(index):
        exit = spec.VoluntaryExit(
            epoch=spec.get_current_epoch(state),
            validator_index=index,
        )
        signing_root = spec.compute_signing_root(exit, domain)
        return spec.SignedVoluntaryExit(message=exit, signature=bls.Sign(privkeys[index], signing_root))

    return [create_signed_exit(index) for index in indices]


def sign_voluntary_exit(spec, state, voluntary_exit, privkey):
@ -135,20 +135,44 @@ def test_wrong_index_for_committee_signature(spec, state):
|
|||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_wrong_index_for_slot(spec, state):
|
||||
def reduce_state_committee_count_from_max(spec, state):
|
||||
"""
|
||||
Modified ``state`` to ensure that it has fewer committees at each slot than ``MAX_COMMITTEES_PER_SLOT``
|
||||
"""
|
||||
while spec.get_committee_count_per_slot(state, spec.get_current_epoch(state)) >= spec.MAX_COMMITTEES_PER_SLOT:
|
||||
state.validators = state.validators[:len(state.validators) // 2]
|
||||
state.balances = state.balances[:len(state.balances) // 2]
|
||||
|
||||
index = spec.MAX_COMMITTEES_PER_SLOT - 1
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_wrong_index_for_slot_0(spec, state):
|
||||
reduce_state_committee_count_from_max(spec, state)
|
||||
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.index = index
|
||||
# Invalid index: current committees per slot is less than the max
|
||||
attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT - 1
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_wrong_index_for_slot_1(spec, state):
|
||||
reduce_state_committee_count_from_max(spec, state)
|
||||
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
committee_count = spec.get_committee_count_per_slot(state, current_epoch)
|
||||
|
||||
attestation = get_valid_attestation(spec, state, index=0)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
# Invalid index: off by one
|
||||
attestation.data.index = committee_count
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
@ -160,7 +184,7 @@ def test_invalid_index(spec, state):
|
|||
attestation = get_valid_attestation(spec, state)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
# off by one (with respect to valid range) on purpose
|
||||
# Invalid index: off by one (with respect to valid range) on purpose
|
||||
attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
|
|
@@ -1,6 +1,7 @@
from eth2spec.test.context import (
    spec_state_test, spec_test,
    with_all_phases, single_phase,
    with_phases, PHASE0,
    with_custom_state,
    zero_activation_threshold,
    misc_balances, low_single_balance,
@@ -12,6 +13,7 @@ from eth2spec.test.helpers.state import (
from eth2spec.test.helpers.attestations import (
    add_attestations_to_state,
    get_valid_attestation,
    sign_attestation,
    prepare_state_with_attestations,
)
from eth2spec.test.helpers.rewards import leaking
@@ -278,6 +280,135 @@ def test_duplicate_attestation(spec, state):
    assert single_state.balances[index] == dup_state.balances[index]


# TODO: update to all phases when https://github.com/ethereum/eth2.0-specs/pull/2024 is merged
# Currently disabled for Phase 1+ due to the mechanics of on-time-attestations complicating what should be a simple test
@with_phases([PHASE0])
@spec_state_test
def test_duplicate_participants_different_attestation_1(spec, state):
    """
    Same attesters get two different attestations on chain for the same inclusion delay
    Earlier attestation (by list order) is correct, later has incorrect head
    Note: although these are slashable, they can validly be included
    """
    correct_attestation = get_valid_attestation(spec, state, signed=True)
    incorrect_attestation = correct_attestation.copy()
    incorrect_attestation.data.beacon_block_root = b'\x42' * 32
    sign_attestation(spec, state, incorrect_attestation)

    indexed_attestation = spec.get_indexed_attestation(state, correct_attestation)
    participants = get_indexed_attestation_participants(spec, indexed_attestation)

    assert len(participants) > 0

    single_correct_state = state.copy()
    dup_state = state.copy()

    inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
    add_attestations_to_state(spec, single_correct_state, [correct_attestation], inclusion_slot)
    add_attestations_to_state(spec, dup_state, [correct_attestation, incorrect_attestation], inclusion_slot)

    next_epoch(spec, single_correct_state)
    next_epoch(spec, dup_state)

    # Run non-duplicate inclusion rewards for comparison. Do not yield test vectors
    for _ in run_process_rewards_and_penalties(spec, single_correct_state):
        pass

    # Output duplicate inclusion to test vectors
    yield from run_process_rewards_and_penalties(spec, dup_state)

    for index in participants:
        assert state.balances[index] < single_correct_state.balances[index]
        assert single_correct_state.balances[index] == dup_state.balances[index]


# TODO: update to all phases when https://github.com/ethereum/eth2.0-specs/pull/2024 is merged
# Currently disabled for Phase 1+ due to the mechanics of on-time-attestations complicating what should be a simple test
@with_phases([PHASE0])
@spec_state_test
def test_duplicate_participants_different_attestation_2(spec, state):
    """
    Same attesters get two different attestations on chain for the same inclusion delay
    Earlier attestation (by list order) has incorrect head, later is correct
    Note: although these are slashable, they can validly be included
    """
    correct_attestation = get_valid_attestation(spec, state, signed=True)
    incorrect_attestation = correct_attestation.copy()
    incorrect_attestation.data.beacon_block_root = b'\x42' * 32
    sign_attestation(spec, state, incorrect_attestation)

    indexed_attestation = spec.get_indexed_attestation(state, correct_attestation)
    participants = get_indexed_attestation_participants(spec, indexed_attestation)

    assert len(participants) > 0

    single_correct_state = state.copy()
    dup_state = state.copy()

    inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
    add_attestations_to_state(spec, single_correct_state, [correct_attestation], inclusion_slot)
    add_attestations_to_state(spec, dup_state, [incorrect_attestation, correct_attestation], inclusion_slot)

    next_epoch(spec, single_correct_state)
    next_epoch(spec, dup_state)

    # Run non-duplicate inclusion rewards for comparison. Do not yield test vectors
    for _ in run_process_rewards_and_penalties(spec, single_correct_state):
        pass

    # Output duplicate inclusion to test vectors
    yield from run_process_rewards_and_penalties(spec, dup_state)

    for index in participants:
        assert state.balances[index] < single_correct_state.balances[index]
        # Inclusion delay does not take into account correctness so equal reward
        assert single_correct_state.balances[index] == dup_state.balances[index]


# TODO: update to all phases when https://github.com/ethereum/eth2.0-specs/pull/2024 is merged
# Currently disabled for Phase 1+ due to the mechanics of on-time-attestations complicating what should be a simple test
@with_phases([PHASE0])
@spec_state_test
def test_duplicate_participants_different_attestation_3(spec, state):
    """
    Same attesters get two different attestations on chain for *different* inclusion delay
    Earlier attestation (by list order) has incorrect head, later is correct
    Note: although these are slashable, they can validly be included
    """
    correct_attestation = get_valid_attestation(spec, state, signed=True)
    incorrect_attestation = correct_attestation.copy()
    incorrect_attestation.data.beacon_block_root = b'\x42' * 32
    sign_attestation(spec, state, incorrect_attestation)

    indexed_attestation = spec.get_indexed_attestation(state, correct_attestation)
    participants = get_indexed_attestation_participants(spec, indexed_attestation)

    assert len(participants) > 0

    single_correct_state = state.copy()
    dup_state = state.copy()

    inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
    add_attestations_to_state(spec, single_correct_state, [correct_attestation], inclusion_slot)
    add_attestations_to_state(spec, dup_state, [incorrect_attestation], inclusion_slot)
    add_attestations_to_state(spec, dup_state, [correct_attestation], inclusion_slot + 1)

    next_epoch(spec, single_correct_state)
    next_epoch(spec, dup_state)

    # Run non-duplicate inclusion rewards for comparison. Do not yield test vectors
    for _ in run_process_rewards_and_penalties(spec, single_correct_state):
        pass

    # Output duplicate inclusion to test vectors
    yield from run_process_rewards_and_penalties(spec, dup_state)

    for index in participants:
        assert state.balances[index] < single_correct_state.balances[index]
        # Inclusion delay does not take into account correctness so equal reward
        assert single_correct_state.balances[index] == dup_state.balances[index]


@with_all_phases
@spec_state_test
# Case when some eligible attestations are slashed. Modifies attesting_balance and consequently rewards/penalties.

@@ -26,7 +26,12 @@ def slash_validators(spec, state, indices, out_epochs):
@with_all_phases
@spec_state_test
def test_max_penalties(spec, state):
    slashed_count = (len(state.validators) // 3) + 1
    # Slashed count to ensure that enough validators are slashed to induce maximum penalties
    slashed_count = min(
        (len(state.validators) // spec.PROPORTIONAL_SLASHING_MULTIPLIER) + 1,
        # Can't slash more than validator count!
        len(state.validators)
    )
    out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)

    slashed_indices = list(range(slashed_count))
@@ -35,7 +40,7 @@ def test_max_penalties(spec, state):
    total_balance = spec.get_total_active_balance(state)
    total_penalties = sum(state.slashings)

    assert total_balance // 3 <= total_penalties
    assert total_balance // spec.PROPORTIONAL_SLASHING_MULTIPLIER <= total_penalties

    yield from run_process_slashings(spec, state)

@@ -91,12 +96,12 @@ def test_scaled_penalties(spec, state):
    state.slashings[5] = base + (incr * 6)
    state.slashings[spec.EPOCHS_PER_SLASHINGS_VECTOR - 1] = base + (incr * 7)

    slashed_count = len(state.validators) // 4
    slashed_count = len(state.validators) // (spec.PROPORTIONAL_SLASHING_MULTIPLIER + 1)

    assert slashed_count > 10

    # make the balances non-uniform.
    # Otherwise it would just be a simple 3/4 balance slashing. Test the per-validator scaled penalties.
    # Otherwise it would just be a simple balance slashing. Test the per-validator scaled penalties.
    diff = spec.MAX_EFFECTIVE_BALANCE - base
    increments = diff // incr
    for i in range(10):
@@ -129,7 +134,7 @@ def test_scaled_penalties(spec, state):
        v = state.validators[i]
        expected_penalty = (
            v.effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT
            * (3 * total_penalties)
            * (spec.PROPORTIONAL_SLASHING_MULTIPLIER * total_penalties)
            // (total_balance)
            * spec.EFFECTIVE_BALANCE_INCREMENT
        )

@@ -1,6 +1,7 @@
from eth2spec.test.context import PHASE0, spec_test, with_phases, single_phase
from eth2spec.test.helpers.deposits import (
    prepare_genesis_deposits,
    prepare_full_genesis_deposits,
    prepare_random_genesis_deposits,
)


@@ -9,7 +10,12 @@ from eth2spec.test.helpers.deposits import (
@single_phase
def test_initialize_beacon_state_from_eth1(spec):
    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
    deposits, deposit_root, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
    deposits, deposit_root, _ = prepare_full_genesis_deposits(
        spec,
        spec.MAX_EFFECTIVE_BALANCE,
        deposit_count,
        signed=True,
    )

    eth1_block_hash = b'\x12' * 32
    eth1_timestamp = spec.MIN_GENESIS_TIME
@@ -37,14 +43,18 @@ def test_initialize_beacon_state_from_eth1(spec):
@single_phase
def test_initialize_beacon_state_some_small_balances(spec):
    main_deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
    main_deposits, _, deposit_data_list = prepare_genesis_deposits(spec, main_deposit_count,
                                                                   spec.MAX_EFFECTIVE_BALANCE, signed=True)
    main_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
        spec, spec.MAX_EFFECTIVE_BALANCE,
        deposit_count=main_deposit_count, signed=True,
    )
    # For deposits above, and for another deposit_count, add a balance of EFFECTIVE_BALANCE_INCREMENT
    small_deposit_count = main_deposit_count * 2
    small_deposits, deposit_root, _ = prepare_genesis_deposits(spec, small_deposit_count,
                                                               spec.MIN_DEPOSIT_AMOUNT,
                                                               signed=True,
                                                               deposit_data_list=deposit_data_list)
    small_deposits, deposit_root, _ = prepare_full_genesis_deposits(
        spec, spec.MIN_DEPOSIT_AMOUNT,
        deposit_count=small_deposit_count,
        signed=True,
        deposit_data_list=deposit_data_list,
    )
    deposits = main_deposits + small_deposits

    eth1_block_hash = b'\x12' * 32
@@ -67,3 +77,109 @@ def test_initialize_beacon_state_some_small_balances(spec):

    # yield state
    yield 'state', state


@with_phases([PHASE0])
@spec_test
@single_phase
def test_initialize_beacon_state_one_topup_activation(spec):
    # Submit all but one deposit as MAX_EFFECTIVE_BALANCE
    main_deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1
    main_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
        spec, spec.MAX_EFFECTIVE_BALANCE,
        deposit_count=main_deposit_count, signed=True,
    )

    # Submit last pubkey deposit as MAX_EFFECTIVE_BALANCE - MIN_DEPOSIT_AMOUNT
    partial_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
        spec, spec.MAX_EFFECTIVE_BALANCE - spec.MIN_DEPOSIT_AMOUNT,
        deposit_count=1,
        min_pubkey_index=main_deposit_count,
        signed=True,
        deposit_data_list=deposit_data_list,
    )

    # Top up the last pubkey deposit with MIN_DEPOSIT_AMOUNT to complete the deposit
    top_up_deposits, _, _ = prepare_full_genesis_deposits(
        spec, spec.MIN_DEPOSIT_AMOUNT,
        deposit_count=1,
        min_pubkey_index=main_deposit_count,
        signed=True,
        deposit_data_list=deposit_data_list,
    )

    deposits = main_deposits + partial_deposits + top_up_deposits

    eth1_block_hash = b'\x13' * 32
    eth1_timestamp = spec.MIN_GENESIS_TIME

    yield 'eth1_block_hash', eth1_block_hash
    yield 'eth1_timestamp', eth1_timestamp
    yield 'deposits', deposits

    # initialize beacon_state
    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
    assert spec.is_valid_genesis_state(state)

    # yield state
    yield 'state', state


@with_phases([PHASE0])
@spec_test
@single_phase
def test_initialize_beacon_state_random_invalid_genesis(spec):
    # Make a bunch of random deposits
    deposits, _, deposit_data_list = prepare_random_genesis_deposits(
        spec,
        deposit_count=20,
        max_pubkey_index=10,
    )
    eth1_block_hash = b'\x14' * 32
    eth1_timestamp = spec.MIN_GENESIS_TIME + 1

    yield 'eth1_block_hash', eth1_block_hash
    yield 'eth1_timestamp', eth1_timestamp
    yield 'deposits', deposits

    # initialize beacon_state
    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
    assert not spec.is_valid_genesis_state(state)

    yield 'state', state


@with_phases([PHASE0])
@spec_test
@single_phase
def test_initialize_beacon_state_random_valid_genesis(spec):
    # Make a bunch of random deposits
    random_deposits, _, deposit_data_list = prepare_random_genesis_deposits(
        spec,
        deposit_count=20,
        min_pubkey_index=spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 5,
        max_pubkey_index=spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 5,
    )

    # Then make spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT full deposits
    full_deposits, _, _ = prepare_full_genesis_deposits(
        spec,
        spec.MAX_EFFECTIVE_BALANCE,
        deposit_count=spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT,
        signed=True,
        deposit_data_list=deposit_data_list
    )

    deposits = random_deposits + full_deposits
    eth1_block_hash = b'\x15' * 32
    eth1_timestamp = spec.MIN_GENESIS_TIME + 2

    yield 'eth1_block_hash', eth1_block_hash
    yield 'eth1_timestamp', eth1_timestamp
    yield 'deposits', deposits

    # initialize beacon_state
    state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
    assert spec.is_valid_genesis_state(state)

    yield 'state', state

@@ -1,12 +1,17 @@
from eth2spec.test.context import PHASE0, spec_test, with_phases, single_phase
from eth2spec.test.helpers.deposits import (
    prepare_genesis_deposits,
    prepare_full_genesis_deposits,
)


def create_valid_beacon_state(spec):
    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
    deposits, _, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
    deposits, _, _ = prepare_full_genesis_deposits(
        spec,
        amount=spec.MAX_EFFECTIVE_BALANCE,
        deposit_count=deposit_count,
        signed=True,
    )

    eth1_block_hash = b'\x12' * 32
    eth1_timestamp = spec.MIN_GENESIS_TIME
@@ -54,22 +59,17 @@ def test_is_valid_genesis_state_true_more_balance(spec):
    yield from run_is_valid_genesis_state(spec, state, valid=True)


# TODO: not part of the genesis function yet. Erroneously merged.
# @with_phases([PHASE0])
# @spec_test
# def test_is_valid_genesis_state_false_not_enough_balance(spec):
#     state = create_valid_beacon_state(spec)
#     state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE - 1
#
#     yield from run_is_valid_genesis_state(spec, state, valid=False)


@with_phases([PHASE0])
@spec_test
@single_phase
def test_is_valid_genesis_state_true_one_more_validator(spec):
    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 1
    deposits, _, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
    deposits, _, _ = prepare_full_genesis_deposits(
        spec,
        amount=spec.MAX_EFFECTIVE_BALANCE,
        deposit_count=deposit_count,
        signed=True,
    )

    eth1_block_hash = b'\x12' * 32
    eth1_timestamp = spec.MIN_GENESIS_TIME
@@ -83,7 +83,12 @@ def test_is_valid_genesis_state_true_one_more_validator(spec):
@single_phase
def test_is_valid_genesis_state_false_not_enough_validator(spec):
    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1
    deposits, _, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
    deposits, _, _ = prepare_full_genesis_deposits(
        spec,
        amount=spec.MAX_EFFECTIVE_BALANCE,
        deposit_count=deposit_count,
        signed=True,
    )

    eth1_block_hash = b'\x12' * 32
    eth1_timestamp = spec.MIN_GENESIS_TIME

@@ -1,3 +1,4 @@
from random import Random
from eth2spec.utils import bls

from eth2spec.test.helpers.state import (
@@ -9,7 +10,7 @@ from eth2spec.test.helpers.block import (
    sign_block,
    transition_unsigned_block,
)
from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.test.helpers.keys import pubkeys
from eth2spec.test.helpers.attester_slashings import (
    get_valid_attester_slashing_by_indices,
    get_valid_attester_slashing,
@@ -18,7 +19,12 @@ from eth2spec.test.helpers.attester_slashings import (
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing, check_proposer_slashing_effect
from eth2spec.test.helpers.attestations import get_valid_attestation
from eth2spec.test.helpers.deposits import prepare_state_and_deposit
from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits
from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee
from eth2spec.test.helpers.multi_operations import (
    run_slash_and_exit,
    run_test_full_random_operations,
)

from eth2spec.test.context import (
    PHASE0, PHASE1, MINIMAL,
@@ -794,20 +800,6 @@ def test_attestation(spec, state):
    assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root


def prepare_signed_exits(spec, state, indices):
    domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT)

    def create_signed_exit(index):
        exit = spec.VoluntaryExit(
            epoch=spec.get_current_epoch(state),
            validator_index=index,
        )
        signing_root = spec.compute_signing_root(exit, domain)
        return spec.SignedVoluntaryExit(message=exit, signature=bls.Sign(privkeys[index], signing_root))

    return [create_signed_exit(index) for index in indices]


# In phase1 a committee is computed for SHARD_COMMITTEE_PERIOD slots ago,
# exceeding the minimal-config randao mixes memory size.
# Applies to all voluntary-exit sanity block tests.
@@ -895,6 +887,23 @@ def test_multiple_different_validator_exits_same_block(spec, state):
    assert state.validators[index].exit_epoch < spec.FAR_FUTURE_EPOCH


@with_all_phases
@spec_state_test
@disable_process_reveal_deadlines
def test_slash_and_exit_same_index(spec, state):
    validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
    yield from run_slash_and_exit(spec, state, validator_index, validator_index, valid=False)


@with_all_phases
@spec_state_test
@disable_process_reveal_deadlines
def test_slash_and_exit_diff_index(spec, state):
    slash_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
    exit_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-2]
    yield from run_slash_and_exit(spec, state, slash_index, exit_index)


@with_all_phases
@spec_state_test
def test_balance_driven_status_transitions(spec, state):
@@ -1013,3 +1022,27 @@ def test_eth1_data_votes_no_consensus(spec, state):

    yield 'blocks', blocks
    yield 'post', state


@with_phases([PHASE0])
@spec_state_test
def test_full_random_operations_0(spec, state):
    yield from run_test_full_random_operations(spec, state, rng=Random(2020))


@with_phases([PHASE0])
@spec_state_test
def test_full_random_operations_1(spec, state):
    yield from run_test_full_random_operations(spec, state, rng=Random(2021))


@with_phases([PHASE0])
@spec_state_test
def test_full_random_operations_2(spec, state):
    yield from run_test_full_random_operations(spec, state, rng=Random(2022))


@with_phases([PHASE0])
@spec_state_test
def test_full_random_operations_3(spec, state):
    yield from run_test_full_random_operations(spec, state, rng=Random(2023))

@@ -10,6 +10,7 @@ bls = py_ecc_bls

STUB_SIGNATURE = b'\x11' * 96
STUB_PUBKEY = b'\x22' * 48
Z1_PUBKEY = b'\xc0' + b'\x00' * 47
Z2_SIGNATURE = b'\xc0' + b'\x00' * 95
STUB_COORDINATES = _signature_to_G2(Z2_SIGNATURE)

@@ -99,4 +100,7 @@ def AggregatePKs(pubkeys):

@only_with_bls(alt_return=STUB_SIGNATURE)
def SkToPk(SK):
    return bls.SkToPk(SK)
    if bls == py_ecc_bls:
        return bls.SkToPk(SK)
    else:
        return bls.SkToPk(SK.to_bytes(32, 'big'))

@@ -10,7 +10,7 @@ The test data is declared in a `data.yaml` file:
input:
  privkey: bytes32 -- the private key used for signing
  message: bytes32 -- input message to sign (a hash)
output: bytes96 -- expected signature
output: BLS Signature -- expected output, single BLS signature or empty.
```

All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
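For illustration, a single `data.yaml` case following this format might look as below; the key, message, and signature values here are placeholders, not actual test vectors:

```yaml
input:
  privkey: '0x0000000000000000000000000000000000000000000000000000000000000003'  # placeholder 32-byte key
  message: '0xabababababababababababababababababababababababababababababababab'  # placeholder 32-byte message
output: '0x...'  # the 96-byte signature as hex, or empty/`null` for an invalid case
```
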
@@ -40,6 +40,7 @@ MESSAGES = [
    bytes(b'\x56' * 32),
    bytes(b'\xab' * 32),
]
SAMPLE_MESSAGE = b'\x12' * 32

PRIVKEYS = [
    # The curve order is just under 2**255, so private keys are 32 bytes at most.
@@ -48,16 +49,30 @@ PRIVKEYS = [
    hex_to_int('0x0000000000000000000000000000000047b8192d77bf871b62e87859d653922725724a5c031afeabc60bcef5ff665138'),
    hex_to_int('0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216'),
]
PUBKEYS = [bls.SkToPk(privkey) for privkey in PRIVKEYS]

Z1_PUBKEY = b'\xc0' + b'\x00' * 47
NO_SIGNATURE = b'\x00' * 96
Z2_SIGNATURE = b'\xc0' + b'\x00' * 95
ZERO_PRIVKEY = 0
ZERO_PRIVKEY_BYTES = b'\x00' * 32


def expect_exception(func, *args):
    try:
        func(*args)
    except Exception:
        pass
    else:
        raise Exception("should have raised exception")


def case01_sign():
    # Valid cases
    for privkey in PRIVKEYS:
        for message in MESSAGES:
            sig = bls.Sign(privkey, message)
            assert sig == milagro_bls.Sign(to_bytes(privkey), message)  # double-check with milagro
            identifier = f'{int_to_hex(privkey)}_{encode_hex(message)}'
            yield f'sign_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
                'input': {
@@ -66,6 +81,16 @@ def case01_sign():
                },
                'output': encode_hex(sig)
            }
    # Edge case: privkey == 0
    expect_exception(bls.Sign, ZERO_PRIVKEY, message)
    expect_exception(milagro_bls.Sign, ZERO_PRIVKEY_BYTES, message)
    yield f'sign_case_zero_privkey', {
        'input': {
            'privkey': encode_hex(ZERO_PRIVKEY_BYTES),
            'message': encode_hex(message),
        },
        'output': None
    }


def case02_verify():
@@ -120,42 +145,46 @@ def case02_verify():
                'output': False,
            }

    # Valid pubkey and signature with the point at infinity
    assert bls.Verify(Z1_PUBKEY, message, Z2_SIGNATURE)
    assert milagro_bls.Verify(Z1_PUBKEY, message, Z2_SIGNATURE)
    yield f'verify_infinity_pubkey_and_infinity_signature', {
        'input': {
            'pubkey': encode_hex(Z1_PUBKEY),
            'message': encode_hex(message),
            'signature': encode_hex(Z2_SIGNATURE),
        },
        'output': True,
    }
    # Invalid pubkey and signature with the point at infinity
    assert not bls.Verify(Z1_PUBKEY, SAMPLE_MESSAGE, Z2_SIGNATURE)
    assert not milagro_bls.Verify(Z1_PUBKEY, SAMPLE_MESSAGE, Z2_SIGNATURE)
    yield f'verify_infinity_pubkey_and_infinity_signature', {
        'input': {
            'pubkey': encode_hex(Z1_PUBKEY),
            'message': encode_hex(SAMPLE_MESSAGE),
            'signature': encode_hex(Z2_SIGNATURE),
        },
        'output': False,
    }


def case03_aggregate():
    for message in MESSAGES:
        sigs = [bls.Sign(privkey, message) for privkey in PRIVKEYS]
        aggregate_sig = bls.Aggregate(sigs)
        assert aggregate_sig == milagro_bls.Aggregate(sigs)
        yield f'aggregate_{encode_hex(message)}', {
            'input': [encode_hex(sig) for sig in sigs],
            'output': encode_hex(bls.Aggregate(sigs)),
            'output': encode_hex(aggregate_sig),
        }

    # Invalid pubkeys -- len(pubkeys) == 0
    try:
        bls.Aggregate([])
    except Exception:
        pass
    else:
        raise Exception("Should have been INVALID")

    expect_exception(bls.Aggregate, [])
    # No signatures to aggregate. Follow IETF BLS spec, return `None` to represent INVALID.
    # https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-02#section-2.8
    # https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04#section-2.8
    yield f'aggregate_na_signatures', {
        'input': [],
        'output': None,
    }

    # Valid to aggregate G2 point at infinity
    aggregate_sig = bls.Aggregate([Z2_SIGNATURE])
    assert aggregate_sig == milagro_bls.Aggregate([Z2_SIGNATURE]) == Z2_SIGNATURE
    yield f'aggregate_infinity_signature', {
        'input': [encode_hex(Z2_SIGNATURE)],
        'output': encode_hex(aggregate_sig),
    }


def case04_fast_aggregate_verify():
    for i, message in enumerate(MESSAGES):
@@ -231,6 +260,22 @@ def case04_fast_aggregate_verify():
            'output': False,
        }

    # Invalid pubkeys and signature -- pubkeys contains point at infinity
    pubkeys = PUBKEYS.copy()
    pubkeys_with_infinity = pubkeys + [Z1_PUBKEY]
    signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS]
    aggregate_signature = bls.Aggregate(signatures)
    assert not bls.FastAggregateVerify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature)
    assert not milagro_bls.FastAggregateVerify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature)
    yield f'fast_aggregate_verify_infinity_pubkey', {
        'input': {
            'pubkeys': [encode_hex(pubkey) for pubkey in pubkeys_with_infinity],
            'message': encode_hex(SAMPLE_MESSAGE),
            'signature': encode_hex(aggregate_signature),
        },
        'output': False,
    }


def case05_aggregate_verify():
    pubkeys = []
@@ -295,6 +340,20 @@ def case05_aggregate_verify():
        'output': False,
    }

    # Invalid pubkeys and signature -- pubkeys contains point at infinity
    pubkeys_with_infinity = pubkeys + [Z1_PUBKEY]
    messages_with_sample = messages + [SAMPLE_MESSAGE]
    assert not bls.AggregateVerify(pubkeys_with_infinity, messages_with_sample, aggregate_signature)
    assert not milagro_bls.AggregateVerify(pubkeys_with_infinity, messages_with_sample, aggregate_signature)
    yield f'aggregate_verify_infinity_pubkey', {
        'input': {
            'pubkeys': [encode_hex(pubkey) for pubkey in pubkeys_with_infinity],
            'messages': [encode_hex(message) for message in messages_with_sample],
            'signature': encode_hex(aggregate_signature),
        },
        'output': False,
    }


def create_provider(handler_name: str,
                    test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider:

@@ -1,4 +1,4 @@
py_ecc==4.0.0
py_ecc==5.0.0
eth-utils==1.6.0
../../core/gen_helpers
../../../