Merge branch 'dev' into hwwhww/phase-1-fork-slot

This commit is contained in:
Hsiao-Wei Wang 2020-07-28 23:59:19 +08:00
commit b1eb157539
No known key found for this signature in database
GPG Key ID: 95B070122902DEA4
38 changed files with 260 additions and 90 deletions

View File

@ -2,7 +2,7 @@
[![Join the chat at https://discord.gg/hpFs23p](https://img.shields.io/badge/chat-on%20discord-blue.svg)](https://discord.gg/hpFs23p) [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
To learn more about sharding and Ethereum 2.0 (Serenity), see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQ) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).
To learn more about sharding and Ethereum 2.0 (Serenity), see the [sharding FAQ](https://eth.wiki/sharding/Sharding-FAQs) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).
This repository hosts the current Eth2 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed-upon changes to the spec can be made through pull requests.

View File

@ -51,6 +51,9 @@ SECONDS_PER_ETH1_BLOCK: 14
# Deposit contract
# ---------------------------------------------------------------
# Ethereum PoW Mainnet
DEPOSIT_CHAIN_ID: 1
DEPOSIT_NETWORK_ID: 1
# **TBD**
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890

View File

@ -51,6 +51,9 @@ SECONDS_PER_ETH1_BLOCK: 14
# Deposit contract
# ---------------------------------------------------------------
# Ethereum Goerli testnet
DEPOSIT_CHAIN_ID: 5
DEPOSIT_NETWORK_ID: 5
# **TBD**
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890

View File

@ -94,9 +94,9 @@ from dataclasses import (
from lru import LRU
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
from eth2spec.utils.ssz.ssz_typing import (
View, boolean, Container, List, Vector, uint64, uint8,
View, boolean, Container, List, Vector, uint8, uint32, uint64,
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
)
from eth2spec.utils import bls
@ -118,9 +118,9 @@ from dataclasses import (
from lru import LRU
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
from eth2spec.utils.ssz.ssz_typing import (
View, boolean, Container, List, Vector, uint64, uint8, bit,
View, boolean, Container, List, Vector, uint8, uint32, uint64, bit,
ByteList, ByteVector, Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
)
from eth2spec.utils import bls
@ -141,11 +141,6 @@ def ceillog2(x: uint64) -> int:
return (x - 1).bit_length()
'''
PHASE0_SUNDRY_FUNCTIONS = '''
# Monkey patch hash cache
_hash = hash
hash_cache: Dict[bytes, Bytes32] = {}
def get_eth1_data(block: Eth1Block) -> Eth1Data:
"""
A stub function returning mock Eth1Data.
@ -156,12 +151,6 @@ def get_eth1_data(block: Eth1Block) -> Eth1Data:
block_hash=hash_tree_root(block))
def hash(x: bytes) -> Bytes32: # type: ignore
if x not in hash_cache:
hash_cache[x] = Bytes32(_hash(x))
return hash_cache[x]
def cache_this(key_fn, value_fn, lru_size): # type: ignore
cache_dict = LRU(size=lru_size)

View File

@ -55,8 +55,8 @@
- [Math](#math)
- [`integer_squareroot`](#integer_squareroot)
- [`xor`](#xor)
- [`int_to_bytes`](#int_to_bytes)
- [`bytes_to_int`](#bytes_to_int)
- [`uint_to_bytes`](#uint_to_bytes)
- [`bytes_to_uint64`](#bytes_to_uint64)
- [Crypto](#crypto)
- [`hash`](#hash)
- [`hash_tree_root`](#hash_tree_root)
@ -576,20 +576,14 @@ def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32:
return Bytes32(a ^ b for a, b in zip(bytes_1, bytes_2))
```
#### `int_to_bytes`
#### `uint_to_bytes`
`def uint_to_bytes(n: uint) -> bytes` is a function for serializing the `uint` type object to bytes in ``ENDIANNESS``-endian. The expected length of the output is the byte-length of the `uint` type.
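For intuition, here is a minimal Python sketch of the behaviour described above. The real `uint_to_bytes` derives the output length from the SSZ `uint` type itself (see the `serialize`-based helper later in this commit); the explicit `byte_length` parameter below is only an illustrative stand-in, and `ENDIANNESS` is `'little'` as in the Phase 0 constants.
```python
ENDIANNESS = 'little'  # matches the Phase 0 ``ENDIANNESS`` constant

def uint_to_bytes_sketch(n: int, byte_length: int) -> bytes:
    """
    Illustrative only: serialize ``n`` to ``byte_length`` bytes, ``ENDIANNESS``-endian.
    The executable spec infers the byte length from the ``uint`` type of ``n``.
    """
    return n.to_bytes(byte_length, ENDIANNESS)

# The two call shapes used in ``compute_shuffled_index`` below:
assert uint_to_bytes_sketch(3, 1) == b'\x03'                        # uint8(current_round)
assert uint_to_bytes_sketch(300, 4) == (300).to_bytes(4, 'little')  # uint32(position // 256)
```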
#### `bytes_to_uint64`
```python
def int_to_bytes(n: uint64, length: int) -> bytes:
"""
Return the ``length``-byte serialization of ``n`` in ``ENDIANNESS``-endian.
"""
return n.to_bytes(length, ENDIANNESS)
```
#### `bytes_to_int`
```python
def bytes_to_int(data: bytes) -> uint64:
def bytes_to_uint64(data: bytes) -> uint64:
"""
Return the integer deserialization of ``data`` interpreted as ``ENDIANNESS``-endian.
"""
@ -733,10 +727,14 @@ def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) ->
# Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf)
# See the 'generalized domain' algorithm on page 3
for current_round in range(SHUFFLE_ROUND_COUNT):
pivot = bytes_to_int(hash(seed + int_to_bytes(current_round, length=1))[0:8]) % index_count
pivot = bytes_to_uint64(hash(seed + uint_to_bytes(uint8(current_round)))[0:8]) % index_count
flip = (pivot + index_count - index) % index_count
position = max(index, flip)
source = hash(seed + int_to_bytes(current_round, length=1) + int_to_bytes(position // 256, length=4))
source = hash(
seed
+ uint_to_bytes(uint8(current_round))
+ uint_to_bytes(uint32(position // 256))
)
byte = uint8(source[(position % 256) // 8])
bit = (byte >> (position % 8)) % 2
index = flip if bit else index
@ -757,7 +755,7 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex]
total = uint64(len(indices))
while True:
candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
effective_balance = state.validators[candidate_index].effective_balance
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
return candidate_index
@ -946,7 +944,7 @@ def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes
Return the seed at ``epoch``.
"""
mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)) # Avoid underflow
return hash(domain_type + int_to_bytes(epoch, length=8) + mix)
return hash(domain_type + uint_to_bytes(epoch) + mix)
```
#### `get_committee_count_per_slot`
@ -987,7 +985,7 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
Return the beacon proposer index at the current slot.
"""
epoch = get_current_epoch(state)
seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + int_to_bytes(state.slot, length=8))
seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(state.slot))
indices = get_active_validator_indices(state, epoch)
return compute_proposer_index(state, indices, seed)
```
@ -1720,10 +1718,10 @@ def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSla
```python
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
data = attestation.data
assert data.index < get_committee_count_per_slot(state, data.target.epoch)
assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
assert data.target.epoch == compute_epoch_at_slot(data.slot)
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
assert data.index < get_committee_count_per_slot(state, data.target.epoch)
committee = get_beacon_committee(state, data.slot, data.index)
assert len(attestation.aggregation_bits) == len(committee)

View File

@ -10,7 +10,7 @@
- [Introduction](#introduction)
- [Constants](#constants)
- [Contract](#contract)
- [Configuration](#configuration)
- [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract)
- [`deposit` function](#deposit-function)
- [Deposit amount](#deposit-amount)
@ -27,16 +27,29 @@ This document represents the specification for the beacon chain deposit contract
## Constants
### Contract
The following values are (non-configurable) constants used throughout the specification.
| Name | Value |
| - | - |
| `DEPOSIT_CONTRACT_ADDRESS` | **TBD** |
| `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) |
## Configuration
*Note*: The default mainnet configuration values are included here for spec-design purposes.
The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs) directory.
These configurations are updated for releases and may be out of sync during `dev` changes.
| Name | Value |
| - | - |
| `DEPOSIT_CHAIN_ID` | `1` |
| `DEPOSIT_NETWORK_ID` | `1` |
| `DEPOSIT_CONTRACT_ADDRESS` | **TBD** |
## Ethereum 1.0 deposit contract
The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in Phase 2.
The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to the Ethereum 1.0 chain defined by the [chain-id](https://eips.ethereum.org/EIPS/eip-155) -- `DEPOSIT_CHAIN_ID` -- and the network-id -- `DEPOSIT_NETWORK_ID` -- for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in Phase 2.
_Note_: See [here](https://chainid.network/) for a comprehensive list of public Ethereum chain-ids and network-ids.
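As a non-normative illustration, tooling can verify that it is connected to the intended Eth1 chain before interacting with the deposit contract. The sketch below assumes web3.py and a hypothetical local node endpoint; it is not part of the specification.
```python
from web3 import Web3

DEPOSIT_CHAIN_ID = 1    # mainnet values from the configuration table above
DEPOSIT_NETWORK_ID = 1

# Hypothetical endpoint; point this at your own Eth1 node.
w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))

# Refuse to interact with the deposit contract on the wrong chain/network.
assert w3.eth.chain_id == DEPOSIT_CHAIN_ID, "unexpected chain-id"
assert int(w3.net.version) == DEPOSIT_NETWORK_ID, "unexpected network-id"
```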
### `deposit` function

View File

@ -41,7 +41,7 @@ It consists of four main sections:
- [Requesting side](#requesting-side)
- [Responding side](#responding-side)
- [Encoding strategies](#encoding-strategies)
- [SSZ-encoding strategy (with or without Snappy)](#ssz-encoding-strategy-with-or-without-snappy)
- [SSZ-snappy encoding strategy](#ssz-snappy-encoding-strategy)
- [Messages](#messages)
- [Status](#status)
- [Goodbye](#goodbye)
@ -79,6 +79,7 @@ It consists of four main sections:
- [Why must all clients use the same gossip topic instead of one negotiated between each peer pair?](#why-must-all-clients-use-the-same-gossip-topic-instead-of-one-negotiated-between-each-peer-pair)
- [Why are the topics strings and not hashes?](#why-are-the-topics-strings-and-not-hashes)
- [Why are we overriding the default libp2p pubsub `message-id`?](#why-are-we-overriding-the-default-libp2p-pubsub-message-id)
- [Why are these specific gossip parameters chosen?](#why-are-these-specific-gossip-parameters-chosen)
- [Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets?](#why-is-there-maximum_gossip_clock_disparity-when-validating-slot-ranges-of-messages-in-gossip-subnets)
- [Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets?](#why-are-there-attestation_subnet_count-attestation-subnets)
- [Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?](#why-are-attestations-limited-to-be-broadcast-on-gossip-channels-within-slots_per_epoch-slots)
@ -201,7 +202,7 @@ and will in most cases be out of sync with the ENR sequence number.
## The gossip domain: gossipsub
Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) libp2p Protocol
Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) libp2p Protocol
including the [gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) extension.
**Protocol ID:** `/meshsub/1.1.0`
@ -210,16 +211,17 @@ including the [gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsu
*Note*: Parameters listed here are subject to a large-scale network feasibility study.
The following gossipsub [parameters](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub#meshsub-an-overlay-mesh-router) will be used:
The following gossipsub [parameters](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md#parameters) will be used:
- `D` (topic stable mesh target count): 6
- `D_low` (topic stable mesh low watermark): 4
- `D_low` (topic stable mesh low watermark): 5
- `D_high` (topic stable mesh high watermark): 12
- `D_lazy` (gossip target): 6
- `heartbeat_interval` (frequency of heartbeat, seconds): 0.7
- `fanout_ttl` (ttl for fanout maps for topics we are not subscribed to but have published to, seconds): 60
- `gossip_advertise` (number of windows to gossip about): 3
- `gossip_history` (number of heartbeat intervals to retain message IDs): 5
- `heartbeat_interval` (frequency of heartbeat, seconds): 1
- `mcache_len` (number of windows to retain full messages in cache for `IWANT` responses): 6
- `mcache_gossip` (number of windows to gossip about): 3
- `seen_ttl` (number of heartbeat intervals to retain message IDs): 550
### Topics and messages
@ -293,15 +295,18 @@ The following validations MUST pass before forwarding the `signed_beacon_block`
(a client MAY choose to validate and store such blocks for additional purposes -- e.g. slashing detection, archive nodes, etc).
- _[IGNORE]_ The block is the first block with valid signature received for the proposer for the slot, `signed_beacon_block.message.slot`.
- _[REJECT]_ The proposer signature, `signed_beacon_block.signature`, is valid with respect to the `proposer_index` pubkey.
- _[IGNORE]_ The block's parent (defined by `block.parent_root`) has been seen
(via both gossip and non-gossip sources)
(a client MAY queue blocks for processing once the parent block is retrieved).
- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation.
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- i.e.
`get_ancestor(store, block.parent_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
== store.finalized_checkpoint.root`
- _[REJECT]_ The block is proposed by the expected `proposer_index` for the block's slot
in the context of the current shuffling (defined by `parent_root`/`slot`).
If the `proposer_index` cannot immediately be verified against the expected shuffling,
the block MAY be queued for later processing while proposers for the block's branch are calculated --
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
- _[IGNORE]_ The block's parent (defined by `block.parent_root`) has been seen
(via both gossip and non-gossip sources)
(a client MAY queue blocks for processing once the parent block is retrieved).
- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation.
##### `beacon_aggregate_and_proof`
@ -331,6 +336,10 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
(via both gossip and non-gossip sources)
(a client MAY queue aggregates for processing once block is retrieved).
- _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation.
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `aggregate.data.beacon_block_root` -- i.e.
`get_ancestor(store, aggregate.data.beacon_block_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
== store.finalized_checkpoint.root`
##### `voluntary_exit`
@ -391,6 +400,11 @@ The following validations MUST pass before forwarding the `attestation` on the s
(via both gossip and non-gossip sources)
(a client MAY queue aggregates for processing once block is retrieved).
- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation.
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `attestation.data.beacon_block_root` -- i.e.
`get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
== store.finalized_checkpoint.root`
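The finality-ancestor condition above is shared by the block, aggregate, and attestation validations. A small helper in the style of the executable spec captures it; `get_ancestor`, `compute_start_slot_at_epoch`, `Store`, and `Root` are the existing fork-choice and beacon-chain definitions, and the helper itself is only a sketch:
```python
def is_finalized_checkpoint_ancestor(store: Store, block_root: Root) -> bool:
    """
    Sketch of the shared [REJECT] condition: the block identified by ``block_root``
    (``block.parent_root`` for blocks, ``data.beacon_block_root`` for attestations
    and aggregates) must descend from the current finalized checkpoint.
    """
    finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
    return get_ancestor(store, block_root, finalized_slot) == store.finalized_checkpoint.root
```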
#### Attestations and Aggregation
@ -548,20 +562,19 @@ Clients MUST treat as valid any byte sequences.
### Encoding strategies
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction.
Two values are possible at this time:
Only one value is possible at this time:
- `ssz`: the contents are [SSZ-encoded](../../ssz/simple-serialize.md).
This encoding type MUST be supported by all clients.
- `ssz_snappy`: The contents are first [SSZ-encoded](../../ssz/simple-serialize.md)
and then compressed with [Snappy](https://github.com/google/snappy) frames compression.
For objects containing a single field, only the field is SSZ-encoded, not a container with a single field.
For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s.
- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) frames compression.
This encoding type MUST be supported by all clients.
#### SSZ-encoding strategy (with or without Snappy)
#### SSZ-snappy encoding strategy
The [SimpleSerialize (SSZ) specification](../../ssz/simple-serialize.md) outlines how objects are SSZ-encoded.
If the Snappy variant is selected, we feed the serialized form of the object to the Snappy compressor on encoding.
To achieve snappy encoding on top of SSZ, we feed the serialized form of the object to the Snappy compressor on encoding.
The inverse happens on decoding.
Snappy has two formats: "block" and "frames" (streaming).
@ -570,14 +583,14 @@ To support large requests and response chunks, snappy-framing is used.
Since snappy frame contents [have a maximum size of `65536` bytes](https://github.com/google/snappy/blob/master/framing_format.txt#L104)
and frame headers are just `identifier (1) + checksum (4)` bytes, the expected buffering of a single frame is acceptable.
**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST encode the length of the raw SSZ bytes,
**Encoding-dependent header:** Req/Resp protocols using the `ssz_snappy` encoding strategy MUST encode the length of the raw SSZ bytes,
encoded as an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
*Writing*: By first computing and writing the SSZ byte length, the SSZ encoder can then directly write the chunk contents to the stream.
If Snappy is applied, it can be passed through a buffered Snappy writer to compress frame by frame.
When Snappy is applied, it can be passed through a buffered Snappy writer to compress frame by frame.
*Reading*: After reading the expected SSZ byte length, the SSZ decoder can directly read the contents from the stream.
If snappy is applied, it can be passed through a buffered Snappy reader to decompress frame by frame.
When snappy is applied, it can be passed through a buffered Snappy reader to decompress frame by frame.
Before reading the payload, the header MUST be validated:
- The unsigned protobuf varint used for the length-prefix MUST not be longer than 10 bytes, which is sufficient for any `uint64`.
@ -586,7 +599,6 @@ Before reading the payload, the header MUST be validated:
After reading a valid header, the payload MAY be read, while maintaining the size constraints from the header.
A reader SHOULD NOT read more than `max_encoded_len(n)` bytes after reading the SSZ length-prefix `n` from the header.
- For `ssz` this is: `n`
- For `ssz_snappy` this is: `32 + n + n // 6`.
This is considered the [worst-case compression result](https://github.com/google/snappy/blob/537f4ad6240e586970fe554614542e9717df7902/snappy.cc#L98) by Snappy.
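For concreteness, here is a minimal Python sketch of the header handling described above; the varint encoder is standard, `max_encoded_len` is the Snappy bound quoted here, and the Snappy frame compression itself is left out:
```python
def encode_varint(n: int) -> bytes:
    """Unsigned protobuf varint; at most 10 bytes for any uint64 value."""
    out = bytearray()
    while True:
        byte = n & 0x7F
        n >>= 7
        if n:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

def max_encoded_len(n: int) -> int:
    """Worst-case ssz_snappy payload size for ``n`` raw SSZ bytes."""
    return 32 + n + n // 6

def write_length_prefix(ssz_bytes: bytes) -> bytes:
    """The length-prefix encodes the *uncompressed* SSZ byte length."""
    return encode_varint(len(ssz_bytes))

assert len(encode_varint(2**64 - 1)) == 10   # the 10-byte validation bound above
assert max_encoded_len(600) == 32 + 600 + 100
```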
@ -1159,6 +1171,25 @@ Some examples of where messages could be duplicated:
Partial aggregates could be duplicated
* Clients re-publishing seen messages
### Why are these specific gossip parameters chosen?
- `D`, `D_low`, `D_high`, `D_lazy`: recommended defaults.
- `heartbeat_interval`: 0.7 seconds, recommended for eth2 in the [GossipSub evaluation report by Protocol Labs](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4).
- `fanout_ttl`: 60 seconds, recommended default.
Fanout is primarily used by committees publishing attestations to subnets.
This happens once per epoch per validator and the subnet changes each epoch
so there is little to gain from increasing `fanout_ttl` beyond the recommended default.
- `mcache_len`: 6, increase by one to ensure that mcache is around for long
enough for `IWANT`s to respond to `IHAVE`s in the context of the shorter
`heartbeat_interval`. If `mcache_gossip` is increased, this param should be
increased to be at least `3` (~2 seconds) more than `mcache_gossip`.
- `mcache_gossip`: 3, recommended default. This can be increased to 5 or 6
(~4 seconds) if gossip times are longer than expected and the current window
does not provide enough responsiveness during adverse conditions.
- `seen_ttl`: `SLOTS_PER_EPOCH * SECONDS_PER_SLOT / heartbeat_interval = approx. 550`.
Attestation gossip validity is bounded by an epoch, so this is the safe max bound.
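A quick check of the `seen_ttl` arithmetic, using the mainnet values `SLOTS_PER_EPOCH = 32` and `SECONDS_PER_SLOT = 12` (both constants are taken from the mainnet configuration rather than restated in this section):
```python
SLOTS_PER_EPOCH = 32       # mainnet
SECONDS_PER_SLOT = 12      # mainnet
heartbeat_interval = 0.7   # seconds, as chosen above

epoch_duration = SLOTS_PER_EPOCH * SECONDS_PER_SLOT      # 384 seconds
seen_ttl = epoch_duration / heartbeat_interval           # ~548.6 heartbeat intervals
assert round(seen_ttl, 1) == 548.6                       # rounded up to 550 above
```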
### Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets?
For some gossip channels (e.g. those for Attestations and BeaconBlocks),
@ -1278,7 +1309,7 @@ Thus, libp2p transparently handles message delimiting in the underlying stream.
libp2p streams are full-duplex, and each party is responsible for closing their write side (like in TCP).
We can therefore use stream closure to mark the end of the request and response independently.
Nevertheless, in the case of `ssz` and `ssz_snappy`, messages are still length-prefixed with the length of the underlying data:
Nevertheless, in the case of `ssz_snappy`, messages are still length-prefixed with the length of the underlying data:
* A basic reader can prepare a correctly sized buffer before reading the message
* A more advanced reader can stream-decode SSZ given the length of the SSZ data.
* Alignment with protocols like gRPC over HTTP/2 that prefix with length

View File

@ -111,7 +111,7 @@ The validator constructs their `withdrawal_credentials` via the following:
### Submit deposit
In Phase 0, all incoming validator deposits originate from the Ethereum 1.0 proof-of-work chain. Deposits are made to the [deposit contract](./deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`.
In Phase 0, all incoming validator deposits originate from the Ethereum 1.0 chain defined by `DEPOSIT_CHAIN_ID` and `DEPOSIT_NETWORK_ID`. Deposits are made to the [deposit contract](./deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`.
To submit a deposit:
@ -465,7 +465,7 @@ def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSigna
def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool:
committee = get_beacon_committee(state, slot, index)
modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)
return bytes_to_int(hash(slot_signature)[0:8]) % modulo == 0
return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
```
#### Construct aggregate

View File

@ -581,8 +581,8 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard
"""
epoch = compute_epoch_at_slot(slot)
committee = get_shard_committee(beacon_state, epoch, shard)
seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_COMMITTEE) + int_to_bytes(slot, length=8))
r = bytes_to_int(seed[:8])
seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_COMMITTEE) + uint_to_bytes(slot))
r = bytes_to_uint64(seed[:8])
return committee[r % len(committee)]
```

View File

@ -467,7 +467,7 @@ def get_light_client_slot_signature(state: BeaconState, slot: Slot, privkey: int
def is_light_client_aggregator(state: BeaconState, slot: Slot, slot_signature: BLSSignature) -> bool:
committee = get_light_client_committee(state, compute_epoch_at_slot(slot))
modulo = max(1, len(committee) // TARGET_LIGHT_CLIENT_AGGREGATORS_PER_SLOT)
return bytes_to_int(hash(slot_signature)[0:8]) % modulo == 0
return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
```
#### Construct aggregate

View File

@ -1 +1 @@
0.12.1
0.12.2

View File

@ -59,8 +59,8 @@ def bls_default(request):
def bls_type(request):
bls_type = request.config.getoption("--bls-type")
if bls_type == "py_ecc":
bls_utils.bls = bls_utils.py_ecc_bls
bls_utils.use_py_ecc()
elif bls_type == "milagro":
bls_utils.bls = bls_utils.milagro_bls
bls_utils.use_milagro()
else:
raise Exception(f"unrecognized bls type: {bls_type}")

View File

@ -1,4 +1,4 @@
from eth2spec.test.context import spec_state_test, never_bls, with_all_phases
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.state import next_epoch_via_block
from eth2spec.test.helpers.attestations import next_epoch_with_attestations
@ -30,7 +30,6 @@ def check_finality(spec,
@with_all_phases
@spec_state_test
@never_bls
def test_finality_no_updates_at_genesis(spec, state):
assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
@ -54,7 +53,6 @@ def test_finality_no_updates_at_genesis(spec, state):
@with_all_phases
@spec_state_test
@never_bls
def test_finality_rule_4(spec, state):
# get past first two epochs that finality does not run on
next_epoch_via_block(spec, state)
@ -80,7 +78,6 @@ def test_finality_rule_4(spec, state):
@with_all_phases
@spec_state_test
@never_bls
def test_finality_rule_1(spec, state):
# get past first two epochs that finality does not run on
next_epoch_via_block(spec, state)
@ -108,7 +105,6 @@ def test_finality_rule_1(spec, state):
@with_all_phases
@spec_state_test
@never_bls
def test_finality_rule_2(spec, state):
# get past first two epochs that finality does not run on
next_epoch_via_block(spec, state)
@ -138,7 +134,6 @@ def test_finality_rule_2(spec, state):
@with_all_phases
@spec_state_test
@never_bls
def test_finality_rule_3(spec, state):
"""
Test scenario described here

View File

@ -313,6 +313,28 @@ def test_empty_epoch_transition_not_finalizing(spec, state):
assert state.balances[index] < pre_balances[index]
@with_all_phases
@spec_state_test
def test_proposer_self_slashing(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
assert not state.validators[block.proposer_index].slashed
proposer_slashing = get_valid_proposer_slashing(
spec, state, slashed_index=block.proposer_index, signed_1=True, signed_2=True)
block.body.proposer_slashings.append(proposer_slashing)
# The header is processed *before* the block body:
# the proposer was not slashed before the body, thus the block is valid.
signed_block = state_transition_and_sign_block(spec, state, block)
# The proposer slashed themselves.
assert state.validators[block.proposer_index].slashed
yield 'blocks', [signed_block]
yield 'post', state
@with_all_phases
@spec_state_test
def test_proposer_slashing(spec, state):

View File

@ -14,6 +14,22 @@ Z2_SIGNATURE = b'\xc0' + b'\x00' * 95
STUB_COORDINATES = _signature_to_G2(Z2_SIGNATURE)
def use_milagro():
"""
Shortcut to use Milagro as BLS library
"""
global bls
bls = milagro_bls
def use_py_ecc():
"""
Shortcut to use Py-ecc as BLS library
"""
global bls
bls = py_ecc_bls
def only_with_bls(alt_return=None):
"""
Decorator factory to make a function only run when BLS is active. Otherwise return the default.

View File

@ -1,17 +1,9 @@
from hashlib import sha256
from typing import Dict, Union
from remerkleable.byte_arrays import Bytes32
from typing import Union
ZERO_BYTES32 = b'\x00' * 32
def _hash(x: Union[bytes, bytearray, memoryview]) -> bytes:
return sha256(x).digest()
hash_cache: Dict[bytes, bytes] = {}
def hash(x: bytes) -> bytes:
if x in hash_cache:
return hash_cache[x]
return _hash(x)
def hash(x: Union[bytes, bytearray, memoryview]) -> Bytes32:
return Bytes32(sha256(x).digest())

View File

@ -1,4 +1,6 @@
from typing import TypeVar
from remerkleable.basic import uint
from remerkleable.core import View
from remerkleable.byte_arrays import Bytes32
@ -11,6 +13,10 @@ def hash_tree_root(obj: View) -> Bytes32:
return Bytes32(obj.get_backing().merkle_root())
def uint_to_bytes(n: uint) -> bytes:
return serialize(n)
V = TypeVar('V', bound=View)

View File

@ -0,0 +1,43 @@
# Finality tests
These tests exercise the finality rules.
- `finality`: transitions triggered by one or more blocks.
## Test case format
### `meta.yaml`
```yaml
description: string -- Optional. Description of test case, purely for debugging purposes.
bls_setting: int -- see general test-format spec.
blocks_count: int -- the number of blocks processed in this test.
```
### `pre.yaml`
A YAML-encoded `BeaconState`, the state before running the block transitions.
Also available as `pre.ssz`.
### `blocks_<index>.yaml`
A series of files, with `<index>` in range `[0, blocks_count)`. Blocks need to be processed in order,
following the main transition function (i.e. process slot and epoch transitions in between blocks as normal).
Each file is a YAML-encoded `SignedBeaconBlock`.
Each block is also available as `blocks_<index>.ssz`.
### `post.yaml`
A YAML-encoded `BeaconState`, the state after applying the block transitions.
Also available as `post.ssz`.
## Condition
The resulting state should match the expected `post` state, or if the `post` state is left blank,
the handler should reject the series of blocks as invalid.
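As an illustration of how a consumer might drive this format, here is a hedged sketch; `load_state` and `load_block` stand in for the client's own SSZ (or YAML) deserialization, and `spec.state_transition` is the Phase 0 main transition function:
```python
from pathlib import Path

def run_finality_case(case_dir: Path, blocks_count: int, spec, load_state, load_block):
    """Apply blocks_0..blocks_{blocks_count-1} to the pre state and compare with post."""
    state = load_state(case_dir / "pre.ssz")
    try:
        for i in range(blocks_count):
            signed_block = load_block(case_dir / f"blocks_{i}.ssz")
            # Main transition function: slot and epoch processing happen inside it.
            spec.state_transition(state, signed_block)
    except Exception:
        # A blank/missing post state means the block series must be rejected.
        assert not (case_dir / "post.ssz").exists()
        return
    assert state == load_state(case_dir / "post.ssz")
```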

View File

@ -149,7 +149,9 @@ def case03_aggregate():
else:
raise Exception("Should have been INVALID")
yield f'aggregate_na_pubkeys', {
# No signatures to aggregate. Follow IETF BLS spec, return `None` to represent INVALID.
# https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-02#section-2.8
yield f'aggregate_na_signatures', {
'input': [],
'output': None,
}
@ -319,6 +321,7 @@ def create_provider(handler_name: str,
if __name__ == "__main__":
bls.use_py_ecc() # Py-ecc is chosen instead of Milagro, since the code is better understood to be correct.
gen_runner.run_generator("bls", [
create_provider('sign', case01_sign),
create_provider('verify', case02_verify),

View File

@ -14,6 +14,7 @@ from gen_from_tests.gen import generate_from_tests
from importlib import reload
from eth2spec.config import config_util
from eth2spec.test.context import PHASE0
from eth2spec.utils import bls
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
@ -22,6 +23,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]:

View File

@ -0,0 +1,5 @@
# Finality tests
Finality tests cover regular state-transitions in a common block-list format to test finality rules.
Information on the format of the tests can be found in the [finality test formats documentation](../../formats/finality/README.md).

View File

@ -0,0 +1,39 @@
from typing import Iterable
from importlib import reload
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from eth2spec.test.context import PHASE0
from eth2spec.test.phase0.finality import test_finality
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.utils import bls
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
def prepare_fn(configs_path: str) -> str:
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]:
return generate_from_tests(
runner_name='finality',
handler_name=handler_name,
src=tests_src,
fork_name=PHASE0,
)
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
if __name__ == "__main__":
gen_runner.run_generator("finality", [
create_provider('finality', test_finality, 'minimal'),
create_provider('finality', test_finality, 'mainnet'),
])

View File

@ -0,0 +1,2 @@
../../core/gen_helpers
../../../

View File

@ -1,13 +1,14 @@
from typing import Iterable
from eth2spec.test.context import PHASE0
from eth2spec.test.genesis import test_initialization, test_validity
from eth2spec.test.phase0.genesis import test_initialization, test_validity
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from eth2spec.phase0 import spec as spec
from importlib import reload
from eth2spec.config import config_util
from eth2spec.utils import bls
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
@ -15,6 +16,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
def prepare_fn(configs_path: str) -> str:
config_util.prepare_config(configs_path, config_name)
reload(spec)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]:

View File

@ -16,6 +16,7 @@ from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.context import PHASE0
from eth2spec.utils import bls
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
@ -24,6 +25,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]:

View File

@ -12,6 +12,7 @@ from gen_from_tests.gen import generate_from_tests
from importlib import reload
from eth2spec.config import config_util
from eth2spec.test.context import PHASE0
from eth2spec.utils import bls
def create_provider(tests_src, config_name: str) -> gen_typing.TestProvider:
@ -20,6 +21,7 @@ def create_provider(tests_src, config_name: str) -> gen_typing.TestProvider:
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]:

View File

@ -9,6 +9,7 @@ from eth2spec.test.phase0.sanity import test_blocks, test_slots
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.utils import bls
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
@ -17,6 +18,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]: