merge dev -> carl-exec-phase1

Carl Beekhuizen 2019-05-20 18:20:13 +02:00
commit 6b062405c9
No known key found for this signature in database
GPG Key ID: D05CA176D0020646
10 changed files with 167 additions and 132 deletions

View File

@ -52,7 +52,7 @@ jobs:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
paths:
- ~/specs-repo
install_test:
install_env:
docker:
- image: circleci/python:3.6
working_directory: ~/specs-repo
@ -60,13 +60,13 @@ jobs:
- restore_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_cached_venv:
venv_name: v1-pyspec
venv_name: v1-pyspec-03
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}'
- run:
name: Install pyspec requirements
command: make install_test
command: make install_test && make install_lint
- save_cached_venv:
venv_name: v1-pyspec
venv_name: v1-pyspec-03
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}'
venv_path: ./test_libs/pyspec/venv
test:
@ -77,7 +77,7 @@ jobs:
- restore_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_cached_venv:
venv_name: v1-pyspec
venv_name: v1-pyspec-03
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}'
- run:
name: Run py-tests
@ -92,20 +92,22 @@ jobs:
- restore_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_cached_venv:
venv_name: v1-pyspec
venv_name: v1-pyspec-03
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}'
- run:
name: Run linter
command: make install_lint && make pyspec && make lint
command: make lint
workflows:
version: 2.1
test_spec:
jobs:
- checkout_specs
- lint
- install_test:
- install_env:
requires:
- checkout_specs
- test:
requires:
- install_test
- install_env
- lint:
requires:
- test

View File

@ -54,7 +54,7 @@ citest: $(PY_SPEC_ALL_TARGETS)
install_lint:
cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install flake8==3.5.0
lint:
lint: $(PY_SPEC_ALL_TARGETS)
cd $(PY_SPEC_DIR); . venv/bin/activate; \
flake8 --max-line-length=120 ./eth2spec;

View File

@ -62,6 +62,7 @@ NEW_TYPES = {
'BLSPubkey': 'bytes',
'BLSSignature': 'bytes',
'Store': 'None',
'Hash': 'bytes'
}
SUNDRY_FUNCTIONS = '''
# Monkey patch validator compute committee code

View File

@ -281,8 +281,9 @@ The types are defined topologically to aid in facilitating an executable version
{
# Shard number
'shard': 'uint64',
# Epoch number
'epoch': 'uint64',
# Crosslinking data from epochs [start....end-1]
'start_epoch': 'uint64',
'end_epoch': 'uint64',
# Root of the previous crosslink
'parent_root': 'bytes32',
# Root of the crosslinked shard data since the previous crosslink
@ -647,7 +648,7 @@ def get_previous_epoch(state: BeaconState) -> Epoch:
Return the current epoch if it's genesis epoch.
"""
current_epoch = get_current_epoch(state)
return (current_epoch - 1) if current_epoch > GENESIS_EPOCH else current_epoch
return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else current_epoch - 1
```
### `get_current_epoch`
@ -887,7 +888,7 @@ def get_shuffled_index(index: ValidatorIndex, index_count: int, seed: Bytes32) -
# See the 'generalized domain' algorithm on page 3
for round in range(SHUFFLE_ROUND_COUNT):
pivot = bytes_to_int(hash(seed + int_to_bytes(round, length=1))[0:8]) % index_count
flip = (pivot - index) % index_count
flip = (pivot + index_count - index) % index_count
position = max(index, flip)
source = hash(seed + int_to_bytes(round, length=1) + int_to_bytes(position // 256, length=4))
byte = source[(position % 256) // 8]
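The rewritten `flip` adds `index_count` before subtracting, so the intermediate value never goes negative. A minimal, self-contained sketch (not spec code) of why this matters for unsigned-integer implementations:

```python
# Python's % already returns a non-negative result, so both forms agree here,
# but the old form produces a negative intermediate that would underflow a uint64.
index_count = 64
pivot = 5
index = 20

old_flip = (pivot - index) % index_count                # pivot - index == -15
new_flip = (pivot + index_count - index) % index_count  # intermediate stays >= 0

assert old_flip == new_flip == 49
assert 0 <= pivot + index_count - index < 2 * index_count
```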
@ -1189,7 +1190,11 @@ Let `genesis_state = get_genesis_beacon_state(genesis_deposits, eth2genesis.gene
```python
def get_genesis_beacon_state(deposits: List[Deposit], genesis_time: int, genesis_eth1_data: Eth1Data) -> BeaconState:
state = BeaconState(genesis_time=genesis_time, latest_eth1_data=genesis_eth1_data)
state = BeaconState(
genesis_time=genesis_time,
latest_eth1_data=genesis_eth1_data,
latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
)
# Process genesis deposits
for deposit in deposits:
@ -1362,19 +1367,19 @@ def process_justification_and_finalization(state: BeaconState) -> None:
# Process finalizations
bitfield = state.justification_bitfield
# The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch == current_epoch - 3:
if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch + 3 == current_epoch:
state.finalized_epoch = old_previous_justified_epoch
state.finalized_root = get_block_root(state, state.finalized_epoch)
# The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch == current_epoch - 2:
if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch + 2 == current_epoch:
state.finalized_epoch = old_previous_justified_epoch
state.finalized_root = get_block_root(state, state.finalized_epoch)
# The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch == current_epoch - 2:
if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch + 2 == current_epoch:
state.finalized_epoch = old_current_justified_epoch
state.finalized_root = get_block_root(state, state.finalized_epoch)
# The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch == current_epoch - 1:
if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch + 1 == current_epoch:
state.finalized_epoch = old_current_justified_epoch
state.finalized_root = get_block_root(state, state.finalized_epoch)
```
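The finality conditions themselves are unchanged; the comparisons are rewritten as `old_epoch + k == current_epoch` so nothing is subtracted from `current_epoch`, which keeps the checks safe for unsigned epochs near genesis. A minimal sketch, under the assumption that bit `i` of `justification_bitfield` records whether the epoch `i` epochs before the current one was justified:

```python
# Hypothetical bitfield: the 2nd, 3rd and 4th most recent epochs were justified,
# the current (1st most recent) epoch was not.
bitfield = 0b1110

assert (bitfield >> 1) % 8 == 0b111   # 2nd/3rd/4th most recent epochs justified
assert (bitfield >> 0) % 4 != 0b11    # 1st/2nd most recent are not both justified

# Epoch comparison rewritten without subtraction:
current_epoch = 4
old_previous_justified_epoch = 1
assert old_previous_justified_epoch + 3 == current_epoch
```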
@ -1728,7 +1733,8 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
# Check FFG data, crosslink data, and signature
assert ffg_data == (data.source_epoch, data.source_root, data.target_epoch)
assert data.crosslink.epoch == min(data.target_epoch, parent_crosslink.epoch + MAX_EPOCHS_PER_CROSSLINK)
assert data.crosslink.start_epoch == parent_crosslink.end_epoch
assert data.crosslink.end_epoch == min(data.target_epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)
assert data.crosslink.parent_root == hash_tree_root(parent_crosslink)
assert data.crosslink.data_root == ZERO_HASH # [to be removed in phase 1]
validate_indexed_attestation(state, convert_to_indexed(state, attestation))
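With the crosslink now carrying an explicit `start_epoch`/`end_epoch` range instead of a single `epoch`, these assertions enforce that each crosslink starts exactly where its parent ended and spans at most `MAX_EPOCHS_PER_CROSSLINK` epochs. A minimal, self-contained sketch (assumed constant value, plain ints instead of SSZ containers):

```python
MAX_EPOCHS_PER_CROSSLINK = 64  # illustrative assumption

def next_crosslink_epochs(parent_end_epoch: int, target_epoch: int) -> tuple:
    # A valid child crosslink picks up exactly where the parent ended...
    start_epoch = parent_end_epoch
    # ...and may cover at most MAX_EPOCHS_PER_CROSSLINK epochs up to the target.
    end_epoch = min(target_epoch, parent_end_epoch + MAX_EPOCHS_PER_CROSSLINK)
    return start_epoch, end_epoch

assert next_crosslink_epochs(10, 100) == (10, 74)  # capped by the maximum span
assert next_crosslink_epochs(10, 12) == (10, 12)   # otherwise bounded by the target epoch
```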

View File

@ -35,19 +35,9 @@
- [`empty`](#empty)
- [`get_crosslink_chunk_count`](#get_crosslink_chunk_count)
- [`get_custody_chunk_bit`](#get_custody_chunk_bit)
- [`get_chunk_bits_root`](#get_chunk_bits_root)
- [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period)
- [`get_validators_custody_reveal_period`](#get_validators_custody_reveal_period)
- [`get_chunk_bits_root`](#get_chunk_bits_root)
- [`replace_empty_or_append`](#replace_empty_or_append)
- [Per-block processing](#per-block-processing)
- [Operations](#operations)
- [Custody key reveals](#custody-key-reveals)
- [Early derived secret reveals](#early-derived-secret-reveals)
- [Chunk challenges](#chunk-challenges)
- [Bit challenges](#bit-challenges)
- [Custody responses](#custody-responses)
- [Per-epoch processing](#per-epoch-processing)
- [Handling of custody-related deadlines](#handling-of-custody-related-deadlines)
<!-- /TOC -->
@ -148,8 +138,8 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
'challenge_index': 'uint64',
'challenger_index': ValidatorIndex,
'responder_index': ValidatorIndex,
'deadline': Epoch,
'data_root': 'bytes32',
'inclusion_epoch': Epoch,
'data_root': Hash,
'depth': 'uint64',
'chunk_index': 'uint64',
}
@ -162,8 +152,8 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
'challenge_index': 'uint64',
'challenger_index': ValidatorIndex,
'responder_index': ValidatorIndex,
'deadline': Epoch,
'data_root': 'bytes32',
'inclusion_epoch': Epoch,
'data_root': Hash,
'chunk_count': 'uint64',
'chunk_bits_merkle_root': 'bytes32',
'responder_key': BLSSignature,
@ -270,12 +260,10 @@ The `empty` function accepts an SSZ type as input and returns an object of that
### `get_crosslink_chunk_count`
```python
def get_custody_chunk_count(attestation: Attestation) -> int:
crosslink_start_epoch = attestation.data.latest_crosslink.epoch
crosslink_end_epoch = slot_to_epoch(attestation.data.slot)
crosslink_crosslink_length = min(MAX_EPOCHS_PER_CROSSLINK, crosslink_end_epoch - crosslink_start_epoch)
def get_custody_chunk_count(crosslink: Crosslink) -> int:
crosslink_length = min(MAX_EPOCHS_PER_CROSSLINK, crosslink.end_epoch - crosslink.start_epoch)
chunks_per_epoch = 2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK
return crosslink_crosslink_length * chunks_per_epoch
return crosslink_length * chunks_per_epoch
```
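With the epoch range now stored on the crosslink itself, the chunk count derives directly from `end_epoch - start_epoch`. A self-contained sketch of the calculation; the constant values are illustrative assumptions, not taken from the spec:

```python
MAX_EPOCHS_PER_CROSSLINK = 64
BYTES_PER_SHARD_BLOCK = 16384
SLOTS_PER_EPOCH = 64
BYTES_PER_CUSTODY_CHUNK = 512

def custody_chunk_count(start_epoch: int, end_epoch: int) -> int:
    crosslink_length = min(MAX_EPOCHS_PER_CROSSLINK, end_epoch - start_epoch)
    chunks_per_epoch = 2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK
    return crosslink_length * chunks_per_epoch

# With these numbers each epoch contributes 4096 chunks, so a crosslink
# spanning 3 epochs covers 3 * 4096 = 12288 custody chunks.
assert custody_chunk_count(10, 13) == 12288
```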
### `get_custody_chunk_bit`
@ -323,7 +311,6 @@ def get_validators_custody_reveal_period(state: BeaconState,
### `replace_empty_or_append`
```python
def replace_empty_or_append(list: List[Any], new_element: Any) -> int:
for i in range(len(list)):
@ -492,14 +479,14 @@ def process_chunk_challenge(state: BeaconState,
record.chunk_index != challenge.chunk_index
)
# Verify depth
depth = math.ceil(math.log2(get_custody_chunk_count(challenge.attestation)))
depth = math.ceil(math.log2(get_custody_chunk_count(challenge.attestation.data.crosslink)))
assert challenge.chunk_index < 2**depth
# Add new chunk challenge record
new_record = CustodyChunkChallengeRecord(
challenge_index=state.custody_challenge_index,
challenger_index=get_beacon_proposer_index(state),
responder_index=challenge.responder_index,
deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE,
inclusion_epoch=get_current_epoch(state),
data_root=challenge.attestation.data.crosslink.data_root,
depth=depth,
chunk_index=challenge.chunk_index,
@ -542,10 +529,9 @@ def process_bit_challenge(state: BeaconState,
attesters = get_attesting_indices(state, challenge.attestation.data, challenge.attestation.aggregation_bitfield)
assert challenge.responder_index in attesters
# A validator can be the challenger or responder for at most one challenge at a time
# A validator can be the challenger for at most one challenge at a time
for record in state.custody_bit_challenge_records:
assert record.challenger_index != challenge.challenger_index
assert record.responder_index != challenge.responder_index
# Verify the responder is a valid custody key
epoch_to_sign = get_randao_epoch_for_custody_period(
@ -567,7 +553,7 @@ def process_bit_challenge(state: BeaconState,
)
# Verify the chunk count
chunk_count = get_custody_chunk_count(challenge.attestation)
chunk_count = get_custody_chunk_count(challenge.attestation.data.crosslink)
assert verify_bitfield(challenge.chunk_bits, chunk_count)
# Verify the first bit of the hash of the chunk bits does not equal the custody bit
custody_bit = get_bitfield_bit(challenge.attestation.custody_bitfield, attesters.index(challenge.responder_index))
@ -577,7 +563,7 @@ def process_bit_challenge(state: BeaconState,
challenge_index=state.custody_challenge_index,
challenger_index=challenge.challenger_index,
responder_index=challenge.responder_index,
deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE,
inclusion_epoch=get_current_epoch(state),
data_root=challenge.attestation.data.crosslink.data_root,
chunk_count=chunk_count,
chunk_bits_merkle_root=hash_tree_root(challenge.chunk_bits),
@ -620,6 +606,8 @@ def process_chunk_challenge_response(state: BeaconState,
assert response.chunk_index == challenge.chunk_index
# Verify bit challenge data is null
assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == ZERO_HASH
# Verify minimum delay
assert get_current_epoch(state) >= challenge.inclusion_epoch + ACTIVATION_EXIT_DELAY
# Verify the chunk matches the crosslink data root
assert verify_merkle_branch(
leaf=hash_tree_root(response.chunk),
@ -642,6 +630,9 @@ def process_bit_challenge_response(state: BeaconState,
challenge: CustodyBitChallengeRecord) -> None:
# Verify chunk index
assert response.chunk_index < challenge.chunk_count
# Verify responder has not been slashed
responder = state.validator_registry[challenge.responder_index]
assert not responder.slashed
# Verify the chunk matches the crosslink data root
assert verify_merkle_branch(
leaf=hash_tree_root(response.chunk),
@ -688,13 +679,13 @@ Run `process_challenge_deadlines(state)` immediately after `process_reveal_deadl
```python
def process_challenge_deadlines(state: BeaconState) -> None:
for challenge in state.custody_chunk_challenge_records:
if get_current_epoch(state) > challenge.deadline:
if get_current_epoch(state) > challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE:
slash_validator(state, challenge.responder_index, challenge.challenger_index)
records = state.custody_chunk_challenge_records
records[records.index(challenge)] = CustodyChunkChallengeRecord()
for challenge in state.custody_bit_challenge_records:
if get_current_epoch(state) > challenge.deadline:
if get_current_epoch(state) > challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE:
slash_validator(state, challenge.responder_index, challenge.challenger_index)
records = state.custody_bit_challenge_records
records[records.index(challenge)] = CustodyBitChallengeRecord()
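The challenge records now store `inclusion_epoch` instead of a precomputed `deadline`, and the deadline is derived at the point it is checked. A minimal sketch (the constant value is an illustrative assumption):

```python
CUSTODY_RESPONSE_DEADLINE = 2**14  # illustrative assumption

def challenge_deadline_passed(current_epoch: int, inclusion_epoch: int) -> bool:
    # The responder is slashed once the response window, measured from inclusion, expires.
    return current_epoch > inclusion_epoch + CUSTODY_RESPONSE_DEADLINE

assert not challenge_deadline_passed(current_epoch=100 + CUSTODY_RESPONSE_DEADLINE, inclusion_epoch=100)
assert challenge_deadline_passed(current_epoch=101 + CUSTODY_RESPONSE_DEADLINE, inclusion_epoch=100)
```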
@ -707,6 +698,15 @@ def after_process_final_updates(state: BeaconState) -> None:
current_epoch = get_current_epoch(state)
# Clean up exposed RANDAO key reveals
state.exposed_derived_secrets[current_epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] = []
# Reset withdrawable epochs if challenge records are empty
records = state.custody_chunk_challenge_records + state.custody_bit_challenge_records
validator_indices_in_records = set(
[record.challenger_index for record in records] + [record.responder_index for record in records]
)
for index, validator in enumerate(state.validator_registry):
if index not in validator_indices_in_records:
if validator.exit_epoch != FAR_FUTURE_EPOCH and validator.withdrawable_epoch == FAR_FUTURE_EPOCH:
validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
```
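A self-contained sketch (stripped-down `Validator`, assumed constants) of the new "reset withdrawable epochs" step: validators that are no longer referenced by any open challenge record get their normal withdrawal schedule back.

```python
from dataclasses import dataclass

FAR_FUTURE_EPOCH = 2**64 - 1
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256  # illustrative assumption

@dataclass
class Validator:
    exit_epoch: int = FAR_FUTURE_EPOCH
    withdrawable_epoch: int = FAR_FUTURE_EPOCH

def reset_withdrawable_epochs(validators, indices_in_records):
    for index, validator in enumerate(validators):
        if index in indices_in_records:
            continue  # still involved in an open challenge; withdrawal stays frozen
        if validator.exit_epoch != FAR_FUTURE_EPOCH and validator.withdrawable_epoch == FAR_FUTURE_EPOCH:
            validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY

validators = [Validator(exit_epoch=100), Validator(exit_epoch=100)]
reset_withdrawable_epochs(validators, indices_in_records={1})
assert validators[0].withdrawable_epoch == 100 + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
assert validators[1].withdrawable_epoch == FAR_FUTURE_EPOCH  # index 1 is still in a record
```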
In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope):

View File

@ -41,7 +41,7 @@ Remote method calls are wrapped in a "request" structure:
(
id: uint64
method_id: uint16
body: Request
body: (message_body...)
)
```
@ -55,15 +55,7 @@ and their corresponding responses are wrapped in a "response" structure:
)
```
If an error occurs, a variant of the response structure is returned:
```
(
id: uint64
response_code: uint16
result: bytes
)
```
A union type is used to determine the contents of the `body` field in the request structure. Each "body" entry in the RPC calls below corresponds to one subtype in the `body` type union.
The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](https://www.jsonrpc.org/specification). Specifically:

View File

@ -20,6 +20,8 @@
- [Process deposit](#process-deposit)
- [Validator index](#validator-index)
- [Activation](#activation)
- [Validator assignments](#validator-assignments)
- [Lookahead](#lookahead)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
- [Block proposal](#block-proposal)
- [Block header](#block-header)
@ -45,8 +47,6 @@
- [Aggregation bitfield](#aggregation-bitfield)
- [Custody bitfield](#custody-bitfield)
- [Aggregate signature](#aggregate-signature)
- [Validator assignments](#validator-assignments)
- [Lookahead](#lookahead)
- [How to avoid slashing](#how-to-avoid-slashing)
- [Proposer slashing](#proposer-slashing)
- [Attester slashing](#attester-slashing)
@ -127,13 +127,62 @@ Once a validator is activated, the validator is assigned [responsibilities](#bea
*Note*: There is a maximum validator churn per finalized epoch so the delay until activation is variable depending upon finality, total active validator balance, and the number of validators in the queue to be activated.
## Validator assignments
A validator can get committee assignments for a given epoch using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `epoch <= next_epoch`.
```python
def get_committee_assignment(
state: BeaconState,
epoch: Epoch,
validator_index: ValidatorIndex) -> Tuple[List[ValidatorIndex], Shard, Slot]:
"""
Return the committee assignment in the ``epoch`` for ``validator_index``.
``assignment`` returned is a tuple of the following form:
* ``assignment[0]`` is the list of validators in the committee
* ``assignment[1]`` is the shard to which the committee is assigned
* ``assignment[2]`` is the slot at which the committee is assigned
"""
next_epoch = get_current_epoch(state) + 1
assert epoch <= next_epoch
committees_per_slot = get_epoch_committee_count(state, epoch) // SLOTS_PER_EPOCH
epoch_start_slot = get_epoch_start_slot(epoch)
for slot in range(epoch_start_slot, epoch_start_slot + SLOTS_PER_EPOCH):
offset = committees_per_slot * (slot % SLOTS_PER_EPOCH)
slot_start_shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
for i in range(committees_per_slot):
shard = (slot_start_shard + i) % SHARD_COUNT
committee = get_crosslink_committee(state, epoch, shard)
if validator_index in committee:
return committee, shard, slot
```
A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run with a `state` of the slot in question. Proposer selection is only stable within the context of the current epoch.
```python
def is_proposer(state: BeaconState,
validator_index: ValidatorIndex) -> bool:
return get_beacon_proposer_index(state) == validator_index
```
*Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot.
### Lookahead
The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the epoch in question.
`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting at which future slot they will have to attest and also which shard they should begin syncing (in Phase 1+).
Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
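A hedged usage sketch of the lookahead pattern described above (a client-side pseudo-helper; `plan_next_epoch` is hypothetical, and the spec functions defined earlier are assumed to be in scope):

```python
def plan_next_epoch(state, validator_index):
    next_epoch = get_current_epoch(state) + 1
    assignment = get_committee_assignment(state, next_epoch, validator_index)
    if assignment is None:
        return None
    committee, shard, slot = assignment
    # e.g. schedule an attestation at `slot` and start syncing `shard` (Phase 1+)
    return {'attest_at_slot': slot, 'sync_shard': shard}
```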
## Beacon chain responsibilities
A validator has two primary responsibilities to the beacon chain: [proposing blocks](#block-proposal) and [creating attestations](#attestations-1). Proposals happen infrequently, whereas attestations should be created once per epoch.
### Block proposal
A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function).
A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `is_proposer(state, validator_index)` returns `True`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator creates, signs, and broadcasts a `block` that is a child of `parent` that satisfies a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function).
There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (e.g. at 312500 validators = 10 million ETH, that's once per ~3 weeks).
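A quick worked version of that estimate (`SECONDS_PER_SLOT = 6` is assumed for this spec version):

```python
SECONDS_PER_SLOT = 6          # assumption for this spec version
ACTIVE_VALIDATORS = 312_500   # 312500 * 32 ETH = 10 million ETH staked

# One proposer per slot, so a validator proposes on average once every N slots.
seconds_between_proposals = ACTIVE_VALIDATORS * SECONDS_PER_SLOT
days_between_proposals = seconds_between_proposals / 86_400
assert 21 < days_between_proposals < 23  # roughly three weeks
```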
@ -229,7 +278,7 @@ Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](../core/0_beacon-chain.md#voluntar
### Attestations
A validator is expected to create, sign, and broadcast an attestation during each epoch. The slot during which the validator performs this role is any slot at which `get_crosslink_committees_at_slot(state, slot)` contains a committee that contains `validator_index`.
A validator is expected to create, sign, and broadcast an attestation during each epoch. The committee, assigned shard, and assigned slot for which the validator performs this role during an epoch are defined by `get_committee_assignment(state, epoch, validator_index)`.
A validator should create and broadcast the attestation halfway through the `slot` during which the validator is assigned ― that is, `SECONDS_PER_SLOT * 0.5` seconds after the start of `slot`.
@ -257,10 +306,12 @@ Set `attestation_data.beacon_block_root = signing_root(head_block)`.
##### Crosslink vote
Construct `attestation_data.crosslink` via the following
Construct `attestation_data.crosslink` via the following.
* Set `attestation_data.crosslink.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`.
* Set `attestation_data.crosslink.epoch = min(attestation_data.target_epoch, head_state.current_crosslinks[shard].epoch + MAX_EPOCHS_PER_CROSSLINK)`.
* Set `attestation_data.crosslink.shard = shard` where `shard` is the shard associated with the validator's committee.
* Let `parent_crosslink = head_state.current_crosslinks[shard]`.
* Set `attestation_data.crosslink.start_epoch = parent_crosslink.end_epoch`.
* Set `attestation_data.crosslink.end_epoch = min(attestation_data.target_epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)`.
* Set `attestation_data.crosslink.parent_root = hash_tree_root(head_state.current_crosslinks[shard])`.
* Set `attestation_data.crosslink.data_root = ZERO_HASH`. *Note*: This is a stub for Phase 0.
@ -310,67 +361,6 @@ signed_attestation_data = bls_sign(
)
```
## Validator assignments
A validator can get the current, previous, and next epoch committee assignments using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `previous_epoch <= epoch <= next_epoch`.
```python
def get_committee_assignment(
state: BeaconState,
epoch: Epoch,
validator_index: ValidatorIndex) -> Tuple[List[ValidatorIndex], Shard, Slot]:
"""
Return the committee assignment in the ``epoch`` for ``validator_index``.
``assignment`` returned is a tuple of the following form:
* ``assignment[0]`` is the list of validators in the committee
* ``assignment[1]`` is the shard to which the committee is assigned
* ``assignment[2]`` is the slot at which the committee is assigned
"""
previous_epoch = get_previous_epoch(state)
next_epoch = get_current_epoch(state) + 1
assert previous_epoch <= epoch <= next_epoch
epoch_start_slot = get_epoch_start_slot(epoch)
for slot in range(epoch_start_slot, epoch_start_slot + SLOTS_PER_EPOCH):
crosslink_committees = get_crosslink_committees_at_slot(
state,
slot,
)
selected_committees = [
committee # Tuple[List[ValidatorIndex], Shard]
for committee in crosslink_committees
if validator_index in committee[0]
]
if len(selected_committees) > 0:
validators = selected_committees[0][0]
shard = selected_committees[0][1]
assignment = (validators, shard, slot)
return assignment
```
A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the slot in question. Proposer selection is only stable within the context of the current epoch.
```python
def is_proposer_at_slot(state: BeaconState,
slot: Slot,
validator_index: ValidatorIndex) -> bool:
assert state.slot == slot
return get_beacon_proposer_index(state) == validator_index
```
*Note*: To see if a validator is assigned to proposer during the slot, the validator must run an empty slot transition from the previous state to the current slot using `process_slots(state, current_slot)`.
### Lookahead
The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the slot in question.
`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments which involves noting at which future slot one will have to attest and also which shard one should begin syncing (in Phase 1+).
Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
## How to avoid slashing
"Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed -- [proposer slashing](#proposer-slashing) and [attester slashing](#attester-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed all together by remaining _consistent_ with respect to the messages a validator has previously signed.

View File

@ -9,4 +9,4 @@ def bls_verify_multiple(pubkeys, message_hashes, signature, domain):
def bls_aggregate_pubkeys(pubkeys):
return b'\x42' * 96
return b'\x42' * 48
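The stub length changes because compressed BLS12-381 public keys (G1 points) are 48 bytes while signatures (G2 points) are 96 bytes; a one-line sanity sketch:

```python
STUB_PUBKEY = b'\x42' * 48     # compressed G1 point length
STUB_SIGNATURE = b'\x42' * 96  # compressed G2 point length
assert (len(STUB_PUBKEY), len(STUB_SIGNATURE)) == (48, 96)
```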

View File

@ -49,6 +49,22 @@ def test_success_prevous_epoch(state):
return pre_state, attestation, post_state
def test_success_since_max_epochs_per_crosslink(state):
for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2):
helpers.next_epoch(state)
attestation = helpers.get_valid_attestation(state)
data = attestation.data
assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK
for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
helpers.next_slot(state)
pre_state, post_state = run_attestation_processing(state, attestation)
return pre_state, attestation, post_state
def test_before_inclusion_delay(state):
attestation = helpers.get_valid_attestation(state)
# do not increment slot to allow for inclusion delay
@ -109,7 +125,33 @@ def test_bad_previous_crosslink(state):
for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
helpers.next_slot(state)
state.current_crosslinks[attestation.data.crosslink.shard].epoch += 10
attestation.data.crosslink.parent_root = b'\x27' * 32
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state
def test_bad_crosslink_start_epoch(state):
helpers.next_epoch(state)
attestation = helpers.get_valid_attestation(state)
for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
helpers.next_slot(state)
attestation.data.crosslink.start_epoch += 1
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state
def test_bad_crosslink_end_epoch(state):
helpers.next_epoch(state)
attestation = helpers.get_valid_attestation(state)
for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
helpers.next_slot(state)
attestation.data.crosslink.end_epoch += 1
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state

View File

@ -138,6 +138,7 @@ def build_attestation_data(state, slot, shard):
justified_block_root = state.current_justified_root
crosslinks = state.current_crosslinks if spec.slot_to_epoch(slot) == spec.get_current_epoch(state) else state.previous_crosslinks
parent_crosslink = crosslinks[shard]
return spec.AttestationData(
beacon_block_root=block_root,
source_epoch=justified_epoch,
@ -146,9 +147,10 @@ def build_attestation_data(state, slot, shard):
target_root=epoch_boundary_root,
crosslink=spec.Crosslink(
shard=shard,
epoch=min(spec.slot_to_epoch(slot), crosslinks[shard].epoch + spec.MAX_EPOCHS_PER_CROSSLINK),
start_epoch=parent_crosslink.end_epoch,
end_epoch=min(spec.slot_to_epoch(slot), parent_crosslink.end_epoch + spec.MAX_EPOCHS_PER_CROSSLINK),
data_root=spec.ZERO_HASH,
parent_root=spec.hash_tree_root(crosslinks[shard]),
parent_root=spec.hash_tree_root(parent_crosslink),
),
)