Merge branch 'dev' into JustinDrake-patch-23
Commit 44bdf01948
@@ -89,7 +89,7 @@ $(PY_SPEC_PHASE_0_TARGETS): $(PY_SPEC_PHASE_0_DEPS)
python3 $(SCRIPT_DIR)/build_spec.py -p0 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/validator/0_beacon-chain-validator.md $@

$(PY_SPEC_DIR)/eth2spec/phase1/spec.py: $(PY_SPEC_PHASE_1_DEPS)
python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $(SPEC_DIR)/light_client/merkle_proofs.md $@
python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/light_client/merkle_proofs.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $(SPEC_DIR)/core/1_beacon-chain-misc.md $@

CURRENT_DIR = ${CURDIR}

@@ -20,6 +20,7 @@ Core specifications for Eth 2.0 client validation can be found in [specs/core](s
### Phase 1
* [Custody Game](specs/core/1_custody-game.md)
* [Shard Data Chains](specs/core/1_shard-data-chains.md)
* [Misc beacon chain updates](specs/core/1_beacon-chain-misc.md)

### Phase 2

@@ -34,8 +35,6 @@ See the [Eth 2.0 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) fo
* [General test format](specs/test_formats/README.md)
* [Merkle proof formats](specs/light_client/merkle_proofs.md)
* [Light client syncing protocol](specs/light_client/sync_protocol.md)
* [Beacon node API for validator](specs/validator/0_beacon-node-validator-api.md)

## Additional specifications for client implementers

@@ -60,7 +60,7 @@ SLOTS_PER_EPOCH: 64
# 2**0 (= 1) epochs 6.4 minutes
MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs 25.6 minutes
ACTIVATION_EXIT_DELAY: 4
MAX_SEED_LOOKAHEAD: 4
# 2**10 (= 1,024) slots ~1.7 hours
SLOTS_PER_ETH1_VOTING_PERIOD: 1024
# 2**13 (= 8,192) slots ~13 hours

@@ -123,8 +123,8 @@ MAX_TRANSFERS: 0
# Signature domains
# ---------------------------------------------------------------
DOMAIN_BEACON_PROPOSER: 0x00000000
DOMAIN_RANDAO: 0x01000000
DOMAIN_ATTESTATION: 0x02000000
DOMAIN_BEACON_ATTESTER: 0x01000000
DOMAIN_RANDAO: 0x02000000
DOMAIN_DEPOSIT: 0x03000000
DOMAIN_VOLUNTARY_EXIT: 0x04000000
DOMAIN_TRANSFER: 0x05000000

@@ -59,7 +59,7 @@ SLOTS_PER_EPOCH: 8
# 2**0 (= 1) epochs
MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs
ACTIVATION_EXIT_DELAY: 4
MAX_SEED_LOOKAHEAD: 4
# [customized] higher frequency new deposits from eth1 for testing
SLOTS_PER_ETH1_VOTING_PERIOD: 16
# [customized] smaller state

@@ -125,8 +125,8 @@ MAX_TRANSFERS: 0
# Signature domains
# ---------------------------------------------------------------
DOMAIN_BEACON_PROPOSER: 0x00000000
DOMAIN_RANDAO: 0x01000000
DOMAIN_ATTESTATION: 0x02000000
DOMAIN_BEACON_ATTESTER: 0x01000000
DOMAIN_RANDAO: 0x02000000
DOMAIN_DEPOSIT: 0x03000000
DOMAIN_VOLUNTARY_EXIT: 0x04000000
DOMAIN_TRANSFER: 0x05000000

@@ -141,5 +141,5 @@ SHARD_SLOTS_PER_BEACON_SLOT: 2
EPOCHS_PER_SHARD_PERIOD: 4
# PHASE_1_FORK_EPOCH >= EPOCHS_PER_SHARD_PERIOD * 2
PHASE_1_FORK_EPOCH: 8
# PHASE_1_FORK_SLOT = PHASE_1_FORK_EPOCH * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH
PHASE_1_FORK_SLOT: 128
# PHASE_1_FORK_SLOT = PHASE_1_FORK_EPOCH * SLOTS_PER_EPOCH
PHASE_1_FORK_SLOT: 64

File diff suppressed because one or more lines are too long
@@ -1,10 +1,11 @@
# Vyper target 0.1.0b12
MIN_DEPOSIT_AMOUNT: constant(uint256) = 1000000000  # Gwei
DEPOSIT_CONTRACT_TREE_DEPTH: constant(uint256) = 32
MAX_DEPOSIT_COUNT: constant(uint256) = 4294967295  # 2**DEPOSIT_CONTRACT_TREE_DEPTH - 1
PUBKEY_LENGTH: constant(uint256) = 48  # bytes
WITHDRAWAL_CREDENTIALS_LENGTH: constant(uint256) = 32  # bytes
AMOUNT_LENGTH: constant(uint256) = 8  # bytes
SIGNATURE_LENGTH: constant(uint256) = 96  # bytes
AMOUNT_LENGTH: constant(uint256) = 8  # bytes

DepositEvent: event({
    pubkey: bytes[48],

@@ -42,7 +43,7 @@ def to_little_endian_64(value: uint256) -> bytes[8]:

@public
@constant
def get_hash_tree_root() -> bytes32:
def get_deposit_root() -> bytes32:
    zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
    node: bytes32 = zero_bytes32
    size: uint256 = self.deposit_count

@@ -65,13 +66,16 @@ def get_deposit_count() -> bytes[8]:
@public
def deposit(pubkey: bytes[PUBKEY_LENGTH],
            withdrawal_credentials: bytes[WITHDRAWAL_CREDENTIALS_LENGTH],
            signature: bytes[SIGNATURE_LENGTH]):
            signature: bytes[SIGNATURE_LENGTH],
            deposit_data_root: bytes32):
    # Avoid overflowing the Merkle tree (and prevent edge case in computing `self.branch`)
    assert self.deposit_count < MAX_DEPOSIT_COUNT

    # Validate deposit data
    # Check deposit amount
    deposit_amount: uint256 = msg.value / as_wei_value(1, "gwei")
    assert deposit_amount >= MIN_DEPOSIT_AMOUNT

    # Length checks to facilitate formal verification (see https://github.com/ethereum/eth2.0-specs/pull/1362/files#r320361859)
    assert len(pubkey) == PUBKEY_LENGTH
    assert len(withdrawal_credentials) == WITHDRAWAL_CREDENTIALS_LENGTH
    assert len(signature) == SIGNATURE_LENGTH

@@ -80,7 +84,7 @@ def deposit(pubkey: bytes[PUBKEY_LENGTH],
    amount: bytes[8] = self.to_little_endian_64(deposit_amount)
    log.DepositEvent(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count))

    # Compute `DepositData` hash tree root
    # Compute deposit data root (`DepositData` hash tree root)
    zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
    pubkey_root: bytes32 = sha256(concat(pubkey, slice(zero_bytes32, start=0, len=64 - PUBKEY_LENGTH)))
    signature_root: bytes32 = sha256(concat(

@@ -91,8 +95,10 @@ def deposit(pubkey: bytes[PUBKEY_LENGTH],
        sha256(concat(pubkey_root, withdrawal_credentials)),
        sha256(concat(amount, slice(zero_bytes32, start=0, len=32 - AMOUNT_LENGTH), signature_root)),
    ))
    # Verify computed and expected deposit data roots match
    assert node == deposit_data_root

    # Add `DepositData` hash tree root to Merkle tree (update a single `branch` node)
    # Add deposit data root to Merkle tree (update a single `branch` node)
    self.deposit_count += 1
    size: uint256 = self.deposit_count
    for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):

@@ -1,5 +1,5 @@
eth-tester[py-evm]==0.1.0b39
vyper==0.1.0b10
vyper==0.1.0b12
web3==5.0.0b2
pytest==3.6.1
../test_libs/pyspec
@@ -6,7 +6,6 @@ import pytest

import eth_utils
from tests.contracts.conftest import (
    DEPOSIT_CONTRACT_TREE_DEPTH,
    FULL_DEPOSIT_AMOUNT,
    MIN_DEPOSIT_AMOUNT,
)

@@ -14,29 +13,42 @@ from tests.contracts.conftest import (
from eth2spec.phase0.spec import (
    DepositData,
)
from eth2spec.utils.hash_function import hash
from eth2spec.utils.ssz.ssz_typing import List
from eth2spec.utils.ssz.ssz_impl import (
    hash_tree_root,
)


SAMPLE_PUBKEY = b'\x11' * 48
SAMPLE_WITHDRAWAL_CREDENTIALS = b'\x22' * 32
SAMPLE_VALID_SIGNATURE = b'\x33' * 96


@pytest.fixture
def deposit_input():
def deposit_input(amount):
    """
    pubkey: bytes[48]
    withdrawal_credentials: bytes[32]
    signature: bytes[96]
    deposit_data_root: bytes[32]
    """
    return (
        b'\x11' * 48,
        b'\x22' * 32,
        b'\x33' * 96,
        SAMPLE_PUBKEY,
        SAMPLE_WITHDRAWAL_CREDENTIALS,
        SAMPLE_VALID_SIGNATURE,
        hash_tree_root(
            DepositData(
                pubkey=SAMPLE_PUBKEY,
                withdrawal_credentials=SAMPLE_WITHDRAWAL_CREDENTIALS,
                amount=amount,
                signature=SAMPLE_VALID_SIGNATURE,
            ),
        )
    )


@pytest.mark.parametrize(
    'success,deposit_amount',
    ('success', 'amount'),
    [
        (True, FULL_DEPOSIT_AMOUNT),
        (True, MIN_DEPOSIT_AMOUNT),

@@ -47,18 +59,24 @@ def deposit_input():
def test_deposit_amount(registration_contract,
                        w3,
                        success,
                        deposit_amount,
                        amount,
                        assert_tx_failed,
                        deposit_input):
    call = registration_contract.functions.deposit(*deposit_input)
    if success:
        assert call.transact({"value": deposit_amount * eth_utils.denoms.gwei})
        assert call.transact({"value": amount * eth_utils.denoms.gwei})
    else:
        assert_tx_failed(
            lambda: call.transact({"value": deposit_amount * eth_utils.denoms.gwei})
            lambda: call.transact({"value": amount * eth_utils.denoms.gwei})
        )


@pytest.mark.parametrize(
    'amount',
    [
        (FULL_DEPOSIT_AMOUNT)
    ]
)
@pytest.mark.parametrize(
    'invalid_pubkey,invalid_withdrawal_credentials,invalid_signature,success',
    [

@@ -71,38 +89,62 @@ def test_deposit_amount(registration_contract,
def test_deposit_inputs(registration_contract,
                        w3,
                        assert_tx_failed,
                        deposit_input,
                        amount,
                        invalid_pubkey,
                        invalid_withdrawal_credentials,
                        invalid_signature,
                        success):
    pubkey = deposit_input[0][2:] if invalid_pubkey else deposit_input[0]
    if invalid_withdrawal_credentials:  # this one is different to satisfy linter
        withdrawal_credentials = deposit_input[1][2:]
    else:
        withdrawal_credentials = deposit_input[1]
    signature = deposit_input[2][2:] if invalid_signature else deposit_input[2]
    pubkey = SAMPLE_PUBKEY[2:] if invalid_pubkey else SAMPLE_PUBKEY
    withdrawal_credentials = (
        SAMPLE_WITHDRAWAL_CREDENTIALS[2:] if invalid_withdrawal_credentials
        else SAMPLE_WITHDRAWAL_CREDENTIALS
    )
    signature = SAMPLE_VALID_SIGNATURE[2:] if invalid_signature else SAMPLE_VALID_SIGNATURE

    call = registration_contract.functions.deposit(
        pubkey,
        withdrawal_credentials,
        signature,
        hash_tree_root(
            DepositData(
                pubkey=SAMPLE_PUBKEY if invalid_pubkey else pubkey,
                withdrawal_credentials=(
                    SAMPLE_WITHDRAWAL_CREDENTIALS if invalid_withdrawal_credentials
                    else withdrawal_credentials
                ),
                amount=amount,
                signature=SAMPLE_VALID_SIGNATURE if invalid_signature else signature,
            ),
        )
    )
    if success:
        assert call.transact({"value": FULL_DEPOSIT_AMOUNT * eth_utils.denoms.gwei})
        assert call.transact({"value": amount * eth_utils.denoms.gwei})
    else:
        assert_tx_failed(
            lambda: call.transact({"value": FULL_DEPOSIT_AMOUNT * eth_utils.denoms.gwei})
            lambda: call.transact({"value": amount * eth_utils.denoms.gwei})
        )


def test_deposit_event_log(registration_contract, a0, w3, deposit_input):
def test_deposit_event_log(registration_contract, a0, w3):
    log_filter = registration_contract.events.DepositEvent.createFilter(
        fromBlock='latest',
    )

    deposit_amount_list = [randint(MIN_DEPOSIT_AMOUNT, FULL_DEPOSIT_AMOUNT * 2) for _ in range(3)]

    for i in range(3):
        deposit_input = (
            SAMPLE_PUBKEY,
            SAMPLE_WITHDRAWAL_CREDENTIALS,
            SAMPLE_VALID_SIGNATURE,
            hash_tree_root(
                DepositData(
                    pubkey=SAMPLE_PUBKEY,
                    withdrawal_credentials=SAMPLE_WITHDRAWAL_CREDENTIALS,
                    amount=deposit_amount_list[i],
                    signature=SAMPLE_VALID_SIGNATURE,
                ),
            )
        )
        registration_contract.functions.deposit(
            *deposit_input,
        ).transact({"value": deposit_amount_list[i] * eth_utils.denoms.gwei})

@@ -118,7 +160,7 @@ def test_deposit_event_log(registration_contract, a0, w3, deposit_input):
        assert log['index'] == i.to_bytes(8, 'little')


def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input):
def test_deposit_tree(registration_contract, w3, assert_tx_failed):
    log_filter = registration_contract.events.DepositEvent.createFilter(
        fromBlock='latest',
    )

@@ -126,6 +168,20 @@ def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input
    deposit_amount_list = [randint(MIN_DEPOSIT_AMOUNT, FULL_DEPOSIT_AMOUNT * 2) for _ in range(10)]
    deposit_data_list = []
    for i in range(0, 10):
        deposit_data = DepositData(
            pubkey=SAMPLE_PUBKEY,
            withdrawal_credentials=SAMPLE_WITHDRAWAL_CREDENTIALS,
            amount=deposit_amount_list[i],
            signature=SAMPLE_VALID_SIGNATURE,
        )
        deposit_input = (
            SAMPLE_PUBKEY,
            SAMPLE_WITHDRAWAL_CREDENTIALS,
            SAMPLE_VALID_SIGNATURE,
            hash_tree_root(deposit_data),
        )
        deposit_data_list.append(deposit_data)

        tx_hash = registration_contract.functions.deposit(
            *deposit_input,
        ).transact({"value": deposit_amount_list[i] * eth_utils.denoms.gwei})

@@ -138,12 +194,8 @@ def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input

        assert log["index"] == i.to_bytes(8, 'little')

        deposit_data_list.append(DepositData(
            pubkey=deposit_input[0],
            withdrawal_credentials=deposit_input[1],
            amount=deposit_amount_list[i],
            signature=deposit_input[2],
        ))

    # Check deposit count and root
    count = len(deposit_data_list).to_bytes(8, 'little')
    assert count == registration_contract.functions.get_deposit_count().call()
    root = hash_tree_root(List[DepositData, 2**32](*deposit_data_list))
    assert root == registration_contract.functions.get_hash_tree_root().call()
    assert root == registration_contract.functions.get_deposit_root().call()
@@ -289,18 +289,20 @@ def build_phase0_spec(phase0_sourcefile: str, fork_choice_sourcefile: str,
    return spec


def build_phase1_spec(phase0_sourcefile: str,
                      fork_choice_sourcefile: str,
def build_phase1_spec(phase0_beacon_sourcefile: str,
                      phase0_fork_choice_sourcefile: str,
                      merkle_proofs_sourcefile: str,
                      phase1_custody_sourcefile: str,
                      phase1_shard_sourcefile: str,
                      merkle_proofs_sourcefile: str,
                      phase1_beacon_misc_sourcefile: str,
                      outfile: str=None) -> Optional[str]:
    all_sourcefiles = (
        phase0_sourcefile,
        fork_choice_sourcefile,
        phase0_beacon_sourcefile,
        phase0_fork_choice_sourcefile,
        merkle_proofs_sourcefile,
        phase1_custody_sourcefile,
        phase1_shard_sourcefile,
        merkle_proofs_sourcefile,
        phase1_beacon_misc_sourcefile,
    )
    all_spescs = [get_spec(spec) for spec in all_sourcefiles]
    for spec in all_spescs:

@@ -327,10 +329,11 @@ If building phase 0:
If building phase 1:
    1st argument is input /core/0_beacon-chain.md
    2nd argument is input /core/0_fork-choice.md
    3rd argument is input /core/1_custody-game.md
    4th argument is input /core/1_shard-data-chains.md
    5th argument is input /light_client/merkle_proofs.md
    6th argument is output spec.py
    3rd argument is input /light_client/merkle_proofs.md
    4th argument is input /core/1_custody-game.md
    5th argument is input /core/1_shard-data-chains.md
    6th argument is input /core/1_beacon-chain-misc.md
    7th argument is output spec.py
'''
    parser = ArgumentParser(description=description)
    parser.add_argument("-p", "--phase", dest="phase", type=int, default=0, help="Build for phase #")

@@ -343,14 +346,14 @@ If building phase 1:
        else:
            print(" Phase 0 requires spec, forkchoice, and v-guide inputs as well as an output file.")
    elif args.phase == 1:
        if len(args.files) == 6:
        if len(args.files) == 7:
            build_phase1_spec(*args.files)
        else:
            print(
                " Phase 1 requires input files as well as an output file:\n"
                "\t core/phase_0: (0_beacon-chain.md, 0_fork-choice.md)\n"
                "\t core/phase_1: (1_custody-game.md, 1_shard-data-chains.md)\n"
                "\t light_client: (merkle_proofs.md)\n"
                "\t core/phase_1: (1_custody-game.md, 1_shard-data-chains.md, 1_beacon-chain-misc.md)\n"
                "\t and output.py"
            )
    else:
@@ -19,7 +19,7 @@
- [State list lengths](#state-list-lengths)
- [Rewards and penalties](#rewards-and-penalties)
- [Max operations per block](#max-operations-per-block)
- [Signature domain types](#signature-domain-types)
- [Domain types](#domain-types)
- [Containers](#containers)
- [Misc dependencies](#misc-dependencies)
- [`Fork`](#fork)

@@ -67,6 +67,7 @@
- [`is_valid_merkle_branch`](#is_valid_merkle_branch)
- [Misc](#misc-1)
- [`compute_shuffled_index`](#compute_shuffled_index)
- [`compute_proposer_index`](#compute_proposer_index)
- [`compute_committee`](#compute_committee)
- [`compute_epoch_of_slot`](#compute_epoch_of_slot)
- [`compute_start_slot_of_epoch`](#compute_start_slot_of_epoch)

@@ -147,7 +148,7 @@ We define the following Python custom types for type hinting and readability:
| `Gwei` | `uint64` | an amount in Gwei |
| `Hash` | `Bytes32` | a hash |
| `Version` | `Bytes4` | a fork version number |
| `DomainType` | `Bytes4` | a signature domain type |
| `DomainType` | `Bytes4` | a domain type |
| `Domain` | `Bytes8` | a signature domain |
| `BLSPubkey` | `Bytes48` | a BLS12-381 public key |
| `BLSSignature` | `Bytes96` | a BLS12-381 signature |

@@ -209,7 +210,7 @@ The following values are (non-configurable) constants used throughout the specif
| `MIN_ATTESTATION_INCLUSION_DELAY` | `2**0` (= 1) | slots | 6 seconds |
| `SLOTS_PER_EPOCH` | `2**6` (= 64) | slots | 6.4 minutes |
| `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes |
| `ACTIVATION_EXIT_DELAY` | `2**2` (= 4) | epochs | 25.6 minutes |
| `MAX_SEED_LOOKAHEAD` | `2**2` (= 4) | epochs | 25.6 minutes |
| `SLOTS_PER_ETH1_VOTING_PERIOD` | `2**10` (= 1,024) | slots | ~1.7 hours |
| `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours |
| `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours |

@@ -249,15 +250,15 @@ The following values are (non-configurable) constants used throughout the specif
| `MAX_VOLUNTARY_EXITS` | `2**4` (= 16) |
| `MAX_TRANSFERS` | `0` |

### Signature domain types
### Domain types

The following types are defined, mapping into `DomainType` (little endian):

| Name | Value |
| - | - |
| `DOMAIN_BEACON_PROPOSER` | `0` |
| `DOMAIN_RANDAO` | `1` |
| `DOMAIN_ATTESTATION` | `2` |
| `DOMAIN_BEACON_ATTESTER` | `1` |
| `DOMAIN_RANDAO` | `2` |
| `DOMAIN_DEPOSIT` | `3` |
| `DOMAIN_VOLUNTARY_EXIT` | `4` |
| `DOMAIN_TRANSFER` | `5` |
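
As an aside (an illustration, not part of this commit): the integer values above are serialized as 4-byte little-endian `DomainType` values, which is why the YAML configs earlier in this diff list, for example, `DOMAIN_DEPOSIT: 0x03000000` and `DOMAIN_BEACON_ATTESTER: 0x01000000`. A minimal check in Python:

```python
# Illustration: integer domain types map to 4-byte little-endian DomainType values.
def domain_type_bytes(domain: int) -> bytes:
    return domain.to_bytes(4, 'little')

assert domain_type_bytes(3).hex() == '03000000'  # DOMAIN_DEPOSIT
assert domain_type_bytes(1).hex() == '01000000'  # DOMAIN_BEACON_ATTESTER (after this change)
```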

@@ -670,7 +671,7 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
            hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b1)),
        ],
        signature=indexed_attestation.signature,
        domain=get_domain(state, DOMAIN_ATTESTATION, indexed_attestation.data.target.epoch),
        domain=get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch),
    ):
        return False
    return True

@@ -717,6 +718,25 @@ def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Has
    return ValidatorIndex(index)
```

#### `compute_proposer_index`

```python
def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Hash) -> ValidatorIndex:
    """
    Return from ``indices`` a random index sampled by effective balance.
    """
    assert len(indices) > 0
    MAX_RANDOM_BYTE = 2**8 - 1
    i = 0
    while True:
        candidate_index = indices[compute_shuffled_index(ValidatorIndex(i % len(indices)), len(indices), seed)]
        random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
        effective_balance = state.validators[candidate_index].effective_balance
        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
            return ValidatorIndex(candidate_index)
        i += 1
```
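
An illustration (not part of this commit) of why the acceptance test `effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte` samples proportionally to effective balance: a candidate at the 32 ETH maximum is always accepted, while one at half that balance is accepted roughly half the time. A standalone sketch of the same rejection-sampling idea, with stand-in randomness instead of the seed/hash walk:

```python
# Sketch of the rejection sampling used by compute_proposer_index; the candidate walk and
# random byte source are simplified stand-ins, only the acceptance rule is the same.
import random

MAX_RANDOM_BYTE = 2**8 - 1
MAX_EFFECTIVE_BALANCE = 32 * 10**9  # Gwei, as in the phase 0 configuration

def sample_index(balances):
    while True:
        i = random.randrange(len(balances))   # stand-in for the shuffled-index walk over `indices`
        random_byte = random.randrange(256)   # stand-in for hash(seed + ...)[i % 32]
        if balances[i] * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
            return i

counts = [0, 0]
for _ in range(10_000):
    counts[sample_index([16 * 10**9, 32 * 10**9])] += 1
# Expect roughly a 1:2 ratio between counts[0] and counts[1].
```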

#### `compute_committee`

```python

@@ -759,7 +779,7 @@ def compute_activation_exit_epoch(epoch: Epoch) -> Epoch:
    """
    Return the epoch during which validator activations and exits initiated in ``epoch`` take effect.
    """
    return Epoch(epoch + 1 + ACTIVATION_EXIT_DELAY)
    return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD)
```

#### `compute_domain`

@@ -850,12 +870,12 @@ def get_validator_churn_limit(state: BeaconState) -> uint64:
#### `get_seed`

```python
def get_seed(state: BeaconState, epoch: Epoch) -> Hash:
def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Hash:
    """
    Return the seed at ``epoch``.
    """
    mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1))  # Avoid underflow
    return hash(mix + int_to_bytes(epoch, length=32))
    return hash(domain_type + int_to_bytes(epoch, length=8) + mix)
```

#### `get_committee_count`

@@ -881,7 +901,7 @@ def get_crosslink_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> S
    """
    return compute_committee(
        indices=get_active_validator_indices(state, epoch),
        seed=get_seed(state, epoch),
        seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
        index=(shard + SHARD_COUNT - get_start_shard(state, epoch)) % SHARD_COUNT,
        count=get_committee_count(state, epoch),
    )

@@ -921,20 +941,9 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
    Return the beacon proposer index at the current slot.
    """
    epoch = get_current_epoch(state)
    committees_per_slot = get_committee_count(state, epoch) // SLOTS_PER_EPOCH
    offset = committees_per_slot * (state.slot % SLOTS_PER_EPOCH)
    shard = Shard((get_start_shard(state, epoch) + offset) % SHARD_COUNT)
    first_committee = get_crosslink_committee(state, epoch, shard)
    MAX_RANDOM_BYTE = 2**8 - 1
    seed = get_seed(state, epoch)
    i = 0
    while True:
        candidate_index = first_committee[(epoch + i) % len(first_committee)]
        random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
        effective_balance = state.validators[candidate_index].effective_balance
        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
            return ValidatorIndex(candidate_index)
        i += 1
    seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + int_to_bytes(state.slot, length=8))
    indices = get_active_validator_indices(state, epoch)
    return compute_proposer_index(state, indices, seed)
```

#### `get_attestation_data_slot`

@@ -1196,6 +1205,7 @@ def process_epoch(state: BeaconState) -> None:
    # @process_reveal_deadlines
    # @process_challenge_deadlines
    process_slashings(state)
    # @update_period_committee
    process_final_updates(state)
    # @after_process_final_updates
```

@@ -1549,6 +1559,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
        (body.deposits, process_deposit),
        (body.voluntary_exits, process_voluntary_exit),
        (body.transfers, process_transfer),
        # @process_shard_receipt_proofs
    ):
        for operation in operations:
            function(state, operation)

@@ -1559,9 +1570,8 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
```python
def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None:
    proposer = state.validators[proposer_slashing.proposer_index]
    # Verify that the epoch is the same
    assert (compute_epoch_of_slot(proposer_slashing.header_1.slot)
            == compute_epoch_of_slot(proposer_slashing.header_2.slot))
    # Verify slots match
    assert proposer_slashing.header_1.slot == proposer_slashing.header_2.slot
    # But the headers are different
    assert proposer_slashing.header_1 != proposer_slashing.header_2
    # Check proposer is slashable

@@ -1615,11 +1625,6 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
        proposer_index=get_beacon_proposer_index(state),
    )

    # Check bitlist lengths
    committee_size = get_committee_count(state, attestation.data.target.epoch)
    assert len(attestation.aggregation_bits) == committee_size
    assert len(attestation.custody_bits) == committee_size

    if data.target.epoch == get_current_epoch(state):
        assert data.source == state.current_justified_checkpoint
        parent_crosslink = state.current_crosslinks[data.crosslink.shard]
@@ -34,11 +34,11 @@ This document represents the specification for the beacon chain deposit contract

## Ethereum 1.0 deposit contract

The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in Phase 2 (i.e. when the EVM 2.0 is deployed and the shards have state).
The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in Phase 2.

### `deposit` function

The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96]` corresponding to a [`DepositData`](./0_beacon-chain.md#depositdata) object.
The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes32`. The first three arguments populate a [`DepositData`](./0_beacon-chain.md#depositdata) object, and `deposit_data_root` is the expected `DepositData` root as a protection against malformatted calldata.
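
For orientation (a sketch, not part of this commit): a depositor now computes the `DepositData` hash tree root off-chain and passes it alongside the raw fields, exactly as the updated tests earlier in this diff do. Assuming a web3.py contract handle named `deposit_contract` (hypothetical) and the `eth2spec` helpers used in those tests:

```python
# Sketch: compute the expected DepositData root off-chain, then call deposit with it.
# `deposit_contract` is an assumed web3.py contract handle; the sample field values mirror the tests.
from eth2spec.phase0.spec import DepositData
from eth2spec.utils.ssz.ssz_impl import hash_tree_root

pubkey = b'\x11' * 48
withdrawal_credentials = b'\x22' * 32
signature = b'\x33' * 96
amount_gwei = 32 * 10**9  # 32 ETH in Gwei

deposit_data_root = hash_tree_root(DepositData(
    pubkey=pubkey,
    withdrawal_credentials=withdrawal_credentials,
    amount=amount_gwei,
    signature=signature,
))

deposit_contract.functions.deposit(
    pubkey, withdrawal_credentials, signature, deposit_data_root,
).transact({"value": amount_gwei * 10**9})  # value in wei; the contract converts msg.value to Gwei
```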

#### Deposit amount
@@ -0,0 +1,238 @@
# Phase 1 miscellaneous beacon chain changes

## Table of contents

<!-- TOC -->

- [Phase 1 miscellaneous beacon chain changes](#phase-1-miscellaneous-beacon-chain-changes)
    - [Table of contents](#table-of-contents)
    - [Configuration](#configuration)
    - [Containers](#containers)
        - [`CompactCommittee`](#compactcommittee)
        - [`ShardReceiptProof`](#shardreceiptproof)
    - [Helper functions](#helper-functions)
        - [`pack_compact_validator`](#pack_compact_validator)
        - [`unpack_compact_validator`](#unpack_compact_validator)
        - [`committee_to_compact_committee`](#committee_to_compact_committee)
        - [`verify_merkle_proof`](#verify_merkle_proof)
        - [`compute_historical_state_generalized_index`](#compute_historical_state_generalized_index)
        - [`get_generalized_index_of_crosslink_header`](#get_generalized_index_of_crosslink_header)
        - [`process_shard_receipt_proof`](#process_shard_receipt_proof)
    - [Changes](#changes)
        - [Phase 0 container updates](#phase-0-container-updates)
            - [`BeaconState`](#beaconstate)
            - [`BeaconBlockBody`](#beaconblockbody)
        - [Persistent committees](#persistent-committees)
        - [Shard receipt processing](#shard-receipt-processing)

<!-- /TOC -->

## Configuration

| Name | Value | Unit | Duration |
| - | - | - | - |
| `MAX_SHARD_RECEIPT_PROOFS` | `2**0` (= 1) | - | - |
| `PERIOD_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months |
| `MINOR_REWARD_QUOTIENT` | `2**8` (=256) | - | - |

## Containers

#### `CompactCommittee`

```python
class CompactCommittee(Container):
    pubkeys: List[BLSPubkey, MAX_VALIDATORS_PER_COMMITTEE]
    compact_validators: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
```

#### `ShardReceiptProof`

```python
class ShardReceiptProof(Container):
    shard: Shard
    proof: List[Hash, PLACEHOLDER]
    receipt: List[ShardReceiptDelta, PLACEHOLDER]
```

## Helper functions

#### `pack_compact_validator`

```python
def pack_compact_validator(index: int, slashed: bool, balance_in_increments: int) -> int:
    """
    Creates a compact validator object representing index, slashed status, and compressed balance.
    Takes as input balance-in-increments (// EFFECTIVE_BALANCE_INCREMENT) to preserve symmetry with
    the unpacking function.
    """
    return (index << 16) + (slashed << 15) + balance_in_increments
```

#### `unpack_compact_validator`

```python
def unpack_compact_validator(compact_validator: int) -> Tuple[int, bool, int]:
    """
    Returns validator index, slashed, balance // EFFECTIVE_BALANCE_INCREMENT
    """
    return compact_validator >> 16, bool((compact_validator >> 15) % 2), compact_validator & (2**15 - 1)
```
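
The two helpers above are inverses of one another. A quick round-trip check (illustrative values, not part of this commit):

```python
# Round trip of the packing scheme: index in the high bits, slashed flag at bit 15,
# balance-in-increments in the low 15 bits.
index, slashed, balance_increments = 123, True, 17
compact = pack_compact_validator(index, slashed, balance_increments)
assert compact == (123 << 16) + (1 << 15) + 17
assert unpack_compact_validator(compact) == (123, True, 17)
```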

#### `committee_to_compact_committee`

```python
def committee_to_compact_committee(state: BeaconState, committee: Sequence[ValidatorIndex]) -> CompactCommittee:
    """
    Given a state and a list of validator indices, outputs the CompactCommittee representing them.
    """
    validators = [state.validators[i] for i in committee]
    compact_validators = [
        pack_compact_validator(i, v.slashed, v.effective_balance // EFFECTIVE_BALANCE_INCREMENT)
        for i, v in zip(committee, validators)
    ]
    pubkeys = [v.pubkey for v in validators]
    return CompactCommittee(pubkeys=pubkeys, compact_validators=compact_validators)
```

#### `verify_merkle_proof`

```python
def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool:
    assert len(proof) == get_generalized_index_length(index)
    for i, h in enumerate(proof):
        if get_generalized_index_bit(index, i):
            leaf = hash(h + leaf)
        else:
            leaf = hash(leaf + h)
    return leaf == root
```

#### `compute_historical_state_generalized_index`

```python
def compute_historical_state_generalized_index(earlier: ShardSlot, later: ShardSlot) -> GeneralizedIndex:
    """
    Computes the generalized index of the state root of slot `frm` based on the state root of slot `to`.
    Relies on the `history_acc` in the `ShardState`, where `history_acc[i]` maintains the most recent 2**i'th
    slot state. Works by tracing a `log(later-earlier)` step path from `later` to `earlier` through intermediate
    blocks at the next available multiples of descending powers of two.
    """
    o = GeneralizedIndex(1)
    for i in range(HISTORY_ACCUMULATOR_VECTOR - 1, -1, -1):
        if (later - 1) & 2**i > (earlier - 1) & 2**i:
            later = later - ((later - 1) % 2**i) - 1
            o = concat_generalized_indices(o, GeneralizedIndex(get_generalized_index(ShardState, ['history_acc', i])))
    return o
```

#### `get_generalized_index_of_crosslink_header`

```python
def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex:
    """
    Gets the generalized index for the root of the index'th header in a crosslink.
    """
    MAX_CROSSLINK_SIZE = (
        SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK
    )
    assert MAX_CROSSLINK_SIZE == get_previous_power_of_two(MAX_CROSSLINK_SIZE)
    return GeneralizedIndex(MAX_CROSSLINK_SIZE // SHARD_HEADER_SIZE + index)
```

#### `process_shard_receipt_proof`

```python
def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptProof) -> None:
    """
    Processes a ShardReceipt object.
    """
    SHARD_SLOTS_PER_EPOCH = SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH
    receipt_slot = (
        state.next_shard_receipt_period[receipt_proof.shard] *
        SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD
    )
    first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SHARD_SLOTS_PER_EPOCH
    gindex = concat_generalized_indices(
        get_generalized_index_of_crosslink_header(0),
        GeneralizedIndex(get_generalized_index(ShardBlockHeader, 'state_root')),
        compute_historical_state_generalized_index(receipt_slot, first_slot_in_last_crosslink),
        GeneralizedIndex(get_generalized_index(ShardState, 'receipt_root'))
    )
    assert verify_merkle_proof(
        leaf=hash_tree_root(receipt_proof.receipt),
        proof=receipt_proof.proof,
        index=gindex,
        root=state.current_crosslinks[receipt_proof.shard].data_root
    )
    for delta in receipt_proof.receipt:
        if get_current_epoch(state) < state.validators[delta.index].withdrawable_epoch:
            increase_amount = (
                state.validators[delta.index].effective_balance * delta.reward_coefficient // REWARD_COEFFICIENT_BASE
            )
            increase_balance(state, delta.index, increase_amount)
            decrease_balance(state, delta.index, delta.block_fee)
    state.next_shard_receipt_period[receipt_proof.shard] += 1
    proposer_index = get_beacon_proposer_index(state)
    increase_balance(state, proposer_index, Gwei(get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT))
```

## Changes

### Phase 0 container updates

Add the following fields to the end of the specified container objects.

#### `BeaconState`

```python
class BeaconState(Container):
    # Period committees
    period_committee_roots: Vector[Hash, PERIOD_COMMITTEE_ROOT_LENGTH]
    next_shard_receipt_period: Vector[uint64, SHARD_COUNT]
```

`period_committee_roots` values are initialized to `Bytes32()` (empty bytes value).
`next_shard_receipt_period` values are initialized to `compute_epoch_of_slot(PHASE_1_FORK_SLOT) // EPOCHS_PER_SHARD_PERIOD`.

#### `BeaconBlockBody`

```python
class BeaconBlockBody(Container):
    shard_receipt_proofs: List[ShardReceiptProof, MAX_SHARD_RECEIPT_PROOFS]
```

`shard_receipt_proofs` is initialized to `[]`.

### Persistent committees

Run `update_period_committee` immediately before `process_final_updates`:

```python
# begin insert @update_period_committee
    update_period_committee(state)
# end insert @update_period_committee
def update_period_committee(state: BeaconState) -> None:
    """
    Updates period committee roots at boundary blocks.
    """
    if (get_current_epoch(state) + 1) % EPOCHS_PER_SHARD_PERIOD == 0:
        period = (get_current_epoch(state) + 1) // EPOCHS_PER_SHARD_PERIOD
        committees = Vector[CompactCommittee, SHARD_COUNT]([
            committee_to_compact_committee(
                state,
                get_period_committee(state, Epoch(get_current_epoch(state) + 1), Shard(shard)),
            )
            for shard in range(SHARD_COUNT)
        ])
        state.period_committee_roots[period % PERIOD_COMMITTEE_ROOT_LENGTH] = hash_tree_root(committees)
```

### Shard receipt processing

Run `process_shard_receipt_proof` on each `ShardReceiptProof` during block processing.

```python
# begin insert @process_shard_receipt_proofs
        (body.shard_receipt_proofs, process_shard_receipt_proof),
# end insert @process_shard_receipt_proofs
```
@@ -77,7 +77,8 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
## Constants

### Misc

| Name | Value |
| - | - |
| `BLS12_381_Q` | `4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787` |
| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) |

@@ -281,7 +282,7 @@ def ceillog2(x: uint64) -> int:
### `is_valid_merkle_branch_with_mixin`

```python
def is_valid_merkle_branch_with_mixin(leaf: Hash,
def is_valid_merkle_branch_with_mixin(leaf: Hash,
                                      branch: Sequence[Hash],
                                      depth: uint64,
                                      index: uint64,

@@ -314,7 +315,7 @@ def legendre_bit(a: int, q: int) -> int:
    if a >= q:
        return legendre_bit(a % q, q)
    if a == 0:
        return 0
        return 0
    assert(q > a > 0 and q % 2 == 1)
    t = 1
    n = q

@@ -339,7 +340,7 @@ def legendre_bit(a: int, q: int) -> int:
Given one proof of custody chunk, returns the proof of custody subchunks of the correct sizes.

```python
def custody_subchunkify(bytez: bytes) -> list:
def custody_subchunkify(bytez: bytes) -> Sequence[bytes]:
    bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_SUBCHUNK)
    return [bytez[i:i + BYTES_PER_CUSTODY_SUBCHUNK]
            for i in range(0, len(bytez), BYTES_PER_CUSTODY_SUBCHUNK)]

@@ -601,7 +602,7 @@ def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) ->
    # Verify attestation is eligible for challenging
    responder = state.validators[challenge.responder_index]
    assert get_current_epoch(state) <= get_randao_epoch_for_custody_period(
        get_custody_period_for_validator(state, challenge.responder_index, epoch),
        get_custody_period_for_validator(state, challenge.responder_index, epoch),
        challenge.responder_index
    ) + 2 * EPOCHS_PER_CUSTODY_PERIOD + responder.max_reveal_lateness

@@ -672,7 +673,7 @@ def process_chunk_challenge_response(state: BeaconState,
    # Verify bit challenge data is null
    assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == Hash()
    # Verify minimum delay
    assert get_current_epoch(state) >= challenge.inclusion_epoch + ACTIVATION_EXIT_DELAY
    assert get_current_epoch(state) >= challenge.inclusion_epoch + MAX_SEED_LOOKAHEAD
    # Verify the chunk matches the crosslink data root
    assert is_valid_merkle_branch(
        leaf=hash_tree_root(response.chunk),
@@ -70,9 +70,9 @@ This document describes the shard transition function (data layer only) and the

### Initial values

| Name | Value |
| Name | Value | Unit |
| - | - |
| `SHARD_GENESIS_EPOCH` | **TBD** |
| `SHARD_GENESIS_EPOCH` | **TBD** | Epoch |

### Time parameters

@@ -182,7 +182,7 @@ def compute_shard_period_start_epoch(epoch: Epoch, lookback: uint64) -> Epoch:
```python
def get_period_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]:
    active_validator_indices = get_active_validator_indices(beacon_state, epoch)
    seed = get_seed(beacon_state, epoch)
    seed = get_seed(beacon_state, epoch, DOMAIN_SHARD_ATTESTER)
    return compute_committee(active_validator_indices, seed, shard, SHARD_COUNT)[:MAX_PERIOD_COMMITTEE_SIZE]
```

@@ -201,12 +201,16 @@ def get_shard_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) -
#### `get_shard_proposer_index`

```python
def get_shard_proposer_index(beacon_state: BeaconState, shard: Shard, slot: ShardSlot) -> ValidatorIndex:
def get_shard_proposer_index(beacon_state: BeaconState, shard: Shard, slot: ShardSlot) -> Optional[ValidatorIndex]:
    epoch = get_current_epoch(beacon_state)
    shard_committee = get_shard_committee(beacon_state, shard, epoch)
    active_indices = [i for i in shard_committee if is_active_validator(beacon_state.validators[i], epoch)]
    seed = hash(get_seed(beacon_state, epoch) + int_to_bytes(slot, length=8) + int_to_bytes(shard, length=8))
    compute_proposer_index(beacon_state, active_indices, seed)
    if not any(active_indices):
        return None

    epoch_seed = get_seed(beacon_state, epoch, DOMAIN_SHARD_PROPOSER)
    seed = hash(epoch_seed + int_to_bytes(slot, length=8) + int_to_bytes(shard, length=8))
    return compute_proposer_index(beacon_state, active_indices, seed)
```

### Shard state mutators
@@ -62,7 +62,7 @@ Note that the generalized index has the convenient property that the two childre
def merkle_tree(leaves: Sequence[Hash]) -> Sequence[Hash]:
    padded_length = get_next_power_of_two(len(leaves))
    o = [Hash()] * padded_length + list(leaves) + [Hash()] * (padded_length - len(leaves))
    for i in range(len(leaves) - 1, 0, -1):
    for i in range(padded_length - 1, 0, -1):
        o[i] = hash(o[i * 2] + o[i * 2 + 1])
    return o
```
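
A short aside (not part of this commit) on why the new loop bound matters: when the number of leaves is not a power of two, the internal nodes sitting above the zero padding also need to be hashed, so the loop has to cover all `padded_length - 1` internal positions rather than only `len(leaves) - 1`. A standalone sketch with a stand-in 32-byte hash:

```python
# Standalone illustration of the corrected loop bound (sha256 as a stand-in hash).
from hashlib import sha256

def hash32(x: bytes) -> bytes:
    return sha256(x).digest()

def merkle_tree(leaves):
    padded_length = 1
    while padded_length < max(len(leaves), 1):
        padded_length *= 2
    o = [b'\x00' * 32] * padded_length + list(leaves) + [b'\x00' * 32] * (padded_length - len(leaves))
    # Iterating down from padded_length - 1 also fills the parents of the zero padding;
    # stopping at len(leaves) - 1 (the old bound) would leave those internal nodes as zero.
    for i in range(padded_length - 1, 0, -1):
        o[i] = hash32(o[i * 2] + o[i * 2 + 1])
    return o

tree = merkle_tree([b'\x01' * 32, b'\x02' * 32, b'\x03' * 32])  # 3 leaves, padded to 4
root = tree[1]
```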

@@ -152,7 +152,7 @@ def get_item_position(typ: SSZType, index_or_variable_name: Union[int, SSZVariab
```

```python
def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableName]]) -> Optional[GeneralizedIndex]:
def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex:
    """
    Converts a path (eg. `[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for
    `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree.

@@ -162,10 +162,8 @@ def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableNam
        assert not issubclass(typ, BasicValue)  # If we descend to a basic type, the path cannot continue further
        if p == '__len__':
            typ = uint64
            if issubclass(typ, (List, Bytes)):
                root = GeneralizedIndex(root * 2 + 1)
            else:
                return None
            assert issubclass(typ, (List, Bytes))
            root = GeneralizedIndex(root * 2 + 1)
        else:
            pos, _, _ = get_item_position(typ, p)
            base_index = (GeneralizedIndex(2) if issubclass(typ, (List, Bytes)) else GeneralizedIndex(1))

@@ -181,7 +179,7 @@ _Usage note: functions outside this section should manipulate generalized indice
#### `concat_generalized_indices`

```python
def concat_generalized_indices(indices: Sequence[GeneralizedIndex]) -> GeneralizedIndex:
def concat_generalized_indices(*indices: GeneralizedIndex) -> GeneralizedIndex:
    """
    Given generalized indices i1 for A -> B, i2 for B -> C .... i_n for Y -> Z, returns
    the generalized index for A -> Z.
@@ -15,7 +15,6 @@
- [`LightClientUpdate`](#lightclientupdate)
- [Helpers](#helpers)
- [`LightClientMemory`](#lightclientmemory)
- [`unpack_compact_validator`](#unpack_compact_validator)
- [`get_persistent_committee_pubkeys_and_balances`](#get_persistent_committee_pubkeys_and_balances)
- [Light client state updates](#light-client-state-updates)
- [Data overhead](#data-overhead)

@@ -77,20 +76,6 @@ class LightClientMemory(object):
    next_committee: CompactCommittee
```

### `unpack_compact_validator`

```python
def unpack_compact_validator(compact_validator: CompactValidator) -> Tuple[ValidatorIndex, bool, uint64]:
    """
    Return the index, slashed, effective_balance // EFFECTIVE_BALANCE_INCREMENT of ``compact_validator``.
    """
    return (
        ValidatorIndex(compact_validator >> 16),
        (compact_validator >> 15) % 2,
        uint64(compact_validator & (2**15 - 1)),
    )
```

### `get_persistent_committee_pubkeys_and_balances`

```python
@@ -160,7 +160,7 @@ Additional topics are used to propagate lower frequency validator messages. Thei

#### Interop

Unaggregated and aggregated attestations from all shards are sent to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them.
Unaggregated and aggregated attestations from all shards are sent to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing.

#### Mainnet

@@ -301,7 +301,7 @@ Here, `result` represents the 1-byte response code.

The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time:

- `ssz`: The contents are [SSZ-encoded](../simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocks` response would be an SSZ-encoded list of `BeaconBlock`s. All SSZ-Lists in the Req/Resp domain will have a maximum list size of `SSZ_MAX_LIST_SIZE`.
- `ssz`: the contents are [SSZ-encoded](#ssz-encoding). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocks` response would be an SSZ-encoded list of `BeaconBlock`s. All SSZ-Lists in the Req/Resp domain will have a maximum list size of `SSZ_MAX_LIST_SIZE`.
- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; MUST be supported in mainnet.

#### SSZ-encoding strategy (with or without Snappy)
@@ -63,7 +63,7 @@
* **bitlist**: ordered variable-length collection of `boolean` values, limited to `N` bits
    * notation `Bitlist[N]`
* **union**: union type containing one of the given subtypes
    * notation `Union[type_1, type_2, ...]`, e.g. `union[null, uint64]`
    * notation `Union[type_0, type_1, ...]`, e.g. `union[null, uint64]`

### Variable-size and fixed-size

@@ -79,8 +79,18 @@ For convenience we alias:
* `null`: `{}`

### Default values
Assuming a helper function `default(type)` which returns the default value for `type`, we can recursively define the default value for all types.

The default value of a type upon initialization is recursively defined using `0` for `uintN`, `False` for `boolean` and the elements of `Bitvector`, and `[]` for lists and `Bitlist`. Unions default to the first type in the union (with type index zero), which is `null` if present in the union.
| Type | Default Value |
| ---- | ------------- |
| `uintN` | `0` |
| `boolean` | `False` |
| `Container` | `[default(type) for type in container]` |
| `Vector[type, N]` | `[default(type)] * N` |
| `Bitvector[boolean, N]` | `[False] * N` |
| `List[type, N]` | `[]` |
| `Bitlist[boolean, N]` | `[]` |
| `Union[type_0, type_1, ...]` | `default(type_0)` |
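
To make the recursion concrete, here is an illustrative `default` helper over made-up type descriptors (the descriptor encoding is invented for this example and is not the spec's SSZ implementation):

```python
# Illustrative recursion over made-up type descriptors, mirroring the table above.
def default(typ):
    kind = typ[0]
    if kind == 'uintN':
        return 0
    if kind == 'boolean':
        return False
    if kind == 'container':              # ('container', [field_types...])
        return [default(t) for t in typ[1]]
    if kind == 'vector':                 # ('vector', elem_type, N)
        return [default(typ[1])] * typ[2]
    if kind == 'bitvector':              # ('bitvector', N)
        return [False] * typ[1]
    if kind in ('list', 'bitlist'):      # ('list', elem_type, N) / ('bitlist', N)
        return []
    if kind == 'union':                  # ('union', [type_0, type_1, ...])
        return default(typ[1][0])
    raise ValueError(f'unknown type: {kind}')

# A container of (uint64, Vector[boolean, 2]) defaults to [0, [False, False]].
assert default(('container', [('uintN',), ('vector', ('boolean',), 2)])) == [0, [False, False]]
```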

#### `is_zero`
@@ -114,7 +114,7 @@ Once a validator has been processed and added to the beacon state's `validators`

### Activation

In normal operation, the validator is quickly activated, at which point the validator is added to the shuffling and begins validation after an additional `ACTIVATION_EXIT_DELAY` epochs (25.6 minutes).
In normal operation, the validator is quickly activated, at which point the validator is added to the shuffling and begins validation after an additional `MAX_SEED_LOOKAHEAD` epochs (25.6 minutes).

The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given epoch. Usage is as follows:

@@ -160,7 +160,7 @@ def get_committee_assignment(state: BeaconState,
    return None
```

A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run with a `state` of the slot in question. Proposer selection is only stable within the context of the current epoch.
A validator can use the following function to see if they are supposed to propose during a slot. This function can only be run with a `state` of the slot in question. Proposer selection is only stable within the context of the current epoch.

```python
def is_proposer(state: BeaconState,

@@ -170,6 +170,8 @@ def is_proposer(state: BeaconState,

*Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot.

*Note*: `BeaconBlock` proposal is distinct from crosslink committee assignment, and in a given epoch each responsibility might occur at a different slot.
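
For illustration (a sketch, not part of this commit): because proposer selection requires a `state` already in the epoch in question, a client checking the first slot of the next epoch can advance a copy of the state first. Assuming the built `spec` module with `process_slots` and the `is_proposer` helper above, and that `state`, `slot`, and `validator_index` are in scope:

```python
# Sketch: check proposal duty for a slot at an epoch boundary by advancing a copy of the state.
from copy import deepcopy

check_state = deepcopy(state)
if check_state.slot < slot:
    spec.process_slots(check_state, slot)  # runs the epoch transition up to the slot in question
proposing = spec.is_proposer(check_state, validator_index)
```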

### Lookahead

The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the epoch in question.

@@ -218,7 +220,7 @@ def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) ->

##### Eth1 Data

The `block.eth1_data` field is for block proposers to vote on recent Eth 1.0 data. This recent data contains an Eth 1.0 block hash as well as the associated deposit root (as calculated by the `get_hash_tree_root()` method of the deposit contract) and deposit count after execution of the corresponding Eth 1.0 block. If over half of the block proposers in the current Eth 1.0 voting period vote for the same `eth1_data` then `state.eth1_data` updates at the end of the voting period. Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`.
The `block.eth1_data` field is for block proposers to vote on recent Eth 1.0 data. This recent data contains an Eth 1.0 block hash as well as the associated deposit root (as calculated by the `get_deposit_root()` method of the deposit contract) and deposit count after execution of the corresponding Eth 1.0 block. If over half of the block proposers in the current Eth 1.0 voting period vote for the same `eth1_data` then `state.eth1_data` updates at the end of the voting period. Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`.

Let `get_eth1_data(distance: uint64) -> Eth1Data` be the (subjective) function that returns the Eth 1.0 data at distance `distance` relative to the Eth 1.0 head at the start of the current Eth 1.0 voting period. Let `previous_eth1_distance` be the distance relative to the Eth 1.0 block corresponding to `state.eth1_data.block_hash` at the start of the current Eth 1.0 voting period. An honest block proposer sets `block.eth1_data = get_eth1_vote(state, previous_eth1_distance)` where:

@@ -343,7 +345,7 @@ def get_signed_attestation_data(state: BeaconState, attestation: IndexedAttestat
        custody_bit=0b0,
    )

    domain = get_domain(state, DOMAIN_ATTESTATION, attestation.data.target.epoch)
    domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
    return bls_sign(privkey, hash_tree_root(attestation_data_and_custody_bit), domain)
```
@@ -5,11 +5,19 @@ from random import Random
from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object


def bitvector_case_fn(rng: Random, mode: RandomizationMode, size: int):
    return get_random_ssz_object(rng, Bitvector[size],
def bitvector_case_fn(rng: Random, mode: RandomizationMode, size: int, invalid_making_pos: int=None):
    bits = get_random_ssz_object(rng, Bitvector[size],
                                 max_bytes_length=(size + 7) // 8,
                                 max_list_length=size,
                                 mode=mode, chaos=False)
    if invalid_making_pos is not None and invalid_making_pos <= size:
        already_invalid = False
        for i in range(invalid_making_pos, size):
            if bits[i]:
                already_invalid = True
        if not already_invalid:
            bits[invalid_making_pos] = True
    return bits


def valid_cases():

@@ -23,8 +31,12 @@ def invalid_cases():
    # zero length bitvecors are illegal
    yield 'bitvec_0', invalid_test_case(lambda: b'')
    rng = Random(1234)
    # Create a vector with test_size bits, but make the type typ_size instead,
    # which is invalid when used with the given type size
    # (and a bit set just after typ_size bits if necessary to avoid the valid 0 padding-but-same-last-byte case)
    for (typ_size, test_size) in [(1, 2), (2, 3), (3, 4), (4, 5),
                                  (5, 6), (8, 9), (9, 8), (16, 8), (32, 33), (512, 513)]:
        for mode in [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max]:
            yield f'bitvec_{typ_size}_{mode.to_name()}_{test_size}', \
                invalid_test_case(lambda: serialize(bitvector_case_fn(rng, mode, test_size)))
                invalid_test_case(lambda: serialize(bitvector_case_fn(rng, mode, test_size,
                                                                      invalid_making_pos=typ_size)))
@@ -94,6 +94,8 @@ def get_random_ssz_object(rng: Random,
            length = 1
        elif mode == RandomizationMode.mode_max_count:
            length = max_list_length
        elif mode == RandomizationMode.mode_nil_count:
            length = 0

        if typ.length < length:  # SSZ imposes a hard limit on lists, we can't put in more than that
            length = typ.length

@@ -10,7 +10,7 @@ from .utils import vector_test, with_meta_tags
def with_state(fn):
    def entry(*args, **kw):
        try:
            kw['state'] = create_genesis_state(spec=kw['spec'], num_validators=spec_phase0.SLOTS_PER_EPOCH * 8)
            kw['state'] = create_genesis_state(spec=kw['spec'], num_validators=spec_phase0.SLOTS_PER_EPOCH * 10)
        except KeyError:
            raise TypeError('Spec decorator must come within state decorator to inject spec into state.')
        return fn(*args, **kw)
@@ -122,7 +122,7 @@ def get_attestation_signature(spec, state, attestation_data, privkey, custody_bi
        privkey=privkey,
        domain=spec.get_domain(
            state=state,
            domain_type=spec.DOMAIN_ATTESTATION,
            domain_type=spec.DOMAIN_BEACON_ATTESTER,
            message_epoch=attestation_data.target.epoch,
        )
    )

@@ -18,7 +18,6 @@ def get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False):
    )
    header_2 = deepcopy(header_1)
    header_2.parent_root = b'\x99' * 32
    header_2.slot = slot + 1

    if signed_1:
        sign_block_header(spec, state, header_1, privkey)
@@ -1,10 +1,10 @@

import re
from eth_utils import (
    to_tuple,
)

from eth2spec.test.context import (
    expect_assertion_error,
    spec_state_test,
    with_all_phases_except,
)

@@ -89,10 +89,14 @@ generalized_index_cases = [
@spec_state_test
def test_get_generalized_index(spec, state):
    for typ, path, generalized_index in generalized_index_cases:
        assert spec.get_generalized_index(
            typ=typ,
            path=path,
        ) == generalized_index
        if generalized_index is not None:
            assert spec.get_generalized_index(
                typ=typ,
                path=path,
            ) == generalized_index
        else:
            expect_assertion_error(lambda: spec.get_generalized_index(typ=typ, path=path))

        yield 'typ', typ
        yield 'path', path
        yield 'generalized_index', generalized_index
@@ -21,7 +21,7 @@ def test_activation(spec, state):
    index = 0
    mock_deposit(spec, state, index)

    for _ in range(spec.ACTIVATION_EXIT_DELAY + 1):
    for _ in range(spec.MAX_SEED_LOOKAHEAD + 1):
        next_epoch(spec, state)

    yield from run_process_registry_updates(spec, state)

@@ -73,7 +73,7 @@ def test_ejection(spec, state):
    # Mock an ejection
    state.validators[index].effective_balance = spec.EJECTION_BALANCE

    for _ in range(spec.ACTIVATION_EXIT_DELAY + 1):
    for _ in range(spec.MAX_SEED_LOOKAHEAD + 1):
        next_epoch(spec, state)

    yield from run_process_registry_updates(spec, state)
@@ -212,13 +212,16 @@ def test_max_reveal_lateness_1(spec, state):
    challenge = get_valid_bit_challenge(spec, state, attestation)

    responder_index = challenge.responder_index
    target_epoch = attestation.data.target.epoch

    state.validators[responder_index].max_reveal_lateness = 3

    for i in range(spec.get_randao_epoch_for_custody_period(
        spec.get_custody_period_for_validator(state, responder_index),
    latest_reveal_epoch = spec.get_randao_epoch_for_custody_period(
        spec.get_custody_period_for_validator(state, responder_index, target_epoch),
        responder_index
    ) + 2 * spec.EPOCHS_PER_CUSTODY_PERIOD + state.validators[responder_index].max_reveal_lateness - 2):
    ) + 2 * spec.EPOCHS_PER_CUSTODY_PERIOD + state.validators[responder_index].max_reveal_lateness

    while spec.get_current_epoch(state) < latest_reveal_epoch - 2:
        next_epoch(spec, state)
        apply_empty_block(spec, state)