Merge branch 'dev' into JustinDrake-patch-14
This commit is contained in:
commit
de01ffd38d
|
@ -1,89 +1,97 @@
|
||||||
version: 2.1
|
version: 2.1
|
||||||
|
commands:
|
||||||
|
restore_cached_venv:
|
||||||
|
description: "Restores a cached venv"
|
||||||
|
parameters:
|
||||||
|
reqs_checksum:
|
||||||
|
type: string
|
||||||
|
default: "1234"
|
||||||
|
venv_name:
|
||||||
|
type: string
|
||||||
|
default: "default-name"
|
||||||
|
steps:
|
||||||
|
- restore_cache:
|
||||||
|
keys:
|
||||||
|
- << parameters.venv_name >>-venv-<< parameters.reqs_checksum >>
|
||||||
|
# fallback to using the latest cache if no exact match is found
|
||||||
|
- << parameters.venv_name >>-venv-
|
||||||
|
save_cached_venv:
|
||||||
|
description: "Saves a venv into a cache"
|
||||||
|
parameters:
|
||||||
|
reqs_checksum:
|
||||||
|
type: string
|
||||||
|
default: "1234"
|
||||||
|
venv_path:
|
||||||
|
type: string
|
||||||
|
default: "venv"
|
||||||
|
venv_name:
|
||||||
|
type: string
|
||||||
|
default: "default-name"
|
||||||
|
steps:
|
||||||
|
- save_cache:
|
||||||
|
key: << parameters.venv_name >>-venv-<< parameters.reqs_checksum >>
|
||||||
|
paths: << parameters.venv_path >>
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
checkout_specs:
|
||||||
docker:
|
docker:
|
||||||
- image: circleci/python:3.6
|
- image: circleci/python:3.6
|
||||||
working_directory: ~/repo
|
working_directory: ~/specs-repo
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
|
# Restore git repo at point close to target branch/revision, to speed up checkout
|
||||||
|
- restore_cache:
|
||||||
|
keys:
|
||||||
|
- v1-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
|
- v1-specs-repo-{{ .Branch }}-
|
||||||
|
- v1-specs-repo-
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
name: Build pyspec
|
name: Clean up git repo to reduce cache size
|
||||||
command: make pyspec
|
command: git gc
|
||||||
|
# Save the git checkout as a cache, to make cloning next time faster.
|
||||||
|
- save_cache:
|
||||||
|
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
|
paths:
|
||||||
|
- ~/specs-repo
|
||||||
|
install_test:
|
||||||
|
docker:
|
||||||
|
- image: circleci/python:3.6
|
||||||
|
working_directory: ~/specs-repo
|
||||||
|
steps:
|
||||||
|
- restore_cache:
|
||||||
|
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
|
- restore_cached_venv:
|
||||||
|
venv_name: v1-pyspec
|
||||||
|
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}'
|
||||||
|
- run:
|
||||||
|
name: Install pyspec requirements
|
||||||
|
command: make install_test
|
||||||
|
- save_cached_venv:
|
||||||
|
venv_name: v1-pyspec
|
||||||
|
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}'
|
||||||
|
venv_path: ./test_libs/pyspec/venv
|
||||||
|
test:
|
||||||
|
docker:
|
||||||
|
- image: circleci/python:3.6
|
||||||
|
working_directory: ~/specs-repo
|
||||||
|
steps:
|
||||||
|
- restore_cache:
|
||||||
|
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
|
- restore_cached_venv:
|
||||||
|
venv_name: v1-pyspec
|
||||||
|
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}'
|
||||||
- run:
|
- run:
|
||||||
name: Run py-tests
|
name: Run py-tests
|
||||||
command: make test
|
command: make citest
|
||||||
|
- store_test_results:
|
||||||
# TODO see #928: decide on CI triggering of yaml tests building,
|
path: test_libs/pyspec/test-reports
|
||||||
# and destination of output (new yaml tests LFS-configured repository)
|
workflows:
|
||||||
#
|
version: 2.1
|
||||||
# - run:
|
test_spec:
|
||||||
# name: Generate YAML tests
|
jobs:
|
||||||
# command: make gen_yaml_tests
|
- checkout_specs
|
||||||
#
|
- install_test:
|
||||||
# - store_artifacts:
|
requires:
|
||||||
# path: test-reports
|
- checkout_specs
|
||||||
# destination: test-reports
|
- test:
|
||||||
#
|
requires:
|
||||||
# - run:
|
- install_test
|
||||||
# name: Save YAML tests for deployment
|
|
||||||
# command: |
|
|
||||||
# mkdir /tmp/workspace
|
|
||||||
# cp -r yaml_tests /tmp/workspace/
|
|
||||||
# git log -1 >> /tmp/workspace/latest_commit_message
|
|
||||||
# - persist_to_workspace:
|
|
||||||
# root: /tmp/workspace
|
|
||||||
# paths:
|
|
||||||
# - yaml_tests
|
|
||||||
# - latest_commit_message
|
|
||||||
# commit:
|
|
||||||
# docker:
|
|
||||||
# - image: circleci/python:3.6
|
|
||||||
# steps:
|
|
||||||
# - attach_workspace:
|
|
||||||
# at: /tmp/workspace
|
|
||||||
# - add_ssh_keys:
|
|
||||||
# fingerprints:
|
|
||||||
# - "01:85:b6:36:96:a6:84:72:e4:9b:4e:38:ee:21:97:fa"
|
|
||||||
# - run:
|
|
||||||
# name: Checkout test repository
|
|
||||||
# command: |
|
|
||||||
# ssh-keyscan -H github.com >> ~/.ssh/known_hosts
|
|
||||||
# git clone git@github.com:ethereum/eth2.0-tests.git
|
|
||||||
# - run:
|
|
||||||
# name: Commit and push generated YAML tests
|
|
||||||
# command: |
|
|
||||||
# cd eth2.0-tests
|
|
||||||
# git config user.name 'eth2TestGenBot'
|
|
||||||
# git config user.email '47188154+eth2TestGenBot@users.noreply.github.com'
|
|
||||||
# for filename in /tmp/workspace/yaml_tests/*; do
|
|
||||||
# rm -rf $(basename $filename)
|
|
||||||
# cp -r $filename .
|
|
||||||
# done
|
|
||||||
# git add .
|
|
||||||
# if git diff --cached --exit-code >& /dev/null; then
|
|
||||||
# echo "No changes to commit"
|
|
||||||
# else
|
|
||||||
# echo -e "Update generated tests\n\nLatest commit message from eth2.0-specs:\n" > commit_message
|
|
||||||
# cat /tmp/workspace/latest_commit_message >> commit_message
|
|
||||||
# git commit -F commit_message
|
|
||||||
# git push origin master
|
|
||||||
# fi
|
|
||||||
#workflows:
|
|
||||||
# version: 2.1
|
|
||||||
#
|
|
||||||
# build_and_commit:
|
|
||||||
# jobs:
|
|
||||||
# - build:
|
|
||||||
# filters:
|
|
||||||
# tags:
|
|
||||||
# only: /.*/
|
|
||||||
# - commit:
|
|
||||||
# requires:
|
|
||||||
# - build
|
|
||||||
# filters:
|
|
||||||
# tags:
|
|
||||||
# only: /.*/
|
|
||||||
# branches:
|
|
||||||
# ignore: /.*/
|
|
||||||
|
|
|
@ -8,7 +8,7 @@ venv
|
||||||
build/
|
build/
|
||||||
output/
|
output/
|
||||||
|
|
||||||
yaml_tests/
|
eth2.0-spec-tests/
|
||||||
.pytest_cache
|
.pytest_cache
|
||||||
|
|
||||||
# Dynamically built from Markdown spec
|
# Dynamically built from Markdown spec
|
||||||
|
|
48
Makefile
48
Makefile
|
@ -2,7 +2,7 @@ SPEC_DIR = ./specs
|
||||||
SCRIPT_DIR = ./scripts
|
SCRIPT_DIR = ./scripts
|
||||||
TEST_LIBS_DIR = ./test_libs
|
TEST_LIBS_DIR = ./test_libs
|
||||||
PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec
|
PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec
|
||||||
YAML_TEST_DIR = ./yaml_tests
|
YAML_TEST_DIR = ./eth2.0-spec-tests/tests
|
||||||
GENERATOR_DIR = ./test_generators
|
GENERATOR_DIR = ./test_generators
|
||||||
CONFIGS_DIR = ./configs
|
CONFIGS_DIR = ./configs
|
||||||
|
|
||||||
|
@ -16,7 +16,7 @@ PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py
|
||||||
PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS)
|
PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS)
|
||||||
|
|
||||||
|
|
||||||
.PHONY: clean all test gen_yaml_tests pyspec phase0
|
.PHONY: clean all test citest gen_yaml_tests pyspec phase0 install_test
|
||||||
|
|
||||||
all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS)
|
all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS)
|
||||||
|
|
||||||
|
@ -27,11 +27,17 @@ clean:
|
||||||
rm -rf $(PY_SPEC_ALL_TARGETS)
|
rm -rf $(PY_SPEC_ALL_TARGETS)
|
||||||
|
|
||||||
# "make gen_yaml_tests" to run generators
|
# "make gen_yaml_tests" to run generators
|
||||||
gen_yaml_tests: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS)
|
gen_yaml_tests: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_TARGETS)
|
||||||
|
|
||||||
|
# installs the packages to run pyspec tests
|
||||||
|
install_test:
|
||||||
|
cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt;
|
||||||
|
|
||||||
# runs a limited set of tests against a minimal config
|
|
||||||
test: $(PY_SPEC_ALL_TARGETS)
|
test: $(PY_SPEC_ALL_TARGETS)
|
||||||
cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python -m pytest -m minimal_config .
|
cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest -m minimal_config .
|
||||||
|
|
||||||
|
citest: $(PY_SPEC_ALL_TARGETS)
|
||||||
|
cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; python -m pytest --junitxml=test-reports/eth2spec/test_results.xml -m minimal_config .
|
||||||
|
|
||||||
# "make pyspec" to create the pyspec for all phases.
|
# "make pyspec" to create the pyspec for all phases.
|
||||||
pyspec: $(PY_SPEC_ALL_TARGETS)
|
pyspec: $(PY_SPEC_ALL_TARGETS)
|
||||||
|
@ -48,26 +54,32 @@ CURRENT_DIR = ${CURDIR}
|
||||||
|
|
||||||
# The function that builds a set of suite files, by calling a generator for the given type (param 1)
|
# The function that builds a set of suite files, by calling a generator for the given type (param 1)
|
||||||
define build_yaml_tests
|
define build_yaml_tests
|
||||||
$(info running generator $(1))
|
# Started!
|
||||||
# Create the output
|
# Create output directory
|
||||||
mkdir -p $(YAML_TEST_DIR)$(1)
|
# Navigate to the generator
|
||||||
|
# Create a virtual environment, if it does not exist already
|
||||||
# 1) Create a virtual environment
|
# Activate the venv, this is where dependencies are installed for the generator
|
||||||
# 2) Activate the venv, this is where dependencies are installed for the generator
|
# Install all the necessary requirements
|
||||||
# 3) Install all the necessary requirements
|
# Run the generator. The generator is assumed to have an "main.py" file.
|
||||||
# 4) Run the generator. The generator is assumed to have an "main.py" file.
|
# We output to the tests dir (generator program should accept a "-o <filepath>" argument.
|
||||||
# 5) We output to the tests dir (generator program should accept a "-o <filepath>" argument.
|
echo "generator $(1) started"; \
|
||||||
cd $(GENERATOR_DIR)$(1); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python3 main.py -o $(CURRENT_DIR)/$(YAML_TEST_DIR)$(1) -c $(CURRENT_DIR)/$(CONFIGS_DIR)
|
mkdir -p $(YAML_TEST_DIR)$(1); \
|
||||||
|
cd $(GENERATOR_DIR)$(1); \
|
||||||
$(info generator $(1) finished)
|
if ! test -d venv; then python3 -m venv venv; fi; \
|
||||||
|
. venv/bin/activate; \
|
||||||
|
pip3 install -r requirements.txt; \
|
||||||
|
python3 main.py -o $(CURRENT_DIR)/$(YAML_TEST_DIR)$(1) -c $(CURRENT_DIR)/$(CONFIGS_DIR); \
|
||||||
|
echo "generator $(1) finished"
|
||||||
endef
|
endef
|
||||||
|
|
||||||
# The tests dir itself is simply build by creating the directory (recursively creating deeper directories if necessary)
|
# The tests dir itself is simply build by creating the directory (recursively creating deeper directories if necessary)
|
||||||
$(YAML_TEST_DIR):
|
$(YAML_TEST_DIR):
|
||||||
$(info creating directory, to output yaml targets to: ${YAML_TEST_TARGETS})
|
$(info creating directory, to output yaml targets to: ${YAML_TEST_TARGETS})
|
||||||
mkdir -p $@
|
mkdir -p $@
|
||||||
|
$(YAML_TEST_DIR)/:
|
||||||
|
$(info ignoring duplicate yaml tests dir)
|
||||||
|
|
||||||
# For any target within the tests dir, build it using the build_yaml_tests function.
|
# For any target within the tests dir, build it using the build_yaml_tests function.
|
||||||
# (creation of output dir is a dependency)
|
# (creation of output dir is a dependency)
|
||||||
$(YAML_TEST_DIR)%: $(YAML_TEST_DIR)
|
$(YAML_TEST_DIR)%: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR)
|
||||||
$(call build_yaml_tests,$*)
|
$(call build_yaml_tests,$*)
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
|
|
||||||
[![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
[![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||||
|
|
||||||
To learn more about sharding and eth2.0/Serenity, see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQs) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).
|
To learn more about sharding and eth2.0/Serenity, see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQ) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).
|
||||||
|
|
||||||
This repo hosts the current eth2.0 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed upon changes to spec can be made through pull requests.
|
This repo hosts the current eth2.0 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed upon changes to spec can be made through pull requests.
|
||||||
|
|
||||||
|
@ -11,10 +11,10 @@ This repo hosts the current eth2.0 specifications. Discussions about design rati
|
||||||
|
|
||||||
Core specifications for eth2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
|
Core specifications for eth2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
|
||||||
* [Phase 0 -- The Beacon Chain](specs/core/0_beacon-chain.md)
|
* [Phase 0 -- The Beacon Chain](specs/core/0_beacon-chain.md)
|
||||||
* [Phase 1 -- Custody game](specs/core/1_custody-game.md)
|
* [Phase 1 -- Custody Game](specs/core/1_custody-game.md)
|
||||||
* [Phase 1 -- Shard Data Chains](specs/core/1_shard-data-chains.md)
|
* [Phase 1 -- Shard Data Chains](specs/core/1_shard-data-chains.md)
|
||||||
|
|
||||||
Accompanying documents can be found in [specs](specs) and include
|
Accompanying documents can be found in [specs](specs) and include:
|
||||||
* [SimpleSerialize (SSZ) spec](specs/simple-serialize.md)
|
* [SimpleSerialize (SSZ) spec](specs/simple-serialize.md)
|
||||||
* [BLS signature verification](specs/bls_signature.md)
|
* [BLS signature verification](specs/bls_signature.md)
|
||||||
* [General test format](specs/test_formats/README.md)
|
* [General test format](specs/test_formats/README.md)
|
||||||
|
|
|
@ -42,8 +42,8 @@ HIGH_BALANCE_INCREMENT: 1000000000
|
||||||
# Initial values
|
# Initial values
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
GENESIS_FORK_VERSION: 0x00000000
|
GENESIS_FORK_VERSION: 0x00000000
|
||||||
# 2**32, GENESIS_EPOCH is derived from this constant
|
# 0, GENESIS_EPOCH is derived from this constant
|
||||||
GENESIS_SLOT: 4294967296
|
GENESIS_SLOT: 0
|
||||||
GENESIS_START_SHARD: 0
|
GENESIS_START_SHARD: 0
|
||||||
# 2**64 - 1
|
# 2**64 - 1
|
||||||
FAR_FUTURE_EPOCH: 18446744073709551615
|
FAR_FUTURE_EPOCH: 18446744073709551615
|
||||||
|
@ -116,7 +116,7 @@ MAX_TRANSFERS: 16
|
||||||
|
|
||||||
# Signature domains
|
# Signature domains
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
DOMAIN_BEACON_BLOCK: 0
|
DOMAIN_BEACON_PROPOSER: 0
|
||||||
DOMAIN_RANDAO: 1
|
DOMAIN_RANDAO: 1
|
||||||
DOMAIN_ATTESTATION: 2
|
DOMAIN_ATTESTATION: 2
|
||||||
DOMAIN_DEPOSIT: 3
|
DOMAIN_DEPOSIT: 3
|
||||||
|
|
|
@ -42,8 +42,8 @@ HIGH_BALANCE_INCREMENT: 1000000000
|
||||||
# Initial values
|
# Initial values
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
GENESIS_FORK_VERSION: 0x00000000
|
GENESIS_FORK_VERSION: 0x00000000
|
||||||
# 2**32, GENESIS_EPOCH is derived from this constant
|
# 0, GENESIS_EPOCH is derived from this constant
|
||||||
GENESIS_SLOT: 4294967296
|
GENESIS_SLOT: 0
|
||||||
GENESIS_START_SHARD: 0
|
GENESIS_START_SHARD: 0
|
||||||
# 2**64 - 1
|
# 2**64 - 1
|
||||||
FAR_FUTURE_EPOCH: 18446744073709551615
|
FAR_FUTURE_EPOCH: 18446744073709551615
|
||||||
|
@ -116,7 +116,7 @@ MAX_TRANSFERS: 16
|
||||||
|
|
||||||
# Signature domains
|
# Signature domains
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
DOMAIN_BEACON_BLOCK: 0
|
DOMAIN_BEACON_PROPOSER: 0
|
||||||
DOMAIN_RANDAO: 1
|
DOMAIN_RANDAO: 1
|
||||||
DOMAIN_ATTESTATION: 2
|
DOMAIN_ATTESTATION: 2
|
||||||
DOMAIN_DEPOSIT: 3
|
DOMAIN_DEPOSIT: 3
|
||||||
|
|
|
@ -86,9 +86,9 @@ def hash_to_G2(message_hash: Bytes32, domain: uint64) -> [uint384]:
|
||||||
|
|
||||||
### `modular_squareroot`
|
### `modular_squareroot`
|
||||||
|
|
||||||
`modular_squareroot(x)` returns a solution `y` to `y**2 % q == x`, and `None` if none exists. If there are two solutions the one with higher imaginary component is favored; if both solutions have equal imaginary component the one with higher real component is favored (note that this is equivalent to saying that the single solution with either imaginary component > p/2 or imaginary component zero and real component > p/2 is favored).
|
`modular_squareroot(x)` returns a solution `y` to `y**2 % q == x`, and `None` if none exists. If there are two solutions, the one with higher imaginary component is favored; if both solutions have equal imaginary component, the one with higher real component is favored (note that this is equivalent to saying that the single solution with either imaginary component > p/2 or imaginary component zero and real component > p/2 is favored).
|
||||||
|
|
||||||
The following is a sample implementation; implementers are free to implement modular square roots as they wish. Note that `x2 = -x1` is an _additive modular inverse_ so real and imaginary coefficients remain in `[0 .. q-1]`. `coerce_to_int(element: Fq) -> int` is a function that takes Fq element `element` (ie. integers `mod q`) and converts it to a regular integer.
|
The following is a sample implementation; implementers are free to implement modular square roots as they wish. Note that `x2 = -x1` is an _additive modular inverse_ so real and imaginary coefficients remain in `[0 .. q-1]`. `coerce_to_int(element: Fq) -> int` is a function that takes Fq element `element` (i.e. integers `mod q`) and converts it to a regular integer.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
Fq2_order = q ** 2 - 1
|
Fq2_order = q ** 2 - 1
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
# Ethereum 2.0 Phase 0 -- The Beacon Chain
|
# Ethereum 2.0 Phase 0 -- The Beacon Chain
|
||||||
|
|
||||||
**NOTICE**: This document is a work in progress for researchers and implementers. It reflects recent spec changes and takes precedence over the Python proof-of-concept implementation [[python-poc]](#ref-python-poc).
|
**NOTICE**: This document is a work in progress for researchers and implementers. It reflects recent spec changes and takes precedence over the Python proof-of-concept implementation [[python-poc]](https://github.com/ethereum/beacon_chain).
|
||||||
|
|
||||||
## Table of contents
|
## Table of contents
|
||||||
<!-- TOC -->
|
<!-- TOC -->
|
||||||
|
@ -91,7 +91,6 @@
|
||||||
- [`bls_verify_multiple`](#bls_verify_multiple)
|
- [`bls_verify_multiple`](#bls_verify_multiple)
|
||||||
- [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys)
|
- [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys)
|
||||||
- [Routines for updating validator status](#routines-for-updating-validator-status)
|
- [Routines for updating validator status](#routines-for-updating-validator-status)
|
||||||
- [`activate_validator`](#activate_validator)
|
|
||||||
- [`initiate_validator_exit`](#initiate_validator_exit)
|
- [`initiate_validator_exit`](#initiate_validator_exit)
|
||||||
- [`slash_validator`](#slash_validator)
|
- [`slash_validator`](#slash_validator)
|
||||||
- [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract)
|
- [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract)
|
||||||
|
@ -190,7 +189,7 @@ These configurations are updated for releases, but may be out of sync during `de
|
||||||
| Name | Value | Unit |
|
| Name | Value | Unit |
|
||||||
| - | - | :-: |
|
| - | - | :-: |
|
||||||
| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei |
|
| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei |
|
||||||
| `MAX_DEPOSIT_AMOUNT` | `2**5 * 10**9` (= 32,000,000,000) | Gwei |
|
| `MAX_EFFECTIVE_BALANCE` | `2**5 * 10**9` (= 32,000,000,000) | Gwei |
|
||||||
| `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei |
|
| `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei |
|
||||||
| `EFFECTIVE_BALANCE_INCREMENT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei |
|
| `EFFECTIVE_BALANCE_INCREMENT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei |
|
||||||
|
|
||||||
|
@ -240,7 +239,7 @@ These configurations are updated for releases, but may be out of sync during `de
|
||||||
| `INACTIVITY_PENALTY_QUOTIENT` | `2**25` (= 33,554,432) |
|
| `INACTIVITY_PENALTY_QUOTIENT` | `2**25` (= 33,554,432) |
|
||||||
| `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) |
|
| `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) |
|
||||||
|
|
||||||
* The `BASE_REWARD_QUOTIENT` parameter dictates the per-epoch reward. It corresponds to ~2.54% annual interest assuming 10 million participating ETH in every epoch.
|
* **The `BASE_REWARD_QUOTIENT` is NOT final. Once all other protocol details are finalized it will be adjusted, to target a theoretical maximum total issuance of `2**21` ETH per year if `2**27` ETH is validating (and therefore `2**20` per year if `2**25` ETH is validating, etc etc)**
|
||||||
* The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
|
* The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
|
||||||
|
|
||||||
### Max operations per block
|
### Max operations per block
|
||||||
|
@ -252,7 +251,7 @@ These configurations are updated for releases, but may be out of sync during `de
|
||||||
| `MAX_ATTESTATIONS` | `2**7` (= 128) |
|
| `MAX_ATTESTATIONS` | `2**7` (= 128) |
|
||||||
| `MAX_DEPOSITS` | `2**4` (= 16) |
|
| `MAX_DEPOSITS` | `2**4` (= 16) |
|
||||||
| `MAX_VOLUNTARY_EXITS` | `2**4` (= 16) |
|
| `MAX_VOLUNTARY_EXITS` | `2**4` (= 16) |
|
||||||
| `MAX_TRANSFERS` | `2**4` (= 16) |
|
| `MAX_TRANSFERS` | `0` |
|
||||||
|
|
||||||
### Signature domains
|
### Signature domains
|
||||||
|
|
||||||
|
@ -294,7 +293,7 @@ The types are defined topologically to aid in facilitating an executable version
|
||||||
'epoch': 'uint64',
|
'epoch': 'uint64',
|
||||||
# Root of the previous crosslink
|
# Root of the previous crosslink
|
||||||
'previous_crosslink_root': 'bytes32',
|
'previous_crosslink_root': 'bytes32',
|
||||||
# Shard data since the previous crosslink
|
# Root of the crosslinked shard data since the previous crosslink
|
||||||
'crosslink_data_root': 'bytes32',
|
'crosslink_data_root': 'bytes32',
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
@ -772,7 +771,7 @@ def get_split_offset(list_size: int, chunks: int, index: int) -> int:
|
||||||
```python
|
```python
|
||||||
def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int:
|
def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int:
|
||||||
"""
|
"""
|
||||||
Return the number of committees in one epoch.
|
Return the number of committees at ``epoch``.
|
||||||
"""
|
"""
|
||||||
active_validator_indices = get_active_validator_indices(state, epoch)
|
active_validator_indices = get_active_validator_indices(state, epoch)
|
||||||
return max(
|
return max(
|
||||||
|
@ -788,6 +787,9 @@ def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def get_shard_delta(state: BeaconState, epoch: Epoch) -> int:
|
def get_shard_delta(state: BeaconState, epoch: Epoch) -> int:
|
||||||
|
"""
|
||||||
|
Return the number of shards to increment ``state.latest_start_shard`` during ``epoch``.
|
||||||
|
"""
|
||||||
return min(get_epoch_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH)
|
return min(get_epoch_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -921,12 +923,12 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
|
||||||
"""
|
"""
|
||||||
Return the beacon proposer index at ``slot``.
|
Return the beacon proposer index at ``slot``.
|
||||||
"""
|
"""
|
||||||
epoch = slot_to_epoch(state.slot)
|
current_epoch = slot_to_epoch(state.slot)
|
||||||
first_committee, _ = get_crosslink_committees_at_slot(state, state.slot)[0]
|
first_committee, _ = get_crosslink_committees_at_slot(state, state.slot)[0]
|
||||||
MAX_RANDOM_BYTE = 2**8 - 1
|
MAX_RANDOM_BYTE = 2**8 - 1
|
||||||
i = 0
|
i = 0
|
||||||
while True:
|
while True:
|
||||||
candidate_index = first_committee[(epoch + i) % len(first_committee)]
|
candidate_index = first_committee[(current_epoch + i) % len(first_committee)]
|
||||||
random_byte = hash(generate_seed(state, epoch) + int_to_bytes8(i // 32))[i % 32]
|
random_byte = hash(generate_seed(state, epoch) + int_to_bytes8(i // 32))[i % 32]
|
||||||
effective_balance = state.validator_registry[candidate_index].effective_balance
|
effective_balance = state.validator_registry[candidate_index].effective_balance
|
||||||
if effective_balance * MAX_RANDOM_BYTE >= MAX_DEPOSIT_AMOUNT * random_byte:
|
if effective_balance * MAX_RANDOM_BYTE >= MAX_DEPOSIT_AMOUNT * random_byte:
|
||||||
|
@ -1168,22 +1170,6 @@ def get_churn_limit(state: BeaconState) -> int:
|
||||||
|
|
||||||
Note: All functions in this section mutate `state`.
|
Note: All functions in this section mutate `state`.
|
||||||
|
|
||||||
#### `activate_validator`
|
|
||||||
|
|
||||||
```python
|
|
||||||
def activate_validator(state: BeaconState, index: ValidatorIndex) -> None:
|
|
||||||
"""
|
|
||||||
Activate the validator of the given ``index``.
|
|
||||||
Note that this function mutates ``state``.
|
|
||||||
"""
|
|
||||||
validator = state.validator_registry[index]
|
|
||||||
if state.slot == GENESIS_SLOT:
|
|
||||||
validator.activation_eligibility_epoch = GENESIS_EPOCH
|
|
||||||
validator.activation_epoch = GENESIS_EPOCH
|
|
||||||
else:
|
|
||||||
validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `initiate_validator_exit`
|
#### `initiate_validator_exit`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -1199,7 +1185,7 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None:
|
||||||
|
|
||||||
# Compute exit queue epoch
|
# Compute exit queue epoch
|
||||||
exit_epochs = [v.exit_epoch for v in state.validator_registry if v.exit_epoch != FAR_FUTURE_EPOCH]
|
exit_epochs = [v.exit_epoch for v in state.validator_registry if v.exit_epoch != FAR_FUTURE_EPOCH]
|
||||||
exit_queue_epoch = sorted(exit_epochs + [get_delayed_activation_exit_epoch(get_current_epoch(state))])[-1]
|
exit_queue_epoch = max(exit_epochs + [get_delayed_activation_exit_epoch(get_current_epoch(state))])
|
||||||
exit_queue_churn = len([v for v in state.validator_registry if v.exit_epoch == exit_queue_epoch])
|
exit_queue_churn = len([v for v in state.validator_registry if v.exit_epoch == exit_queue_epoch])
|
||||||
if exit_queue_churn >= get_churn_limit(state):
|
if exit_queue_churn >= get_churn_limit(state):
|
||||||
exit_queue_epoch += 1
|
exit_queue_epoch += 1
|
||||||
|
@ -1240,7 +1226,7 @@ The initial deployment phases of Ethereum 2.0 are implemented without consensus
|
||||||
|
|
||||||
### Deposit arguments
|
### Deposit arguments
|
||||||
|
|
||||||
The deposit contract has a single `deposit` function which takes as argument a SimpleSerialize'd `DepositData`.
|
The deposit contract has a single `deposit` function which takes as argument the `DepositData` elements.
|
||||||
|
|
||||||
### Withdrawal credentials
|
### Withdrawal credentials
|
||||||
|
|
||||||
|
@ -1253,7 +1239,7 @@ The private key corresponding to `withdrawal_pubkey` will be required to initiat
|
||||||
|
|
||||||
### `Deposit` logs
|
### `Deposit` logs
|
||||||
|
|
||||||
Every Ethereum 1.0 deposit, of size between `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12 signature) is not verified by the deposit contract.
|
Every Ethereum 1.0 deposit, of size at least `MIN_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12-381 signature) is not verified by the deposit contract.
|
||||||
|
|
||||||
### `Eth2Genesis` log
|
### `Eth2Genesis` log
|
||||||
|
|
||||||
|
@ -1275,7 +1261,7 @@ For convenience, we provide the interface to the contract here:
|
||||||
|
|
||||||
* `__init__()`: initializes the contract
|
* `__init__()`: initializes the contract
|
||||||
* `get_deposit_root() -> bytes32`: returns the current root of the deposit tree
|
* `get_deposit_root() -> bytes32`: returns the current root of the deposit tree
|
||||||
* `deposit(bytes[512])`: adds a deposit instance to the deposit tree, incorporating the input argument and the value transferred in the given call. Note: the amount of value transferred *must* be within `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, inclusive. Each of these constants are specified in units of Gwei.
|
* `deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])`: adds a deposit instance to the deposit tree, incorporating the input arguments and the value transferred in the given call. Note: the amount of value transferred *must* be at least `MIN_DEPOSIT_AMOUNT`. Each of these constants are specified in units of Gwei.
|
||||||
|
|
||||||
## On genesis
|
## On genesis
|
||||||
|
|
||||||
|
@ -1306,7 +1292,8 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit],
|
||||||
# Process genesis activations
|
# Process genesis activations
|
||||||
for index, validator in enumerate(state.validator_registry):
|
for index, validator in enumerate(state.validator_registry):
|
||||||
if validator.effective_balance >= MAX_DEPOSIT_AMOUNT:
|
if validator.effective_balance >= MAX_DEPOSIT_AMOUNT:
|
||||||
activate_validator(state, index)
|
validator.activation_eligibility_epoch = GENESIS_EPOCH
|
||||||
|
validator.activation_epoch = GENESIS_EPOCH
|
||||||
|
|
||||||
genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, GENESIS_EPOCH))
|
genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, GENESIS_EPOCH))
|
||||||
for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH):
|
for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH):
|
||||||
|
@ -1681,14 +1668,16 @@ def process_registry_updates(state: BeaconState) -> None:
|
||||||
if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE:
|
if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE:
|
||||||
initiate_validator_exit(state, index)
|
initiate_validator_exit(state, index)
|
||||||
|
|
||||||
# Process activations
|
# Queue validators eligible for activation and not dequeued for activation prior to finalized epoch
|
||||||
activation_queue = sorted([
|
activation_queue = sorted([
|
||||||
index for index, validator in enumerate(state.validator_registry) if
|
index for index, validator in enumerate(state.validator_registry) if
|
||||||
validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and
|
validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and
|
||||||
validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch)
|
validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch)
|
||||||
], key=lambda index: state.validator_registry[index].activation_eligibility_epoch)
|
], key=lambda index: state.validator_registry[index].activation_eligibility_epoch)
|
||||||
|
# Dequeued validators for activation up to churn limit (without resetting activation epoch)
|
||||||
for index in activation_queue[:get_churn_limit(state)]:
|
for index in activation_queue[:get_churn_limit(state)]:
|
||||||
activate_validator(state, index)
|
if validator.activation_epoch == FAR_FUTURE_EPOCH:
|
||||||
|
validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Slashings
|
#### Slashings
|
||||||
|
@ -1938,7 +1927,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
|
||||||
"""
|
"""
|
||||||
# Verify the Merkle branch
|
# Verify the Merkle branch
|
||||||
assert verify_merkle_branch(
|
assert verify_merkle_branch(
|
||||||
leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization
|
leaf=hash_tree_root(deposit.data),
|
||||||
proof=deposit.proof,
|
proof=deposit.proof,
|
||||||
depth=DEPOSIT_CONTRACT_TREE_DEPTH,
|
depth=DEPOSIT_CONTRACT_TREE_DEPTH,
|
||||||
index=deposit.index,
|
index=deposit.index,
|
||||||
|
@ -2004,8 +1993,6 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None:
|
||||||
|
|
||||||
##### Transfers
|
##### Transfers
|
||||||
|
|
||||||
Note: Transfers are a temporary functionality for phases 0 and 1, to be removed in phase 2.
|
|
||||||
|
|
||||||
Verify that `len(block.body.transfers) <= MAX_TRANSFERS` and that all transfers are distinct.
|
Verify that `len(block.body.transfers) <= MAX_TRANSFERS` and that all transfers are distinct.
|
||||||
|
|
||||||
For each `transfer` in `block.body.transfers`, run the following function:
|
For each `transfer` in `block.body.transfers`, run the following function:
|
||||||
|
@ -2020,10 +2007,11 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None:
|
||||||
assert state.balances[transfer.sender] >= max(transfer.amount, transfer.fee)
|
assert state.balances[transfer.sender] >= max(transfer.amount, transfer.fee)
|
||||||
# A transfer is valid in only one slot
|
# A transfer is valid in only one slot
|
||||||
assert state.slot == transfer.slot
|
assert state.slot == transfer.slot
|
||||||
# Only withdrawn or not-yet-deposited accounts can transfer
|
# Sender must be not yet eligible for activation, withdrawn, or transfer balance over MAX_EFFECTIVE_BALANCE
|
||||||
assert (
|
assert (
|
||||||
|
state.validator_registry[transfer.sender].activation_eligibility_epoch == FAR_FUTURE_EPOCH or
|
||||||
get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or
|
get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or
|
||||||
state.validator_registry[transfer.sender].activation_epoch == FAR_FUTURE_EPOCH
|
transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE <= get_balance(state, transfer.sender)
|
||||||
)
|
)
|
||||||
# Verify that the pubkey is valid
|
# Verify that the pubkey is valid
|
||||||
assert (
|
assert (
|
||||||
|
|
|
@ -28,9 +28,12 @@
|
||||||
- [`BeaconState`](#beaconstate)
|
- [`BeaconState`](#beaconstate)
|
||||||
- [`BeaconBlockBody`](#beaconblockbody)
|
- [`BeaconBlockBody`](#beaconblockbody)
|
||||||
- [Helpers](#helpers)
|
- [Helpers](#helpers)
|
||||||
|
- [`typeof`](#typeof)
|
||||||
|
- [`empty`](#empty)
|
||||||
- [`get_crosslink_chunk_count`](#get_crosslink_chunk_count)
|
- [`get_crosslink_chunk_count`](#get_crosslink_chunk_count)
|
||||||
- [`get_custody_chunk_bit`](#get_custody_chunk_bit)
|
- [`get_custody_chunk_bit`](#get_custody_chunk_bit)
|
||||||
- [`epoch_to_custody_period`](#epoch_to_custody_period)
|
- [`epoch_to_custody_period`](#epoch_to_custody_period)
|
||||||
|
- [`replace_empty_or_append`](#replace_empty_or_append)
|
||||||
- [`verify_custody_key`](#verify_custody_key)
|
- [`verify_custody_key`](#verify_custody_key)
|
||||||
- [Per-block processing](#per-block-processing)
|
- [Per-block processing](#per-block-processing)
|
||||||
- [Operations](#operations)
|
- [Operations](#operations)
|
||||||
|
@ -203,6 +206,14 @@ Add the following fields to the end of the specified container objects. Fields w
|
||||||
|
|
||||||
## Helpers
|
## Helpers
|
||||||
|
|
||||||
|
### `typeof`
|
||||||
|
|
||||||
|
The `typeof` function accepts and SSZ object as a single input and returns the corresponding SSZ type.
|
||||||
|
|
||||||
|
### `empty`
|
||||||
|
|
||||||
|
The `empty` function accepts and SSZ type as input and returns an object of that type with all fields initialized to default values.
|
||||||
|
|
||||||
### `get_crosslink_chunk_count`
|
### `get_crosslink_chunk_count`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -229,6 +240,18 @@ def epoch_to_custody_period(epoch: Epoch) -> int:
|
||||||
return epoch // EPOCHS_PER_CUSTODY_PERIOD
|
return epoch // EPOCHS_PER_CUSTODY_PERIOD
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `replace_empty_or_append`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def replace_empty_or_append(list: List[Any], new_element: Any) -> int:
|
||||||
|
for i in range(len(list)):
|
||||||
|
if list[i] == empty(typeof(new_element)):
|
||||||
|
list[i] = new_element
|
||||||
|
return i
|
||||||
|
list.append(new_element)
|
||||||
|
return len(list) - 1
|
||||||
|
```
|
||||||
|
|
||||||
### `verify_custody_key`
|
### `verify_custody_key`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -321,7 +344,7 @@ def process_chunk_challenge(state: BeaconState,
|
||||||
depth = math.log2(next_power_of_two(get_custody_chunk_count(challenge.attestation)))
|
depth = math.log2(next_power_of_two(get_custody_chunk_count(challenge.attestation)))
|
||||||
assert challenge.chunk_index < 2**depth
|
assert challenge.chunk_index < 2**depth
|
||||||
# Add new chunk challenge record
|
# Add new chunk challenge record
|
||||||
state.custody_chunk_challenge_records.append(CustodyChunkChallengeRecord(
|
new_record = CustodyChunkChallengeRecord(
|
||||||
challenge_index=state.custody_challenge_index,
|
challenge_index=state.custody_challenge_index,
|
||||||
challenger_index=get_beacon_proposer_index(state),
|
challenger_index=get_beacon_proposer_index(state),
|
||||||
responder_index=challenge.responder_index
|
responder_index=challenge.responder_index
|
||||||
|
@ -329,7 +352,9 @@ def process_chunk_challenge(state: BeaconState,
|
||||||
crosslink_data_root=challenge.attestation.data.crosslink_data_root,
|
crosslink_data_root=challenge.attestation.data.crosslink_data_root,
|
||||||
depth=depth,
|
depth=depth,
|
||||||
chunk_index=challenge.chunk_index,
|
chunk_index=challenge.chunk_index,
|
||||||
))
|
)
|
||||||
|
replace_empty_or_append(state.custody_chunk_challenge_records, new_record)
|
||||||
|
|
||||||
state.custody_challenge_index += 1
|
state.custody_challenge_index += 1
|
||||||
# Postpone responder withdrawability
|
# Postpone responder withdrawability
|
||||||
responder.withdrawable_epoch = FAR_FUTURE_EPOCH
|
responder.withdrawable_epoch = FAR_FUTURE_EPOCH
|
||||||
|
@ -385,7 +410,7 @@ def process_bit_challenge(state: BeaconState,
|
||||||
custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(responder_index))
|
custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(responder_index))
|
||||||
assert custody_bit != chunk_bits_xor
|
assert custody_bit != chunk_bits_xor
|
||||||
# Add new bit challenge record
|
# Add new bit challenge record
|
||||||
state.custody_bit_challenge_records.append(CustodyBitChallengeRecord(
|
new_record = CustodyBitChallengeRecord(
|
||||||
challenge_index=state.custody_challenge_index,
|
challenge_index=state.custody_challenge_index,
|
||||||
challenger_index=challenge.challenger_index,
|
challenger_index=challenge.challenger_index,
|
||||||
responder_index=challenge.responder_index,
|
responder_index=challenge.responder_index,
|
||||||
|
@ -393,7 +418,8 @@ def process_bit_challenge(state: BeaconState,
|
||||||
crosslink_data_root=challenge.attestation.crosslink_data_root,
|
crosslink_data_root=challenge.attestation.crosslink_data_root,
|
||||||
chunk_bits=challenge.chunk_bits,
|
chunk_bits=challenge.chunk_bits,
|
||||||
responder_key=challenge.responder_key,
|
responder_key=challenge.responder_key,
|
||||||
))
|
)
|
||||||
|
replace_empty_or_append(state.custody_bit_challenge_records, new_record)
|
||||||
state.custody_challenge_index += 1
|
state.custody_challenge_index += 1
|
||||||
# Postpone responder withdrawability
|
# Postpone responder withdrawability
|
||||||
responder.withdrawable_epoch = FAR_FUTURE_EPOCH
|
responder.withdrawable_epoch = FAR_FUTURE_EPOCH
|
||||||
|
@ -434,7 +460,8 @@ def process_chunk_challenge_response(state: BeaconState,
|
||||||
root=challenge.crosslink_data_root,
|
root=challenge.crosslink_data_root,
|
||||||
)
|
)
|
||||||
# Clear the challenge
|
# Clear the challenge
|
||||||
state.custody_chunk_challenge_records.remove(challenge)
|
records = state.custody_chunk_challenge_records
|
||||||
|
records[records.index(challenge)] = CustodyChunkChallengeRecord()
|
||||||
# Reward the proposer
|
# Reward the proposer
|
||||||
proposer_index = get_beacon_proposer_index(state)
|
proposer_index = get_beacon_proposer_index(state)
|
||||||
increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT)
|
increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT)
|
||||||
|
@ -457,7 +484,8 @@ def process_bit_challenge_response(state: BeaconState,
|
||||||
# Verify the chunk bit does not match the challenge chunk bit
|
# Verify the chunk bit does not match the challenge chunk bit
|
||||||
assert get_custody_chunk_bit(challenge.responder_key, response.chunk) != get_bitfield_bit(challenge.chunk_bits, response.chunk_index)
|
assert get_custody_chunk_bit(challenge.responder_key, response.chunk) != get_bitfield_bit(challenge.chunk_bits, response.chunk_index)
|
||||||
# Clear the challenge
|
# Clear the challenge
|
||||||
state.custody_bit_challenge_records.remove(challenge)
|
records = state.custody_bit_challenge_records
|
||||||
|
records[records.index(challenge)] = CustodyBitChallengeRecord()
|
||||||
# Slash challenger
|
# Slash challenger
|
||||||
slash_validator(state, challenge.challenger_index, challenge.responder_index)
|
slash_validator(state, challenge.challenger_index, challenge.responder_index)
|
||||||
```
|
```
|
||||||
|
@ -471,12 +499,14 @@ def process_challenge_deadlines(state: BeaconState) -> None:
|
||||||
for challenge in state.custody_chunk_challenge_records:
|
for challenge in state.custody_chunk_challenge_records:
|
||||||
if get_current_epoch(state) > challenge.deadline:
|
if get_current_epoch(state) > challenge.deadline:
|
||||||
slash_validator(state, challenge.responder_index, challenge.challenger_index)
|
slash_validator(state, challenge.responder_index, challenge.challenger_index)
|
||||||
state.custody_chunk_challenge_records.remove(challenge)
|
records = state.custody_chunk_challenge_records
|
||||||
|
records[records.index(challenge)] = CustodyChunkChallengeRecord()
|
||||||
|
|
||||||
for challenge in state.custody_bit_challenge_records:
|
for challenge in state.custody_bit_challenge_records:
|
||||||
if get_current_epoch(state) > challenge.deadline:
|
if get_current_epoch(state) > challenge.deadline:
|
||||||
slash_validator(state, challenge.responder_index, challenge.challenger_index)
|
slash_validator(state, challenge.responder_index, challenge.challenger_index)
|
||||||
state.custody_bit_challenge_records.remove(challenge)
|
records = state.custody_bit_challenge_records
|
||||||
|
records[records.index(challenge)] = CustodyBitChallengeRecord()
|
||||||
```
|
```
|
||||||
|
|
||||||
In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope):
|
In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope):
|
||||||
|
|
|
@ -102,7 +102,7 @@ def get_generalized_indices(obj: Any, path: List[int], root: int=1) -> List[int]
|
||||||
|
|
||||||
## Merkle multiproofs
|
## Merkle multiproofs
|
||||||
|
|
||||||
We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (ie. generalized indices 8, 9, 14):
|
We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (i.e. generalized indices 8, 9, 14):
|
||||||
|
|
||||||
```
|
```
|
||||||
.
|
.
|
||||||
|
|
|
@ -27,7 +27,7 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers
|
||||||
|
|
||||||
### Expansions
|
### Expansions
|
||||||
|
|
||||||
We define an "expansion" of an object as an object where a field in an object that is meant to represent the `hash_tree_root` of another object is replaced by the object. Note that defining expansions is not a consensus-layer-change; it is merely a "re-interpretation" of the object. Particularly, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (eg. `BeaconBlockHeader` is a summary of `BeaconBlock`).
|
We define an "expansion" of an object as an object where a field in an object that is meant to represent the `hash_tree_root` of another object is replaced by the object. Note that defining expansions is not a consensus-layer-change; it is merely a "re-interpretation" of the object. Particularly, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (e.g. `BeaconBlockHeader` is a summary of `BeaconBlock`).
|
||||||
|
|
||||||
We define two expansions:
|
We define two expansions:
|
||||||
|
|
||||||
|
|
|
@ -247,7 +247,7 @@ Requests a list of block roots and slots from the peer. The `count` parameter MU
|
||||||
|
|
||||||
Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `1` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. A `skip_slots` value of `0` returns all blocks.
|
Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `1` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. A `skip_slots` value of `0` returns all blocks.
|
||||||
|
|
||||||
The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data.
|
The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each peer has a different starting block in order to populate block data.
|
||||||
|
|
||||||
### Beacon Block Bodies
|
### Beacon Block Bodies
|
||||||
|
|
||||||
|
@ -287,6 +287,6 @@ Requests the `block_bodies` associated with the provided `block_roots` from the
|
||||||
|
|
||||||
**Response Body:** TBD
|
**Response Body:** TBD
|
||||||
|
|
||||||
Requests contain the hashes of Merkle tree nodes that when merkelized yield the block's `state_root`.
|
Requests contain the hashes of Merkle tree nodes that when merkleized yield the block's `state_root`.
|
||||||
|
|
||||||
The response will contain the values that, when hashed, yield the hashes inside the request body.
|
The response will contain the values that, when hashed, yield the hashes inside the request body.
|
||||||
|
|
|
@ -118,7 +118,7 @@ Separation of configuration and tests aims to:
|
||||||
Note: Some clients prefer compile-time constants and optimizations.
|
Note: Some clients prefer compile-time constants and optimizations.
|
||||||
They should compile for each configuration once, and run the corresponding tests per build target.
|
They should compile for each configuration once, and run the corresponding tests per build target.
|
||||||
|
|
||||||
The format is described in `configs/constant_presets`.
|
The format is described in [`configs/constant_presets`](../../configs/constant_presets/README.md#format).
|
||||||
|
|
||||||
|
|
||||||
## Fork-timeline
|
## Fork-timeline
|
||||||
|
@ -129,7 +129,7 @@ A fork timeline is (preferably) loaded in as a configuration object into a clien
|
||||||
- we may decide on an epoch number for a fork based on external events (e.g. Eth1 log event),
|
- we may decide on an epoch number for a fork based on external events (e.g. Eth1 log event),
|
||||||
a client should be able to activate a fork dynamically.
|
a client should be able to activate a fork dynamically.
|
||||||
|
|
||||||
The format is described in `configs/fork_timelines`.
|
The format is described in [`configs/fork_timelines`](../../configs/fork_timelines/README.md#format).
|
||||||
|
|
||||||
## Config sourcing
|
## Config sourcing
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ input:
|
||||||
output: List[bytes48] -- length of two
|
output: List[bytes48] -- length of two
|
||||||
```
|
```
|
||||||
|
|
||||||
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`
|
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
||||||
|
|
||||||
|
|
||||||
## Condition
|
## Condition
|
||||||
|
|
|
@ -11,7 +11,7 @@ input:
|
||||||
output: List[List[bytes48]] -- 3 lists, each a length of two
|
output: List[List[bytes48]] -- 3 lists, each a length of two
|
||||||
```
|
```
|
||||||
|
|
||||||
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`
|
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
||||||
|
|
||||||
|
|
||||||
## Condition
|
## Condition
|
||||||
|
|
|
@ -9,7 +9,7 @@ input: bytes32 -- the private key
|
||||||
output: bytes48 -- the public key
|
output: bytes48 -- the public key
|
||||||
```
|
```
|
||||||
|
|
||||||
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`
|
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
||||||
|
|
||||||
|
|
||||||
## Condition
|
## Condition
|
||||||
|
|
|
@ -12,7 +12,7 @@ input:
|
||||||
output: bytes96 -- expected signature
|
output: bytes96 -- expected signature
|
||||||
```
|
```
|
||||||
|
|
||||||
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`
|
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
||||||
|
|
||||||
|
|
||||||
## Condition
|
## Condition
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
# Test format: SSZ static types
|
# Test format: SSZ static types
|
||||||
|
|
||||||
The goal of this type is to provide clients with a solid reference how the known SSZ objects should be encoded.
|
The goal of this type is to provide clients with a solid reference for how the known SSZ objects should be encoded.
|
||||||
Each object described in the Phase-0 spec is covered.
|
Each object described in the Phase-0 spec is covered.
|
||||||
This is important, as many of the clients aiming to serialize/deserialize objects directly into structs/classes
|
This is important, as many of the clients aiming to serialize/deserialize objects directly into structs/classes
|
||||||
do not support (or have alternatives for) generic SSZ encoding/decoding.
|
do not support (or have alternatives for) generic SSZ encoding/decoding.
|
||||||
|
@ -13,6 +13,7 @@ type_name: string -- string, object name, formatted as in spec. E.g. "BeaconBlo
|
||||||
value: dynamic -- the YAML-encoded value, of the type specified by type_name.
|
value: dynamic -- the YAML-encoded value, of the type specified by type_name.
|
||||||
serialized: bytes -- string, SSZ-serialized data, hex encoded, with prefix 0x
|
serialized: bytes -- string, SSZ-serialized data, hex encoded, with prefix 0x
|
||||||
root: bytes32 -- string, hash-tree-root of the value, hex encoded, with prefix 0x
|
root: bytes32 -- string, hash-tree-root of the value, hex encoded, with prefix 0x
|
||||||
|
signing_root: bytes32 -- string, signing-root of the value, hex encoded, with prefix 0x. Optional, present if type contains ``signature`` field
|
||||||
```
|
```
|
||||||
|
|
||||||
## Condition
|
## Condition
|
||||||
|
@ -20,4 +21,12 @@ root: bytes32 -- string, hash-tree-root of the value, hex encoded, with pre
|
||||||
A test-runner can implement the following assertions:
|
A test-runner can implement the following assertions:
|
||||||
- Serialization: After parsing the `value`, SSZ-serialize it: the output should match `serialized`
|
- Serialization: After parsing the `value`, SSZ-serialize it: the output should match `serialized`
|
||||||
- Hash-tree-root: After parsing the `value`, Hash-tree-root it: the output should match `root`
|
- Hash-tree-root: After parsing the `value`, Hash-tree-root it: the output should match `root`
|
||||||
|
- Optionally also check signing-root, if present.
|
||||||
- Deserialization: SSZ-deserialize the `serialized` value, and see if it matches the parsed `value`
|
- Deserialization: SSZ-deserialize the `serialized` value, and see if it matches the parsed `value`
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
|
||||||
|
**`serialized`**: [SSZ serialization](../../simple-serialize.md#serialization)
|
||||||
|
**`root`** - [hash_tree_root](../../simple-serialize.md#merkleization) function
|
||||||
|
**`signing_root`** - [signing_root](../../simple-serialize.md#self-signed-containers) function
|
||||||
|
|
|
@ -60,7 +60,7 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
This document represents the expected behavior of an "honest validator" with respect to Phase 0 of the Ethereum 2.0 protocol. This document does not distinguish between a "node" (ie. the functionality of following and reading the beacon chain) and a "validator client" (ie. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope.
|
This document represents the expected behavior of an "honest validator" with respect to Phase 0 of the Ethereum 2.0 protocol. This document does not distinguish between a "node" (i.e. the functionality of following and reading the beacon chain) and a "validator client" (i.e. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope.
|
||||||
|
|
||||||
A validator is an entity that participates in the consensus of the Ethereum 2.0 protocol. This is an optional role for users in which they can post ETH as collateral and verify and attest to the validity of blocks to seek financial returns in exchange for building and securing the protocol. This is similar to proof of work networks in which a miner provides collateral in the form of hardware/hash-power to seek returns in exchange for building and securing the protocol.
|
A validator is an entity that participates in the consensus of the Ethereum 2.0 protocol. This is an optional role for users in which they can post ETH as collateral and verify and attest to the validity of blocks to seek financial returns in exchange for building and securing the protocol. This is similar to proof of work networks in which a miner provides collateral in the form of hardware/hash-power to seek returns in exchange for building and securing the protocol.
|
||||||
|
|
||||||
|
@ -101,11 +101,10 @@ In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW
|
||||||
To submit a deposit:
|
To submit a deposit:
|
||||||
|
|
||||||
* Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object.
|
* Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object.
|
||||||
* Let `proof_of_possession` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=DOMAIN_DEPOSIT`.
|
|
||||||
* Set `deposit_data.proof_of_possession = proof_of_possession`.
|
|
||||||
* Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT`.
|
* Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT`.
|
||||||
* Set `deposit_data.amount = amount`.
|
* Set `deposit_data.amount = amount`.
|
||||||
* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit(deposit_input: bytes[512])` along with `serialize(deposit_data)` as the singular `bytes` input along with a deposit of `amount` Gwei.
|
* Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=DOMAIN_DEPOSIT`.
|
||||||
|
* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])` along with a deposit of `amount` Gwei.
|
||||||
|
|
||||||
_Note_: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_DEPOSIT_AMOUNT`.
|
_Note_: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_DEPOSIT_AMOUNT`.
|
||||||
|
|
||||||
|
@ -141,7 +140,7 @@ A validator has two primary responsibilities to the beacon chain -- [proposing b
|
||||||
|
|
||||||
A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function).
|
A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function).
|
||||||
|
|
||||||
There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (eg. at 312500 validators = 10 million ETH, that's once per ~3 weeks).
|
There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (e.g. at 312500 validators = 10 million ETH, that's once per ~3 weeks).
|
||||||
|
|
||||||
#### Block header
|
#### Block header
|
||||||
|
|
||||||
|
|
|
@ -28,9 +28,12 @@ make clean
|
||||||
This runs all the generators.
|
This runs all the generators.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
make gen_yaml_tests
|
make -j 4 gen_yaml_tests
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The `-j N` flag makes the generators run in parallel, with `N` being the amount of cores.
|
||||||
|
|
||||||
|
|
||||||
### Running a single generator
|
### Running a single generator
|
||||||
|
|
||||||
The make file auto-detects generators in the `test_generators/` directory,
|
The make file auto-detects generators in the `test_generators/` directory,
|
||||||
|
|
|
@ -29,7 +29,7 @@ def build_deposit_data(state,
|
||||||
message_hash=signing_root(deposit_data),
|
message_hash=signing_root(deposit_data),
|
||||||
privkey=privkey,
|
privkey=privkey,
|
||||||
domain=spec.get_domain(
|
domain=spec.get_domain(
|
||||||
state.fork,
|
state,
|
||||||
spec.get_current_epoch(state),
|
spec.get_current_epoch(state),
|
||||||
spec.DOMAIN_DEPOSIT,
|
spec.DOMAIN_DEPOSIT,
|
||||||
)
|
)
|
||||||
|
@ -46,7 +46,7 @@ def build_deposit(state,
|
||||||
|
|
||||||
deposit_data = build_deposit_data(state, pubkey, withdrawal_cred, privkey, amount)
|
deposit_data = build_deposit_data(state, pubkey, withdrawal_cred, privkey, amount)
|
||||||
|
|
||||||
item = spec.hash(deposit_data.serialize())
|
item = deposit_data.hash_tree_root()
|
||||||
index = len(deposit_data_leaves)
|
index = len(deposit_data_leaves)
|
||||||
deposit_data_leaves.append(item)
|
deposit_data_leaves.append(item)
|
||||||
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
|
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
|
||||||
|
@ -69,7 +69,7 @@ def build_deposit_for_index(initial_validator_count: int, index: int) -> Tuple[s
|
||||||
)
|
)
|
||||||
state = genesis.create_genesis_state(genesis_deposits)
|
state = genesis.create_genesis_state(genesis_deposits)
|
||||||
|
|
||||||
deposit_data_leaves = [spec.hash(dep.data.serialize()) for dep in genesis_deposits]
|
deposit_data_leaves = [dep.data.hash_tree_root() for dep in genesis_deposits]
|
||||||
|
|
||||||
deposit = build_deposit(
|
deposit = build_deposit(
|
||||||
state,
|
state,
|
||||||
|
|
|
@ -4,7 +4,7 @@ from typing import List
|
||||||
|
|
||||||
|
|
||||||
def create_genesis_state(deposits: List[spec.Deposit]) -> spec.BeaconState:
|
def create_genesis_state(deposits: List[spec.Deposit]) -> spec.BeaconState:
|
||||||
deposit_root = get_merkle_root((tuple([spec.hash(dep.data.serialize()) for dep in deposits])))
|
deposit_root = get_merkle_root((tuple([(dep.data.hash_tree_root()) for dep in deposits])))
|
||||||
|
|
||||||
return spec.get_genesis_beacon_state(
|
return spec.get_genesis_beacon_state(
|
||||||
deposits,
|
deposits,
|
||||||
|
@ -32,7 +32,7 @@ def create_deposits(pubkeys: List[spec.BLSPubkey], withdrawal_cred: List[spec.By
|
||||||
]
|
]
|
||||||
|
|
||||||
# Fill tree with existing deposits
|
# Fill tree with existing deposits
|
||||||
deposit_data_leaves = [spec.hash(data.serialize()) for data in deposit_data]
|
deposit_data_leaves = [data.hash_tree_root() for data in deposit_data]
|
||||||
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
|
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
|
||||||
|
|
||||||
return [
|
return [
|
||||||
|
|
|
@ -1,4 +1,6 @@
|
||||||
# SSZ-static
|
# SSZ-static
|
||||||
|
|
||||||
The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ:
|
The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ:
|
||||||
the serialization and hashing of ETH 2.0 data types
|
the serialization and hashing of ETH 2.0 data types.
|
||||||
|
|
||||||
|
Test-format documentation can be found [here](../../specs/test_formats/ssz_static/README.md).
|
||||||
|
|
|
@ -2,7 +2,11 @@ from random import Random
|
||||||
|
|
||||||
from eth2spec.debug import random_value, encode
|
from eth2spec.debug import random_value, encode
|
||||||
from eth2spec.phase0 import spec
|
from eth2spec.phase0 import spec
|
||||||
from eth2spec.utils.minimal_ssz import hash_tree_root, serialize
|
from eth2spec.utils.minimal_ssz import (
|
||||||
|
hash_tree_root,
|
||||||
|
signing_root,
|
||||||
|
serialize,
|
||||||
|
)
|
||||||
from eth_utils import (
|
from eth_utils import (
|
||||||
to_tuple, to_dict
|
to_tuple, to_dict
|
||||||
)
|
)
|
||||||
|
@ -21,6 +25,8 @@ def create_test_case(rng: Random, name: str, mode: random_value.RandomizationMod
|
||||||
yield "value", encode.encode(value, typ)
|
yield "value", encode.encode(value, typ)
|
||||||
yield "serialized", '0x' + serialize(value).hex()
|
yield "serialized", '0x' + serialize(value).hex()
|
||||||
yield "root", '0x' + hash_tree_root(value).hex()
|
yield "root", '0x' + hash_tree_root(value).hex()
|
||||||
|
if hasattr(value, "signature"):
|
||||||
|
yield "signing_root", '0x' + signing_root(value).hex()
|
||||||
|
|
||||||
|
|
||||||
@to_tuple
|
@to_tuple
|
||||||
|
|
|
@ -19,6 +19,8 @@ Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2sp
|
||||||
|
|
||||||
## Py-tests
|
## Py-tests
|
||||||
|
|
||||||
|
After building, you can install the dependencies for running the `pyspec` tests with `make install_test`
|
||||||
|
|
||||||
These tests are not intended for client-consumption.
|
These tests are not intended for client-consumption.
|
||||||
These tests are sanity tests, to verify if the spec itself is consistent.
|
These tests are sanity tests, to verify if the spec itself is consistent.
|
||||||
|
|
||||||
|
@ -38,8 +40,9 @@ python3 -m venv venv
|
||||||
. venv/bin/activate
|
. venv/bin/activate
|
||||||
pip3 install -r requirements.txt
|
pip3 install -r requirements.txt
|
||||||
```
|
```
|
||||||
Note: make sure to run `make pyspec` from the root of the specs repository,
|
Note: make sure to run `make -B pyspec` from the root of the specs repository,
|
||||||
to build the parts of the pyspec module derived from the markdown specs.
|
to build the parts of the pyspec module derived from the markdown specs.
|
||||||
|
The `-B` flag may be helpful to force-overwrite the `pyspec` output after you made a change to the markdown source files.
|
||||||
|
|
||||||
Run the tests:
|
Run the tests:
|
||||||
```
|
```
|
||||||
|
@ -55,4 +58,4 @@ The pyspec is not a replacement.
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
Same as the spec itself, see LICENSE file in spec repository root.
|
Same as the spec itself, see [LICENSE](../../LICENSE) file in spec repository root.
|
||||||
|
|
|
@ -32,7 +32,7 @@ def test_success(state):
|
||||||
deposit_data_leaves,
|
deposit_data_leaves,
|
||||||
pubkey,
|
pubkey,
|
||||||
privkey,
|
privkey,
|
||||||
spec.MAX_DEPOSIT_AMOUNT,
|
spec.MAX_EFFECTIVE_BALANCE,
|
||||||
)
|
)
|
||||||
|
|
||||||
pre_state.latest_eth1_data.deposit_root = root
|
pre_state.latest_eth1_data.deposit_root = root
|
||||||
|
@ -45,7 +45,7 @@ def test_success(state):
|
||||||
assert len(post_state.validator_registry) == len(state.validator_registry) + 1
|
assert len(post_state.validator_registry) == len(state.validator_registry) + 1
|
||||||
assert len(post_state.balances) == len(state.balances) + 1
|
assert len(post_state.balances) == len(state.balances) + 1
|
||||||
assert post_state.validator_registry[index].pubkey == pubkeys[index]
|
assert post_state.validator_registry[index].pubkey == pubkeys[index]
|
||||||
assert get_balance(post_state, index) == spec.MAX_DEPOSIT_AMOUNT
|
assert get_balance(post_state, index) == spec.MAX_EFFECTIVE_BALANCE
|
||||||
assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count
|
assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count
|
||||||
|
|
||||||
return pre_state, deposit, post_state
|
return pre_state, deposit, post_state
|
||||||
|
@ -56,7 +56,7 @@ def test_success_top_up(state):
|
||||||
deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
|
deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
|
||||||
|
|
||||||
validator_index = 0
|
validator_index = 0
|
||||||
amount = spec.MAX_DEPOSIT_AMOUNT // 4
|
amount = spec.MAX_EFFECTIVE_BALANCE // 4
|
||||||
pubkey = pubkeys[validator_index]
|
pubkey = pubkeys[validator_index]
|
||||||
privkey = privkeys[validator_index]
|
privkey = privkeys[validator_index]
|
||||||
deposit, root, deposit_data_leaves = build_deposit(
|
deposit, root, deposit_data_leaves = build_deposit(
|
||||||
|
@ -95,7 +95,7 @@ def test_wrong_index(state):
|
||||||
deposit_data_leaves,
|
deposit_data_leaves,
|
||||||
pubkey,
|
pubkey,
|
||||||
privkey,
|
privkey,
|
||||||
spec.MAX_DEPOSIT_AMOUNT,
|
spec.MAX_EFFECTIVE_BALANCE,
|
||||||
)
|
)
|
||||||
|
|
||||||
# mess up deposit_index
|
# mess up deposit_index
|
||||||
|
@ -124,7 +124,7 @@ def test_bad_merkle_proof(state):
|
||||||
deposit_data_leaves,
|
deposit_data_leaves,
|
||||||
pubkey,
|
pubkey,
|
||||||
privkey,
|
privkey,
|
||||||
spec.MAX_DEPOSIT_AMOUNT,
|
spec.MAX_EFFECTIVE_BALANCE,
|
||||||
)
|
)
|
||||||
|
|
||||||
# mess up merkle branch
|
# mess up merkle branch
|
||||||
|
|
|
@ -0,0 +1,143 @@
|
||||||
|
from copy import deepcopy
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
import eth2spec.phase0.spec as spec
|
||||||
|
|
||||||
|
from eth2spec.phase0.spec import (
|
||||||
|
get_active_validator_indices,
|
||||||
|
get_balance,
|
||||||
|
get_beacon_proposer_index,
|
||||||
|
get_current_epoch,
|
||||||
|
process_transfer,
|
||||||
|
set_balance,
|
||||||
|
)
|
||||||
|
from tests.helpers import (
|
||||||
|
get_valid_transfer,
|
||||||
|
next_epoch,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# mark entire file as 'transfers'
|
||||||
|
pytestmark = pytest.mark.transfers
|
||||||
|
|
||||||
|
|
||||||
|
def run_transfer_processing(state, transfer, valid=True):
|
||||||
|
"""
|
||||||
|
Run ``process_transfer`` returning the pre and post state.
|
||||||
|
If ``valid == False``, run expecting ``AssertionError``
|
||||||
|
"""
|
||||||
|
post_state = deepcopy(state)
|
||||||
|
|
||||||
|
if not valid:
|
||||||
|
with pytest.raises(AssertionError):
|
||||||
|
process_transfer(post_state, transfer)
|
||||||
|
return state, None
|
||||||
|
|
||||||
|
|
||||||
|
process_transfer(post_state, transfer)
|
||||||
|
|
||||||
|
proposer_index = get_beacon_proposer_index(state)
|
||||||
|
pre_transfer_sender_balance = state.balances[transfer.sender]
|
||||||
|
pre_transfer_recipient_balance = state.balances[transfer.recipient]
|
||||||
|
pre_transfer_proposer_balance = state.balances[proposer_index]
|
||||||
|
sender_balance = post_state.balances[transfer.sender]
|
||||||
|
recipient_balance = post_state.balances[transfer.recipient]
|
||||||
|
assert sender_balance == pre_transfer_sender_balance - transfer.amount - transfer.fee
|
||||||
|
assert recipient_balance == pre_transfer_recipient_balance + transfer.amount
|
||||||
|
assert post_state.balances[proposer_index] == pre_transfer_proposer_balance + transfer.fee
|
||||||
|
|
||||||
|
return state, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_success_non_activated(state):
|
||||||
|
transfer = get_valid_transfer(state)
|
||||||
|
# un-activate so validator can transfer
|
||||||
|
state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
pre_state, post_state = run_transfer_processing(state, transfer)
|
||||||
|
|
||||||
|
return pre_state, transfer, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_success_withdrawable(state):
|
||||||
|
next_epoch(state)
|
||||||
|
|
||||||
|
transfer = get_valid_transfer(state)
|
||||||
|
|
||||||
|
# withdrawable_epoch in past so can transfer
|
||||||
|
state.validator_registry[transfer.sender].withdrawable_epoch = get_current_epoch(state) - 1
|
||||||
|
|
||||||
|
pre_state, post_state = run_transfer_processing(state, transfer)
|
||||||
|
|
||||||
|
return pre_state, transfer, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_success_active_above_max_effective(state):
|
||||||
|
sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
|
||||||
|
amount = spec.MAX_EFFECTIVE_BALANCE // 32
|
||||||
|
set_balance(state, sender_index, spec.MAX_EFFECTIVE_BALANCE + amount)
|
||||||
|
transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0)
|
||||||
|
|
||||||
|
pre_state, post_state = run_transfer_processing(state, transfer)
|
||||||
|
|
||||||
|
return pre_state, transfer, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_active_but_transfer_past_effective_balance(state):
|
||||||
|
sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
|
||||||
|
amount = spec.MAX_EFFECTIVE_BALANCE // 32
|
||||||
|
set_balance(state, sender_index, spec.MAX_EFFECTIVE_BALANCE)
|
||||||
|
transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0)
|
||||||
|
|
||||||
|
pre_state, post_state = run_transfer_processing(state, transfer, False)
|
||||||
|
|
||||||
|
return pre_state, transfer, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_incorrect_slot(state):
|
||||||
|
transfer = get_valid_transfer(state, slot=state.slot+1)
|
||||||
|
# un-activate so validator can transfer
|
||||||
|
state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
pre_state, post_state = run_transfer_processing(state, transfer, False)
|
||||||
|
|
||||||
|
return pre_state, transfer, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_insufficient_balance(state):
|
||||||
|
sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
|
||||||
|
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||||
|
set_balance(state, sender_index, spec.MAX_EFFECTIVE_BALANCE)
|
||||||
|
transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount + 1, fee=0)
|
||||||
|
|
||||||
|
# un-activate so validator can transfer
|
||||||
|
state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
pre_state, post_state = run_transfer_processing(state, transfer, False)
|
||||||
|
|
||||||
|
return pre_state, transfer, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_no_dust(state):
|
||||||
|
sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
|
||||||
|
balance = state.balances[sender_index]
|
||||||
|
transfer = get_valid_transfer(state, sender_index=sender_index, amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, fee=0)
|
||||||
|
|
||||||
|
# un-activate so validator can transfer
|
||||||
|
state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
pre_state, post_state = run_transfer_processing(state, transfer, False)
|
||||||
|
|
||||||
|
return pre_state, transfer, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_invalid_pubkey(state):
|
||||||
|
transfer = get_valid_transfer(state)
|
||||||
|
state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH
|
||||||
|
|
||||||
|
# un-activate so validator can transfer
|
||||||
|
state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
pre_state, post_state = run_transfer_processing(state, transfer, False)
|
||||||
|
|
||||||
|
return pre_state, transfer, post_state
|
|
@ -21,10 +21,12 @@ from eth2spec.phase0.spec import (
|
||||||
DepositData,
|
DepositData,
|
||||||
Eth1Data,
|
Eth1Data,
|
||||||
ProposerSlashing,
|
ProposerSlashing,
|
||||||
|
Transfer,
|
||||||
VoluntaryExit,
|
VoluntaryExit,
|
||||||
# functions
|
# functions
|
||||||
convert_to_indexed,
|
convert_to_indexed,
|
||||||
get_active_validator_indices,
|
get_active_validator_indices,
|
||||||
|
get_balance,
|
||||||
get_attesting_indices,
|
get_attesting_indices,
|
||||||
get_block_root,
|
get_block_root,
|
||||||
get_crosslink_committees_at_slot,
|
get_crosslink_committees_at_slot,
|
||||||
|
@ -78,10 +80,10 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=N
|
||||||
pubkey=pubkey,
|
pubkey=pubkey,
|
||||||
# insecurely use pubkey as withdrawal key as well
|
# insecurely use pubkey as withdrawal key as well
|
||||||
withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:],
|
withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:],
|
||||||
amount=spec.MAX_DEPOSIT_AMOUNT,
|
amount=spec.MAX_EFFECTIVE_BALANCE,
|
||||||
signature=signature,
|
signature=signature,
|
||||||
)
|
)
|
||||||
item = hash(deposit_data.serialize())
|
item = deposit_data.hash_tree_root()
|
||||||
deposit_data_leaves.append(item)
|
deposit_data_leaves.append(item)
|
||||||
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
|
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
|
||||||
root = get_merkle_root((tuple(deposit_data_leaves)))
|
root = get_merkle_root((tuple(deposit_data_leaves)))
|
||||||
|
@ -206,7 +208,7 @@ def build_deposit(state,
|
||||||
amount):
|
amount):
|
||||||
deposit_data = build_deposit_data(state, pubkey, privkey, amount)
|
deposit_data = build_deposit_data(state, pubkey, privkey, amount)
|
||||||
|
|
||||||
item = hash(deposit_data.serialize())
|
item = deposit_data.hash_tree_root()
|
||||||
index = len(deposit_data_leaves)
|
index = len(deposit_data_leaves)
|
||||||
deposit_data_leaves.append(item)
|
deposit_data_leaves.append(item)
|
||||||
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
|
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
|
||||||
|
@ -325,6 +327,48 @@ def get_valid_attestation(state, slot=None):
|
||||||
return attestation
|
return attestation
|
||||||
|
|
||||||
|
|
||||||
|
def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=None):
|
||||||
|
if slot is None:
|
||||||
|
slot = state.slot
|
||||||
|
current_epoch = get_current_epoch(state)
|
||||||
|
if sender_index is None:
|
||||||
|
sender_index = get_active_validator_indices(state, current_epoch)[-1]
|
||||||
|
recipient_index = get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
transfer_pubkey = pubkeys[-1]
|
||||||
|
transfer_privkey = privkeys[-1]
|
||||||
|
|
||||||
|
if fee is None:
|
||||||
|
fee = get_balance(state, sender_index) // 32
|
||||||
|
if amount is None:
|
||||||
|
amount = get_balance(state, sender_index) - fee
|
||||||
|
|
||||||
|
transfer = Transfer(
|
||||||
|
sender=sender_index,
|
||||||
|
recipient=recipient_index,
|
||||||
|
amount=amount,
|
||||||
|
fee=fee,
|
||||||
|
slot=slot,
|
||||||
|
pubkey=transfer_pubkey,
|
||||||
|
signature=ZERO_HASH,
|
||||||
|
)
|
||||||
|
transfer.signature = bls.sign(
|
||||||
|
message_hash=signing_root(transfer),
|
||||||
|
privkey=transfer_privkey,
|
||||||
|
domain=get_domain(
|
||||||
|
state=state,
|
||||||
|
domain_type=spec.DOMAIN_TRANSFER,
|
||||||
|
message_epoch=get_current_epoch(state),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
# ensure withdrawal_credentials reproducable
|
||||||
|
state.validator_registry[transfer.sender].withdrawal_credentials = (
|
||||||
|
spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:]
|
||||||
|
)
|
||||||
|
|
||||||
|
return transfer
|
||||||
|
|
||||||
|
|
||||||
def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0):
|
def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0):
|
||||||
message_hash = AttestationDataAndCustodyBit(
|
message_hash = AttestationDataAndCustodyBit(
|
||||||
data=attestation_data,
|
data=attestation_data,
|
||||||
|
|
|
@ -231,9 +231,9 @@ def test_deposit_in_block(state):
|
||||||
index = len(test_deposit_data_leaves)
|
index = len(test_deposit_data_leaves)
|
||||||
pubkey = pubkeys[index]
|
pubkey = pubkeys[index]
|
||||||
privkey = privkeys[index]
|
privkey = privkeys[index]
|
||||||
deposit_data = build_deposit_data(pre_state, pubkey, privkey, spec.MAX_DEPOSIT_AMOUNT)
|
deposit_data = build_deposit_data(pre_state, pubkey, privkey, spec.MAX_EFFECTIVE_BALANCE)
|
||||||
|
|
||||||
item = hash(deposit_data.serialize())
|
item = deposit_data.hash_tree_root()
|
||||||
test_deposit_data_leaves.append(item)
|
test_deposit_data_leaves.append(item)
|
||||||
tree = calc_merkle_tree_from_leaves(tuple(test_deposit_data_leaves))
|
tree = calc_merkle_tree_from_leaves(tuple(test_deposit_data_leaves))
|
||||||
root = get_merkle_root((tuple(test_deposit_data_leaves)))
|
root = get_merkle_root((tuple(test_deposit_data_leaves)))
|
||||||
|
@ -255,7 +255,7 @@ def test_deposit_in_block(state):
|
||||||
state_transition(post_state, block)
|
state_transition(post_state, block)
|
||||||
assert len(post_state.validator_registry) == len(state.validator_registry) + 1
|
assert len(post_state.validator_registry) == len(state.validator_registry) + 1
|
||||||
assert len(post_state.balances) == len(state.balances) + 1
|
assert len(post_state.balances) == len(state.balances) + 1
|
||||||
assert get_balance(post_state, index) == spec.MAX_DEPOSIT_AMOUNT
|
assert get_balance(post_state, index) == spec.MAX_EFFECTIVE_BALANCE
|
||||||
assert post_state.validator_registry[index].pubkey == pubkeys[index]
|
assert post_state.validator_registry[index].pubkey == pubkeys[index]
|
||||||
|
|
||||||
return pre_state, [block], post_state
|
return pre_state, [block], post_state
|
||||||
|
@ -266,13 +266,13 @@ def test_deposit_top_up(state):
|
||||||
test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
|
test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
|
||||||
|
|
||||||
validator_index = 0
|
validator_index = 0
|
||||||
amount = spec.MAX_DEPOSIT_AMOUNT // 4
|
amount = spec.MAX_EFFECTIVE_BALANCE // 4
|
||||||
pubkey = pubkeys[validator_index]
|
pubkey = pubkeys[validator_index]
|
||||||
privkey = privkeys[validator_index]
|
privkey = privkeys[validator_index]
|
||||||
deposit_data = build_deposit_data(pre_state, pubkey, privkey, amount)
|
deposit_data = build_deposit_data(pre_state, pubkey, privkey, amount)
|
||||||
|
|
||||||
merkle_index = len(test_deposit_data_leaves)
|
merkle_index = len(test_deposit_data_leaves)
|
||||||
item = hash(deposit_data.serialize())
|
item = deposit_data.hash_tree_root()
|
||||||
test_deposit_data_leaves.append(item)
|
test_deposit_data_leaves.append(item)
|
||||||
tree = calc_merkle_tree_from_leaves(tuple(test_deposit_data_leaves))
|
tree = calc_merkle_tree_from_leaves(tuple(test_deposit_data_leaves))
|
||||||
root = get_merkle_root((tuple(test_deposit_data_leaves)))
|
root = get_merkle_root((tuple(test_deposit_data_leaves)))
|
||||||
|
@ -379,6 +379,9 @@ def test_voluntary_exit(state):
|
||||||
|
|
||||||
|
|
||||||
def test_transfer(state):
|
def test_transfer(state):
|
||||||
|
# overwrite default 0 to test
|
||||||
|
spec.MAX_TRANSFERS = 1
|
||||||
|
|
||||||
pre_state = deepcopy(state)
|
pre_state = deepcopy(state)
|
||||||
current_epoch = get_current_epoch(pre_state)
|
current_epoch = get_current_epoch(pre_state)
|
||||||
sender_index = get_active_validator_indices(pre_state, current_epoch)[-1]
|
sender_index = get_active_validator_indices(pre_state, current_epoch)[-1]
|
||||||
|
@ -409,7 +412,7 @@ def test_transfer(state):
|
||||||
spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer_pubkey)[1:]
|
spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer_pubkey)[1:]
|
||||||
)
|
)
|
||||||
# un-activate so validator can transfer
|
# un-activate so validator can transfer
|
||||||
pre_state.validator_registry[sender_index].activation_epoch = spec.FAR_FUTURE_EPOCH
|
pre_state.validator_registry[sender_index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
post_state = deepcopy(pre_state)
|
post_state = deepcopy(pre_state)
|
||||||
#
|
#
|
||||||
|
|
|
@ -0,0 +1,60 @@
|
||||||
|
from copy import deepcopy
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
|
||||||
|
from build.phase0.spec import (
|
||||||
|
get_beacon_proposer_index,
|
||||||
|
cache_state,
|
||||||
|
advance_slot,
|
||||||
|
process_block_header,
|
||||||
|
)
|
||||||
|
from tests.phase0.helpers import (
|
||||||
|
build_empty_block_for_next_slot,
|
||||||
|
)
|
||||||
|
|
||||||
|
# mark entire file as 'header'
|
||||||
|
pytestmark = pytest.mark.header
|
||||||
|
|
||||||
|
|
||||||
|
def prepare_state_for_header_processing(state):
|
||||||
|
cache_state(state)
|
||||||
|
advance_slot(state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_block_header_processing(state, block, valid=True):
|
||||||
|
"""
|
||||||
|
Run ``process_block_header`` returning the pre and post state.
|
||||||
|
If ``valid == False``, run expecting ``AssertionError``
|
||||||
|
"""
|
||||||
|
prepare_state_for_header_processing(state)
|
||||||
|
post_state = deepcopy(state)
|
||||||
|
|
||||||
|
if not valid:
|
||||||
|
with pytest.raises(AssertionError):
|
||||||
|
process_block_header(post_state, block)
|
||||||
|
return state, None
|
||||||
|
|
||||||
|
process_block_header(post_state, block)
|
||||||
|
return state, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_success(state):
|
||||||
|
block = build_empty_block_for_next_slot(state)
|
||||||
|
pre_state, post_state = run_block_header_processing(state, block)
|
||||||
|
return state, block, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_invalid_slot(state):
|
||||||
|
block = build_empty_block_for_next_slot(state)
|
||||||
|
block.slot = state.slot + 2 # invalid slot
|
||||||
|
|
||||||
|
pre_state, post_state = run_block_header_processing(state, block, valid=False)
|
||||||
|
return pre_state, block, None
|
||||||
|
|
||||||
|
|
||||||
|
def test_invalid_previous_block_root(state):
|
||||||
|
block = build_empty_block_for_next_slot(state)
|
||||||
|
block.previous_block_root = b'\12'*32 # invalid prev root
|
||||||
|
|
||||||
|
pre_state, post_state = run_block_header_processing(state, block, valid=False)
|
||||||
|
return pre_state, block, None
|
|
@ -0,0 +1,140 @@
|
||||||
|
from copy import deepcopy
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
import build.phase0.spec as spec
|
||||||
|
|
||||||
|
from build.phase0.spec import (
|
||||||
|
ZERO_HASH,
|
||||||
|
process_deposit,
|
||||||
|
)
|
||||||
|
from tests.phase0.helpers import (
|
||||||
|
build_deposit,
|
||||||
|
privkeys,
|
||||||
|
pubkeys,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# mark entire file as 'voluntary_exits'
|
||||||
|
pytestmark = pytest.mark.voluntary_exits
|
||||||
|
|
||||||
|
|
||||||
|
def test_success(state):
|
||||||
|
pre_state = deepcopy(state)
|
||||||
|
# fill previous deposits with zero-hash
|
||||||
|
deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
|
||||||
|
|
||||||
|
index = len(deposit_data_leaves)
|
||||||
|
pubkey = pubkeys[index]
|
||||||
|
privkey = privkeys[index]
|
||||||
|
deposit, root, deposit_data_leaves = build_deposit(
|
||||||
|
pre_state,
|
||||||
|
deposit_data_leaves,
|
||||||
|
pubkey,
|
||||||
|
privkey,
|
||||||
|
spec.MAX_DEPOSIT_AMOUNT,
|
||||||
|
)
|
||||||
|
|
||||||
|
pre_state.latest_eth1_data.deposit_root = root
|
||||||
|
pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
|
||||||
|
|
||||||
|
post_state = deepcopy(pre_state)
|
||||||
|
|
||||||
|
process_deposit(post_state, deposit)
|
||||||
|
|
||||||
|
assert len(post_state.validator_registry) == len(state.validator_registry) + 1
|
||||||
|
assert len(post_state.validator_balances) == len(state.validator_balances) + 1
|
||||||
|
assert post_state.validator_registry[index].pubkey == pubkeys[index]
|
||||||
|
assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count
|
||||||
|
|
||||||
|
return pre_state, deposit, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_success_top_up(state):
|
||||||
|
pre_state = deepcopy(state)
|
||||||
|
deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
|
||||||
|
|
||||||
|
validator_index = 0
|
||||||
|
amount = spec.MAX_DEPOSIT_AMOUNT // 4
|
||||||
|
pubkey = pubkeys[validator_index]
|
||||||
|
privkey = privkeys[validator_index]
|
||||||
|
deposit, root, deposit_data_leaves = build_deposit(
|
||||||
|
pre_state,
|
||||||
|
deposit_data_leaves,
|
||||||
|
pubkey,
|
||||||
|
privkey,
|
||||||
|
amount,
|
||||||
|
)
|
||||||
|
|
||||||
|
pre_state.latest_eth1_data.deposit_root = root
|
||||||
|
pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
|
||||||
|
pre_balance = pre_state.validator_balances[validator_index]
|
||||||
|
|
||||||
|
post_state = deepcopy(pre_state)
|
||||||
|
|
||||||
|
process_deposit(post_state, deposit)
|
||||||
|
|
||||||
|
assert len(post_state.validator_registry) == len(state.validator_registry)
|
||||||
|
assert len(post_state.validator_balances) == len(state.validator_balances)
|
||||||
|
assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count
|
||||||
|
assert post_state.validator_balances[validator_index] == pre_balance + amount
|
||||||
|
|
||||||
|
return pre_state, deposit, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_wrong_index(state):
|
||||||
|
pre_state = deepcopy(state)
|
||||||
|
deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
|
||||||
|
|
||||||
|
|
||||||
|
index = len(deposit_data_leaves)
|
||||||
|
pubkey = pubkeys[index]
|
||||||
|
privkey = privkeys[index]
|
||||||
|
deposit, root, deposit_data_leaves = build_deposit(
|
||||||
|
pre_state,
|
||||||
|
deposit_data_leaves,
|
||||||
|
pubkey,
|
||||||
|
privkey,
|
||||||
|
spec.MAX_DEPOSIT_AMOUNT,
|
||||||
|
)
|
||||||
|
|
||||||
|
# mess up deposit_index
|
||||||
|
deposit.index = pre_state.deposit_index + 1
|
||||||
|
|
||||||
|
pre_state.latest_eth1_data.deposit_root = root
|
||||||
|
pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
|
||||||
|
|
||||||
|
post_state = deepcopy(pre_state)
|
||||||
|
|
||||||
|
with pytest.raises(AssertionError):
|
||||||
|
process_deposit(post_state, deposit)
|
||||||
|
|
||||||
|
return pre_state, deposit, None
|
||||||
|
|
||||||
|
|
||||||
|
def test_bad_merkle_proof(state):
|
||||||
|
pre_state = deepcopy(state)
|
||||||
|
deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
|
||||||
|
|
||||||
|
index = len(deposit_data_leaves)
|
||||||
|
pubkey = pubkeys[index]
|
||||||
|
privkey = privkeys[index]
|
||||||
|
deposit, root, deposit_data_leaves = build_deposit(
|
||||||
|
pre_state,
|
||||||
|
deposit_data_leaves,
|
||||||
|
pubkey,
|
||||||
|
privkey,
|
||||||
|
spec.MAX_DEPOSIT_AMOUNT,
|
||||||
|
)
|
||||||
|
|
||||||
|
# mess up merkle branch
|
||||||
|
deposit.proof[-1] = spec.ZERO_HASH
|
||||||
|
|
||||||
|
pre_state.latest_eth1_data.deposit_root = root
|
||||||
|
pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
|
||||||
|
|
||||||
|
post_state = deepcopy(pre_state)
|
||||||
|
|
||||||
|
with pytest.raises(AssertionError):
|
||||||
|
process_deposit(post_state, deposit)
|
||||||
|
|
||||||
|
return pre_state, deposit, None
|
|
@ -0,0 +1,175 @@
|
||||||
|
from copy import deepcopy
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
import build.phase0.spec as spec
|
||||||
|
|
||||||
|
from build.phase0.spec import (
|
||||||
|
get_active_validator_indices,
|
||||||
|
get_current_epoch,
|
||||||
|
process_voluntary_exit,
|
||||||
|
)
|
||||||
|
from tests.phase0.helpers import (
|
||||||
|
build_voluntary_exit,
|
||||||
|
pubkey_to_privkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# mark entire file as 'voluntary_exits'
|
||||||
|
pytestmark = pytest.mark.voluntary_exits
|
||||||
|
|
||||||
|
|
||||||
|
def test_success(state):
|
||||||
|
pre_state = deepcopy(state)
|
||||||
|
#
|
||||||
|
# setup pre_state
|
||||||
|
#
|
||||||
|
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
|
||||||
|
pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
|
#
|
||||||
|
# build voluntary exit
|
||||||
|
#
|
||||||
|
current_epoch = get_current_epoch(pre_state)
|
||||||
|
validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0]
|
||||||
|
privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey]
|
||||||
|
|
||||||
|
voluntary_exit = build_voluntary_exit(
|
||||||
|
pre_state,
|
||||||
|
current_epoch,
|
||||||
|
validator_index,
|
||||||
|
privkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
post_state = deepcopy(pre_state)
|
||||||
|
|
||||||
|
#
|
||||||
|
# test valid exit
|
||||||
|
#
|
||||||
|
process_voluntary_exit(post_state, voluntary_exit)
|
||||||
|
|
||||||
|
assert not pre_state.validator_registry[validator_index].initiated_exit
|
||||||
|
assert post_state.validator_registry[validator_index].initiated_exit
|
||||||
|
|
||||||
|
return pre_state, voluntary_exit, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def test_validator_not_active(state):
    """A voluntary exit for a validator that was never activated must be
    rejected (process_voluntary_exit raises AssertionError).

    Returns (pre_state, operation, None); None marks an invalid operation.
    """
    pre_state = deepcopy(state)

    # Pick an active validator first, then make it look never-activated.
    epoch = get_current_epoch(pre_state)
    index = get_active_validator_indices(pre_state.validator_registry, epoch)[0]
    privkey = pubkey_to_privkey[pre_state.validator_registry[index].pubkey]

    pre_state.validator_registry[index].activation_epoch = spec.FAR_FUTURE_EPOCH

    voluntary_exit = build_voluntary_exit(pre_state, epoch, index, privkey)

    with pytest.raises(AssertionError):
        process_voluntary_exit(pre_state, voluntary_exit)

    return pre_state, voluntary_exit, None
|
||||||
|
|
||||||
|
|
||||||
|
def test_validator_already_exited(state):
    """A voluntary exit must be rejected when the validator already has an
    ``exit_epoch`` scheduled.

    Returns (pre_state, operation, None); None marks an invalid operation.
    """
    pre_state = deepcopy(state)

    # Age the chain so the exit would otherwise be eligible.
    pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

    epoch = get_current_epoch(pre_state)
    index = get_active_validator_indices(pre_state.validator_registry, epoch)[0]
    privkey = pubkey_to_privkey[pre_state.validator_registry[index].pubkey]

    # Mark the validator as already on its way out.
    pre_state.validator_registry[index].exit_epoch = epoch + 2

    voluntary_exit = build_voluntary_exit(pre_state, epoch, index, privkey)

    with pytest.raises(AssertionError):
        process_voluntary_exit(pre_state, voluntary_exit)

    return pre_state, voluntary_exit, None
|
||||||
|
|
||||||
|
|
||||||
|
def test_validator_already_initiated_exit(state):
    """A voluntary exit must be rejected when the validator has already
    initiated an exit.

    Returns (pre_state, operation, None); None marks an invalid operation.
    """
    pre_state = deepcopy(state)

    # Age the chain so the exit would otherwise be eligible.
    pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

    epoch = get_current_epoch(pre_state)
    index = get_active_validator_indices(pre_state.validator_registry, epoch)[0]
    privkey = pubkey_to_privkey[pre_state.validator_registry[index].pubkey]

    # The validator has already asked to leave.
    pre_state.validator_registry[index].initiated_exit = True

    voluntary_exit = build_voluntary_exit(pre_state, epoch, index, privkey)

    with pytest.raises(AssertionError):
        process_voluntary_exit(pre_state, voluntary_exit)

    return pre_state, voluntary_exit, None
|
||||||
|
|
||||||
|
|
||||||
|
def test_validator_not_active_long_enough(state):
    """A voluntary exit must be rejected when the validator has not been
    active for at least PERSISTENT_COMMITTEE_PERIOD epochs.

    Returns (pre_state, operation, None); None marks an invalid operation.
    """
    pre_state = deepcopy(state)
    #
    # setup pre_state — note the state is deliberately NOT advanced by
    # PERSISTENT_COMMITTEE_PERIOD epochs here, unlike the other exit tests.
    #
    current_epoch = get_current_epoch(pre_state)
    validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0]
    privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey]

    # NOTE(review): this line and its original comment ("but validator already
    # has initiated exit") appear copy-pasted from
    # test_validator_already_initiated_exit; forcing initiated_exit=True means
    # the failure below may not isolate the "not active long enough" condition
    # this test is named for — confirm intent.
    pre_state.validator_registry[validator_index].initiated_exit = True

    #
    # build voluntary exit
    #
    voluntary_exit = build_voluntary_exit(
        pre_state,
        current_epoch,
        validator_index,
        privkey,
    )

    # Sanity check: the validator really has NOT been active long enough.
    assert (
        current_epoch - pre_state.validator_registry[validator_index].activation_epoch <
        spec.PERSISTENT_COMMITTEE_PERIOD
    )

    with pytest.raises(AssertionError):
        process_voluntary_exit(pre_state, voluntary_exit)

    return pre_state, voluntary_exit, None
|
|
@ -0,0 +1,203 @@
|
||||||
|
from copy import deepcopy
|
||||||
|
|
||||||
|
from py_ecc import bls
|
||||||
|
|
||||||
|
import build.phase0.spec as spec
|
||||||
|
from build.phase0.utils.minimal_ssz import signed_root
|
||||||
|
from build.phase0.spec import (
|
||||||
|
# constants
|
||||||
|
EMPTY_SIGNATURE,
|
||||||
|
# SSZ
|
||||||
|
AttestationData,
|
||||||
|
Deposit,
|
||||||
|
DepositInput,
|
||||||
|
DepositData,
|
||||||
|
Eth1Data,
|
||||||
|
VoluntaryExit,
|
||||||
|
# functions
|
||||||
|
get_block_root,
|
||||||
|
get_current_epoch,
|
||||||
|
get_domain,
|
||||||
|
get_empty_block,
|
||||||
|
get_epoch_start_slot,
|
||||||
|
get_genesis_beacon_state,
|
||||||
|
verify_merkle_branch,
|
||||||
|
hash,
|
||||||
|
)
|
||||||
|
from build.phase0.utils.merkle_minimal import (
|
||||||
|
calc_merkle_tree_from_leaves,
|
||||||
|
get_merkle_proof,
|
||||||
|
get_merkle_root,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Deterministic test key material: private keys 1..1000 and their BLS pubkeys,
# plus a reverse map used to look a signing key up from a registry entry.
privkeys = list(range(1, 1001))
pubkeys = [bls.privtopub(k) for k in privkeys]
pubkey_to_privkey = dict(zip(pubkeys, privkeys))
|
||||||
|
|
||||||
|
|
||||||
|
def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None):
    """Build *num_validators* mock genesis deposits and the deposit-tree root.

    The i-th deposit uses pubkeys[i], MAX_DEPOSIT_AMOUNT, a zero timestamp and
    a dummy proof-of-possession. Returns (genesis_validator_deposits, root).
    """
    # NOTE(review): a caller-supplied *empty* list is replaced by a fresh one
    # here (falsy check, not `is None`), so such a caller's list would not be
    # populated — confirm that is intended.
    if not deposit_data_leaves:
        deposit_data_leaves = []
    deposit_timestamp = 0
    proof_of_possession = b'\x33' * 96  # dummy signature; not checked at genesis

    deposit_data_list = []
    for i in range(num_validators):
        pubkey = pubkeys[i]
        deposit_data = DepositData(
            amount=spec.MAX_DEPOSIT_AMOUNT,
            timestamp=deposit_timestamp,
            deposit_input=DepositInput(
                pubkey=pubkey,
                # insecurely use pubkey as withdrawal key as well
                withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:],
                proof_of_possession=proof_of_possession,
            ),
        )
        item = hash(deposit_data.serialize())
        deposit_data_leaves.append(item)
        # NOTE(review): the tree, root and branch are rebuilt and re-verified
        # on every iteration (O(n^2) overall) against the *partial* leaf list —
        # presumably deliberate extra verification; confirm before optimizing.
        tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
        root = get_merkle_root((tuple(deposit_data_leaves)))
        proof = list(get_merkle_proof(tree, item_index=i))
        assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, i, root)
        deposit_data_list.append(deposit_data)

    # Re-derive each branch against the final (complete) tree left over from
    # the last loop iteration above.
    genesis_validator_deposits = []
    for i in range(num_validators):
        genesis_validator_deposits.append(Deposit(
            proof=list(get_merkle_proof(tree, item_index=i)),
            index=i,
            deposit_data=deposit_data_list[i]
        ))
    return genesis_validator_deposits, root
|
||||||
|
|
||||||
|
|
||||||
|
def create_genesis_state(num_validators, deposit_data_leaves=None):
    """Return a genesis beacon state seeded with *num_validators* mock deposits.

    *deposit_data_leaves* is forwarded to the deposit builder so callers can
    share a leaf list across calls.
    """
    deposits, deposit_root = create_mock_genesis_validator_deposits(
        num_validators,
        deposit_data_leaves,
    )
    eth1_data = Eth1Data(
        deposit_root=deposit_root,
        deposit_count=len(deposits),
        block_hash=spec.ZERO_HASH,
    )
    return get_genesis_beacon_state(
        deposits,
        genesis_time=0,
        genesis_eth1_data=eth1_data,
    )
|
||||||
|
|
||||||
|
|
||||||
|
def force_registry_change_at_next_epoch(state):
    """Mutate *state* so a validator-registry update fires at the next epoch
    transition (finalization and crosslink conditions artificially satisfied).
    """
    finalized = get_current_epoch(state) - 1
    state.finalized_epoch = finalized
    # Every shard must have crosslinked since the registry last updated.
    for crosslink in state.latest_crosslinks:
        crosslink.epoch = finalized
    state.validator_registry_update_epoch = finalized - 1
|
||||||
|
|
||||||
|
|
||||||
|
def build_empty_block_for_next_slot(state):
    """Return an empty block for slot ``state.slot + 1`` whose parent root
    points at the current head implied by *state*.
    """
    # The latest block header's state root is filled in lazily by the state
    # transition; patch a copy before hashing it as the parent.
    parent_header = deepcopy(state.latest_block_header)
    if parent_header.state_root == spec.ZERO_HASH:
        parent_header.state_root = state.hash_tree_root()

    block = get_empty_block()
    block.slot = state.slot + 1
    block.previous_block_root = signed_root(parent_header)
    return block
|
||||||
|
|
||||||
|
|
||||||
|
def build_deposit_data(state, pubkey, privkey, amount):
    """Return a DepositData of *amount* for *pubkey*, with a valid
    proof-of-possession signed by *privkey* under the DOMAIN_DEPOSIT domain.
    """
    deposit_input = DepositInput(
        pubkey=pubkey,
        # insecurely use pubkey as withdrawal key as well (test-only shortcut)
        withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:],
        proof_of_possession=EMPTY_SIGNATURE,
    )
    # Sign over the input with its signature field still empty, then fill it in.
    deposit_input.proof_of_possession = bls.sign(
        message_hash=signed_root(deposit_input),
        privkey=privkey,
        domain=get_domain(
            state.fork,
            get_current_epoch(state),
            spec.DOMAIN_DEPOSIT,
        ),
    )
    return DepositData(
        amount=amount,
        timestamp=0,
        deposit_input=deposit_input,
    )
|
||||||
|
|
||||||
|
|
||||||
|
def build_attestation_data(state, slot, shard):
    """Construct an AttestationData for *slot* on *shard* from the view in
    *state*.

    Requires ``state.slot >= slot``. The beacon block root is taken from the
    implied head, the target root from the epoch boundary block, and the
    source from the state's current/previous justified checkpoint.
    """
    assert state.slot >= slot

    block_root = build_empty_block_for_next_slot(state).previous_block_root

    epoch_start_slot = get_epoch_start_slot(get_current_epoch(state))
    if epoch_start_slot == slot:
        epoch_boundary_root = block_root
    else:
        # BUG FIX: the call's result was previously discarded, leaving
        # epoch_boundary_root unbound (NameError) on this branch.
        epoch_boundary_root = get_block_root(state, epoch_start_slot)

    # Attestations for slots before the epoch boundary vote the previous
    # justified checkpoint; later ones vote the current one.
    if slot < epoch_start_slot:
        justified_block_root = state.previous_justified_root
    else:
        justified_block_root = state.current_justified_root

    return AttestationData(
        slot=slot,
        shard=shard,
        beacon_block_root=block_root,
        source_epoch=state.current_justified_epoch,
        source_root=justified_block_root,
        target_root=epoch_boundary_root,
        crosslink_data_root=spec.ZERO_HASH,
        previous_crosslink=deepcopy(state.latest_crosslinks[shard]),
    )
|
||||||
|
|
||||||
|
|
||||||
|
def build_voluntary_exit(state, epoch, validator_index, privkey):
    """Return a VoluntaryExit for *validator_index* at *epoch*, signed by
    *privkey* under the DOMAIN_VOLUNTARY_EXIT domain.
    """
    exit_operation = VoluntaryExit(
        epoch=epoch,
        validator_index=validator_index,
        signature=EMPTY_SIGNATURE,
    )
    # Sign over the message with its signature field still empty.
    domain = get_domain(
        fork=state.fork,
        epoch=epoch,
        domain_type=spec.DOMAIN_VOLUNTARY_EXIT,
    )
    exit_operation.signature = bls.sign(
        message_hash=signed_root(exit_operation),
        privkey=privkey,
        domain=domain,
    )
    return exit_operation
|
||||||
|
|
||||||
|
|
||||||
|
def build_deposit(state,
                  deposit_data_leaves,
                  pubkey,
                  privkey,
                  amount):
    """Append a signed deposit for *pubkey* to *deposit_data_leaves* (mutated
    in place) and return ``(deposit, root, deposit_data_leaves)`` where *root*
    is the Merkle root over the extended leaf list.
    """
    deposit_data = build_deposit_data(state, pubkey, privkey, amount)
    leaf = hash(deposit_data.serialize())

    index = len(deposit_data_leaves)
    deposit_data_leaves.append(leaf)

    leaves = tuple(deposit_data_leaves)
    tree = calc_merkle_tree_from_leaves(leaves)
    root = get_merkle_root(leaves)
    proof = list(get_merkle_proof(tree, item_index=index))
    # Sanity-check the freshly built branch before handing it out.
    assert verify_merkle_branch(leaf, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root)

    deposit = Deposit(
        proof=list(proof),
        index=index,
        deposit_data=deposit_data,
    )
    return deposit, root, deposit_data_leaves
|
|
@ -0,0 +1,52 @@
|
||||||
|
from .minimal_ssz import hash_tree_root
|
||||||
|
|
||||||
|
|
||||||
|
def jsonize(value, typ, include_hash_tree_roots=False):
    """Recursively convert an SSZ *value* of type *typ* to JSON-compatible data.

    uintN -> int, 'bool' -> bool, byteN/bytes -> '0x'-hex string,
    [elem_typ] -> list, and SSZ containers (objects with a ``fields`` dict)
    -> dict keyed by field name. With *include_hash_tree_roots* set, a
    ``<field>_hash_tree_root`` entry is emitted next to each container field
    and a ``hash_tree_root`` entry for the container itself.

    Raises Exception for an unrecognized *typ*.
    """
    if isinstance(typ, str) and typ[:4] == 'uint':
        return value
    elif typ == 'bool':
        assert value in (True, False)
        return value
    elif isinstance(typ, list):
        # Homogeneous list: element type is typ[0].
        return [jsonize(element, typ[0], include_hash_tree_roots) for element in value]
    elif isinstance(typ, str) and typ[:4] == 'byte':
        return '0x' + value.hex()
    elif hasattr(typ, 'fields'):
        ret = {}
        for field, subtype in typ.fields.items():
            ret[field] = jsonize(getattr(value, field), subtype, include_hash_tree_roots)
            if include_hash_tree_roots:
                ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex()
        if include_hash_tree_roots:
            ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex()
        return ret
    else:
        # Fix: surface the offending value/type in the exception message
        # instead of a stray debug print() to stdout.
        raise Exception("Type not recognized: value=%r typ=%r" % (value, typ))
|
||||||
|
def dejsonize(json, typ):
    """Inverse of ``jsonize``: rebuild an SSZ value of type *typ* from JSON
    data. (Note: the *json* parameter shadows the stdlib ``json`` module.)

    When the input carries embedded ``*_hash_tree_root`` / ``hash_tree_root``
    entries, they are asserted against roots recomputed from the rebuilt
    values.

    Raises Exception for an unrecognized *typ*.
    """
    if isinstance(typ, str) and typ[:4] == 'uint':
        return json
    elif typ == 'bool':
        assert json in (True, False)
        return json
    elif isinstance(typ, list):
        # Homogeneous list: element type is typ[0].
        return [dejsonize(element, typ[0]) for element in json]
    elif isinstance(typ, str) and typ[:4] == 'byte':
        return bytes.fromhex(json[2:])
    elif hasattr(typ, 'fields'):
        temp = {}
        for field, subtype in typ.fields.items():
            temp[field] = dejsonize(json[field], subtype)
            # Verify the per-field root if the JSON carried one.
            if field + "_hash_tree_root" in json:
                assert(json[field + "_hash_tree_root"][2:] ==
                       hash_tree_root(temp[field], subtype).hex())
        ret = typ(**temp)
        if "hash_tree_root" in json:
            assert(json["hash_tree_root"][2:] ==
                   hash_tree_root(ret, typ).hex())
        return ret
    else:
        # Fix: surface the offending value/type in the exception message
        # instead of a stray debug print() to stdout.
        raise Exception("Type not recognized: json=%r typ=%r" % (json, typ))
|
Loading…
Reference in New Issue