From 2605dfba082ed57ad08951ab393483d6bf64a4f4 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Mon, 17 Jun 2019 11:16:00 -0400 Subject: [PATCH 001/250] Updates to SSZ partials --- specs/light_client/merkle_proofs.md | 226 +++++++++++++++++----------- 1 file changed, 140 insertions(+), 86 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index f009d9737..85d859a54 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -64,43 +64,71 @@ y_data_root len(y) We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo.y[5].w`. We'll describe paths as lists, which can have two representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` values, in these cases (assuming the fields of `foo` in order are `x` then `y`, and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`. ```python -def path_to_encoded_form(obj: Any, path: List[Union[str, int]]) -> List[int]: - if len(path) == 0: - return [] - elif isinstance(path[0], "__len__"): - assert len(path) == 1 - return [LENGTH_FLAG] - elif isinstance(path[0], str) and hasattr(obj, "fields"): - return [list(obj.fields.keys()).index(path[0])] + path_to_encoded_form(getattr(obj, path[0]), path[1:]) - elif isinstance(obj, (Vector, List)): - return [path[0]] + path_to_encoded_form(obj[path[0]], path[1:]) +def item_length(typ: Type) -> int: + """ + Returns the number of bytes in a basic type, or 32 (a full hash) for compound types. 
+ """ + if typ == bool: + return 1 + elif issubclass(typ, uint): + return typ.byte_len else: - raise Exception("Unknown type / path") -``` - -We can now define a function `get_generalized_indices(object: Any, path: List[int], root: int=1) -> List[int]` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access. - -```python -def get_generalized_indices(obj: Any, path: List[int], root: int=1) -> List[int]: - if len(path) == 0: - return [root] - elif isinstance(obj, Vector): - items_per_chunk = (32 // len(serialize(x))) if isinstance(x, int) else 1 - new_root = root * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk - return get_generalized_indices(obj[path[0]], path[1:], new_root) - elif isinstance(obj, List) and path[0] == LENGTH_FLAG: - return [root * 2 + 1] - elif isinstance(obj, List) and isinstance(path[0], int): - assert path[0] < len(obj) - items_per_chunk = (32 // len(serialize(x))) if isinstance(x, int) else 1 - new_root = root * 2 * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk - return [root *2 + 1] + get_generalized_indices(obj[path[0]], path[1:], new_root) - elif hasattr(obj, "fields"): - field = list(fields.keys())[path[0]] - new_root = root * next_power_of_2(len(fields)) + path[0] - return get_generalized_indices(getattr(obj, field), path[1:], new_root) + return 32 + + +def get_elem_type(typ: Type, index: int) -> Type: + """ + Returns the type of the element of an object of the given type with the given index + or member variable name (eg. 
`7` for `x[7]`, `"foo"` for `x.foo`) + """ + return typ.get_fields_dict()[index] if is_container_type(typ) else typ.elem_type + + +def get_chunk_count(typ: Type) -> int: + """ + Returns the number of hashes needed to represent the top-level elements in the given type + (eg. `x.foo` or `x[7]` but not `x[7].bar` or `x.foo.baz`). In all cases except lists/vectors + of basic types, this is simply the number of top-level elements, as each element gets one + hash. For lists/vectors of basic types, it is often fewer because multiple basic elements + can be packed into one 32-byte chunk. + """ + if is_basic_type(typ): + return 1 + elif is_list_kind(typ) or is_vector_kind(typ): + return (typ.length * item_length(typ.elem_type) + 31) // 32 else: - raise Exception("Unknown type / path") + return len(typ.get_fields()) + + +def get_item_position(typ: Type, index: Union[int, str]) -> Tuple[int, int, int]: + """ + Returns three variables: (i) the index of the chunk in which the given element of the item is + represented, (ii) the starting byte position, (iii) the ending byte position. For example for + a 6-item list of uint64 values, index=2 will return (0, 16, 24), index=5 will return (1, 8, 16) + """ + if is_list_kind(typ) or is_vector_kind(typ): + start = index * item_length(typ.elem_type) + return start // 32, start % 32, start % 32 + item_length(typ.elem_type) + elif is_container_type(typ): + return typ.get_field_names().index(index), 0, item_length(get_elem_type(typ, index)) + else: + raise Exception("Only lists/vectors/containers supported") + + +def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> int: + """ + Converts a path (eg. `[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for + `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree. 
+ """ + for p in path: + assert not is_basic_type(typ) # If we descend to a basic type, the path cannot continue further + if p == '__len__': + typ, root = uint256, root * 2 + 1 if is_list_kind(typ) else None + else: + pos, _, _ = get_item_position(typ, p) + root = root * (2 if is_list_kind(typ) else 1) * next_power_of_two(get_chunk_count(typ)) + pos + typ = get_elem_type(typ, p) + return root ``` ## Merkle multiproofs @@ -116,72 +144,98 @@ x x . . . . x * . are unused nodes, * are used nodes, x are the values we are trying to prove. Notice how despite being a multiproof for 3 values, it requires only 3 auxiliary nodes, only one node more than would be required to prove a single value. Normally the efficiency gains are not quite that extreme, but the savings relative to individual Merkle proofs are still significant. As a rule of thumb, a multiproof for k nodes at the same level of an n-node tree has size `k * (n/k + log(n/k))`. -Here is code for creating and verifying a multiproof. First, a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: +First, we provide a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: -```python -def get_proof_indices(tree_indices: List[int]) -> List[int]: - # Get all indices touched by the proof - maximal_indices = set() - for i in tree_indices: - x = i - while x > 1: - maximal_indices.add(x ^ 1) - x //= 2 - maximal_indices = tree_indices + sorted(list(maximal_indices))[::-1] - # Get indices that cannot be recalculated from earlier indices - redundant_indices = set() - proof = [] - for index in maximal_indices: - if index not in redundant_indices: - proof.append(index) - while index > 1: - redundant_indices.add(index) - if (index ^ 1) not in redundant_indices: - break - index //= 2 - return [i for i in proof if i not in tree_indices] +``` +def 
get_branch_indices(tree_index: int) -> List[int]: + """ + Get the generalized indices of the sister chunks along the path from the chunk with the + given tree index to the root. + """ + o = [tree_index ^ 1] + while o[-1] > 1: + o.append((o[-1] // 2) ^ 1) + return o[:-1] + +def get_expanded_indices(indices: List[int]) -> List[int]: + """ + Get the generalized indices of all chunks in the tree needed to prove the chunks with the given + generalized indices. + """ + branches = set() + for index in indices: + branches = branches.union(set(get_branch_indices(index) + [index])) + return sorted(list([x for x in branches if x*2 not in branches or x*2+1 not in branches]))[::-1] ``` -Generating a proof is simply a matter of taking the node of the SSZ hash tree with the union of the given generalized indices for each index given by `get_proof_indices`, and outputting the list of nodes in the same order. +Generating a proof that covers paths `p1 ... pn` is simply a matter of taking the chunks in the SSZ hash tree with generalized indices `get_expanded_indices([p1 ... pn])`. 
-Here is the verification function: +We now provide the bulk of the proving machinery, a function that takes a `{generalized_index: chunk}` map and fills in chunks that can be inferred (inferring the parent by hashing its two children): ```python -def verify_multi_proof(root: Bytes32, indices: List[int], leaves: List[Bytes32], proof: List[Bytes32]) -> bool: - tree = {} - for index, leaf in zip(indices, leaves): - tree[index] = leaf - for index, proof_item in zip(get_proof_indices(indices), proof): - tree[index] = proof_item - index_queue = sorted(tree.keys())[:-1] - i = 0 - while i < len(index_queue): - index = index_queue[i] - if index >= 2 and index ^ 1 in tree: - tree[index // 2] = hash(tree[index - index % 2] + tree[index - index % 2 + 1]) - index_queue.append(index // 2) - i += 1 - return (indices == []) or (1 in tree and tree[1] == root) +def fill(objects: Dict[int, Bytes32]) -> Dict[int, Bytes32]: + """ + Fills in chunks that can be inferred from other chunks. For a set of chunks that constitutes + a valid proof, this includes the root (generalized index 1). + """ + objects = {k: v for k, v in objects.items()} + keys = sorted(objects.keys())[::-1] + pos = 0 + while pos < len(keys): + k = keys[pos] + if k in objects and k ^ 1 in objects and k // 2 not in objects: + objects[k // 2] = hash(objects[k & - 2] + objects[k | 1]) + keys.append(k // 2) + pos += 1 + # Completeness and consistency check + assert 1 in objects + for k in objects: + if k > 1: + assert objects[k // 2] == hash(objects[k & -2] + objects[k | 1]) + return objects ``` ## MerklePartial -We define: +We define a container that encodes an SSZ partial, and provide the methods for converting it into a `{generalized_index: chunk}` map, for which we provide a method to extract individual values. To determine the hash tree root of an object represented by an SSZ partial, simply check `decode_ssz_partial(partial)[1]`. 
### `SSZMerklePartial` - ```python -{ - "root": "bytes32", - "indices": ["uint64"], - "values": ["bytes32"], - "proof": ["bytes32"] -} +class SSZMerklePartial(Container): + indices: List[uint64, 2**32] + chunks: List[Bytes32, 2**32] ``` -### Proofs for execution +### `decode_ssz_partial` -We define `MerklePartial(f, arg1, arg2..., focus=0)` as being a `SSZMerklePartial` object wrapping a Merkle multiproof of the set of nodes in the hash tree of the SSZ object `arg[focus]` that is needed to authenticate the parts of the object needed to compute `f(arg1, arg2...)`. +```python +def decode_ssz_partial(encoded: SSZMerklePartial) -> Dict[int, Bytes32]: + """ + Decodes an encoded SSZ partial into a generalized index -> chunk map, and verify hash consistency. + """ + full_indices = get_expanded_indices(encoded.indices) + return fill({k:v for k,v in zip(full_indices, encoded.chunks)}) +``` -Ideally, any function which accepts an SSZ object should also be able to accept a `SSZMerklePartial` object as a substitute. +### `extract_value_at_path` + +```python +def extract_value_at_path(chunks: Dict[int, Bytes32], typ: Type, path: List[Union[int, str]]) -> Any: + """ + Provides the value of the element in the object represented by the given encoded SSZ partial at + the given path. Returns a KeyError if that path is not covered by this SSZ partial. + """ + root = 1 + for p in path: + if p == '__len__': + return deserialize_basic(chunks[root * 2 + 1][:8], uint64) + if is_list_kind(typ): + assert 0 <= p < deserialize_basic(chunks[root * 2 + 1][:8], uint64) + pos, start, end = get_item_position(typ, p) + root = root * (2 if is_list_kind(typ) else 1) * next_power_of_two(get_chunk_count(typ)) + pos + typ = get_elem_type(typ, p) + return deserialize_basic(chunks[root][start: end], typ) +``` + +Here [link TBD] is a python implementation of SSZ partials that represents them as a class that can be read and written to just like the underlying objects, so you can eg. 
perform state transitions on SSZ partials and compute the resulting root From 7e3318318d0d2a534f11cc2d0ff671cfe211fd1f Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 23 Jun 2019 00:29:45 -0400 Subject: [PATCH 002/250] Updated to newer SSZ --- specs/light_client/merkle_proofs.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 85d859a54..b058be7ca 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -94,7 +94,7 @@ def get_chunk_count(typ: Type) -> int: """ if is_basic_type(typ): return 1 - elif is_list_kind(typ) or is_vector_kind(typ): + elif issubclass(typ, (List, Vector, Bytes, BytesN)): return (typ.length * item_length(typ.elem_type) + 31) // 32 else: return len(typ.get_fields()) @@ -106,7 +106,7 @@ def get_item_position(typ: Type, index: Union[int, str]) -> Tuple[int, int, int] represented, (ii) the starting byte position, (iii) the ending byte position. 
For example for a 6-item list of uint64 values, index=2 will return (0, 16, 24), index=5 will return (1, 8, 16) """ - if is_list_kind(typ) or is_vector_kind(typ): + if issubclass(typ, (List, Vector, Bytes, BytesN)): start = index * item_length(typ.elem_type) return start // 32, start % 32, start % 32 + item_length(typ.elem_type) elif is_container_type(typ): @@ -123,10 +123,10 @@ def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> int: for p in path: assert not is_basic_type(typ) # If we descend to a basic type, the path cannot continue further if p == '__len__': - typ, root = uint256, root * 2 + 1 if is_list_kind(typ) else None + typ, root = uint256, root * 2 + 1 if issubclass(typ, (List, Bytes)) else None else: pos, _, _ = get_item_position(typ, p) - root = root * (2 if is_list_kind(typ) else 1) * next_power_of_two(get_chunk_count(typ)) + pos + root = root * (2 if issubclass(typ, (List, Bytes)) else 1) * next_power_of_two(get_chunk_count(typ)) + pos typ = get_elem_type(typ, p) return root ``` @@ -230,10 +230,10 @@ def extract_value_at_path(chunks: Dict[int, Bytes32], typ: Type, path: List[Unio for p in path: if p == '__len__': return deserialize_basic(chunks[root * 2 + 1][:8], uint64) - if is_list_kind(typ): + if iissubclass(typ, (List, Bytes)): assert 0 <= p < deserialize_basic(chunks[root * 2 + 1][:8], uint64) pos, start, end = get_item_position(typ, p) - root = root * (2 if is_list_kind(typ) else 1) * next_power_of_two(get_chunk_count(typ)) + pos + root = root * (2 if issubclass(typ, (List, Bytes)) else 1) * next_power_of_two(get_chunk_count(typ)) + pos typ = get_elem_type(typ, p) return deserialize_basic(chunks[root][start: end], typ) ``` From 9c6ba9cd0f98bb0101d68d75e289e219ff55f31c Mon Sep 17 00:00:00 2001 From: Chih Cheng Liang Date: Wed, 24 Jul 2019 13:39:04 +0800 Subject: [PATCH 003/250] Fix md table --- specs/core/1_shard-data-chains.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/core/1_shard-data-chains.md 
b/specs/core/1_shard-data-chains.md index fb4f353ef..00737a87a 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -52,6 +52,7 @@ This document describes the shard data layer and the shard fork choice rule in P ### Initial values | Name | Value | +| - | - | | `PHASE_1_FORK_EPOCH` | **TBD** | | `PHASE_1_FORK_SLOT` | **TBD** | | `GENESIS_SHARD_SLOT` | 0 | From 6f208b6f4556e914a2fcd47eae35612f856e1c4a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 25 Jul 2019 10:18:10 +1000 Subject: [PATCH 004/250] Clarify length bit for Bitlist merklization --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 802adc5a6..1e12aecfe 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -187,7 +187,7 @@ We first define helper functions: * `List[B, N]` and `Vector[B, N]`, where `B` is a basic type: `(N * size_of(B) + 31) // 32` (dividing by chunk size, rounding up) * `List[C, N]` and `Vector[C, N]`, where `C` is a composite type: `N` * containers: `len(fields)` -* `bitfield_bytes(bits)`: return the bits of the bitlist or bitvector, packed in bytes, aligned to the start. Exclusive length-delimiting bit for bitlists. +* `bitfield_bytes(bits)`: return the bits of the bitlist or bitvector, packed in bytes, aligned to the start. Length-delimiting bit for bitlists is excluded. * `pack`: Given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks. * `next_pow_of_two(i)`: get the next power of 2 of `i`, if not already a power of 2, with 0 mapping to 1. 
Examples: `0->1, 1->1, 2->2, 3->4, 4->4, 6->8, 9->16` * `merkleize(chunks, limit=None)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, merkleize the chunks, and return the root: From 3c575ceee2f8fe4c38d04e2955536c593095caf3 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Fri, 26 Jul 2019 09:15:36 -0700 Subject: [PATCH 005/250] The key in the provided test data is `block` not `block_header`. --- specs/test_formats/operations/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/operations/README.md b/specs/test_formats/operations/README.md index 37c5df498..c7c7d93ad 100644 --- a/specs/test_formats/operations/README.md +++ b/specs/test_formats/operations/README.md @@ -24,7 +24,7 @@ Operations: |-------------------------|----------------------|----------------------|--------------------------------------------------------| | `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` | | `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` | -| `block_header` | `Block` | `block` | `process_block_header(state, block)` | +| `block` | `Block` | `block` | `process_block_header(state, block)` | | `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` | | `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` | | `transfer` | `Transfer` | `transfer` | `process_transfer(state, transfer)` | From de9b4f2d6d64e42fe00c070dd98e77cd6f37d24e Mon Sep 17 00:00:00 2001 From: vbuterin Date: Mon, 29 Jul 2019 09:47:35 -0400 Subject: [PATCH 006/250] Attestation changes + persistent committee changes (#1294) * Minimal attestation simplification * minor fix * Make the tests pass * Decrease `PLACEHOLDER`, Use `compute_epoch_of_shard_slot` * Fix proposer signature name and use get_seed() to calculate current_shuffling_seed * Fix linter error * Add the WIP 
`test_is_valid_shard_block` * Add `get_shard_block_attester_committee` * Simplified committee selection * Added some helpers and simplified * Update specs/core/1_shard-data-chains.md * Update 1_shard-data-chains.md * Simplified switchover epochs, changed block structure, changed crosslink structure * Update 1_shard-data-chains.md * Moved balance dependency to proposer selection * Update specs/core/1_shard-data-chains.md Co-Authored-By: Danny Ryan * Update specs/core/1_shard-data-chains.md Co-Authored-By: Danny Ryan * Update specs/core/1_shard-data-chains.md Co-Authored-By: Danny Ryan * Update specs/core/1_shard-data-chains.md Co-Authored-By: Danny Ryan * Update specs/core/1_shard-data-chains.md Co-Authored-By: Danny Ryan * Update specs/core/1_shard-data-chains.md Co-Authored-By: Danny Ryan * Update specs/core/1_shard-data-chains.md Co-Authored-By: Danny Ryan * Update specs/core/1_shard-data-chains.md * Fixed shard header flattening * Update specs/core/1_shard-data-chains.md * Minor fixes * Update specs/core/1_shard-data-chains.md * Update specs/core/1_shard-data-chains.md Co-Authored-By: Hsiao-Wei Wang * cleanup testing and lint * return none if not active validators in persistent committee * only allow active validators as shard proposer --- scripts/build_spec.py | 3 +- specs/core/1_shard-data-chains.md | 380 +++++++++--------- .../eth2spec/test/helpers/phase1/__init__.py | 0 .../test/helpers/phase1/shard_block.py | 47 +++ .../shard_data_chain/test_shard_block.py | 26 ++ 5 files changed, 263 insertions(+), 193 deletions(-) create mode 100644 test_libs/pyspec/eth2spec/test/helpers/phase1/__init__.py create mode 100644 test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py create mode 100644 test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 8b541ff50..96866cc8a 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -48,11 +48,10 @@ from dataclasses import ( 
from eth2spec.utils.ssz.ssz_impl import ( hash_tree_root, signing_root, - serialize, is_empty, ) from eth2spec.utils.ssz.ssz_typing import ( - bit, boolean, Container, List, Vector, Bytes, uint64, + uint64, bit, boolean, Container, List, Vector, Bytes, BytesN, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector, ) from eth2spec.utils.bls import ( diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 00737a87a..fc839930f 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -9,6 +9,7 @@ - [Ethereum 2.0 Phase 1 -- Shard Data Chains](#ethereum-20-phase-1----shard-data-chains) - [Table of contents](#table-of-contents) - [Introduction](#introduction) + - [Custom types](#custom-types) - [Configuration](#configuration) - [Misc](#misc) - [Initial values](#initial-values) @@ -16,21 +17,25 @@ - [Signature domain types](#signature-domain-types) - [TODO PLACEHOLDER](#todo-placeholder) - [Data structures](#data-structures) - - [`ShardBlockBody`](#shardblockbody) - - [`ShardAttestation`](#shardattestation) - - [`ShardBlock`](#shardblock) - [`ShardBlockHeader`](#shardblockheader) + - [`ShardBlock`](#shardblock) + - [`ShardBlockSignatures`](#shardblocksignatures) + - [`ShardBlockCore`](#shardblockcore) + - [`ExtendedShardBlockCore`](#extendedshardblockcore) - [Helper functions](#helper-functions) + - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) + - [`compute_slot_of_shard_slot`](#compute_slot_of_shard_slot) + - [`get_shard_period_start_epoch`](#get_shard_period_start_epoch) - [`get_period_committee`](#get_period_committee) - - [`get_switchover_epoch`](#get_switchover_epoch) - [`get_persistent_committee`](#get_persistent_committee) - - [`get_shard_proposer_index`](#get_shard_proposer_index) + - [`get_shard_block_proposer_index`](#get_shard_block_proposer_index) + - [`get_shard_block_attester_committee`](#get_shard_block_attester_committee) - 
[`get_shard_header`](#get_shard_header) - - [`verify_shard_attestation_signature`](#verify_shard_attestation_signature) + - [`pad`](#pad) + - [`flatten_shard_header`](#flatten_shard_header) - [`compute_crosslink_data_root`](#compute_crosslink_data_root) - [Object validity](#object-validity) - [Shard blocks](#shard-blocks) - - [Shard attestations](#shard-attestations) - [Beacon attestations](#beacon-attestations) - [Shard fork choice rule](#shard-fork-choice-rule) @@ -40,14 +45,24 @@ This document describes the shard data layer and the shard fork choice rule in Phase 1 of Ethereum 2.0. +## Custom types + +We define the following Python custom types for type hinting and readability: + +| Name | SSZ equivalent | Description | +| - | - | - | +| `ShardSlot` | `uint64` | a slot number in shard chain | + ## Configuration ### Misc | Name | Value | | - | - | -| `BYTES_PER_SHARD_BLOCK_BODY` | `2**14` (= 16,384) | -| `MAX_SHARD_ATTESTIONS` | `2**4` (= 16) | +| `SHARD_HEADER_SIZE` | `2**9` (= 512) | +| `SHARD_BLOCK_SIZE_LIMIT` | `2**16` (= 65,536) | +| `SHARD_SLOTS_PER_BEACON_SLOT` | `2**1` (= 2) | +| `MAX_PERSISTENT_COMMITTEE_SIZE` | `2**7` (= 128) | ### Initial values @@ -61,7 +76,8 @@ This document describes the shard data layer and the shard fork choice rule in P | Name | Value | Unit | Duration | | - | - | :-: | :-: | -| `CROSSLINK_LOOKBACK` | `2**0` (= 1) | epochs | 6.2 minutes | +| `CROSSLINK_LOOKBACK` | `2**0` (= 1) | epochs | 6.4 minutes | +| `EPOCHS_PER_SHARD_PERIOD` | `2**8` (= 256) | epochs | ~27 hours | ### Signature domain types @@ -76,85 +92,102 @@ The following types are defined, mapping into `DomainType` (little endian): | Name | Value | | - | - | -| `PLACEHOLDER` | `2**32` | +| `PLACEHOLDER` | `2**3` | ## Data structures -### `ShardBlockBody` +_Note: the shard block header structure is carefully designed so that all of the values have the same depth in a hash tree implementation, so `hash_tree_root(SSZ_partial(x)) == hash_tree_root(x)` (using the 
"left-to-right leaves" scheme [here](https://github.com/ethereum/eth2.0-specs/issues/1303)), which allows shard block headers to look like an SSZ object when in the crosslink structure. This is done by balancing it so that 7 or 8 items are on the left side (the "core") and two 96-byte (ie. 3*2 = 6 chunk) items are on the right side. Change with care._ + +### `ShardBlockHeader` ```python -class ShardBlockBody(Container): - data: Vector[Bytes[PLACEHOLDER], BYTES_PER_SHARD_BLOCK_BODY] -``` - -### `ShardAttestation` - -```python -class ShardAttestation(Container): - class data(Container): - slot: Slot - shard: Shard - shard_block_root: Hash - aggregation_bits: Bitlist[PLACEHOLDER] - aggregate_signature: BLSSignature +class ShardBlockHeader(Container): + core: ShardBlockCore + signatures: ShardBlockSignatures ``` ### `ShardBlock` ```python class ShardBlock(Container): - slot: Slot - shard: Shard - beacon_chain_root: Hash - parent_root: Hash - data: ShardBlockBody - state_root: Hash - attestations: List[ShardAttestation, PLACEHOLDER] - signature: BLSSignature + core: ExtendedShardBlockCore + signatures: ShardBlockSignatures ``` -### `ShardBlockHeader` +### `ShardBlockSignatures` ```python -class ShardBlockHeader(Container): - slot: Slot - shard: Shard +class ShardBlockSignatures(Container): + attestation_signature: BLSSignature + proposer_signature: BLSSignature +``` + +### `ShardBlockCore` + +```python +class ShardBlockCore(Container): + slot: ShardSlot beacon_chain_root: Hash parent_root: Hash - body_root: Hash + data_root: Hash state_root: Hash - attestations: List[ShardAttestation, PLACEHOLDER] - signature: BLSSignature + total_bytes: uint64 + attester_bitfield: Bitvector[MAX_PERSISTENT_COMMITTEE_SIZE * 2] +``` + +### `ExtendedShardBlockCore` + +```python +class ExtendedShardBlockCore(Container): + slot: ShardSlot + beacon_chain_root: Hash + parent_root: Hash + data: Bytes[SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] + state_root: Hash + total_bytes: uint64 + 
attester_bitfield: Bitvector[MAX_PERSISTENT_COMMITTEE_SIZE * 2] ``` ## Helper functions +### `compute_slot_of_shard_slot` + +```python +def compute_slot_of_shard_slot(slot: ShardSlot) -> Epoch: + return Epoch(slot // SHARD_SLOTS_PER_BEACON_SLOT) +``` + +### `compute_epoch_of_shard_slot` + +```python +def compute_epoch_of_shard_slot(slot: ShardSlot) -> Epoch: + return Epoch(slot // SHARD_SLOTS_PER_BEACON_SLOT // SLOTS_PER_EPOCH) +``` + +### `get_shard_period_start_epoch` + +```python +def get_shard_period_start_epoch(epoch: Epoch, lookback: Epoch=Epoch(0)) -> Epoch: + return Epoch(epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD) +``` + ### `get_period_committee` ```python def get_period_committee(state: BeaconState, epoch: Epoch, - shard: Shard, - index: uint64, - count: uint64) -> Sequence[ValidatorIndex]: + shard: Shard) -> List[ValidatorIndex, MAX_PERSISTENT_COMMITTEE_SIZE]: """ Return committee for a period. Used to construct persistent committees. """ - return compute_committee( + full_committee = compute_committee( indices=get_active_validator_indices(state, epoch), seed=get_seed(state, epoch), - index=shard * count + index, - count=SHARD_COUNT * count, + index=shard, + count=SHARD_COUNT, ) -``` -### `get_switchover_epoch` - -```python -def get_switchover_epoch(state: BeaconState, epoch: Epoch, index: ValidatorIndex) -> int: - earlier_start_epoch = Epoch(epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2) - return (bytes_to_int(hash(get_seed(state, earlier_start_epoch) + int_to_bytes(index, length=3)[0:8])) - % PERSISTENT_COMMITTEE_PERIOD) + return full_committee[:MAX_PERSISTENT_COMMITTEE_SIZE] ``` ### `get_persistent_committee` @@ -162,52 +195,47 @@ def get_switchover_epoch(state: BeaconState, epoch: Epoch, index: ValidatorIndex ```python def get_persistent_committee(state: BeaconState, shard: Shard, - slot: Slot) -> Sequence[ValidatorIndex]: + slot: ShardSlot) -> Sequence[ValidatorIndex]: """ Return 
the persistent committee for the given ``shard`` at the given ``slot``. """ - epoch = compute_epoch_of_slot(slot) - earlier_start_epoch = Epoch(epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2) - later_start_epoch = Epoch(epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD) + epoch = compute_epoch_of_shard_slot(slot) - committee_count = max( - len(get_active_validator_indices(state, earlier_start_epoch)) // - (SHARD_COUNT * TARGET_COMMITTEE_SIZE), - len(get_active_validator_indices(state, later_start_epoch)) // - (SHARD_COUNT * TARGET_COMMITTEE_SIZE), - ) + 1 - - index = slot % committee_count - earlier_committee = get_period_committee(state, earlier_start_epoch, shard, index, committee_count) - later_committee = get_period_committee(state, later_start_epoch, shard, index, committee_count) + earlier_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=Epoch(2)), shard) + later_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=Epoch(1)), shard) # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from # later committee; return a sorted list of the union of the two, deduplicated - return sorted(list(set( - [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(state, epoch, i)] - + [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(state, epoch, i)] - ))) + return sorted(set( + [i for i in earlier_committee if epoch % EPOCHS_PER_SHARD_PERIOD < i % EPOCHS_PER_SHARD_PERIOD] + + [i for i in later_committee if epoch % EPOCHS_PER_SHARD_PERIOD >= i % EPOCHS_PER_SHARD_PERIOD] + )) ``` -### `get_shard_proposer_index` +### `get_shard_block_proposer_index` ```python -def get_shard_proposer_index(state: BeaconState, - shard: Shard, - slot: Slot) -> Optional[ValidatorIndex]: +def get_shard_block_proposer_index(state: BeaconState, + shard: 
Shard, + slot: ShardSlot) -> Optional[ValidatorIndex]: # Randomly shift persistent committee persistent_committee = list(get_persistent_committee(state, shard, slot)) - seed = hash(state.current_shuffling_seed + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8)) - random_index = bytes_to_int(seed[0:8]) % len(persistent_committee) - persistent_committee = persistent_committee[random_index:] + persistent_committee[:random_index] + current_epoch = get_current_epoch(state) - # Search for an active proposer - for index in persistent_committee: - if is_active_validator(state.validators[index], get_current_epoch(state)): - return index + active_indices = [i for i in persistent_committee if is_active_validator(state.validators[i], current_epoch)] + if not any(active_indices): + return None - # No block can be proposed if no validator is active - return None + MAX_RANDOM_BYTE = 2**8 - 1 + seed = hash(get_seed(state, current_epoch) + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8)) + i = 0 + while True: + candidate_index = active_indices[(slot + i) % len(active_indices)] + random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32] + effective_balance = state.validators[candidate_index].effective_balance + if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: + return ValidatorIndex(candidate_index) + i += 1 ``` ### `get_shard_header` @@ -215,35 +243,49 @@ def get_shard_proposer_index(state: BeaconState, ```python def get_shard_header(block: ShardBlock) -> ShardBlockHeader: return ShardBlockHeader( - slot=block.slot, - shard=block.shard, - beacon_chain_root=block.beacon_chain_root, - parent_root=block.parent_root, - body_root=hash_tree_root(block.body), - state_root=block.state_root, - attestations=block.attestations, - signature=block.signature, + core=ShardBlockCore( + slot=block.core.slot, + beacon_chain_root=block.core.beacon_chain_root, + parent_root=block.core.parent_root, + 
data_root=hash_tree_root(block.core.data), + state_root=block.core.state_root, + total_bytes=block.core.total_bytes, + attester_bitfield=block.core.attester_bitfield + ), + signatures=block.signatures ) ``` -### `verify_shard_attestation_signature` +### `pad` ```python -def verify_shard_attestation_signature(state: BeaconState, - attestation: ShardAttestation) -> None: - data = attestation.data - persistent_committee = get_persistent_committee(state, data.shard, data.slot) - pubkeys = [] - for i, index in enumerate(persistent_committee): - if attestation.aggregation_bits[i]: - validator = state.validators[index] - assert is_active_validator(validator, get_current_epoch(state)) - pubkeys.append(validator.pubkey) - assert bls_verify( - pubkey=bls_aggregate_pubkeys(pubkeys), - message_hash=data.shard_block_root, - signature=attestation.aggregate_signature, - domain=get_domain(state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_slot(data.slot)) +def pad(x: bytes, length: int) -> bytes: + assert len(x) <= length + return x + b'\x00' * (length - len(x)) +``` + +### `flatten_shard_header` + +```python +def flatten_shard_header(header: ShardBlockHeader) -> Bytes[SHARD_HEADER_SIZE]: + """ + Converts a shard block header into a flat object with the same hash tree root. Used + in the crosslink construction. 
+ """ + committee_size = len(header.core.attester_bitfield) + attester_bits = [header.core.attester_bitfield[i] if i < committee_size else 0 for i in range(256)] + attester_bytes = bytes([sum([attester_bits[i + j] << j for j in range(8)]) for i in range(0, 256, 8)]) + return ( + pad(int_to_bytes(header.core.slot, length=8), 32) + + header.core.beacon_chain_root + + header.core.parent_root + + header.core.data_root + + header.core.state_root + + pad(int_to_bytes(header.core.total_bytes, length=8), 32) + + attester_bytes + + b'\x00' * 32 + + pad(header.signatures.attestation_signature, 128) + + pad(header.signatures.proposer_signature, 128) ) ``` @@ -251,32 +293,10 @@ def verify_shard_attestation_signature(state: BeaconState, ```python def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash: - def is_power_of_two(value: uint64) -> bool: - return (value > 0) and (value & (value - 1) == 0) - - def pad_to_power_of_2(values: MutableSequence[bytes]) -> Sequence[bytes]: - while not is_power_of_two(len(values)): - values.append(b'\x00' * BYTES_PER_SHARD_BLOCK_BODY) - return values - - def hash_tree_root_of_bytes(data: bytes) -> Hash: - return hash_tree_root([data[i:i + 32] for i in range(0, len(data), 32)]) - - def zpad(data: bytes, length: uint64) -> bytes: - return data + b'\x00' * (length - len(data)) - - return hash( - # TODO untested code. 
- # Need to either pass a typed list to hash-tree-root, or merkleize_chunks(values, pad_to=2**x) - hash_tree_root(pad_to_power_of_2([ - hash_tree_root_of_bytes( - zpad(serialize(get_shard_header(block)), BYTES_PER_SHARD_BLOCK_BODY) - ) for block in blocks - ])) - + hash_tree_root(pad_to_power_of_2([ - hash_tree_root_of_bytes(block.body) for block in blocks - ])) - ) + header = b''.join([flatten_shard_header(get_shard_header(block)) for block in blocks]) + footer = b''.join([block.core.data for block in blocks]) + MAX_SIZE = SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK + return hash_tree_root(BytesN[MAX_SIZE](pad(header + footer, MAX_SIZE))) ``` ## Object validity @@ -287,12 +307,14 @@ Let: - `beacon_blocks` be the `BeaconBlock` list such that `beacon_blocks[slot]` is the canonical `BeaconBlock` at slot `slot` - `beacon_state` be the canonical `BeaconState` after processing `beacon_blocks[-1]` +- `shard` is the shard ID - `valid_shard_blocks` be the list of valid `ShardBlock`, recursively defined - `candidate` be a candidate `ShardBlock` for which validity is to be determined by running `is_valid_shard_block` ```python -def is_valid_shard_block(beacon_blocks: Sequence[BeaconBlock], - beacon_state: BeaconState, +def is_valid_shard_block(beacon_state: BeaconState, + beacon_blocks: Sequence[BeaconBlock], + shard: Shard, valid_shard_blocks: Sequence[ShardBlock], candidate: ShardBlock) -> bool: # Check if block is already determined valid @@ -301,80 +323,56 @@ def is_valid_shard_block(beacon_blocks: Sequence[BeaconBlock], return True # Check slot number - assert candidate.slot >= PHASE_1_FORK_SLOT - - # Check shard number - assert candidate.shard <= SHARD_COUNT + assert compute_slot_of_shard_slot(candidate.core.slot) >= PHASE_1_FORK_SLOT # Check beacon block - beacon_block = beacon_blocks[candidate.slot] - assert candidate.beacon_block_root == signing_root(beacon_block) - assert beacon_block.slot <= candidate.slot + 
beacon_block_slot = compute_start_slot_of_epoch(compute_epoch_of_shard_slot(candidate.core.slot)) + beacon_block = beacon_blocks[beacon_block_slot] + assert candidate.core.beacon_block_root == signing_root(beacon_block) + assert beacon_block.slot <= candidate.core.slot # Check state root - assert candidate.state_root == Hash() # [to be removed in phase 2] + assert candidate.core.state_root == Hash() # [to be removed in phase 2] # Check parent block - if candidate.slot == PHASE_1_FORK_SLOT: - assert candidate.parent_root == Hash() - else: + if candidate.core.parent_root != Hash(): parent_block = next( - (block for block in valid_shard_blocks if signing_root(block) == candidate.parent_root), + (block for block in valid_shard_blocks if hash_tree_root(block.core) == candidate.core.parent_root), None ) assert parent_block is not None - assert parent_block.shard == candidate.shard - assert parent_block.slot < candidate.slot - assert signing_root(beacon_blocks[parent_block.slot]) == parent_block.beacon_chain_root + assert parent_block.core.slot < candidate.core.slot + parent_beacon_block_slot = compute_start_slot_of_epoch(compute_epoch_of_shard_slot(parent_block.core.slot)) + assert signing_root(beacon_blocks[parent_beacon_block_slot]) == parent_block.core.beacon_chain_root # Check attestations - assert len(candidate.attestations) <= MAX_SHARD_ATTESTIONS - for _, attestation in enumerate(candidate.attestations): - assert max(GENESIS_SHARD_SLOT, candidate.slot - SLOTS_PER_EPOCH) <= attestation.data.slot - assert attestation.data.slot <= candidate.slot - MIN_ATTESTATION_INCLUSION_DELAY - assert attestation.data.crosslink.shard == candidate.shard - verify_shard_attestation_signature(beacon_state, attestation) + attester_committee = get_persistent_committee(beacon_state, shard, block.core.slot) + pubkeys = [] + for i, index in enumerate(attester_committee): + if block.core.attester_bitfield[i]: + pubkeys.append(beacon_state.validators[index].pubkey) + for i in 
range(len(attester_committee), MAX_PERSISTENT_COMMITTEE_SIZE * 2): + assert block.attester_bitfield[i] is False + assert bls_verify( + pubkey=bls_aggregate_pubkeys(pubkeys), + message_hash=candidate.core.parent_root, + signature=candidate.signatures.attestation_signature, + domain=get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(candidate.core.slot)) + ) - # Check signature - proposer_index = get_shard_proposer_index(beacon_state, candidate.shard, candidate.slot) + # Check proposer + proposer_index = get_shard_block_proposer_index(beacon_state, shard, candidate.core.slot) assert proposer_index is not None assert bls_verify( pubkey=beacon_state.validators[proposer_index].pubkey, - message_hash=signing_root(candidate), - signature=candidate.signature, - domain=get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_slot(candidate.slot)), + message_hash=hash_tree_root(candidate.core), + signature=candidate.signatures.proposer_signature, + domain=get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(candidate.core.slot)), ) return True ``` -### Shard attestations - -Let: - -- `valid_shard_blocks` be the list of valid `ShardBlock` -- `beacon_state` be the canonical `BeaconState` -- `candidate` be a candidate `ShardAttestation` for which validity is to be determined by running `is_valid_shard_attestation` - -```python -def is_valid_shard_attestation(valid_shard_blocks: Sequence[ShardBlock], - beacon_state: BeaconState, - candidate: ShardAttestation) -> bool: - # Check shard block - shard_block = next( - (block for block in valid_shard_blocks if signing_root(block) == candidate.data.shard_block_root), - None, - ) - assert shard_block is not None - assert shard_block.slot == candidate.data.slot - assert shard_block.shard == candidate.data.shard - - # Check signature - verify_shard_attestation_signature(beacon_state, candidate) - - return True -``` - ### Beacon attestations Let: diff --git 
a/test_libs/pyspec/eth2spec/test/helpers/phase1/__init__.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py new file mode 100644 index 000000000..4e1981727 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py @@ -0,0 +1,47 @@ +from eth2spec.test.helpers.keys import privkeys +from eth2spec.utils.bls import ( + bls_sign, + only_with_bls, +) +from eth2spec.utils.ssz.ssz_impl import ( + signing_root, +) + + +@only_with_bls() +def sign_shard_block(spec, state, block, shard, proposer_index=None): + if proposer_index is None: + proposer_index = spec.get_shard_block_proposer_index(state, shard, block.core.slot) + + privkey = privkeys[proposer_index] + + block.signatures.proposer_signature = bls_sign( + message_hash=signing_root(block), + privkey=privkey, + domain=spec.get_domain( + state, + spec.DOMAIN_SHARD_PROPOSER, + spec.compute_epoch_of_shard_slot(block.core.slot), + ) + ) + + +def build_empty_shard_block(spec, state, slot, shard, parent_root, signed=False): + if slot is None: + slot = state.slot + block = spec.ShardBlock( + core=spec.ExtendedShardBlockCore( + slot=slot, + beacon_chain_root=state.block_roots[state.slot % spec.SLOTS_PER_HISTORICAL_ROOT], + parent_root=parent_root, + ), + signatures=spec.ShardBlockSignatures( + attestation_signature=b'\x12' * 96, + proposer_signature=b'\x25' * 96, + ) + ) + + if signed: + sign_shard_block(spec, state, block, shard) + + return block diff --git a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py new file mode 100644 index 000000000..359350d39 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py @@ -0,0 +1,26 @@ +from eth2spec.test.helpers.phase1.shard_block 
import ( + build_empty_shard_block, +) +from eth2spec.test.context import ( + with_all_phases_except, + spec_state_test, + always_bls, +) + + +@with_all_phases_except(['phase0']) +@always_bls +@spec_state_test +def test_is_valid_shard_block(spec, state): + block = build_empty_shard_block( + spec, + state, + slot=spec.Slot(spec.PERSISTENT_COMMITTEE_PERIOD * 100), + shard=spec.Shard(1), + parent_root=spec.Hash(), + signed=True, + ) + + # TODO: test `is_valid_shard_block` + + yield 'blocks', (block,) From 4b2b5815c919ed7263ae1a5105854a9b73ed1ef9 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Mon, 29 Jul 2019 16:43:55 -0400 Subject: [PATCH 007/250] Add shard state transition function --- specs/core/1_shard-data-chains.md | 216 +++++++++++++++++++++++------- 1 file changed, 170 insertions(+), 46 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index fc839930f..1c94741d8 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -34,6 +34,7 @@ - [`pad`](#pad) - [`flatten_shard_header`](#flatten_shard_header) - [`compute_crosslink_data_root`](#compute_crosslink_data_root) + - [`get_default_shard_state`](#get_default_shard_state) - [Object validity](#object-validity) - [Shard blocks](#shard-blocks) - [Beacon attestations](#beacon-attestations) @@ -61,8 +62,11 @@ We define the following Python custom types for type hinting and readability: | - | - | | `SHARD_HEADER_SIZE` | `2**9` (= 512) | | `SHARD_BLOCK_SIZE_LIMIT` | `2**16` (= 65,536) | +| `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) | | `SHARD_SLOTS_PER_BEACON_SLOT` | `2**1` (= 2) | | `MAX_PERSISTENT_COMMITTEE_SIZE` | `2**7` (= 128) | +| `REWARD_COEFFICIENT_BASE` | `2**20` ( = 1,048,576) | +| `BASEFEE_ADJUSTMENT_FACTOR` | `2**3` (= 8) | ### Initial values @@ -148,6 +152,31 @@ class ExtendedShardBlockCore(Container): attester_bitfield: Bitvector[MAX_PERSISTENT_COMMITTEE_SIZE * 2] ``` +### `ShardState` + +```python +class ShardState(Container): 
+ history_acc: Vector[Hash, 64] + earlier_committee_rewards: List[uint64, MAX_PERSISTENT_COMMITTEE_SIZE] + later_committee_rewards: List[uint64, MAX_PERSISTENT_COMMITTEE_SIZE] + earlier_committee_fees: List[Gwei, MAX_PERSISTENT_COMMITTEE_SIZE] + later_committee_fees: List[Gwei, MAX_PERSISTENT_COMMITTEE_SIZE] + basefee: Gwei + slot: ShardSlot + shard: Shard + most_recent_block_core: ShardBlockCore + receipt_root: Hash +``` + +### `ShardReceiptDelta` + +```python +class ShardReceiptDelta(Container): + index: ValidatorIndex + reward_coefficient: uint64 + block_fee: Gwei +``` + ## Helper functions ### `compute_slot_of_shard_slot` @@ -167,7 +196,7 @@ def compute_epoch_of_shard_slot(slot: ShardSlot) -> Epoch: ### `get_shard_period_start_epoch` ```python -def get_shard_period_start_epoch(epoch: Epoch, lookback: Epoch=Epoch(0)) -> Epoch: +def get_shard_period_start_epoch(epoch: Epoch, lookback: uint64=0) -> Epoch: return Epoch(epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD) ``` @@ -201,8 +230,8 @@ def get_persistent_committee(state: BeaconState, """ epoch = compute_epoch_of_shard_slot(slot) - earlier_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=Epoch(2)), shard) - later_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=Epoch(1)), shard) + earlier_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=2), shard) + later_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=1), shard) # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from # later committee; return a sorted list of the union of the two, deduplicated @@ -299,60 +328,130 @@ def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash: return hash_tree_root(BytesN[MAX_SIZE](pad(header + footer, MAX_SIZE))) ``` -## Object validity - -### Shard blocks - -Let: - -- `beacon_blocks` be the 
`BeaconBlock` list such that `beacon_blocks[slot]` is the canonical `BeaconBlock` at slot `slot` -- `beacon_state` be the canonical `BeaconState` after processing `beacon_blocks[-1]` -- `shard` is the shard ID -- `valid_shard_blocks` be the list of valid `ShardBlock`, recursively defined -- `candidate` be a candidate `ShardBlock` for which validity is to be determined by running `is_valid_shard_block` +### `get_default_shard_state` ```python -def is_valid_shard_block(beacon_state: BeaconState, - beacon_blocks: Sequence[BeaconBlock], - shard: Shard, - valid_shard_blocks: Sequence[ShardBlock], - candidate: ShardBlock) -> bool: - # Check if block is already determined valid - for _, block in enumerate(valid_shard_blocks): - if candidate == block: - return True +def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardState: + earlier_committee = get_period_committee(beacon_state, PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD * 2, shard) + later_committee = get_period_committee(beacon_state, PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD, shard) + return ShardState( + basefee=1, + shard=shard, + slot=PHASE_1_FORK_SLOT, + earlier_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(earlier_committee))], + later_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))], + earlier_committee_fees=[0 for _ in range(len(earlier_committee))], + later_committee_fees=[0 for _ in range(len(later_committee))], + ) +``` +## Object validity + +### Shard block validation: preliminary + +Accept a shard block `block` only if all of the following are correct: + +* Either `block.core.parent_root == ZERO_HASH` or a block `parent` such that `hash_tree_root(parent.core) == block.core.parent_root` has already been accepted. 
+* `block.core.beacon_chain_root == get_block_root(head_beacon_state, compute_epoch_of_shard_slot(parent.core.slot))` where `head_beacon_state` is the current beacon chain head state. Alternatively phrased, a beacon chain block `beacon_ref` such that `signing_root(beacon_ref) == block.core.beacon_chain_root` has already been accepted and is part of the canonical chain, and no block with slot `beacon_ref.slot < slot <= compute_start_slot_of_epoch(compute_epoch_of_shard_slot(parent.core.slot))` is part of the canonical chain. +* Let `beacon_state` be the state where `beacon_ref.state_root == hash_tree_root(beacon_state)`. Let `prev_state` be the post-state of the `parent` if the `parent` exists, otherwise let it be `get_default_shard_state(beacon_state, shard)` (defined below). `block.core.state_root` must equal the `hash_tree_root` of the state after applying `shard_state_transition(prev_state, beacon_state, block)`. + +Note that these acceptance conditions depend on the canonical beacon chain; when the canonical beacon chain reorganizes, the eligibility of shard blocks should be re-evaluated. 
+ +### Shard state transition function helpers + +```python +def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: uint): + epoch = compute_epoch_of_shard_slot(state.slot) + earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard) + later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard) + if index in earlier_committee: + state.earlier_committee_rewards[earlier_committee.index(index)] += delta + elif index in later_committee: + state.later_committee_rewards[later_committee.index(index)] += delta + else: + raise Exception("Should never be here") +``` + +```python +def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: uint): + epoch = compute_epoch_of_shard_slot(state.slot) + earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard) + later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard) + if index in earlier_committee: + state.earlier_committee_fees[earlier_committee.index(index)] += delta + elif index in later_committee: + state.later_committee_fees[later_committee.index(index)] += delta + else: + raise Exception("Should never be here") +``` + +### Shard state transition function + +```python +def shard_state_transition(state: ShardState, beacon_state: BeaconState, block: ShardBlock): + assert block.core.slot > state.slot + for slot in range(state.slot, block.core.slot): + shard_slot_transition(state, beacon_state) + shard_block_transition(state, beacon_state, block) +``` + +```python +def shard_slot_transition(state: ShardState, beacon_state: BeaconState): + # Correct saved state root + if state.most_recent_block_core.state_root == ZERO_HASH: + state.most_recent_block_core.state_root = hash_tree_root(state) + + # Save states in history accumulator + depth = 0 
+ h = hash_tree_root(state) + while state.slot % 2**depth == 0: + state.history_acc[depth] = h + + # Period transitions + if (state.slot + 1) % (SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: + epoch = compute_epoch_of_shard_slot(state.slot) + earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard) + later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard) + state.receipt_root = hash_tree_root(List[ShardReceiptDelta, PLACEHOLDER]([ + ShardReceiptDelta(index, state.earlier_committee_rewards[i], state.earlier_committee_fees[i]) + for i, index in enumerate(committee) + ])) + state.earlier_committee_rewards = state.later_committee_rewards + state.earlier_committee_fees = state.later_committee_fees + state.later_committee_rewards = [REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))], + state.later_committee_fees = [0 for _ in range(len(later_committee))], + else: + state.receipt_root = ZERO_HASH + state.slot += 1 +``` + +```python +def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: ShardBlock): # Check slot number - assert compute_slot_of_shard_slot(candidate.core.slot) >= PHASE_1_FORK_SLOT - - # Check beacon block - beacon_block_slot = compute_start_slot_of_epoch(compute_epoch_of_shard_slot(candidate.core.slot)) - beacon_block = beacon_blocks[beacon_block_slot] - assert candidate.core.beacon_block_root == signing_root(beacon_block) - assert beacon_block.slot <= candidate.core.slot - - # Check state root - assert candidate.core.state_root == Hash() # [to be removed in phase 2] - + assert candidate.core.slot == state.slot + # Check parent block if candidate.core.parent_root != Hash(): - parent_block = next( - (block for block in valid_shard_blocks if hash_tree_root(block.core) == candidate.core.parent_root), - None - ) - assert parent_block is not None - assert parent_block.core.slot < candidate.core.slot - 
parent_beacon_block_slot = compute_start_slot_of_epoch(compute_epoch_of_shard_slot(parent_block.core.slot)) - assert signing_root(beacon_blocks[parent_beacon_block_slot]) == parent_block.core.beacon_chain_root - + assert candidate.core.parent_root == hash_tree_root(state.most_recent_block_core) + + # Calculate base reward + total_balance = get_total_active_balance(beacon_state) + base_reward = Gwei(REWARD_COEFFICIENT_BASE * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH) + # Check attestations attester_committee = get_persistent_committee(beacon_state, shard, block.core.slot) pubkeys = [] + attestations = 0 + for i, index in enumerate(attester_committee): if block.core.attester_bitfield[i]: pubkeys.append(beacon_state.validators[index].pubkey) - for i in range(len(attester_committee), MAX_PERSISTENT_COMMITTEE_SIZE * 2): + add_reward(state, beacon_state, index, base_reward) + attestations += 1 + + for i in range(len(attester_committee), MAX_PERSISTENT_COMMITTEE_SIZE): assert block.attester_bitfield[i] is False + assert bls_verify( pubkey=bls_aggregate_pubkeys(pubkeys), message_hash=candidate.core.parent_root, @@ -363,14 +462,39 @@ def is_valid_shard_block(beacon_state: BeaconState, # Check proposer proposer_index = get_shard_block_proposer_index(beacon_state, shard, candidate.core.slot) assert proposer_index is not None + add_reward(state, beacon_state, proposer_index, attestations * base_reward // PROPOSER_REWARD_QUOTIENT) assert bls_verify( pubkey=beacon_state.validators[proposer_index].pubkey, message_hash=hash_tree_root(candidate.core), signature=candidate.signatures.proposer_signature, domain=get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(candidate.core.slot)), ) - - return True + + # Process and update block data fees + add_fee(state, beacon_state, proposer_index, state.basefee * len(block.core.data) // SHARD_BLOCK_SIZE_LIMIT) + QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR) + if 
len(block.core.data) > SHARD_BLOCK_SIZE_TARGET: + state.basefee += min(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) + elif len(block.core.data) < SHARD_BLOCK_SIZE_TARGET: + state.basefee -= min(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) + state.basefee = max(1, min(EFFECTIVE_BALANCE_INCREMENT // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH, state.basefee)) + + # Check total bytes + assert block.core.total_bytes == state.most_recent_block_core.total_bytes + len(block.core.data) + + # Update in-state block header + state.most_recent_block_core = ShardBlockCore( + slot=block.core.slot, + beacon_chain_root=block.core.beacon_chain_root, + parent_root=block.core.parent_root, + data_root=block.core.data_root, + state_root=ZERO_HASH, + total_bytes=block.core.total_bytes, + attester_bitfield=block.core.attester_bitfield + ) + + # Check state root + assert hash_tree_root(state) == block.core.state_root ``` ### Beacon attestations From cf7d65e8ff837abfd529fa4ab0381610c7ffd021 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 30 Jul 2019 12:15:46 -0400 Subject: [PATCH 008/250] Added generalized index handling functions --- specs/light_client/merkle_proofs.md | 38 ++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index b058be7ca..f62dc8d5c 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -115,7 +115,7 @@ def get_item_position(typ: Type, index: Union[int, str]) -> Tuple[int, int, int] raise Exception("Only lists/vectors/containers supported") -def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> int: +def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> GeneralizedIndex: """ Converts a path (eg. 
`[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree. @@ -131,6 +131,42 @@ def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> int: return root ``` +### Helpers for generalized indices + +#### `concat_generalized_indices` + +```python +def concat_generalized_indices(*indices: Sequence[GeneralizedIndex]) -> GeneralizedIndex: + """ + Given generalized indices i1 for A -> B, i2 for B -> C .... i_n for Y -> Z, returns + the generalized index for A -> Z. + """ + o = GeneralizedIndex(1) + for i in indices: + o = o * get_previous_power_of_2(i) + i + return o +``` + +#### `get_generalized_index_length` + +```python +def get_generalized_index_length(index: GeneralizedIndex) -> int: + """ + Returns the length of a path represented by a generalized index. + """ + return log(index) +``` + +#### `get_generalized_index_bit` + +```python +def get_generalized_index_bit(index: GeneralizedIndex, bit: int) -> bool: + """ + Returns the i'th bit of a generalized index. + """ + return (index & (1 << bit)) > 0 +``` + ## Merkle multiproofs We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (i.e. 
generalized indices 8, 9, 14): From 058e63654d41df84a3e7bffd7bda3e6967fd11e0 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 31 Jul 2019 17:44:33 +0800 Subject: [PATCH 009/250] Fix typo --- specs/core/1_shard-data-chains.md | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 1c94741d8..d9eff358e 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -22,21 +22,24 @@ - [`ShardBlockSignatures`](#shardblocksignatures) - [`ShardBlockCore`](#shardblockcore) - [`ExtendedShardBlockCore`](#extendedshardblockcore) + - [`ShardState`](#shardstate) + - [`ShardReceiptDelta`](#shardreceiptdelta) - [Helper functions](#helper-functions) - - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) - [`compute_slot_of_shard_slot`](#compute_slot_of_shard_slot) + - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) - [`get_shard_period_start_epoch`](#get_shard_period_start_epoch) - [`get_period_committee`](#get_period_committee) - [`get_persistent_committee`](#get_persistent_committee) - [`get_shard_block_proposer_index`](#get_shard_block_proposer_index) - - [`get_shard_block_attester_committee`](#get_shard_block_attester_committee) - [`get_shard_header`](#get_shard_header) - [`pad`](#pad) - [`flatten_shard_header`](#flatten_shard_header) - [`compute_crosslink_data_root`](#compute_crosslink_data_root) - [`get_default_shard_state`](#get_default_shard_state) - [Object validity](#object-validity) - - [Shard blocks](#shard-blocks) + - [Shard block validation: preliminary](#shard-block-validation-preliminary) + - [Shard state transition function helpers](#shard-state-transition-function-helpers) + - [Shard state transition function](#shard-state-transition-function) - [Beacon attestations](#beacon-attestations) - [Shard fork choice rule](#shard-fork-choice-rule) @@ -332,8 +335,8 @@ def 
compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash: ```python def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardState: - earlier_committee = get_period_committee(beacon_state, PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD * 2, shard) - later_committee = get_period_committee(beacon_state, PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD, shard) + earlier_committee = get_period_committee(beacon_state, PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD * 2, shard) + later_committee = get_period_committee(beacon_state, PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD, shard) return ShardState( basefee=1, shard=shard, @@ -360,7 +363,7 @@ Note that these acceptance conditions depend on the canonical beacon chain; when ### Shard state transition function helpers ```python -def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: uint): +def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: epoch = compute_epoch_of_shard_slot(state.slot) earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard) later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard) @@ -373,7 +376,7 @@ def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorInd ``` ```python -def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: uint): +def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: epoch = compute_epoch_of_shard_slot(state.slot) earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard) later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, 
lookback=1), state.shard) @@ -388,7 +391,7 @@ def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, ### Shard state transition function ```python -def shard_state_transition(state: ShardState, beacon_state: BeaconState, block: ShardBlock): +def shard_state_transition(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: assert block.core.slot > state.slot for slot in range(state.slot, block.core.slot): shard_slot_transition(state, beacon_state) @@ -396,7 +399,7 @@ def shard_state_transition(state: ShardState, beacon_state: BeaconState, block: ``` ```python -def shard_slot_transition(state: ShardState, beacon_state: BeaconState): +def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: # Correct saved state root if state.most_recent_block_core.state_root == ZERO_HASH: state.most_recent_block_core.state_root = hash_tree_root(state) @@ -408,7 +411,7 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState): state.history_acc[depth] = h # Period transitions - if (state.slot + 1) % (SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: + if (state.slot + 1) % (SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: epoch = compute_epoch_of_shard_slot(state.slot) earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard) later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard) @@ -426,7 +429,7 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState): ``` ```python -def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: ShardBlock): +def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: # Check slot number assert candidate.core.slot == state.slot @@ -472,12 +475,12 @@ def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: # Process 
and update block data fees add_fee(state, beacon_state, proposer_index, state.basefee * len(block.core.data) // SHARD_BLOCK_SIZE_LIMIT) - QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR) + QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR if len(block.core.data) > SHARD_BLOCK_SIZE_TARGET: state.basefee += min(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) elif len(block.core.data) < SHARD_BLOCK_SIZE_TARGET: state.basefee -= min(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) - state.basefee = max(1, min(EFFECTIVE_BALANCE_INCREMENT // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH, state.basefee)) + state.basefee = max(1, min(EFFECTIVE_BALANCE_INCREMENT // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH, state.basefee)) # Check total bytes assert block.core.total_bytes == state.most_recent_block_core.total_bytes + len(block.core.data) From f263b718759e6e94c9c2be5dfc3b3df2945083fa Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 31 Jul 2019 17:50:55 +0800 Subject: [PATCH 010/250] ZERO_HASH -> Hash() --- specs/core/1_shard-data-chains.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index d9eff358e..4d4ab3897 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -354,7 +354,7 @@ def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardSta Accept a shard block `block` only if all of the following are correct: -* Either `block.core.parent_root == ZERO_HASH` or a block `parent` such that `hash_tree_root(parent.core) == block.core.parent_root` has already been accepted. +* Either `block.core.parent_root == Hash()` or a block `parent` such that `hash_tree_root(parent.core) == block.core.parent_root` has already been accepted. 
* `block.core.beacon_chain_root == get_block_root(head_beacon_state, compute_epoch_of_shard_slot(parent.core.slot))` where `head_beacon_state` is the current beacon chain head state. Alternatively phrased, a beacon chain block `beacon_ref` such that `signing_root(beacon_ref) == block.core.beacon_chain_root` has already been accepted and is part of the canonical chain, and no block with slot `beacon_ref.slot < slot <= compute_start_slot_of_epoch(compute_epoch_of_shard_slot(parent.core.slot))` is part of the canonical chain. * Let `beacon_state` be the state where `beacon_ref.state_root == hash_tree_root(beacon_state)`. Let `prev_state` be the post-state of the `parent` if the `parent` exists, otherwise let it be `get_default_shard_state(beacon_state, shard)` (defined below). `block.core.state_root` must equal the `hash_tree_root` of the state after applying `shard_state_transition(prev_state, beacon_state, block)`. @@ -401,7 +401,7 @@ def shard_state_transition(state: ShardState, beacon_state: BeaconState, block: ```python def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: # Correct saved state root - if state.most_recent_block_core.state_root == ZERO_HASH: + if state.most_recent_block_core.state_root == Hash(): state.most_recent_block_core.state_root = hash_tree_root(state) # Save states in history accumulator @@ -424,7 +424,7 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: state.later_committee_rewards = [REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))], state.later_committee_fees = [0 for _ in range(len(later_committee))], else: - state.receipt_root = ZERO_HASH + state.receipt_root = Hash() state.slot += 1 ``` @@ -491,7 +491,7 @@ def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: beacon_chain_root=block.core.beacon_chain_root, parent_root=block.core.parent_root, data_root=block.core.data_root, - state_root=ZERO_HASH, + state_root=Hash(), 
total_bytes=block.core.total_bytes, attester_bitfield=block.core.attester_bitfield ) From fe2adfa0e231519adc60fb3c8fc2e89abcecc11e Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 31 Jul 2019 18:18:07 +0800 Subject: [PATCH 011/250] Fix many typos and lint errors --- specs/core/1_shard-data-chains.md | 102 +++++++++++++++++++----------- 1 file changed, 65 insertions(+), 37 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 4d4ab3897..bc8a8817f 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -335,8 +335,16 @@ def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash: ```python def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardState: - earlier_committee = get_period_committee(beacon_state, PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD * 2, shard) - later_committee = get_period_committee(beacon_state, PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD, shard) + earlier_committee = get_period_committee( + beacon_state, + Epoch(PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD * 2), + shard, + ) + later_committee = get_period_committee( + beacon_state, + Epoch(PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD), + shard, + ) return ShardState( basefee=1, shard=shard, @@ -344,7 +352,7 @@ def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardSta earlier_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(earlier_committee))], later_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))], earlier_committee_fees=[0 for _ in range(len(earlier_committee))], - later_committee_fees=[0 for _ in range(len(later_committee))], + later_committee_fees=[0 for _ in range(len(later_committee))], ) ``` @@ -365,7 +373,11 @@ Note 
that these acceptance conditions depend on the canonical beacon chain; when ```python def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: epoch = compute_epoch_of_shard_slot(state.slot) - earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard) + earlier_committee = get_period_committee( + beacon_state, + get_shard_period_start_epoch(epoch, lookback=2), + state.shard, + ) later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard) if index in earlier_committee: state.earlier_committee_rewards[earlier_committee.index(index)] += delta @@ -403,21 +415,29 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: # Correct saved state root if state.most_recent_block_core.state_root == Hash(): state.most_recent_block_core.state_root = hash_tree_root(state) - + # Save states in history accumulator depth = 0 h = hash_tree_root(state) while state.slot % 2**depth == 0: state.history_acc[depth] = h - + # Period transitions if (state.slot + 1) % (SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: epoch = compute_epoch_of_shard_slot(state.slot) - earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard) - later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard) + earlier_committee = get_period_committee( + beacon_state, + get_shard_period_start_epoch(epoch, lookback=2), + state.shard, + ) + later_committee = get_period_committee( + beacon_state, + get_shard_period_start_epoch(epoch, lookback=1), + state.shard, + ) state.receipt_root = hash_tree_root(List[ShardReceiptDelta, PLACEHOLDER]([ ShardReceiptDelta(index, state.earlier_committee_rewards[i], state.earlier_committee_fees[i]) - for i, index in enumerate(committee) + for i, index in 
enumerate(earlier_committee) ])) state.earlier_committee_rewards = state.later_committee_rewards state.earlier_committee_fees = state.later_committee_fees @@ -425,66 +445,74 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: state.later_committee_fees = [0 for _ in range(len(later_committee))], else: state.receipt_root = Hash() - state.slot += 1 + state.slot += ShardSlot(1) ``` ```python def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: # Check slot number - assert candidate.core.slot == state.slot - + assert block.core.slot == state.slot + # Check parent block - if candidate.core.parent_root != Hash(): - assert candidate.core.parent_root == hash_tree_root(state.most_recent_block_core) - + if block.core.parent_root != Hash(): + assert block.core.parent_root == hash_tree_root(state.most_recent_block_core) + # Calculate base reward total_balance = get_total_active_balance(beacon_state) - base_reward = Gwei(REWARD_COEFFICIENT_BASE * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH) - + base_reward = Gwei( + REWARD_COEFFICIENT_BASE * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH + ) + # Check attestations - attester_committee = get_persistent_committee(beacon_state, shard, block.core.slot) + attester_committee = get_persistent_committee(beacon_state, state.shard, block.core.slot) pubkeys = [] attestations = 0 - + for i, index in enumerate(attester_committee): if block.core.attester_bitfield[i]: pubkeys.append(beacon_state.validators[index].pubkey) add_reward(state, beacon_state, index, base_reward) attestations += 1 - + for i in range(len(attester_committee), MAX_PERSISTENT_COMMITTEE_SIZE): assert block.attester_bitfield[i] is False - + assert bls_verify( pubkey=bls_aggregate_pubkeys(pubkeys), - message_hash=candidate.core.parent_root, - signature=candidate.signatures.attestation_signature, - 
domain=get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(candidate.core.slot)) + message_hash=block.core.parent_root, + signature=block.signatures.attestation_signature, + domain=get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.core.slot)) ) # Check proposer - proposer_index = get_shard_block_proposer_index(beacon_state, shard, candidate.core.slot) + proposer_index = get_shard_block_proposer_index(beacon_state, state.shard, block.core.slot) assert proposer_index is not None - add_reward(state, beacon_state, proposer_index, attestations * base_reward // PROPOSER_REWARD_QUOTIENT) + add_reward(state, beacon_state, proposer_index, Gwei(attestations * base_reward // PROPOSER_REWARD_QUOTIENT)) assert bls_verify( pubkey=beacon_state.validators[proposer_index].pubkey, - message_hash=hash_tree_root(candidate.core), - signature=candidate.signatures.proposer_signature, - domain=get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(candidate.core.slot)), + message_hash=hash_tree_root(block.core), + signature=block.signatures.proposer_signature, + domain=get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.core.slot)), ) - + # Process and update block data fees - add_fee(state, beacon_state, proposer_index, state.basefee * len(block.core.data) // SHARD_BLOCK_SIZE_LIMIT) + add_fee(state, beacon_state, proposer_index, Gwei(state.basefee * len(block.core.data) // SHARD_BLOCK_SIZE_LIMIT)) QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR if len(block.core.data) > SHARD_BLOCK_SIZE_TARGET: - state.basefee += min(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) + state.basefee += Gwei(min(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) elif len(block.core.data) < SHARD_BLOCK_SIZE_TARGET: - state.basefee -= min(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) - 
state.basefee = max(1, min(EFFECTIVE_BALANCE_INCREMENT // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH, state.basefee)) - + state.basefee -= Gwei(min(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) + state.basefee = Gwei(max( + 1, + min( + EFFECTIVE_BALANCE_INCREMENT // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH, + state.basefee, + ) + )) + # Check total bytes assert block.core.total_bytes == state.most_recent_block_core.total_bytes + len(block.core.data) - + # Update in-state block header state.most_recent_block_core = ShardBlockCore( slot=block.core.slot, @@ -495,7 +523,7 @@ def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: total_bytes=block.core.total_bytes, attester_bitfield=block.core.attester_bitfield ) - + # Check state root assert hash_tree_root(state) == block.core.state_root ``` From 13d6a31c5cf26e7fdbb22fc56dd2a7fe80db31e6 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 1 Aug 2019 00:27:31 +0800 Subject: [PATCH 012/250] misc fix --- specs/core/1_shard-data-chains.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index bc8a8817f..0fcbfe991 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -282,9 +282,9 @@ def get_shard_header(block: ShardBlock) -> ShardBlockHeader: data_root=hash_tree_root(block.core.data), state_root=block.core.state_root, total_bytes=block.core.total_bytes, - attester_bitfield=block.core.attester_bitfield + attester_bitfield=block.core.attester_bitfield, ), - signatures=block.signatures + signatures=block.signatures, ) ``` @@ -475,7 +475,7 @@ def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: attestations += 1 for i in range(len(attester_committee), MAX_PERSISTENT_COMMITTEE_SIZE): - assert block.attester_bitfield[i] is False + assert 
block.core.attester_bitfield[i] is False or block.core.attester_bitfield[i] == 0 # TODO: FIX Bitvector assert bls_verify( pubkey=bls_aggregate_pubkeys(pubkeys), @@ -521,7 +521,7 @@ def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: data_root=block.core.data_root, state_root=Hash(), total_bytes=block.core.total_bytes, - attester_bitfield=block.core.attester_bitfield + attester_bitfield=block.core.attester_bitfield, ) # Check state root From e08c365e2fa3f8a8200258a59230b2b6b37f9653 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 31 Jul 2019 17:25:55 -0400 Subject: [PATCH 013/250] Update specs/core/1_shard-data-chains.md Co-Authored-By: Hsiao-Wei Wang --- specs/core/1_shard-data-chains.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 0fcbfe991..266e46d63 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -421,6 +421,7 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: h = hash_tree_root(state) while state.slot % 2**depth == 0: state.history_acc[depth] = h + depth += 1 # Period transitions if (state.slot + 1) % (SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: From b57aed2380f8840cc96e7884f7eb4bd31c3d3d04 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 31 Jul 2019 21:26:47 -0400 Subject: [PATCH 014/250] A few fixes --- specs/core/1_shard-data-chains.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 266e46d63..3f54390d6 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -199,7 +199,7 @@ def compute_epoch_of_shard_slot(slot: ShardSlot) -> Epoch: ### `get_shard_period_start_epoch` ```python -def get_shard_period_start_epoch(epoch: Epoch, lookback: uint64=0) -> Epoch: +def get_shard_period_start_epoch(epoch: Epoch, 
lookback: int=0) -> Epoch: return Epoch(epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD) ``` @@ -337,12 +337,12 @@ def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash: def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardState: earlier_committee = get_period_committee( beacon_state, - Epoch(PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD * 2), + PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD * 2, shard, ) later_committee = get_period_committee( beacon_state, - Epoch(PHASE_1_FORK_SLOT - SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD), + PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD * 2, shard, ) return ShardState( @@ -519,7 +519,7 @@ def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: slot=block.core.slot, beacon_chain_root=block.core.beacon_chain_root, parent_root=block.core.parent_root, - data_root=block.core.data_root, + data_root=hash_tree_root(block.core.data), state_root=Hash(), total_bytes=block.core.total_bytes, attester_bitfield=block.core.attester_bitfield, From 52705f6fab88bfef04dfbb0686830111c8e2fe6b Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 31 Jul 2019 21:28:07 -0400 Subject: [PATCH 015/250] Quick fix --- specs/core/1_shard-data-chains.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 3f54390d6..5348322c4 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -500,9 +500,9 @@ def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: add_fee(state, beacon_state, proposer_index, Gwei(state.basefee * len(block.core.data) // SHARD_BLOCK_SIZE_LIMIT)) QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR if len(block.core.data) > SHARD_BLOCK_SIZE_TARGET: - state.basefee += Gwei(min(1, state.basefee * (len(block.core.data) - 
SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) + state.basefee += Gwei(max(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) elif len(block.core.data) < SHARD_BLOCK_SIZE_TARGET: - state.basefee -= Gwei(min(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) + state.basefee -= Gwei(max(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) state.basefee = Gwei(max( 1, min( From 7a4a136d6cecc74b57fc0aed7166428ad5a6c674 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 1 Aug 2019 13:32:37 +0800 Subject: [PATCH 016/250] Fix `later_committee` --- specs/core/1_shard-data-chains.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 5348322c4..7286692cb 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -342,7 +342,7 @@ def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardSta ) later_committee = get_period_committee( beacon_state, - PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD * 2, + PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD, shard, ) return ShardState( From ce3df38028da15f982980bda1170644f00f22396 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 1 Aug 2019 14:17:05 +0800 Subject: [PATCH 017/250] Some updates: 1. Clean up configurations 2. Add `HISTORY_ACCUMULATOR_VECTOR` 3. Add `validate_state_root` flag in `shard_state_transition` for testing 4. 
Rename `history_acc` to `history_accumulator` --- specs/core/1_shard-data-chains.md | 47 ++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 7286692cb..0ef54570a 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -14,6 +14,8 @@ - [Misc](#misc) - [Initial values](#initial-values) - [Time parameters](#time-parameters) + - [State list lengths](#state-list-lengths) + - [Rewards and penalties](#rewards-and-penalties) - [Signature domain types](#signature-domain-types) - [TODO PLACEHOLDER](#todo-placeholder) - [Data structures](#data-structures) @@ -63,13 +65,11 @@ We define the following Python custom types for type hinting and readability: | Name | Value | | - | - | -| `SHARD_HEADER_SIZE` | `2**9` (= 512) | -| `SHARD_BLOCK_SIZE_LIMIT` | `2**16` (= 65,536) | -| `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) | | `SHARD_SLOTS_PER_BEACON_SLOT` | `2**1` (= 2) | | `MAX_PERSISTENT_COMMITTEE_SIZE` | `2**7` (= 128) | -| `REWARD_COEFFICIENT_BASE` | `2**20` ( = 1,048,576) | -| `BASEFEE_ADJUSTMENT_FACTOR` | `2**3` (= 8) | +| `SHARD_HEADER_SIZE` | `2**9` (= 512) | +| `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) | +| `SHARD_BLOCK_SIZE_LIMIT` | `2**16` (= 65,536) | ### Initial values @@ -77,7 +77,6 @@ We define the following Python custom types for type hinting and readability: | - | - | | `PHASE_1_FORK_EPOCH` | **TBD** | | `PHASE_1_FORK_SLOT` | **TBD** | -| `GENESIS_SHARD_SLOT` | 0 | ### Time parameters @@ -86,6 +85,19 @@ We define the following Python custom types for type hinting and readability: | `CROSSLINK_LOOKBACK` | `2**0` (= 1) | epochs | 6.4 minutes | | `EPOCHS_PER_SHARD_PERIOD` | `2**8` (= 256) | epochs | ~27 hours | +### State list lengths + +| Name | Value | Unit | +| - | - | :-: | +| `HISTORY_ACCUMULATOR_VECTOR` | `2**6` (= 64) | state tree maximum depth | + +### Rewards and penalties + +| Name | Value | +| - | - 
| +| `BASEFEE_ADJUSTMENT_FACTOR` | `2**3` (= 8) | +| `REWARD_COEFFICIENT_BASE` | `2**20` ( = 1,048,576) | + ### Signature domain types The following types are defined, mapping into `DomainType` (little endian): @@ -159,7 +171,7 @@ class ExtendedShardBlockCore(Container): ```python class ShardState(Container): - history_acc: Vector[Hash, 64] + history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_VECTOR] earlier_committee_rewards: List[uint64, MAX_PERSISTENT_COMMITTEE_SIZE] later_committee_rewards: List[uint64, MAX_PERSISTENT_COMMITTEE_SIZE] earlier_committee_fees: List[Gwei, MAX_PERSISTENT_COMMITTEE_SIZE] @@ -337,12 +349,12 @@ def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash: def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardState: earlier_committee = get_period_committee( beacon_state, - PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD * 2, + Epoch(PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD * 2), shard, ) later_committee = get_period_committee( beacon_state, - PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD, + Epoch(PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD), shard, ) return ShardState( @@ -403,11 +415,14 @@ def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, ### Shard state transition function ```python -def shard_state_transition(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: +def shard_state_transition(state: ShardState, + beacon_state: BeaconState, + block: ShardBlock, + validate_state_root: bool=False) -> None: assert block.core.slot > state.slot for slot in range(state.slot, block.core.slot): shard_slot_transition(state, beacon_state) - shard_block_transition(state, beacon_state, block) + shard_block_transition(state, beacon_state, block, validate_state_root=validate_state_root) ``` ```python @@ -420,7 +435,7 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: depth = 0 h = hash_tree_root(state) while state.slot % 2**depth == 
0: - state.history_acc[depth] = h + state.history_accumulator[depth] = h depth += 1 # Period transitions @@ -450,7 +465,10 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: ``` ```python -def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: +def shard_block_transition(state: ShardState, + beacon_state: BeaconState, + block: ShardBlock, + validate_state_root: bool) -> None: # Check slot number assert block.core.slot == state.slot @@ -526,7 +544,8 @@ def shard_block_transition(state: ShardState, beacon_state: BeaconState, block: ) # Check state root - assert hash_tree_root(state) == block.core.state_root + if validate_state_root: + assert block.core.state_root == hash_tree_root(state) ``` ### Beacon attestations From 3aba05e252298db960ace81c6113558c1f91d24c Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 1 Aug 2019 14:19:08 +0800 Subject: [PATCH 018/250] Add simple tests for `shard_state_transition` --- configs/constant_presets/minimal.yaml | 10 ++++ .../test/helpers/phase1/shard_block.py | 44 +++++++++++++-- .../shard_data_chain/test_shard_block.py | 54 ++++++++++++++++--- 3 files changed, 97 insertions(+), 11 deletions(-) diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index 34419a223..ab8aab3c4 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -129,3 +129,13 @@ DOMAIN_TRANSFER: 0x05000000 DOMAIN_CUSTODY_BIT_CHALLENGE: 0x06000000 DOMAIN_SHARD_PROPOSER: 0x80000000 DOMAIN_SHARD_ATTESTER: 0x81000000 + + +# Phase 1 +# --------------------------------------------------------------- +SHARD_SLOTS_PER_BEACON_SLOT: 2 +EPOCHS_PER_SHARD_PERIOD: 4 +# PHASE_1_FORK_EPOCH >= EPOCHS_PER_SHARD_PERIOD * 2 +PHASE_1_FORK_EPOCH: 8 +# PHASE_1_FORK_SLOT = PHASE_1_FORK_EPOCH * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH +PHASE_1_FORK_SLOT: 128 diff --git 
a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py index 4e1981727..42e2765ea 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py @@ -7,6 +7,10 @@ from eth2spec.utils.ssz.ssz_impl import ( signing_root, ) +from .attestations import ( + sign_shard_attestation, +) + @only_with_bls() def sign_shard_block(spec, state, block, shard, proposer_index=None): @@ -26,22 +30,52 @@ def sign_shard_block(spec, state, block, shard, proposer_index=None): ) -def build_empty_shard_block(spec, state, slot, shard, parent_root, signed=False): +def build_empty_shard_block(spec, + shard_state, + beacon_state, + slot, + parent_root, + signed=False, + full_attestation=False): if slot is None: - slot = state.slot + slot = shard_state.slot + block = spec.ShardBlock( core=spec.ExtendedShardBlockCore( slot=slot, - beacon_chain_root=state.block_roots[state.slot % spec.SLOTS_PER_HISTORICAL_ROOT], + beacon_chain_root=beacon_state.block_roots[beacon_state.slot % spec.SLOTS_PER_HISTORICAL_ROOT], parent_root=parent_root, ), signatures=spec.ShardBlockSignatures( - attestation_signature=b'\x12' * 96, + attestation_signature=b'\x00' * 96, proposer_signature=b'\x25' * 96, ) ) + # attestation + if full_attestation: + attester_committee = spec.get_persistent_committee(beacon_state, shard_state.shard, block.core.slot) + block.core.attester_bitfield = list( + (True,) * len(attester_committee) + + (False,) * (spec.MAX_PERSISTENT_COMMITTEE_SIZE * 2 - len(attester_committee)) + ) + block.signatures.attestation_signature = sign_shard_attestation( + spec, + shard_state, + beacon_state, + block, + participants=attester_committee, + ) + else: + block.signatures.attestation_signature = sign_shard_attestation( + spec, + shard_state, + beacon_state, + block, + participants=(), + ) + if signed: - sign_shard_block(spec, state, block, shard) + 
sign_shard_block(spec, beacon_state, block, shard_state.shard) return block diff --git a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py index 359350d39..2bb0232f0 100644 --- a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py +++ b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py @@ -11,16 +11,58 @@ from eth2spec.test.context import ( @with_all_phases_except(['phase0']) @always_bls @spec_state_test -def test_is_valid_shard_block(spec, state): +def test_process_empty_shard_block(spec, state): + beacon_state = state + + shard_slot = spec.PHASE_1_FORK_SLOT + beacon_state.slot = spec.Slot(spec.PHASE_1_FORK_EPOCH * spec.SLOTS_PER_EPOCH) + shard_state = spec.get_default_shard_state(beacon_state, shard=spec.Shard(0)) + shard_state.slot = shard_slot + block = build_empty_shard_block( spec, - state, - slot=spec.Slot(spec.PERSISTENT_COMMITTEE_PERIOD * 100), - shard=spec.Shard(1), + shard_state, + beacon_state, + slot=shard_slot + 1, parent_root=spec.Hash(), signed=True, + full_attestation=False, ) - # TODO: test `is_valid_shard_block` + yield 'pre', shard_state + yield 'beacon_state', beacon_state + yield 'block', block - yield 'blocks', (block,) + spec.shard_state_transition(shard_state, beacon_state, block) + + yield 'post', shard_state + + +@with_all_phases_except(['phase0']) +@always_bls +@spec_state_test +def test_process_full_attestation_shard_block(spec, state): + beacon_state = state + + shard_slot = spec.PHASE_1_FORK_SLOT + beacon_state.slot = spec.Slot(spec.PHASE_1_FORK_EPOCH * spec.SLOTS_PER_EPOCH) + shard_state = spec.get_default_shard_state(beacon_state, shard=spec.Shard(0)) + shard_state.slot = shard_slot + + block = build_empty_shard_block( + spec, + shard_state, + beacon_state, + slot=shard_slot + 1, + parent_root=spec.Hash(), + signed=True, + full_attestation=True, + ) + + yield 'pre', shard_state + 
yield 'beacon_state', beacon_state + yield 'block', block + + spec.shard_state_transition(shard_state, beacon_state, block) + + yield 'post', shard_state From db292502567c67b9cec08d8d19bbde6ae8ab6aad Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 1 Aug 2019 14:22:01 +0800 Subject: [PATCH 019/250] Add testing helpers --- .../test/helpers/phase1/attestations.py | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py diff --git a/test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py new file mode 100644 index 000000000..750ab5048 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py @@ -0,0 +1,37 @@ +from eth2spec.test.helpers.keys import privkeys +from eth2spec.utils.bls import ( + bls_aggregate_signatures, + bls_sign, +) + + +def sign_shard_attestation(spec, shard_state, beacon_state, block, participants): + signatures = [] + message_hash = block.core.parent_root + block_epoch = spec.compute_epoch_of_shard_slot(block.core.slot) + for validator_index in participants: + privkey = privkeys[validator_index] + signatures.append( + get_attestation_signature( + spec, + shard_state, + beacon_state, + message_hash, + block_epoch, + privkey, + ) + ) + + return bls_aggregate_signatures(signatures) + + +def get_attestation_signature(spec, shard_state, beacon_state, message_hash, block_epoch, privkey): + return bls_sign( + message_hash=message_hash, + privkey=privkey, + domain=spec.get_domain( + state=beacon_state, + domain_type=spec.DOMAIN_SHARD_ATTESTER, + message_epoch=block_epoch, + ) + ) From 4163053ccad9f55aefd13f8bcc51ec086a12acf6 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 1 Aug 2019 16:45:01 +0800 Subject: [PATCH 020/250] Clean up type hintings, especially `reward` is denominated in uint, and `fee` is in `Gwei` --- specs/core/1_shard-data-chains.md | 35 
++++++++++++++++--------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 0ef54570a..c4d8e2701 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -218,9 +218,7 @@ def get_shard_period_start_epoch(epoch: Epoch, lookback: int=0) -> Epoch: ### `get_period_committee` ```python -def get_period_committee(state: BeaconState, - epoch: Epoch, - shard: Shard) -> List[ValidatorIndex, MAX_PERSISTENT_COMMITTEE_SIZE]: +def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]: """ Return committee for a period. Used to construct persistent committees. """ @@ -363,8 +361,8 @@ def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardSta slot=PHASE_1_FORK_SLOT, earlier_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(earlier_committee))], later_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))], - earlier_committee_fees=[0 for _ in range(len(earlier_committee))], - later_committee_fees=[0 for _ in range(len(later_committee))], + earlier_committee_fees=[Gwei(0) for _ in range(len(earlier_committee))], + later_committee_fees=[Gwei(0) for _ in range(len(later_committee))], ) ``` @@ -383,7 +381,7 @@ Note that these acceptance conditions depend on the canonical beacon chain; when ### Shard state transition function helpers ```python -def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: +def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: int) -> None: epoch = compute_epoch_of_shard_slot(state.slot) earlier_committee = get_period_committee( beacon_state, @@ -400,7 +398,7 @@ def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorInd ``` ```python -def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: Gwei) 
-> None: +def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: int) -> None: epoch = compute_epoch_of_shard_slot(state.slot) earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard) later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard) @@ -452,13 +450,17 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: state.shard, ) state.receipt_root = hash_tree_root(List[ShardReceiptDelta, PLACEHOLDER]([ - ShardReceiptDelta(index, state.earlier_committee_rewards[i], state.earlier_committee_fees[i]) - for i, index in enumerate(earlier_committee) + ShardReceiptDelta( + index=validator_index, + reward_coefficient=state.earlier_committee_rewards[i], + block_fee=state.earlier_committee_fees[i], + ) + for i, validator_index in enumerate(earlier_committee) ])) state.earlier_committee_rewards = state.later_committee_rewards state.earlier_committee_fees = state.later_committee_fees state.later_committee_rewards = [REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))], - state.later_committee_fees = [0 for _ in range(len(later_committee))], + state.later_committee_fees = [Gwei(0) for _ in range(len(later_committee))], else: state.receipt_root = Hash() state.slot += ShardSlot(1) @@ -478,19 +480,18 @@ def shard_block_transition(state: ShardState, # Calculate base reward total_balance = get_total_active_balance(beacon_state) - base_reward = Gwei( + base_reward = ( REWARD_COEFFICIENT_BASE * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH ) - # Check attestations attester_committee = get_persistent_committee(beacon_state, state.shard, block.core.slot) pubkeys = [] attestations = 0 - for i, index in enumerate(attester_committee): + for i, validator_index in enumerate(attester_committee): if block.core.attester_bitfield[i]: - 
pubkeys.append(beacon_state.validators[index].pubkey) - add_reward(state, beacon_state, index, base_reward) + pubkeys.append(beacon_state.validators[validator_index].pubkey) + add_reward(state, beacon_state, validator_index, base_reward) attestations += 1 for i in range(len(attester_committee), MAX_PERSISTENT_COMMITTEE_SIZE): @@ -506,7 +507,7 @@ def shard_block_transition(state: ShardState, # Check proposer proposer_index = get_shard_block_proposer_index(beacon_state, state.shard, block.core.slot) assert proposer_index is not None - add_reward(state, beacon_state, proposer_index, Gwei(attestations * base_reward // PROPOSER_REWARD_QUOTIENT)) + add_reward(state, beacon_state, proposer_index, attestations * base_reward // PROPOSER_REWARD_QUOTIENT) assert bls_verify( pubkey=beacon_state.validators[proposer_index].pubkey, message_hash=hash_tree_root(block.core), @@ -515,7 +516,7 @@ def shard_block_transition(state: ShardState, ) # Process and update block data fees - add_fee(state, beacon_state, proposer_index, Gwei(state.basefee * len(block.core.data) // SHARD_BLOCK_SIZE_LIMIT)) + add_fee(state, beacon_state, proposer_index, state.basefee * len(block.core.data) // SHARD_BLOCK_SIZE_LIMIT) QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR if len(block.core.data) > SHARD_BLOCK_SIZE_TARGET: state.basefee += Gwei(max(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) From cb1a0cbd5f49eed89a0aa058a60e5e134543fffe Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 07:57:34 -0400 Subject: [PATCH 021/250] Update specs/light_client/merkle_proofs.md --- specs/light_client/merkle_proofs.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index f62dc8d5c..5b88fbbca 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -120,6 +120,7 @@ def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> Generalized 
Converts a path (eg. `[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree. """ + root = 1 for p in path: assert not is_basic_type(typ) # If we descend to a basic type, the path cannot continue further if p == '__len__': From ed3749264b9ce5cbc444911da99ed289739e1d30 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 08:07:57 -0400 Subject: [PATCH 022/250] Update specs/light_client/merkle_proofs.md Co-Authored-By: Danny Ryan --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 5b88fbbca..114947326 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -202,7 +202,7 @@ def get_expanded_indices(indices: List[int]) -> List[int]: branches = set() for index in indices: branches = branches.union(set(get_branch_indices(index) + [index])) - return sorted(list([x for x in branches if x*2 not in branches or x*2+1 not in branches]))[::-1] + return sorted([x for x in branches if x*2 not in branches or x*2+1 not in branches])[::-1] ``` Generating a proof that covers paths `p1 ... pn` is simply a matter of taking the chunks in the SSZ hash tree with generalized indices `get_expanded_indices([p1 ... pn])`. 
From 92fc0f2b81a2cc8a589fee54109533d843fdc182 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 08:08:12 -0400 Subject: [PATCH 023/250] Update specs/light_client/merkle_proofs.md Co-Authored-By: Danny Ryan --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 114947326..81650bdbf 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -221,7 +221,7 @@ def fill(objects: Dict[int, Bytes32]) -> Dict[int, Bytes32]: while pos < len(keys): k = keys[pos] if k in objects and k ^ 1 in objects and k // 2 not in objects: - objects[k // 2] = hash(objects[k & - 2] + objects[k | 1]) + objects[k // 2] = hash(objects[k & -2] + objects[k | 1]) keys.append(k // 2) pos += 1 # Completeness and consistency check From 446ad3c392439fb916cf54cd6911d5dc5df1aab6 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 08:08:18 -0400 Subject: [PATCH 024/250] Update specs/light_client/merkle_proofs.md Co-Authored-By: Danny Ryan --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 81650bdbf..dae2a1704 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -267,7 +267,7 @@ def extract_value_at_path(chunks: Dict[int, Bytes32], typ: Type, path: List[Unio for p in path: if p == '__len__': return deserialize_basic(chunks[root * 2 + 1][:8], uint64) - if iissubclass(typ, (List, Bytes)): + if issubclass(typ, (List, Bytes)): assert 0 <= p < deserialize_basic(chunks[root * 2 + 1][:8], uint64) pos, start, end = get_item_position(typ, p) root = root * (2 if issubclass(typ, (List, Bytes)) else 1) * next_power_of_two(get_chunk_count(typ)) + pos From e5350c10264139b163055773dc8e080201618de6 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 1 Aug 2019 
12:44:41 +0200 Subject: [PATCH 025/250] Update libp2p networking spec --- specs/networking/libp2p-standardization.md | 158 ----- specs/networking/messaging.md | 45 -- specs/networking/node-identification.md | 31 - specs/networking/p2p-interface.md | 712 +++++++++++++++++++++ specs/networking/rpc-interface.md | 283 -------- 5 files changed, 712 insertions(+), 517 deletions(-) delete mode 100644 specs/networking/libp2p-standardization.md delete mode 100644 specs/networking/messaging.md delete mode 100644 specs/networking/node-identification.md create mode 100644 specs/networking/p2p-interface.md delete mode 100644 specs/networking/rpc-interface.md diff --git a/specs/networking/libp2p-standardization.md b/specs/networking/libp2p-standardization.md deleted file mode 100644 index d1ba07e65..000000000 --- a/specs/networking/libp2p-standardization.md +++ /dev/null @@ -1,158 +0,0 @@ -ETH 2.0 Networking Spec - Libp2p standard protocols -=== - -# Abstract - -Ethereum 2.0 clients plan to use the libp2p protocol networking stack for -mainnet release. This document aims to standardize the libp2p client protocols, -configuration and messaging formats. - -# Libp2p Components - -## Transport - -This section details the libp2p transport layer that underlies the -[protocols](#protocols) that are listed in this document. - -Libp2p allows composition of multiple transports. Eth2.0 clients should support -TCP/IP and optionally websockets. Websockets are useful for implementations -running in the browser and therefore native clients would ideally support these implementations -by supporting websockets. - -An ideal libp2p transport would therefore support both TCP/IP and websockets. 
- -*Note: There is active development in libp2p to facilitate the -[QUIC](https://github.com/libp2p/go-libp2p-quic-transport) transport, which may -be adopted in the future* - -### Encryption - -Libp2p currently offers [Secio](https://github.com/libp2p/specs/pull/106) which -can upgrade a transport which will then encrypt all future communication. Secio -generates a symmetric ephemeral key which peers use to encrypt their -communication. It can support a range of ciphers and currently supports key -derivation for elliptic curve-based public keys. - -Current defaults are: -- Key agreement: `ECDH-P256` (also supports `ECDH-P384`) -- Cipher: `AES-128` (also supports `AES-256`, `TwofishCTR`) -- Digests: `SHA256` (also supports `SHA512`) - -*Note: Secio is being deprecated in favour of [TLS -1.3](https://github.com/libp2p/specs/blob/master/tls/tls.md). It is our -intention to transition to use TLS 1.3 for encryption between nodes, rather -than Secio.* - - -## Protocols - -This section lists the necessary libp2p protocols required by Ethereum 2.0 -running a libp2p network stack. - -## Multistream-select - -#### Protocol id: `/multistream/1.0.0` - -Clients running libp2p should support the -[multistream-select](https://github.com/multiformats/multistream-select/) -protocol which allows clients to negotiate libp2p protocols establish streams -per protocol. - -## Multiplexing - -Libp2p allows clients to compose multiple multiplexing methods. Clients should -support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and -optionally [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) -(these can be composed). 
- -**Mplex protocol id: `/mplex/6.7.0`** - -**Yamux protocol id: `/yamux/1.0.0`** - -## Gossipsub - -#### Protocol id: `/eth/serenity/gossipsub/1.0.0` - -*Note: Parameters listed here are subject to a large-scale network feasibility -study* - -The [Gossipsub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) -protocol is used for block and attestation propagation across the -network. - -### Configuration Parameters - -Gossipsub has a number of internal configuration parameters which directly -effect the network performance. Clients can implement independently, however -we aim to standardize these across clients to optimize the gossip network for -propagation times and message duplication. Current network-related defaults are: - -``` -( - // The target number of peers in the overlay mesh network (D in the libp2p specs). - mesh_size: 6 - // The minimum number of peers in the mesh network before adding more (D_lo in the libp2p specs). - mesh_lo: 4 - // The maximum number of peers in the mesh network before removing some (D_high in the libp2p sepcs). - mesh_high: 12 - // The number of peers to gossip to during a heartbeat (D_lazy in the libp2p sepcs). - gossip_lazy: 6 // defaults to `mesh_size` - // Time to live for fanout peers (seconds). - fanout_ttl: 60 - // The number of heartbeats to gossip about. - gossip_history: 3 - // Time between each heartbeat (seconds). - heartbeat_interval: 1 -) -``` - -### Topics - -*The Go and Js implementations use string topics - This is likely to be -updated to topic hashes in later versions - https://github.com/libp2p/rust-libp2p/issues/473* - -For Eth2.0 clients, topics are sent as `SHA2-256` hashes of the topic string. - -There are two main topics used to propagate attestations and beacon blocks to -all nodes on the network. - -- The `beacon_block` topic - This topic is used solely for propagating new - beacon blocks to all nodes on the networks. 
-- The `beacon_attestation` topic - This topic is used to propagate - aggregated attestations to subscribing nodes (typically block proposers) to - be included into future blocks. Attestations are aggregated in their - respective subnets before publishing on this topic. - -Shards are grouped into their own subnets (defined by a shard topic). The -number of shard subnets is defined via `SHARD_SUBNET_COUNT` and the shard -`shard_number % SHARD_SUBNET_COUNT` is assigned to the topic: -`shard{shard_number % SHARD_SUBNET_COUNT}_attestation`. - -### Messages - -*Note: The message format here is Eth2.0-specific* - -Each Gossipsub -[Message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) -has a maximum size of 512KB (estimated from expected largest uncompressed block -size). - -The `data` field of a Gossipsub `Message` is an SSZ-encoded object. For the `beacon_block` topic, -this is a `beacon_block`. For the `beacon_attestation` topic, this is -an `attestation`. - -## Eth-2 RPC - -#### Protocol Id: `/eth/serenity/beacon/rpc/1` - -The [RPC Interface](./rpc-interface.md) is specified in this repository. - -## Discovery - -Discovery Version 5 -([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) -will be used for discovery. This protocol uses a UDP transport and specifies -its own encryption, ip-discovery and topic advertisement. Therefore, it has no -need to establish streams through `multistream-select`, rather, act -as a standalone implementation that feeds discovered peers/topics (ENR-records) as -`multiaddrs` into the libp2p service. diff --git a/specs/networking/messaging.md b/specs/networking/messaging.md deleted file mode 100644 index d7cb5bb5b..000000000 --- a/specs/networking/messaging.md +++ /dev/null @@ -1,45 +0,0 @@ -# Eth 2.0 Networking Spec - Messaging - -## Abstract - -This specification describes how individual Ethereum 2.0 messages are represented on the wire. 
- -The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL”, NOT", “SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). - -## Motivation - -This specification seeks to define a messaging protocol that is flexible enough to be changed easily as the Eth 2.0 specification evolves. - -Note that while `libp2p` is the chosen networking stack for Ethereum 2.0, as of this writing some clients do not have workable `libp2p` implementations. To allow those clients to communicate, we define a message envelope that includes the body's compression, encoding, and body length. Once `libp2p` is available across all implementations, this message envelope will be removed because `libp2p` will negotiate the values defined in the envelope upfront. - -## Specification - -### Message structure - -An Eth 2.0 message consists of an envelope that defines the message's compression, encoding, and length followed by the body itself. - -Visually, a message looks like this: - -``` -+--------------------------+ -| compression nibble | -+--------------------------+ -| encoding nibble | -+--------------------------+ -| body length (uint64) | -+--------------------------+ -| | -| body | -| | -+--------------------------+ -``` - -Clients MUST ignore messages with malformed bodies. The compression/encoding nibbles MUST be one of the following values: - -### Compression nibble values - -- `0x0`: no compression - -### Encoding nibble values - -- `0x1`: SSZ diff --git a/specs/networking/node-identification.md b/specs/networking/node-identification.md deleted file mode 100644 index 32ec4dfad..000000000 --- a/specs/networking/node-identification.md +++ /dev/null @@ -1,31 +0,0 @@ -# Eth 2.0 Networking Spec - Node Identification - -## Abstract - -This specification describes how Ethereum 2.0 nodes identify and address each other on the network. 
- -The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). - -## Specification - -Clients use Ethereum Node Records (as described in [EIP-778](http://eips.ethereum.org/EIPS/eip-778)) to discover one another. Each ENR includes, among other things, the following keys: - -- The node's IP. -- The node's TCP port. -- The node's public key. - -For clients to be addressable, their ENR responses MUST contain all of the above keys. Client MUST verify the signature of any received ENRs, and disconnect from peers whose ENR signatures are invalid. Each node's public key MUST be unique. - -The keys above are enough to construct a [multiaddr](https://github.com/multiformats/multiaddr) for use with the rest of the `libp2p` stack. - -It is RECOMMENDED that clients set their TCP port to the default of `9000`. - -### Peer ID generation - -The `libp2p` networking stack identifies peers via a "peer ID." Simply put, a node's Peer ID is the SHA2-256 `multihash` of the node's public key struct (serialized in protobuf, refer to the [Peer ID spec](https://github.com/libp2p/specs/pull/100)). `go-libp2p-crypto` contains the canonical implementation of how to hash `secp256k1` keys for use as a peer ID. - -## See also - -- [multiaddr](https://github.com/multiformats/multiaddr) -- [multihash](https://multiformats.io/multihash/) -- [go-libp2p-crypto](https://github.com/libp2p/go-libp2p-crypto) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md new file mode 100644 index 000000000..72f5c0fd6 --- /dev/null +++ b/specs/networking/p2p-interface.md @@ -0,0 +1,712 @@ +# Overview + +This document contains the network specification for Ethereum 2.0 clients. + +It consists of four main sections: + +1. 
A specification of the network fundamentals detailing the two network configurations: interoperability test network, and mainnet launch. +2. A specification of the three network interaction _domains_ of ETH2.0: (a) the gossip domain, (b) the discovery domain, \(c\) the Req/Resp domain. +3. The rationale and further explanation for the design choices made in the previous two sections. +4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which ETH 2.0 clients are being developed. + +## Table of Contents +[TOC] + +# Network Fundamentals + +This section outlines the specification for the networking stack in Ethereum 2.0 clients. + +Sections that have differing parameters for mainnet launch and interoperability testing are split into subsections. Sections that are not split have the same parameters for interoperability testing as mainnet launch. + +## Transport + +Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability. + +#### Interop + +All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). + +The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously). Clients SHOULD allow the operator to configure the listen IP addresses and ports, including the addressing schemes (IPv4, IPv6). + +To facilitate connectivity, and avert possible IPv6 routability/support issues, clients participating in the interoperability testnet MUST expose at least ONE IPv4 endpoint. + +All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT or AutoRelay facilities. + +Nodes operating behind a NAT, or otherwise undialable by default (e.g. 
container runtime, firewall, etc.), MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint. + +#### Mainnet + +All requirements from the interoperability testnet apply, except for the IPv4 addressing scheme requirement. + +At this stage, clients are licensed to drop IPv4 support if they wish to do so, cognizant of the potential disadvantages in terms of Internet-wide routability/support. Clients MAY choose to listen only on IPv6, but MUST retain capability to dial both IPv4 and IPv6 addresses. + +Usage of circuit relay, AutoNAT or AutoRelay will be specifically re-examined closer to the time. + +## Encryption and identification + +#### Interop + +[SecIO](https://github.com/libp2p/specs/tree/master/secio) with `secp256k1` identities will be used for initial interoperability testing. + +The following SecIO parameters MUST be supported by all stacks: + +- Key agreement: ECDH-P256. +- Cipher: AES-128. +- Digest: SHA256. + +#### Mainnet + +[Noise Framework](http://www.noiseprotocol.org/) handshakes will be used for mainnet. libp2p Noise support [is in the process of being standardised](https://github.com/libp2p/specs/issues/195) in the libp2p project. + +Noise support will presumably include IX, IK and XX handshake patterns, and may rely on Curve25519 keys, ChaCha20 and Poly1305 ciphers, and SHA256 as a hash function. These aspects are being actively debated in the referenced issue [ETH 2.0 implementers are welcome to comment and contribute to the discussion.] + +## Protocol Negotiation + +#### Interop + +Connection-level and stream-level (see the rationale section below for explanations) protocol negotiation MUST be conducted using [multistream-select v1.0](https://github.com/multiformats/multistream-select/). Its protocol ID is: `/multistream/1.0.0`. 
+ +#### Mainnet + +Clients MUST support [multistream-select 1.0](https://github.com/multiformats/multistream-select/) and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95). Depending on the number of clients that have implementations for multiselect 2.0 by mainnet, [multistream-select 1.0](https://github.com/multiformats/multistream-select/) may be phased out. + +## Multiplexing + +During connection bootstrapping, libp2p dynamically negotiates a mutually supported multiplexing method to conduct parallel conversations. This applies to transports that are natively incapable of multiplexing (e.g. TCP, WebSockets, WebRTC), and is omitted for capable transports (e.g. QUIC). + +Two multiplexers are commonplace in libp2p implementations: [mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). Their protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`. + +Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux must take precedence during negotiation. See the Rationale section of this document for tradeoffs. + +# ETH2 network interaction domains + +## Constants + +This section outlines constants that are used in this spec. + +- `RQRP_MAX_SIZE`: The max size of uncompressed req/resp messages that clients will allow. + Value: TBD +- `GOSSIP_MAX_SIZE`: The max size of uncompressed gossip messages + Value: 1MB (estimated from expected largest uncompressed block size). +- `SHARD_SUBNET_COUNT`: The number of shard subnets used in the gossipsub protocol. + Value: TBD + +## The gossip domain: gossipsub + +Clients MUST support the [gossipsub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) libp2p protocol. 
+ +**Protocol ID:** `/meshsub/1.0.0` + +**Gossipsub Parameters** + +*Note: Parameters listed here are subject to a large-scale network feasibility study.* + +The following gossipsub parameters will be used: + +- `D` (topic stable mesh target count): 6 +- `D_low` (topic stable mesh low watermark): 4 +- `D_high` (topic stable mesh high watermark): 12 +- `D_lazy` (gossip target): 6 +- `fanout_ttl` (ttl for fanout maps for topics we are not subscribed to but have published to, seconds): 60 +- `gossip_advertise` (number of windows to gossip about): 3 +- `gossip_history` (number of heartbeat intervals to retain message IDs): 5 +- `heartbeat_interval` (frequency of heartbeat, seconds): 1 + +### Topics + +Topics are plain UTF-8 strings, and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages). + +Topic strings have form: `/eth2/TopicName/TopicEncoding`. This defines both the type of data being sent on the topic and how the data field of the message is encoded. (Further details can be found in [Messages](#Messages)). + +There are two main topics used to propagate attestations and beacon blocks to all nodes on the network. Their `TopicName`'s are: + +- `beacon_block` - This topic is used solely for propagating new beacon blocks to all nodes on the networks. Blocks are sent in their entirety. Clients who receive a block on this topic MUST validate the block proposer signature before forwarding it across the network. +- `beacon_attestation` - This topic is used to propagate aggregated attestations (in their entirety) to subscribing nodes (typically block proposers) to be included in future blocks. Similarly to beacon blocks, clients will be expected to perform some sort of validation before forwarding, but the precise mechanism is still TBD. + +Additional topics are used to propagate lower frequency validator messages. 
Their `TopicName`’s are: + +- `voluntary_exit` - This topic is used solely for propagating voluntary validator exits to proposers on the network. Voluntary exits are sent in their entirety. Clients who receive a voluntary exit on this topic MUST validate the conditions within `process_voluntary_exit` before forwarding it across the network. +- `proposer_slashing` - This topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. Clients who receive a proposer slashing on this topic MUST validate the conditions within `process_proposer_slashing` before forwarding it across the network. +- `attester_slashing` - This topic is used solely for propagating attester slashings to proposers on the network. Attester slashings are sent in their entirety. Clients who receive an attester slashing on this topic MUST validate the conditions within `process_attester_slashing` before forwarding it across the network. + +#### Interop + +Unaggregated attestations from all shards are sent to the `beacon_attestation` topic. + +#### Mainnet + +Shards are grouped into their own subnets (defined by a shard topic). The number of shard subnets is defined via `SHARD_SUBNET_COUNT` and the shard `shard_number % SHARD_SUBNET_COUNT` is assigned to the topic: `shard{shard_number % SHARD_SUBNET_COUNT}_beacon_attestation`. Unaggregated attestations are sent to the subnet topic. Aggregated attestations are sent to the `beacon_attestation` topic. + +### Messages + +Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. + +Clients MUST reject (fail validation) messages that are over this size limit. Likewise, clients MUST NOT emit or propagate messages larger than this limit. 
+ +The payload is carried in the `data` field of a gossipsub message, and varies depending on the topic: + + +| Topic | Message Type | +|------------------------------|-------------------| +| beacon_block | BeaconBlock | +| beacon_attestation | Attestation | +| shard{N}\_beacon_attestation | Attestation | +| voluntary_exit | VoluntaryExit | +| proposer_slashing | ProposerSlashing | +| attester_slashing | AttesterSlashing | + +Clients MUST reject (fail validation) messages containing an incorrect type, or invalid payload. + +When processing incoming gossip, clients MAY descore or disconnect peers who fail to observe these constraints. + +### Encodings + +Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded. + +#### Interop + +- `ssz` - All objects are SSZ-encoded. Example: The beacon block topic string is: `/beacon_block/ssz` and the data field of a gossipsub message is an ssz-encoded `BeaconBlock`. + +#### Mainnet + +- `ssz_snappy` - All objects are ssz-encoded and then compressed with snappy. Example: The beacon attestation topic string is: `/beacon_attestation/ssz_snappy` and the data field of a gossipsub message is an `Attestation` that has been ssz-encoded then compressed with snappy. + +Implementations MUST use a single encoding. Changing an encoding will require coordination between participating implementations. + +## The discovery domain: discv5 + +Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery, both in the interoperability testnet and mainnet. + +`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only. `discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context. 
+ +### Integration into libp2p stacks + +`discv5` SHOULD be integrated into the client’s libp2p stack by implementing an adaptor to make it conform to the [service discovery](https://github.com/libp2p/go-libp2p-core/blob/master/discovery/discovery.go) and [peer routing](https://github.com/libp2p/go-libp2p-core/blob/master/routing/routing.go#L36-L44) abstractions and interfaces (go-libp2p links provided). + +Inputs to operations include peer IDs (when locating a specific peer), or capabilities (when searching for peers with a specific capability), and the outputs will be multiaddrs converted from the ENR records returned by the discv5 backend. + +This integration enables the libp2p stack to subsequently form connections and streams with discovered peers. + +### ENR structure + +The Ethereum Node Record (ENR) for an Ethereum 2.0 client MUST contain the following entries (exclusive of the sequence number and signature, which MUST be present in an ENR): + +- The compressed secp256k1 publickey, 33 bytes (`secp256k1` field). +- An IPv4 address (`ip` field) and/or IPv6 address (`ip6` field). +- A TCP port (`tcp` field) representing the local libp2p listening port. +- A UDP port (`udp` field) representing the local discv5 listening port. + +Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778). + +#### Interop + +In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry ETH2 capability information, as it would be superfluous. + +Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed a ETH2 peer, in order to eschew connecting to ETH1 peers. + +#### Mainnet + +On mainnet, ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. 
Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability. + +### Topic advertisement + +#### Interop + +This feature will not be used in the interoperability testnet. + +#### Mainnet + +In mainnet, we plan to use discv5’s topic advertisement feature as a rendezvous facility for peers on shards (thus subscribing to the relevant gossipsub topics). + +## The Req/Resp domain + +### Protocol identification + +Each message type is segregated into its own libp2p protocol ID, which is a case-sensitive UTF-8 string of the form: + +``` +/ProtocolPrefix/MessageName/SchemaVersion/Encoding +``` + +With: + +- `ProtocolPrefix` - messages are grouped into families identified by a shared libp2p protocol name prefix. In this case, we use `/eth2/beacon_chain/req`. +- `MessageName` - each request is identified by a name consisting of English alphabet, digits and underscores (`_`). +- `SchemaVersion` - an ordinal version number (e.g. 1, 2, 3…). Each schema is versioned to facilitate backward and forward-compatibility when possible. +- `Encoding` - while the schema defines the data types in more abstract terms, the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encoding-strategies) section for further details. + +This protocol segregation allows libp2p `multistream-select 1.0` / `multiselect 2.0` to handle the request type, version and encoding negotiation before establishing the underlying streams. + +### Req/Resp interaction + +We use ONE stream PER request/response interaction. Streams are closed when the interaction finishes, whether in success or in error. + +Request/response messages MUST adhere to the encoding specified in the protocol name, and follow this structure (relaxed BNF grammar): + +``` +request ::= <encoding-dependent-header> | <encoded-payload> +response ::= <result> | <encoding-dependent-header> | <encoded-payload> +result ::= "0" | "1" | "2" | ["128" ... 
"255"] +``` + +The encoding-dependent header may carry metadata or assertions such as the encoded payload length, for integrity and attack proofing purposes. It is not strictly necessary to length-prefix payloads, because req/resp streams are single-use, and stream closures implicitly delimit the boundaries, but certain encodings like SSZ do, for added security. + +`encoded-payload` has a maximum byte size of `RQRP_MAX_SIZE`. + +Clients MUST ensure the payload size is less than or equal to `RQRP_MAX_SIZE`; if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance. + +#### Requesting side + +Once a new stream with the protocol ID for the request type has been negotiated, the full request message should be sent immediately. It should be encoded according to the encoding strategy. + +The requester MUST close the write side of the stream once it finishes writing the request message - at this point, the stream will be half-closed. + +The requester MUST wait a maximum of **5 seconds** for the first response byte to arrive (time to first byte – or TTFB – timeout). On that happening, the requester will allow further **10 seconds** to receive the full response. + +If any of these timeouts fire, the requester SHOULD reset the stream and deem the req/resp operation to have failed. + +#### Responding side + +Once a new stream with the protocol ID for the request type has been negotiated, the responder must process the incoming request message according to the encoding strategy, until EOF (denoting stream half-closure by the requester). + +The responder MUST: + +1. Use the encoding strategy to read the optional header. +2. If there are any length assertions for length `N`, it should read exactly `N` bytes from the stream, at which point an EOF should arise (no more bytes). Should this not be the case, it should be treated as a failure. +3. 
Deserialize the expected type, and process the request. +4. Write the response (result, optional header, payload). +5. Close their write side of the stream. At this point, the stream will be fully closed. + +If steps (1), (2) or (3) fail due to invalid, malformed or inconsistent data, the responder MUST respond in error. Clients tracking peer reputation MAY record such failures, as well as unexpected events, e.g. early stream resets. + +The entire request should be read in no more than **5 seconds**. Upon a timeout, the responder SHOULD reset the stream. + +The responder SHOULD send a response promptly, starting with a **single-byte** response code which determines the contents of the response (`result` particle in the BNF grammar above). + +It can have one of the following values: + +- 0: **Success** -- a normal response follows, with contents matching the expected message schema and encoding specified in the request. +- 1: **InvalidRequest** -- the contents of the request are semantically invalid, or the payload is malformed, or could not be understood. The response payload adheres to the ErrorMessage schema (described below). +- 2: **ServerError** -- the responder encountered an error while processing the request. The response payload adheres to the ErrorMessage schema (described below). + +Clients MAY use response codes above `128` to indicate alternative, erroneous request-specific responses. + +The range `[3, 127]` is RESERVED for future usages, and should be treated as error if not recognised expressly. + +The `ErrorMessage` schema is: + +``` +( + error_message: String +) +``` + +*Note that the String type is encoded as UTF-8 bytes when SSZ-encoded.* + +A response therefore has the form: +``` + +--------+--------+--------+--------+--------+--------+ + | result | header (opt) | encoded_response | + +--------+--------+--------+--------+--------+--------+ +``` +Here `result` represents the 1-byte response code. 
+ +### Encoding strategies + +The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time: + +- `ssz`: the contents are [SSZ](https://github.com/ethereum/eth2.0-specs/blob/192442be51a8a6907d6401dffbf5c73cb220b760/specs/networking/libp2p-standardization.md#ssz-encoding) encoded. This encoding type MUST be supported by all clients. +- `ssz_snappy`: the contents are SSZ encoded, and subsequently compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; and MUST be supported in mainnet. + +#### SSZ encoding strategy (with or without Snappy) + +The [SimpleSerialize (SSZ) specification](https://github.com/ethereum/eth2.0-specs/blob/192442be51a8a6907d6401dffbf5c73cb220b760/specs/simple-serialize.md) outlines how objects are SSZ-encoded. If the Snappy variant is selected, we feed the serialised form to the Snappy compressor on encoding. The inverse happens on decoding. + +**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST prefix all encoded and compressed (if applicable) payloads with an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). + +Note that parameters defined as `[]VariableName` are SSZ-encoded containerless vectors. + +### Messages + +#### Hello + +**Protocol ID:** ``/eth2/beacon_chain/req/hello/1/`` + +**Content**: +``` +( + fork_version: bytes4 + finalized_root: bytes32 + finalized_epoch: uint64 + head_root: bytes32 + head_slot: uint64 +) +``` +The fields are: + +- `fork_version`: The beacon_state `Fork` version +- `finalized_root`: The latest finalized root the node knows about +- `finalized_epoch`: The latest finalized epoch the node knows about +- `head_root`: The block hash tree root corresponding to the head of the chain as seen by the sending node +- `head_slot`: The slot corresponding to the `head_root`. 
+ +Clients exchange hello messages upon connection, forming a two-phase handshake. The first message the initiating client sends MUST be the hello message. In response, the receiving client MUST respond with its own hello message. + +Clients SHOULD immediately disconnect from one another following the handshake above under the following conditions: + +1. If `fork_version` doesn’t match the local fork version, since the client’s chain is on another fork. `fork_version` can also be used to segregate testnets. +2. If the (`finalized_root`, `finalized_epoch`) shared by the peer is not in the client's chain at the expected epoch. For example, if Peer 1 sends (root, epoch) of (A, 5) and Peer 2 sends (B, 3) but Peer 1 has root C at epoch 3, then Peer 1 would disconnect because it knows that their chains are irreparably disjoint. + +Once the handshake completes, the client with the lower `finalized_epoch` or `head_slot` (if the clients have equal `finalized_epoch`s) SHOULD request beacon blocks from its counterparty via the `BeaconBlocks` request. + +#### Goodbye + +**Protocol ID:** ``/eth2/beacon_chain/req/goodbye/1/`` + +**Content:** +``` +( + reason: uint64 +) +``` +Client MAY send goodbye messages upon disconnection. The reason field MAY be one of the following values: + +- 1: Client shut down. +- 2: Irrelevant network. +- 3: Fault/error. + +Clients MAY use reason codes above `128` to indicate alternative, erroneous request-specific responses. + +The range `[4, 127]` is RESERVED for future usage. + +#### BeaconBlocks + +**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks/1/` + +Request Content +``` +( + head_block_root: HashTreeRoot + start_slot: uint64 + count: uint64 + step: uint64 +) +``` + +Response Content: +``` +( + blocks: []BeaconBlock +) +``` + +Requests count beacon blocks from the peer starting from `start_slot` on the chain defined by `head_block_root`. The response MUST contain no more than count blocks. step defines the slot increment between blocks. 
For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at [2, 4, 6, …]. In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …]. A step value of 1 returns all blocks on the range `[start_slot, start_slot + count)`. + +`BeaconBlocks` is primarily used to sync historical blocks. + +Clients MUST support requesting blocks since the start of the weak subjectivity period and up to the given `head_block_root`. + +Clients MUST support `head_block_root` values since the latest finalized epoch. + +#### RecentBeaconBlocks + +**Protocol ID:** `/eth2/beacon_chain/req/recent_beacon_blocks/1/` + +Request Content: + +``` +( + block_roots: []HashTreeRoot +) +``` + +Response Content: + +``` +( + blocks: []BeaconBlock +) +``` + +Requests blocks by their block roots. The response is a list of `BeaconBlock` with the same length as the request. Blocks are returned in order of the request and any missing/unknown blocks are left empty (SSZ null `BeaconBlock`). + +`RecentBeaconBlocks` is primarily used to recover recent blocks, for example when receiving a block or attestation whose parent is unknown. + +Clients MUST support requesting blocks since the latest finalized epoch. + +# Design Decision Rationale + +## Transport + +### Why are we defining specific transports? + +libp2p peers can listen on multiple transports concurrently, and these can change over time. multiaddrs not only encode the address, but also the transport to be used to dial. + +Due to this dynamic nature, agreeing on specific transports like TCP, QUIC or WebSockets on paper becomes irrelevant. + +However, it is useful to define a minimum baseline for interoperability purposes. + +### Can clients support other transports/handshakes than the ones mandated by the spec? 
+ +Clients may support other transports such as libp2p QUIC, WebSockets, and WebRTC transports, if available in the language of choice. While interoperability shall not be harmed by lack of such support, the advantages are desirable: + +- better latency, performance and other QoS characteristics (QUIC). +- paving the way for interfacing with future light clients (WebSockets, WebRTC). + +The libp2p QUIC transport inherently relies on TLS 1.3 per requirement in section 7 of the [QUIC protocol specification](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7), and the accompanying [QUIC-TLS document](https://tools.ietf.org/html/draft-ietf-quic-tls-22). + +The usage of one handshake procedure or the other shall be transparent to the ETH 2.0 application layer, once the libp2p Host/Node object has been configured appropriately. + +### What are advantages of using TCP/QUIC/Websockets? + +TCP is a reliable, ordered, full-duplex, congestion controlled network protocol that powers much of the Internet as we know it today. HTTP/1.1 and HTTP/2 run atop TCP. + +QUIC is a new protocol that’s in the final stages of specification by the IETF QUIC WG. It emerged from Google’s SPDY experiment. The QUIC transport is undoubtedly promising. It’s UDP based yet reliable, ordered, reduces latency vs. TCP, is multiplexed, natively secure (TLS 1.3), offers stream-level and connection-level congestion control (thus removing head-of-line blocking), 0-RTT connection establishment, and endpoint migration, amongst other features. UDP also has better NAT traversal properties than TCP -- something we desperately pursue in peer-to-peer networks. + +QUIC is being adopted as the underlying protocol for HTTP/3. This has the potential to award us censorship resistance via deep packet inspection for free. 
Provided that we use the same port numbers and encryption mechanisms as HTTP/3, our traffic may be indistinguishable from standard web traffic, and we may only become subject to standard IP-based firewall filtering -- something we can counteract via other mechanisms. + +WebSockets and/or WebRTC transports are necessary for interaction with browsers, and will become increasingly important as we incorporate browser-based light clients to the ETH2 network. + +### Why do we not just support a single transport? + +Networks evolve. Hardcoding design decisions leads to ossification, preventing the evolution of networks alongside the state of the art. Introducing changes on an ossified protocol is very costly, and sometimes, downright impracticable without causing undesirable breakage. + +Modelling for upgradeability and dynamic transport selection from the get-go lays the foundation for a future-proof stack. + +Clients can adopt new transports without breaking old ones; and the multi-transport ability enables constrained and sandboxed environments (e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), without the need for proxying or trust delegation to servers. + +### Why are we not using QUIC for mainnet from the start? + +The QUIC standard is still not finalised (at working draft 22 at the time of writing), and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). One remarkable example is node.js, where the QUIC implementation is [in early development](https://github.com/nodejs/quic). + +## Multiplexing + +### Why are we using mplex/yamux? + +[Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) is a multiplexer invented by Hashicorp that supports stream-level congestion control. Implementations exist in a limited set of languages, and it’s not a trivial piece to develop. 
+ +Conscious of that, the libp2p community conceptualised [mplex](https://github.com/libp2p/specs/blob/master/mplex/README.md) as a simple, minimal multiplexer for usage with libp2p. It does not support stream-level congestion control, and is subject to head-of-line blocking. + +Overlay multiplexers are not necessary with QUIC, as the protocol provides native multiplexing, but they need to be layered atop TCP, WebSockets, and other transports that lack such support. + +## Protocol Negotiation + +### When is multiselect 2.0 due and why are we using it for mainnet? + +multiselect 2.0 is currently being conceptualised. Debate started [on this issue](https://github.com/libp2p/specs/pull/95), but it got overloaded – as it tends to happen with large conceptual OSS discussions that touch the heart and core of a system. + +In the following weeks (August 2019), there will be a renewed initiative to first define the requirements, constraints, assumptions and features, in order to lock in basic consensus upfront, to subsequently build on that consensus by submitting a specification for implementation. + +We plan to use multiselect 2.0 for mainnet because it will: + +1. Reduce round trips during connection bootstrapping and stream protocol negotiation. +2. Enable efficient one-stream-per-request interaction patterns. +3. Leverage *push data* mechanisms of underlying protocols to expedite negotiation. +4. Provide the building blocks for enhanced censorship resistance. + +### What is the difference between connection-level and stream-level protocol negotiation? + +All libp2p connections must be authenticated, encrypted, and multiplexed. Connections using network transports unsupportive of native authentication/encryption and multiplexing (e.g. TCP) need to undergo protocol negotiation to agree on a mutually supported: + +1. authentication/encryption mechanism (such as SecIO, TLS 1.3, Noise). +2. overlay multiplexer (such as mplex, Yamux, spdystream). 
+ +In this specification, we refer to these two as *connection-level negotiations*. Transports supporting those features natively (such as QUIC) omit those negotiations. + +After successfully selecting a multiplexer, all subsequent I/O happens over *streams*. When opening streams, peers pin a protocol to that stream, by conducting *stream-level protocol negotiation*. + +At present, multistream-select 1.0 is used for both types of negotiation, but multiselect 2.0 will use dedicated mechanisms for connection bootstrapping process and stream protocol negotiation. + +## Encryption + +### Why are we using SecIO for interop? Why not for mainnet? + +SecIO has been the default encryption layer for libp2p for years. It is used in IPFS and Filecoin. And although it will be superseded shortly, it is proven to work at scale. + +SecIO is the common denominator across the various language libraries at this stage. It is widely implemented. That’s why we have chosen to use it for initial interop to minimize overhead in getting to a basic interoperability testnet. + +We won’t be using it for mainnet because, amongst other things, it requires several round trips to be sound, and doesn’t support early data (0-RTT data), a mechanism that multiselect 2.0 will leverage to reduce round trips during connection bootstrapping. + +SecIO is not considered secure for the purposes of this spec. + +## Why are we using Noise/TLS 1.3 for mainnet? + +Copied from the Noise Protocol Framework website: + +> Noise is a framework for building crypto protocols. Noise protocols support mutual and optional authentication, identity hiding, forward secrecy, zero round-trip encryption, and other advanced features. + +Noise in itself does not specify a single handshake procedure, but provides a framework to build secure handshakes based on Diffie-Hellman key agreement with a variety of tradeoffs and guarantees. 
+ +Noise handshakes are lightweight and simple to understand, and are used in major cryptographic-centric projects like WireGuard, I2P, Lightning. [Various](https://www.wireguard.com/papers/kobeissi-bhargavan-noise-explorer-2018.pdf) [studies](https://eprint.iacr.org/2019/436.pdf) have assessed the stated security goals of several Noise handshakes with positive results. + +On the other hand, TLS 1.3 is the newest, simplified iteration of TLS. Old, insecure, obsolete ciphers and algorithms have been removed, adopting Ed25519 as the sole ECDH key agreement function. Handshakes are faster, 1-RTT data is supported, and session resumption is a reality, amongst other features. + +Note that [TLS 1.3 is a prerequisite of the QUIC transport](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7), although an experiment exists to integrate Noise as the QUIC crypto layer: [nQUIC](https://eprint.iacr.org/2019/028). + +### Why are we using encryption at all? + +Transport level encryption secures message exchange and provides properties that are useful for privacy, safety, and censorship resistance. These properties are derived from the following security guarantees that apply to the entire communication between two peers: + +- Peer authentication: the peer I’m talking to is really who they claim to be, and who I expect them to be. +- Confidentiality: no observer can eavesdrop on the content of our messages. +- Integrity: the data has not been tampered with by a third-party while in transit. +- Non-repudiation: the originating peer cannot dispute that they sent the message. +- Depending on the chosen algorithms and mechanisms (e.g. continuous HMAC), we may obtain additional guarantees, such as non-replayability (this byte could’ve only been sent *now;* e.g. by using continuous HMACs), or perfect forward secrecy (in the case that a peer key is compromised, the content of a past conversation will not be compromised). 
+ +Note that transport-level encryption is not exclusive of application-level encryption or cryptography. Transport-level encryption secures the communication itself, while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.). + +### Will mainnet networking be untested when it launches? + +Before launching mainnet, the testnet will be switched over to mainnet networking parameters, including Noise handshakes, and other new protocols. This gives us an opportunity to drill coordinated network upgrades and verify that there are no significant upgradeability gaps. + + +## Gossipsub + +### Why are we using a pub/sub algorithm for block and attestation propagation? + +Pubsub is a technique to broadcast/disseminate data across a network rapidly. Such data is packaged in fire-and-forget messages that do not require a response from every recipient. Peers subscribed to a topic participate in the propagation of messages in that topic. + +The alternative is to maintain a fully connected mesh (all peers connected to each other 1:1), which scales poorly (O(n^2)). + +### Why are we using topics to segregate encodings, yet only support one encoding? + +For future extensibility with almost zero overhead now (besides the extra bytes in the topic name). + +### How do we upgrade gossip channels (e.g. changes in encoding, compression)? + +Such upgrades lead to fragmentation, so they’ll need to be carried out in a coordinated manner most likely during a hard fork. + +### Why are the topics strings and not hashes? + +Topic names have a hierarchical structure. In the future, gossipsub may support wildcard subscriptions (e.g. subscribe to all children topics under a root prefix). Using hashes as topic names would preclude us from leveraging such features going forward. No security guarantees are lost as a result of choosing plaintext topic names, since the domain is finite anyway. 
+ +### Why are there `SHARD_SUBNET_COUNT` subnets, and why is this not defined? + +Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel. The exact grouping will be dependent on more involved network tests. This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet). + +### Why are we sending entire objects in the pubsub and not just hashes? + +Entire objects should be sent to get the greatest propagation speeds. If only hashes are sent, then block and attestation propagation is dependent on recursive requests from each peer. In a hash-only scenario, peers could receive hashes without knowing who to download the actual contents from. Sending entire objects ensures that they get propagated through the entire network. + +### Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc? + +The prohibition of unverified-block-gossiping extends to nodes that cannot verify a signature due to not being fully synced to ensure that such (amplified) DOS attacks are not possible. + +### How are we going to discover peers in a gossipsub topic? + +Via discv5 topics. ENRs should not be used for this purpose, as they store identity, location and capability info, not volatile advertisements. + +In the interoperability testnet, all peers will be subscribed to all global beacon chain topics, so discovering peers in specific shard topics will be unnecessary. + +## Req/Resp + +### Why segregate requests into dedicated protocol IDs? + +Requests are segregated by protocol ID to: + +1. Leverage protocol routing in libp2p, such that the libp2p stack will route the incoming stream to the appropriate handler. This allows each the handler function for each request type to be self-contained. 
For an analogy, think about how you attach HTTP handlers to a REST API server. +2. Version requests independently. In a coarser-grained umbrella protocol, the entire protocol would have to be versioned even if just one field in a single message changed. +3. Enable clients to select the individual requests/versions they support. It would no longer be a strict requirement to support all requests, and clients, in principle, could support a subset of requests and a variety of versions. +4. Enable flexibility and agility for clients adopting spec changes that impact the request, by signalling to peers exactly which subset of new/old requests they support. +5. Enable clients to explicitly choose backwards compatibility at the request granularity. Without this, clients would be forced to support entire versions of the coarser request protocol. +6. Parallelise RFCs (or ETH2 EIPs). By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently without relying on a synchronisation point to version the general top-level protocol. + 1. This has the benefit that clients can explicitly choose which RFCs to deploy without buying into all other RFCs that may be included in that top-level version. + 2. Affording this level of granularity with a top-level protocol would imply creating as many variants (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs in flight, O(n^2). +7. Allow us to simplify the payload of requests. Request-ids and method-ids no longer need to be sent. The encoding/request type and version can all be handled by the framework. + +CAVEAT: the protocol negotiation component in the current version of libp2p is called multistream-select 1.0. It is somewhat naïve and introduces overhead on every request when negotiating streams, although implementation-specific optimizations are possible to save this cost. 
Multiselect 2.0 will remove this overhead by memoizing previously selected protocols, and modelling shared protocol tables. Fortunately this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to hinder interop testing. More info is to be released from the libp2p community in the coming weeks. + +### Why are messages length-prefixed with a protobuf varint in the SSZ encoding? + +In stream-oriented protocols, we need to delimit messages from one another, so that the reader knows where one message ends and the next one starts. Length-prefixing is an effective solution. Alternatively, one could set a delimiter char/string, but this can readily cause ambiguity if the message itself may contain the delimiter. It also introduces another set of edge cases to model for, thus causing unnecessary complexity, especially if messages are to be compressed (and thus mutated beyond our control). + +That said, in our case, streams are single-use. libp2p streams are full-duplex, and each party is responsible for closing their write side (like in TCP). We therefore use stream closure to mark the end of a request. + +Nevertheless, messages are still length-prefixed to prevent DOS attacks where malicious actors send large amounts of data disguised as a request. A length prefix allows clients to set a maximum limit, and once that limit is read, the client can cease reading and disconnect the stream. This allows a client to determine the exact length of the packet being sent, and it capacitates it to reset the stream early if the other party expresses they intend to send too much data. + +[Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length ints. Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte. 
+ +### Why do we version protocol strings with ordinals instead of semver? + +Using semver for network protocols is confusing. It is never clear what a change in a field, even if backwards compatible on deserialisation, actually implies. Network protocol agreement should be explicit. Imagine two peers: + +- Peer A supporting v1.1.1 of protocol X. +- Peer B supporting v1.1.2 of protocol X. + +These two peers should never speak to each other because the results can be unpredictable. This is an oversimplification: imagine the same problem with a set of 10 possible versions. We now have 10^2 (100) possible outcomes that peers need to model for. The resulting complexity is unwieldy. + +For this reason, we rely on negotiation of explicit, verbatim protocols. In the above case, peer B would provide backwards compatibility by supporting and advertising both v1.1.1 and v1.1.2 of the protocol. + +Therefore, semver would be relegated to convey expectations at the human level, and it wouldn't do a good job there either, because it's unclear if "backwards-compatibility" and "breaking change" apply only to wire schema level, to behaviour, etc. + +For this reason, we remove semver out of the picture and replace it with ordinals that require explicit agreement and do not mandate a specific policy for changes. + +### Why is it called Req/Resp and not RPC? + +Req/Resp is used to avoid confusion with JSON-RPC and similar user-client interaction mechanisms. + +## Discovery + +### Why are we using discv5 and not libp2p Kademlia DHT? + +discv5 is a standalone protocol, running on UDP on a dedicated port, meant for peer and service discovery only. discv5 supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are, or will be, requirements in this context. + +On the other hand, libp2p Kademlia DHT is a fully-fledged DHT protocol/implementation with content routing and storage capabilities, both of which are irrelevant in this context. 
+ +We assume that ETH1 nodes will evolve to support discv5. By sharing the discovery network between ETH1 and ETH2, we benefit from the additive effect on network size that enhances resilience and resistance against certain attacks, to which smaller networks are more vulnerable. It should also assist light clients of both networks find nodes with specific capabilities. + +discv5 is in the process of being audited. + +### What is the difference between an ENR and a multiaddr, and why are we using ENRs? + +Ethereum Node Records are self-certified node records. Nodes craft and disseminate ENRs for themselves, proving authorship via a cryptographic signature. ENRs are sequentially indexed, enabling conflicts to be resolved. + +ENRs are key-value records with string-indexed ASCII keys. They can store arbitrary information, but EIP-778 specifies a pre-defined dictionary, including IPv4 and IPv6 addresses, secp256k1 public keys, etc. + +Comparing ENRs and multiaddrs is like comparing apples and bananas. ENRs are self-certified containers of identity, addresses, and metadata about a node. Multiaddrs are address strings with the peculiarity that they’re self-describing, composable and future-proof. An ENR can contain multiaddrs, and multiaddrs can be derived securely from the fields of an authenticated ENR. + +discv5 uses ENRs and we will presumably need to: + +1. Add `multiaddr` to the dictionary, so that nodes can advertise their multiaddr under a reserved namespace in ENRs. – and/or – +2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR (ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. ETH1 nodes). + +## Compression/Encoding + +### Why are we using SSZ for encoding? + +SSZ is used at the consensus layer and all implementations should have support for ssz encoding/decoding requiring no further dependencies to be added to client implementations. 
This is a natural choice for serializing objects to be sent across the wire. The actual data in most protocols will be further compressed for efficiency. + +SSZ has well defined schema’s for consensus objects (typically sent across the wire) reducing any serialization schema data that needs to be sent. It also has defined all required types that are required for this network specification. + +### Why are we compressing, and at which layers? + +We compress on the wire to achieve smaller payloads per-message, which, in aggregate, result in higher efficiency, better utilisation of available bandwidth, and overall reduction in network-wide traffic overhead. + +At this time, libp2p does not have an out-of-the-box compression feature that can be dynamically negotiated and layered atop connections and streams, but this will be raised in the libp2p community for consideration. + +This is a non-trivial feature because the behaviour of network IO loops, kernel buffers, chunking, packet fragmentation, amongst others, need to be taken into account. libp2p streams are unbounded streams, whereas compression algorithms work best on bounded byte streams of which we have some prior knowledge. + +Compression tends not to be a one-size-fits-all problem. Lots of variables need careful evaluation, and generic approaches/choices lead to poor size shavings, which may even be counterproductive when factoring in the CPU and memory tradeoff. + +For all these reasons, generically negotiating compression algorithms may be treated as a research problem at the libp2p community, one we’re happy to tackle in the medium-term. + +At this stage, the wisest choice is to consider libp2p a messenger of bytes, and to make application layer participate in compressing those bytes. 
This looks different depending on the interaction layer:
+
+- Gossip domain: since gossipsub has a framing protocol and exposes an API, we compress the payload (when dictated by the encoding token in the topic name) prior to publishing the message via the API. No length prefixing is necessary because protobuf takes care of bounding the field in the serialised form.
+- Req/Resp domain: since we define custom protocols that operate on byte streams, implementers are encouraged to encapsulate the encoding and compression logic behind MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.
+
+### Why are we using Snappy for compression?
+
+Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks and can calculate the size of the uncompressed object without inflating it in memory. This prevents DOS vectors where large uncompressed data is sent.
+
+### Can I get access to unencrypted bytes on the wire for debugging purposes?
+
+Yes, you can add loggers in your libp2p protocol handlers to log incoming and outgoing messages. It is recommended to use programming design patterns to encapsulate the logging logic cleanly.
+
+If your libp2p library relies on frameworks/runtimes such as Netty (jvm) or Node.js (javascript), you can use logging facilities in those frameworks/runtimes to enable message tracing.
+
+For specific ad-hoc testing scenarios, you can use the [plaintext/2.0.0 secure channel](https://github.com/libp2p/specs/blob/master/plaintext/README.md) (which is essentially no-op encryption or message authentication), in combination with tcpdump or Wireshark to inspect the wire.
+
+# libp2p Implementations Matrix
+
+This section will soon contain a matrix showing the maturity/state of the libp2p features required by this spec across the languages in which ETH 2.0 clients are being developed. 
diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md deleted file mode 100644 index be154075c..000000000 --- a/specs/networking/rpc-interface.md +++ /dev/null @@ -1,283 +0,0 @@ -# Eth 2.0 Networking Spec - RPC Interface - -## Abstract - -The Ethereum 2.0 networking stack uses two modes of communication: a broadcast protocol that gossips information to interested parties via GossipSub, and an RPC protocol that retrieves information from specific clients. This specification defines the RPC protocol. - -The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). - -## Dependencies - -This specification assumes familiarity with the [Messaging](./messaging.md), [Node Identification](./node-identification.md), and [Beacon Chain](../core/0_beacon-chain.md) specifications. - -# Specification - -## Message schemas - -Message body schemas are notated like this: - -``` -( - field_name_1: type - field_name_2: type -) -``` - -Embedded types are serialized as SSZ Containers unless otherwise noted. - -All referenced data structures can be found in the [Beacon Chain](../core/0_beacon-chain.md#data-structures) specification. - -## `libp2p` protocol names - -A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. Peers exchange supported protocol IDs via the `Identify` protocol upon connection. When opening a new stream, peers pin a particular protocol ID to it, and the stream remains contextualized thereafter. Since messages are sent inside a stream, they do not need to bear the protocol ID. - -## RPC-over-`libp2p` - -To facilitate RPC-over-`libp2p`, a single protocol name is used: `/eth/serenity/beacon/rpc/1`. 
The version number in the protocol name is neither backwards or forwards compatible, and will be incremented whenever changes to the below structures are required. - -Remote method calls are wrapped in a "request" structure: - -``` -( - id: uint64 - method_id: uint16 - body: (message_body...) -) -``` - -and their corresponding responses are wrapped in a "response" structure: - -``` -( - id: uint64 - response_code: uint16 - result: bytes -) -``` - -A union type is used to determine the contents of the `body` field in the request structure. Each "body" entry in the RPC calls below corresponds to one subtype in the `body` type union. - -The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](https://www.jsonrpc.org/specification). Specifically: - -1. The `id` member is REQUIRED. -2. The `id` member in the response MUST be the same as the value of the `id` in the request. -3. The `id` member MUST be unique within the context of a single connection. Monotonically increasing `id`s are RECOMMENDED. -4. The `method_id` member is REQUIRED. -5. The `result` member is REQUIRED on success. -6. The `result` member is OPTIONAL on errors, and MAY contain additional information about the error. -7. `response_code` MUST be `0` on success. - -Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. Note that this implies that responses MAY arrive in a different order than requests. - -The "method ID" fields in the below messages refer to the `method` field in the request structure above. - -The first 1,000 values in `response_code` are reserved for system use. The following response codes are predefined: - -1. `0`: No error. -2. `10`: Parse error. -2. `20`: Invalid request. -3. `30`: Method not found. -4. `40`: Server error. - -### Alternative for non-`libp2p` clients - -Since some clients are waiting for `libp2p` implementations in their respective languages. 
As such, they MAY listen for raw TCP messages on port `9000`. To distinguish RPC messages from other messages on that port, a byte prefix of `ETH` (`0x455448`) MUST be prepended to all messages. This option will be removed once `libp2p` is ready in all supported languages. - -## Messages - -### Hello - -**Method ID:** `0` - -**Body**: - -``` -( - network_id: uint8 - chain_id: uint64 - finalized_root: bytes32 - finalized_epoch: uint64 - best_root: bytes32 - best_slot: uint64 -) -``` - -Clients exchange `hello` messages upon connection, forming a two-phase handshake. The first message the initiating client sends MUST be the `hello` message. In response, the receiving client MUST respond with its own `hello` message. - -Clients SHOULD immediately disconnect from one another following the handshake above under the following conditions: - -1. If `network_id` belongs to a different chain, since the client definitionally cannot sync with this client. -2. If the `finalized_root` shared by the peer is not in the client's chain at the expected epoch. For example, if Peer 1 in the diagram below has `(root, epoch)` of `(A, 5)` and Peer 2 has `(B, 3)`, Peer 1 would disconnect because it knows that `B` is not the root in their chain at epoch 3: - -``` - Root A - - +---+ - |xxx| +----+ Epoch 5 - +-+-+ - ^ - | - +-+-+ - | | +----+ Epoch 4 - +-+-+ -Root B ^ - | -+---+ +-+-+ -|xxx+<---+--->+ | +----+ Epoch 3 -+---+ | +---+ - | - +-+-+ - | | +-----------+ Epoch 2 - +-+-+ - ^ - | - +-+-+ - | | +-----------+ Epoch 1 - +---+ -``` - -Once the handshake completes, the client with the higher `finalized_epoch` or `best_slot` (if the clients have equal `finalized_epoch`s) SHOULD request beacon block roots from its counterparty via `beacon_block_roots` (i.e. RPC method `10`). - -### Goodbye - -**Method ID:** `1` - -**Body:** - -``` -( - reason: uint64 -) -``` - -Client MAY send `goodbye` messages upon disconnection. 
The reason field MAY be one of the following values: - -- `1`: Client shut down. -- `2`: Irrelevant network. -- `3`: Fault/error. - -Clients MAY define custom goodbye reasons as long as the value is larger than `1000`. - -### Get status - -**Method ID:** `2` - -**Request body:** - -``` -( - sha: bytes32 - user_agent: bytes - timestamp: uint64 -) -``` - -**Response body:** - -``` -( - sha: bytes32 - user_agent: bytes - timestamp: uint64 -) -``` - -Returns metadata about the remote node. - -### Request beacon block roots - -**Method ID:** `10` - -**Request body** - -``` -( - start_slot: uint64 - count: uint64 -) -``` - -**Response body:** - -``` -# BlockRootSlot -( - block_root: bytes32 - slot: uint64 -) - -( - roots: []BlockRootSlot -) -``` - -Requests a list of block roots and slots from the peer. The `count` parameter MUST be less than or equal to `32768`. The slots MUST be returned in ascending slot order. - -### Beacon block headers - -**Method ID:** `11` - -**Request body** - -``` -( - start_root: HashTreeRoot - start_slot: uint64 - max_headers: uint64 - skip_slots: uint64 -) -``` - -**Response body:** - -``` -( - headers: []BeaconBlockHeader -) -``` - -Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `1` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]`—i.e. duplicate blocks MUST be collapsed. A `skip_slots` value of `0` returns all blocks. 
- -The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each peer has a different starting block in order to populate block data. - -### Beacon block bodies - -**Method ID:** `12` - -**Request body:** - -``` -( - block_roots: []HashTreeRoot -) -``` - -**Response body:** - -``` -( - block_bodies: []BeaconBlockBody -) -``` - -Requests the `block_bodies` associated with the provided `block_roots` from the peer. Responses MUST return `block_roots` in the order provided in the request. If the receiver does not have a particular `block_root`, it must return a zero-value `block_body` (i.e. a `block_body` container with all zero fields). - -### Beacon chain state - -*Note*: This section is preliminary, pending the definition of the data structures to be transferred over the wire during fast sync operations. - -**Method ID:** `13` - -**Request body:** - -``` -( - hashes: []HashTreeRoot -) -``` - -**Response body:** TBD - -Requests contain the hashes of Merkle tree nodes that when merkleized yield the block's `state_root`. - -The response will contain the values that, when hashed, yield the hashes inside the request body. From c108444c06f09a9d2f11d98e3934c632d647b53d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Thu, 1 Aug 2019 15:47:11 +0100 Subject: [PATCH 026/250] add table of contents; amend heading level. --- specs/networking/p2p-interface.md | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 72f5c0fd6..3b5da6eb0 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -10,7 +10,33 @@ It consists of four main sections: 4. 
An analysis of the maturity/state of the libp2p features required by this spec across the languages in which ETH 2.0 clients are being developed. ## Table of Contents -[TOC] + + + + + +- [Network Fundamentals](#network-fundamentals) + - [Transport](#transport) + - [Encryption and identification](#encryption-and-identification) + - [Protocol Negotiation](#protocol-negotiation) + - [Multiplexing](#multiplexing) +- [ETH2 network interaction domains](#eth2-network-interaction-domains) + - [Constants](#constants) + - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) + - [The discovery domain: discv5](#the-discovery-domain-discv5) + - [The Req/Resp domain](#the-reqresp-domain) +- [Design Decision Rationale](#design-decision-rationale) + - [Transport](#transport-1) + - [Multiplexing](#multiplexing-1) + - [Protocol Negotiation](#protocol-negotiation-1) + - [Encryption](#encryption) + - [Gossipsub](#gossipsub) + - [Req/Resp](#reqresp) + - [Discovery](#discovery) + - [Compression/Encoding](#compressionencoding) +- [libp2p Implementations Matrix](#libp2p-implementations-matrix) + + # Network Fundamentals @@ -529,7 +555,7 @@ We won’t be using it for mainnet because, amongst other things, it requires se SecIO is not considered secure for the purposes of this spec. -## Why are we using Noise/TLS 1.3 for mainnet? +### Why are we using Noise/TLS 1.3 for mainnet? Copied from the Noise Protocol Framework website: From 55f5f106f175d64d48befc910025f1f9c33b39c1 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 10:56:31 -0400 Subject: [PATCH 027/250] Updated type checkers for generalized index functions. 
--- specs/light_client/merkle_proofs.md | 44 +++++++++++++---------------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index dae2a1704..6107e459c 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -17,12 +17,6 @@ -## Constants - -| Name | Value | -| - | - | -| `LENGTH_FLAG` | `2**64 - 1` | - ## Generalized Merkle tree index In a binary Merkle tree, we define a "generalized index" of a node as `2**depth + index`. Visually, this looks as follows: @@ -38,7 +32,8 @@ Note that the generalized index has the convenient property that the two childre ```python def merkle_tree(leaves: List[Bytes32]) -> List[Bytes32]: - o = [0] * len(leaves) + leaves + padded_length = next_power_of_2(len(leaves)) + o = [ZERO_HASH] * padded_length + leaves + [ZERO_HASH] * (padded_length - len(leaves)) for i in range(len(leaves) - 1, 0, -1): o[i] = hash(o[i * 2] + o[i * 2 + 1]) return o @@ -64,27 +59,24 @@ y_data_root len(y) We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo.y[5].w`. We'll describe paths as lists, which can have two representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` values, in these cases (assuming the fields of `foo` in order are `x` then `y`, and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`. ```python -def item_length(typ: Type) -> int: +def item_length(typ: SSZType) -> int: """ Returns the number of bytes in a basic type, or 32 (a full hash) for compound types. 
""" - if typ == bool: - return 1 - elif issubclass(typ, uint): + if issubclass(typ, BasicValue): return typ.byte_len else: return 32 -def get_elem_type(typ: Type, index: int) -> Type: +def get_elem_type(typ: ComplexType, index: int) -> Type: """ Returns the type of the element of an object of the given type with the given index or member variable name (eg. `7` for `x[7]`, `"foo"` for `x.foo`) """ - return typ.get_fields_dict()[index] if is_container_type(typ) else typ.elem_type + return typ.get_fields()[key] if issubclass(typ, Container) else typ.elem_type - -def get_chunk_count(typ: Type) -> int: +def chunk_count(typ: SSZType) -> int: """ Returns the number of hashes needed to represent the top-level elements in the given type (eg. `x.foo` or `x[7]` but not `x[7].bar` or `x.foo.baz`). In all cases except lists/vectors @@ -92,24 +84,28 @@ def get_chunk_count(typ: Type) -> int: hash. For lists/vectors of basic types, it is often fewer because multiple basic elements can be packed into one 32-byte chunk. """ - if is_basic_type(typ): + if issubclass(typ, BasicValue): return 1 - elif issubclass(typ, (List, Vector, Bytes, BytesN)): + elif issubclass(typ, Bits): + return (typ.length + 255) // 256 + elif issubclass(typ, Elements): return (typ.length * item_length(typ.elem_type) + 31) // 32 - else: + elif issubclass(typ, Container): return len(typ.get_fields()) + else: + raise Exception(f"Type not supported: {typ}") -def get_item_position(typ: Type, index: Union[int, str]) -> Tuple[int, int, int]: +def get_item_position(typ: SSZType, index: Union[int, str]) -> Tuple[int, int, int]: """ Returns three variables: (i) the index of the chunk in which the given element of the item is represented, (ii) the starting byte position, (iii) the ending byte position. 
For example for a 6-item list of uint64 values, index=2 will return (0, 16, 24), index=5 will return (1, 8, 16) """ - if issubclass(typ, (List, Vector, Bytes, BytesN)): + if issubclass(typ, Elements): start = index * item_length(typ.elem_type) return start // 32, start % 32, start % 32 + item_length(typ.elem_type) - elif is_container_type(typ): + elif issubclass(typ, Container): return typ.get_field_names().index(index), 0, item_length(get_elem_type(typ, index)) else: raise Exception("Only lists/vectors/containers supported") @@ -122,12 +118,12 @@ def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> Generalized """ root = 1 for p in path: - assert not is_basic_type(typ) # If we descend to a basic type, the path cannot continue further + assert not issubclass(typ, BasicValue) # If we descend to a basic type, the path cannot continue further if p == '__len__': typ, root = uint256, root * 2 + 1 if issubclass(typ, (List, Bytes)) else None else: pos, _, _ = get_item_position(typ, p) - root = root * (2 if issubclass(typ, (List, Bytes)) else 1) * next_power_of_two(get_chunk_count(typ)) + pos + root = root * (2 if issubclass(typ, (List, Bytes)) else 1) * next_power_of_two(chunk_count(typ)) + pos typ = get_elem_type(typ, p) return root ``` @@ -197,7 +193,7 @@ def get_branch_indices(tree_index: int) -> List[int]: def get_expanded_indices(indices: List[int]) -> List[int]: """ Get the generalized indices of all chunks in the tree needed to prove the chunks with the given - generalized indices. + generalized indices, including the leaves. """ branches = set() for index in indices: From 1ba03b4c33a6ade51dbd60dae9a6a15acd7be531 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Thu, 1 Aug 2019 15:56:53 +0100 Subject: [PATCH 028/250] gossip domain: clarify why we use plaintext topic names. 
--- specs/networking/p2p-interface.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 3b5da6eb0..4480773b3 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -604,7 +604,11 @@ Such upgrades lead to fragmentation, so they’ll need to be carried out in a co ### Why are the topics strings and not hashes? -Topics names have a hierarchical structure. In the future, gossipsub may support wildcard subscriptions (e.g. subscribe to all children topics under a root prefix). Using hashes as topic names would preclude us from leveraging such features going forward. No security guarantees are lost as a result of choosing plaintext topic names, since the domain is finite anyway. +Topics names have a hierarchical structure. In the future, gossipsub may support wildcard subscriptions (e.g. subscribe to all children topics under a root prefix) by way of prefix matching. Enforcing hashes for topic names would preclude us from leveraging such features going forward.
+
+No security or privacy guarantees are lost as a result of choosing plaintext topic names, since the domain is finite anyway, and calculating a digest's preimage would be trivial.
+
+Furthermore, the ETH2 topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. ### Why are there `SHARD_SUBNET_COUNT` subnets, and why is this not defined? From 3d4dde412bdf9e5817e7f63cdb3d7cdb9beddc54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Thu, 1 Aug 2019 16:04:59 +0100 Subject: [PATCH 029/250] document doctoc command for posterity. 
--- specs/networking/p2p-interface.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 4480773b3..20fc803c8 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -11,6 +11,7 @@ It consists of four main sections: ## Table of Contents + From 725bdf822340db5048c161b54192f457bcde1ba2 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 11:40:40 -0400 Subject: [PATCH 030/250] Update specs/light_client/merkle_proofs.md --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 6107e459c..16cbd2908 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -120,7 +120,7 @@ def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> Generalized for p in path: assert not issubclass(typ, BasicValue) # If we descend to a basic type, the path cannot continue further if p == '__len__': - typ, root = uint256, root * 2 + 1 if issubclass(typ, (List, Bytes)) else None + typ, root = uint64, root * 2 + 1 if issubclass(typ, (List, Bytes)) else None else: pos, _, _ = get_item_position(typ, p) root = root * (2 if issubclass(typ, (List, Bytes)) else 1) * next_power_of_two(chunk_count(typ)) + pos From 02bb92e71455adaa7da101563a6c367efe9e1cc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Thu, 1 Aug 2019 16:57:04 +0100 Subject: [PATCH 031/250] fmt. 
--- specs/networking/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 20fc803c8..66b1fa694 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -313,8 +313,8 @@ The responder SHOULD send a response promptly, starting with a **single-byte** r It can have one of the following values: - 0: **Success** -- a normal response follows, with contents matching the expected message schema and encoding specified in the request. -- 1: **InvalidRequest** -- the contents of the request are semantically invalid, or the payload is malformed, or could not be understood. The response payload adheres to the ErrorMessage schema (described below). -- 2: **ServerError** -- the responder encountered an error while processing the request. The response payload adheres to the ErrorMessage schema (described below). +- 1: **InvalidRequest** -- the contents of the request are semantically invalid, or the payload is malformed, or could not be understood. The response payload adheres to the `ErrorMessage` schema (described below). +- 2: **ServerError** -- the responder encountered an error while processing the request. The response payload adheres to the `ErrorMessage` schema (described below). Clients MAY use response codes above `128` to indicate alternative, erroneous request-specific responses. 
From 26641aa6993e6a8b119b5da6768e41b1c535f571 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Thu, 1 Aug 2019 17:03:23 +0100 Subject: [PATCH 032/250] Remove light client infrastructure (better stuff to be added in phase 1) --- specs/core/0_beacon-chain.md | 55 +------------------ .../pyspec/eth2spec/test/helpers/genesis.py | 8 --- 2 files changed, 3 insertions(+), 60 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f0169f1d2..1ab7c9a10 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -33,7 +33,6 @@ - [`Eth1Data`](#eth1data) - [`HistoricalBatch`](#historicalbatch) - [`DepositData`](#depositdata) - - [`CompactCommittee`](#compactcommittee) - [`BeaconBlockHeader`](#beaconblockheader) - [Beacon operations](#beacon-operations) - [`ProposerSlashing`](#proposerslashing) @@ -88,7 +87,6 @@ - [`get_shard_delta`](#get_shard_delta) - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`get_attestation_data_slot`](#get_attestation_data_slot) - - [`get_compact_committees_root`](#get_compact_committees_root) - [`get_total_balance`](#get_total_balance) - [`get_total_active_balance`](#get_total_active_balance) - [`get_domain`](#get_domain) @@ -386,14 +384,6 @@ class DepositData(Container): signature: BLSSignature ``` -#### `CompactCommittee` - -```python -class CompactCommittee(Container): - pubkeys: List[BLSPubkey, MAX_VALIDATORS_PER_COMMITTEE] - compact_validators: List[uint64, MAX_VALIDATORS_PER_COMMITTEE] -``` - #### `BeaconBlockHeader` ```python @@ -518,8 +508,6 @@ class BeaconState(Container): # Shuffling start_shard: Shard randao_mixes: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR] - active_index_roots: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR] # Active index digests for light clients - compact_committees_roots: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR] # Committee digests for light clients # Slashings slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective 
balances # Attestations @@ -867,8 +855,7 @@ def get_seed(state: BeaconState, epoch: Epoch) -> Hash: Return the seed at ``epoch``. """ mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)) # Avoid underflow - active_index_root = state.active_index_roots[epoch % EPOCHS_PER_HISTORICAL_VECTOR] - return hash(mix + active_index_root + int_to_bytes(epoch, length=32)) + return hash(mix + int_to_bytes(epoch, length=32)) ``` #### `get_committee_count` @@ -962,27 +949,6 @@ def get_attestation_data_slot(state: BeaconState, data: AttestationData) -> Slot return Slot(compute_start_slot_of_epoch(data.target.epoch) + offset // (committee_count // SLOTS_PER_EPOCH)) ``` -#### `get_compact_committees_root` - -```python -def get_compact_committees_root(state: BeaconState, epoch: Epoch) -> Hash: - """ - Return the compact committee root at ``epoch``. - """ - committees = [CompactCommittee() for _ in range(SHARD_COUNT)] - start_shard = get_start_shard(state, epoch) - for committee_number in range(get_committee_count(state, epoch)): - shard = Shard((start_shard + committee_number) % SHARD_COUNT) - for index in get_crosslink_committee(state, epoch, shard): - validator = state.validators[index] - committees[shard].pubkeys.append(validator.pubkey) - compact_balance = validator.effective_balance // EFFECTIVE_BALANCE_INCREMENT - # `index` (top 6 bytes) + `slashed` (16th bit) + `compact_balance` (bottom 15 bits) - compact_validator = uint64((index << 16) + (validator.slashed << 15) + compact_balance) - committees[shard].compact_validators.append(compact_validator) - return hash_tree_root(Vector[CompactCommittee, SHARD_COUNT](committees)) -``` - #### `get_total_balance` ```python @@ -1154,13 +1120,6 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash, validator.activation_eligibility_epoch = GENESIS_EPOCH validator.activation_epoch = GENESIS_EPOCH - # Populate active_index_roots and compact_committees_roots - indices_list = 
List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT](get_active_validator_indices(state, GENESIS_EPOCH)) - active_index_root = hash_tree_root(indices_list) - committee_root = get_compact_committees_root(state, GENESIS_EPOCH) - for index in range(EPOCHS_PER_HISTORICAL_VECTOR): - state.active_index_roots[index] = active_index_root - state.compact_committees_roots[index] = committee_root return state ``` @@ -1506,14 +1465,6 @@ def process_final_updates(state: BeaconState) -> None: validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) # Update start shard state.start_shard = Shard((state.start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT) - # Set active index root - index_epoch = Epoch(next_epoch + ACTIVATION_EXIT_DELAY) - index_root_position = index_epoch % EPOCHS_PER_HISTORICAL_VECTOR - indices_list = List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT](get_active_validator_indices(state, index_epoch)) - state.active_index_roots[index_root_position] = hash_tree_root(indices_list) - # Set committees root - committee_root_position = next_epoch % EPOCHS_PER_HISTORICAL_VECTOR - state.compact_committees_roots[committee_root_position] = get_compact_committees_root(state, next_epoch) # Reset slashings state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0) # Set randao mix @@ -1549,9 +1500,9 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: state.latest_block_header = BeaconBlockHeader( slot=block.slot, parent_root=block.parent_root, - # state_root: zeroed, overwritten in the next `process_slot` call + # `state_root` is zeroed and overwritten in the next `process_slot` call body_root=hash_tree_root(block.body), - # signature is always zeroed + # `signature` is zeroed ) # Verify proposer is not slashed proposer = state.validators[get_beacon_proposer_index(state)] diff --git a/test_libs/pyspec/eth2spec/test/helpers/genesis.py b/test_libs/pyspec/eth2spec/test/helpers/genesis.py index 
11ab76b79..ae536eef9 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/genesis.py +++ b/test_libs/pyspec/eth2spec/test/helpers/genesis.py @@ -43,12 +43,4 @@ def create_genesis_state(spec, num_validators): validator.activation_eligibility_epoch = spec.GENESIS_EPOCH validator.activation_epoch = spec.GENESIS_EPOCH - genesis_active_index_root = hash_tree_root(List[spec.ValidatorIndex, spec.VALIDATOR_REGISTRY_LIMIT]( - spec.get_active_validator_indices(state, spec.GENESIS_EPOCH))) - genesis_compact_committees_root = hash_tree_root(List[spec.ValidatorIndex, spec.VALIDATOR_REGISTRY_LIMIT]( - spec.get_active_validator_indices(state, spec.GENESIS_EPOCH))) - for index in range(spec.EPOCHS_PER_HISTORICAL_VECTOR): - state.active_index_roots[index] = genesis_active_index_root - state.compact_committees_roots[index] = genesis_compact_committees_root - return state From e5e97a51e69888e70c34e26c5f89924e06f1621c Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Thu, 1 Aug 2019 17:16:36 +0100 Subject: [PATCH 033/250] Fix lint issues --- test_libs/pyspec/eth2spec/test/helpers/genesis.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/test_libs/pyspec/eth2spec/test/helpers/genesis.py b/test_libs/pyspec/eth2spec/test/helpers/genesis.py index ae536eef9..9e3c77b7b 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/genesis.py +++ b/test_libs/pyspec/eth2spec/test/helpers/genesis.py @@ -1,6 +1,4 @@ from eth2spec.test.helpers.keys import pubkeys -from eth2spec.utils.ssz.ssz_impl import hash_tree_root -from eth2spec.utils.ssz.ssz_typing import List def build_mock_validator(spec, i: int, balance: int): From 1b852adef1d48e51e38d5f5c9f760345a766202d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 18:11:30 -0400 Subject: [PATCH 034/250] Simplified merkle multiproofs --- specs/light_client/merkle_proofs.md | 134 +++++++++++++--------------- 1 file changed, 63 insertions(+), 71 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md 
index 16cbd2908..8f9b14fb5 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -157,11 +157,32 @@ def get_generalized_index_length(index: GeneralizedIndex) -> int: #### `get_generalized_index_bit` ```python -def get_generalized_index_bit(index: GeneralizedIndex, bit: int) -> bool: +def get_generalized_index_bit(index: GeneralizedIndex, position: int) -> bool: """ - Returns the i'th bit of a generalized index. + Returns the given bit of a generalized index. """ - return (index & (1 << bit)) > 0 + return (index & (1 << position)) > 0 +``` + +#### `generalized_index_sibling` + +```python +def generalized_index_sibling(index: GeneralizedIndex) -> GeneralizedIndex: + return index ^ 1 +``` + +#### `generalized_index_child` + +```python +def generalized_index_child(index: GeneralizedIndex, right_side: bool) -> GeneralizedIndex: + return index * 2 + right_side +``` + +#### `generalized_index_parent` + +```python +def generalized_index_parent(index: GeneralizedIndex) -> GeneralizedIndex: + return index // 2 ``` ## Merkle multiproofs @@ -180,38 +201,57 @@ x x . . . . x * First, we provide a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: ``` -def get_branch_indices(tree_index: int) -> List[int]: +def get_branch_indices(tree_index: GeneralizedIndex) -> List[GeneralizedIndex]: """ Get the generalized indices of the sister chunks along the path from the chunk with the given tree index to the root. 
""" - o = [tree_index ^ 1] + o = [generalized_index_sibling(tree_index)] while o[-1] > 1: - o.append((o[-1] // 2) ^ 1) + o.append(generalized_index_sibling(generalized_index_parent(o[-1]))) return o[:-1] -def get_expanded_indices(indices: List[int]) -> List[int]: +def get_helper_indices(indices: List[GeneralizedIndex]) -> List[GeneralizedIndex]: """ - Get the generalized indices of all chunks in the tree needed to prove the chunks with the given - generalized indices, including the leaves. + Get the generalized indices of all "extra" chunks in the tree needed to prove the chunks with the given + generalized indices. Note that the decreasing order is chosen deliberately to ensure equivalence to the + order of hashes in a regular single-item Merkle proof in the single-item case. """ - branches = set() + all_indices = set() for index in indices: - branches = branches.union(set(get_branch_indices(index) + [index])) - return sorted([x for x in branches if x*2 not in branches or x*2+1 not in branches])[::-1] + all_indices = all_indices.union(set(get_branch_indices(index) + [index])) + + return sorted([ + x for x in all_indices if not + (generalized_index_child(x, 0) in all_indices and generalized_index_child(x, 1) in all_indices) and not + (x in indices) + ])[::-1] ``` -Generating a proof that covers paths `p1 ... pn` is simply a matter of taking the chunks in the SSZ hash tree with generalized indices `get_expanded_indices([p1 ... pn])`. - -We now provide the bulk of the proving machinery, a function that takes a `{generalized_index: chunk}` map and fills in chunks that can be inferred (inferring the parent by hashing its two children): +Now we provide the Merkle proof verification functions. First, for single item proofs: ```python -def fill(objects: Dict[int, Bytes32]) -> Dict[int, Bytes32]: - """ - Fills in chunks that can be inferred from other chunks. For a set of chunks that constitutes - a valid proof, this includes the root (generalized index 1). 
- """ - objects = {k: v for k, v in objects.items()} +def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool: + assert len(proof) == get_generalized_index_length(index) + for i, h in enumerate(proof): + if get_generalized_index_bit(index, i): + leaf = hash(h + leaf) + else: + leaf = hash(leaf + h) + return leaf == root +``` + +Now for multi-item proofs: + +```python +def verify_merkle_multiproof(leaves: Sequence[Hash], proof: Sequence[Hash], indices: Sequence[GeneralizedIndex], root: Hash) -> bool: + assert len(leaves) == len(indices) + helper_indices = get_helper_indices(indices) + assert len(proof) == len(helper_indices) + objects = { + **{index:node for index, node in zip(indices, leaves)}, + **{index:node for index, node in zip(helper_indices, proof)} + } keys = sorted(objects.keys())[::-1] pos = 0 while pos < len(keys): @@ -220,55 +260,7 @@ def fill(objects: Dict[int, Bytes32]) -> Dict[int, Bytes32]: objects[k // 2] = hash(objects[k & -2] + objects[k | 1]) keys.append(k // 2) pos += 1 - # Completeness and consistency check - assert 1 in objects - for k in objects: - if k > 1: - assert objects[k // 2] == hash(objects[k & -2] + objects[k | 1]) - return objects + return objects[1] == root ``` -## MerklePartial - -We define a container that encodes an SSZ partial, and provide the methods for converting it into a `{generalized_index: chunk}` map, for which we provide a method to extract individual values. To determine the hash tree root of an object represented by an SSZ partial, simply check `decode_ssz_partial(partial)[1]`. - -### `SSZMerklePartial` - -```python -class SSZMerklePartial(Container): - indices: List[uint64, 2**32] - chunks: List[Bytes32, 2**32] -``` - -### `decode_ssz_partial` - -```python -def decode_ssz_partial(encoded: SSZMerklePartial) -> Dict[int, Bytes32]: - """ - Decodes an encoded SSZ partial into a generalized index -> chunk map, and verify hash consistency. 
- """ - full_indices = get_expanded_indices(encoded.indices) - return fill({k:v for k,v in zip(full_indices, encoded.chunks)}) -``` - -### `extract_value_at_path` - -```python -def extract_value_at_path(chunks: Dict[int, Bytes32], typ: Type, path: List[Union[int, str]]) -> Any: - """ - Provides the value of the element in the object represented by the given encoded SSZ partial at - the given path. Returns a KeyError if that path is not covered by this SSZ partial. - """ - root = 1 - for p in path: - if p == '__len__': - return deserialize_basic(chunks[root * 2 + 1][:8], uint64) - if issubclass(typ, (List, Bytes)): - assert 0 <= p < deserialize_basic(chunks[root * 2 + 1][:8], uint64) - pos, start, end = get_item_position(typ, p) - root = root * (2 if issubclass(typ, (List, Bytes)) else 1) * next_power_of_two(get_chunk_count(typ)) + pos - typ = get_elem_type(typ, p) - return deserialize_basic(chunks[root][start: end], typ) -``` - -Here [link TBD] is a python implementation of SSZ partials that represents them as a class that can be read and written to just like the underlying objects, so you can eg. perform state transitions on SSZ partials and compute the resulting root +Note that the single-item proof is a special case of a multi-item proof; a valid single-item proof verifies correctly when put into the multi-item verification function (making the natural trivial changes to input arguments, `index -> [index]` and `leaf -> [leaf]`). 
From 44bd00164ed272e92d8a8a47c267502e8d4ae6e8 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 18:11:45 -0400 Subject: [PATCH 035/250] Update specs/light_client/merkle_proofs.md Co-Authored-By: Danny Ryan --- specs/light_client/merkle_proofs.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 8f9b14fb5..fcc8031a8 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -75,7 +75,8 @@ def get_elem_type(typ: ComplexType, index: int) -> Type: or member variable name (eg. `7` for `x[7]`, `"foo"` for `x.foo`) """ return typ.get_fields()[key] if issubclass(typ, Container) else typ.elem_type - + + def chunk_count(typ: SSZType) -> int: """ Returns the number of hashes needed to represent the top-level elements in the given type From e93e7a3f1a29179bdfa8bd9f606a79927fa0c610 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 18:12:27 -0400 Subject: [PATCH 036/250] Update specs/light_client/merkle_proofs.md --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index fcc8031a8..af0ff760d 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -69,7 +69,7 @@ def item_length(typ: SSZType) -> int: return 32 -def get_elem_type(typ: ComplexType, index: int) -> Type: +def get_elem_type(typ: ComplexType, index: Union[int, str]) -> Type: """ Returns the type of the element of an object of the given type with the given index or member variable name (eg. 
`7` for `x[7]`, `"foo"` for `x.foo`) From 2e7c8fa529d1385cb10d990b6b150b75bafff7c3 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 18:12:35 -0400 Subject: [PATCH 037/250] Update specs/light_client/merkle_proofs.md --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index af0ff760d..afa42f184 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -74,7 +74,7 @@ def get_elem_type(typ: ComplexType, index: Union[int, str]) -> Type: Returns the type of the element of an object of the given type with the given index or member variable name (eg. `7` for `x[7]`, `"foo"` for `x.foo`) """ - return typ.get_fields()[key] if issubclass(typ, Container) else typ.elem_type + return typ.get_fields()[index] if issubclass(typ, Container) else typ.elem_type def chunk_count(typ: SSZType) -> int: From 5248bb6a15134ebc949e90e481c6e6ad7920a035 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Fri, 2 Aug 2019 12:35:50 +0100 Subject: [PATCH 038/250] apply editorial suggestions. Co-Authored-By: Hsiao-Wei Wang Co-Authored-By: Preston Van Loon --- specs/networking/p2p-interface.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 66b1fa694..fa87635e0 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -79,13 +79,13 @@ The following SecIO parameters MUST be supported by all stacks: - Key agreement: ECDH-P256. - Cipher: AES-128. -- Digest: SHA256. +- Digest: SHA-256. #### Mainnet [Noise Framework](http://www.noiseprotocol.org/) handshakes will be used for mainnet. libp2p Noise support [is in the process of being standardised](https://github.com/libp2p/specs/issues/195) in the libp2p project. 
-Noise support will presumably include IX, IK and XX handshake patterns, and may rely on Curve25519 keys, ChaCha20 and Poly1305 ciphers, and SHA256 as a hash function. These aspects are being actively debated in the referenced issue [ETH 2.0 implementers are welcome to comment and contribute to the discussion.] +Noise support will presumably include IX, IK and XX handshake patterns, and may rely on Curve25519 keys, ChaCha20 and Poly1305 ciphers, and SHA-256 as a hash function. These aspects are being actively debated in the referenced issue [ETH 2.0 implementers are welcome to comment and contribute to the discussion.] ## Protocol Negotiation @@ -427,7 +427,7 @@ Response Content: ) ``` -Requests count beacon blocks from the peer starting from `start_slot` on the chain defined by `head_block_root`. The response MUST contain no more than count blocks. step defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at [2, 4, 6, …]. In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …]. A step value of 1 returns all blocks on the range `[start_slot, start_slot + count)`. +Requests count beacon blocks from the peer starting from `start_slot` on the chain defined by `head_block_root`. The response MUST contain no more than count blocks. `step` defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at [2, 4, 6, …]. In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …]. A step value of 1 returns all blocks on the range `[start_slot, start_slot + count)`. `BeaconBlocks` is primarily used to sync historical blocks. 
@@ -609,7 +609,7 @@ Topics names have a hierarchical structure. In the future, gossipsub may support No security or privacy guarantees are lost as a result of choosing plaintext topic names, since the domain is finite anyway, and calculating a digest's preimage would be trivial. -Furthermore, the ETH2 topic names are shorter their digest equivalents (asuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. +Furthermore, the ETH2 topic names are shorter their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. ### Why are there `SHARD_SUBNET_COUNT` subnets, and why is this not defined? From f3c11852d79bc5488b016c0538e2dd0a24609237 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Fri, 2 Aug 2019 12:38:59 +0100 Subject: [PATCH 039/250] apply more editorial suggestions. Co-Authored-By: Hsiao-Wei Wang --- specs/networking/p2p-interface.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index fa87635e0..2661ecdb0 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -229,7 +229,7 @@ Specifications of these parameters can be found in the [ENR Specification](http: In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry ETH2 capability information, as it would be superfluous. -Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed a ETH2 peer, in order to eschew connecting to ETH1 peers. +Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an ETH2 peer, in order to eschew connecting to ETH1 peers. #### Mainnet @@ -609,7 +609,7 @@ Topics names have a hierarchical structure. 
In the future, gossipsub may support No security or privacy guarantees are lost as a result of choosing plaintext topic names, since the domain is finite anyway, and calculating a digest's preimage would be trivial. -Furthermore, the ETH2 topic names are shorter their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. +Furthermore, the ETH2 topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. ### Why are there `SHARD_SUBNET_COUNT` subnets, and why is this not defined? @@ -637,7 +637,7 @@ Requests are segregated by protocol ID to: 1. Leverage protocol routing in libp2p, such that the libp2p stack will route the incoming stream to the appropriate handler. This allows each the handler function for each request type to be self-contained. For an analogy, think about how you attach HTTP handlers to a REST API server. 2. Version requests independently. In a coarser-grained umbrella protocol, the entire protocol would have to be versioned even if just one field in a single message changed. -3. Enable clients to select the individual requests/versions they support. It would no longer be a strict requirement to support all requests, and clients, in principle, could support a subset of equests and variety of versions. +3. Enable clients to select the individual requests/versions they support. It would no longer be a strict requirement to support all requests, and clients, in principle, could support a subset of requests and variety of versions. 4. Enable flexibility and agility for clients adopting spec changes that impact the request, by signalling to peers exactly which subset of new/old requests they support. 5. Enable clients to explicitly choose backwards compatibility at the request granularity. Without this, clients would be forced to support entire versions of the coarser request protocol. 6. Parallelise RFCs (or ETH2 EIPs). 
By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently without relying on a synchronisation point to version the general top-level protocol. From c29d85aafb99639930e49129e0c173082de450f5 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 2 Aug 2019 09:40:26 -0400 Subject: [PATCH 040/250] Update specs/core/1_shard-data-chains.md Co-Authored-By: Hsiao-Wei Wang --- specs/core/1_shard-data-chains.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index c4d8e2701..f284574f2 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -432,7 +432,7 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: # Save states in history accumulator depth = 0 h = hash_tree_root(state) - while state.slot % 2**depth == 0: + while state.slot % 2**depth == 0 and depth <= HISTORY_ACCUMULATOR_VECTOR: state.history_accumulator[depth] = h depth += 1 From ddd43ad99d1e75affe77627d9f0c06f7a2825a35 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 2 Aug 2019 09:40:49 -0400 Subject: [PATCH 041/250] <= becomes < --- specs/core/1_shard-data-chains.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index f284574f2..317011716 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -432,7 +432,7 @@ def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: # Save states in history accumulator depth = 0 h = hash_tree_root(state) - while state.slot % 2**depth == 0 and depth <= HISTORY_ACCUMULATOR_VECTOR: + while state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_VECTOR: state.history_accumulator[depth] = h depth += 1 From 0a874528a8e9ce31703554133393405b6c4ed438 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 2 Aug 2019 09:43:03 
-0400 Subject: [PATCH 042/250] Update specs/light_client/merkle_proofs.md Co-Authored-By: Danny Ryan --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index afa42f184..9afa96738 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -201,7 +201,7 @@ x x . . . . x * First, we provide a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: -``` +```python def get_branch_indices(tree_index: GeneralizedIndex) -> List[GeneralizedIndex]: """ Get the generalized indices of the sister chunks along the path from the chunk with the From 845daf5b1f22d6b4e91c2713c8887f41d78750de Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 2 Aug 2019 09:43:24 -0400 Subject: [PATCH 043/250] Update specs/light_client/merkle_proofs.md Co-Authored-By: Diederik Loerakker --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 9afa96738..2a1103ca2 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -100,7 +100,7 @@ def chunk_count(typ: SSZType) -> int: def get_item_position(typ: SSZType, index: Union[int, str]) -> Tuple[int, int, int]: """ Returns three variables: (i) the index of the chunk in which the given element of the item is - represented, (ii) the starting byte position, (iii) the ending byte position. For example for + represented, (ii) the starting byte position within the chunk, (iii) the ending byte position within the chunk. 
For example for a 6-item list of uint64 values, index=2 will return (0, 16, 24), index=5 will return (1, 8, 16) """ if issubclass(typ, Elements): From 59307d1380de871ac1d031c48c781aae0c0c60b1 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 2 Aug 2019 09:43:42 -0400 Subject: [PATCH 044/250] Update specs/light_client/merkle_proofs.md Co-Authored-By: Diederik Loerakker --- specs/light_client/merkle_proofs.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 2a1103ca2..bf6cd7a61 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -85,6 +85,7 @@ def chunk_count(typ: SSZType) -> int: hash. For lists/vectors of basic types, it is often fewer because multiple basic elements can be packed into one 32-byte chunk. """ + # typ.length describes the limit for list types, or the length for vector types. if issubclass(typ, BasicValue): return 1 elif issubclass(typ, Bits): From c6cdec8217b41f2cfa20f4f117fbaac47a4c89ab Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 2 Aug 2019 09:45:26 -0400 Subject: [PATCH 045/250] Fixed get generalized indices --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index bf6cd7a61..038990709 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -142,7 +142,7 @@ def concat_generalized_indices(*indices: Sequence[GeneralizedIndex]) -> Generali """ o = GeneralizedIndex(1) for i in indices: - o = o * get_previous_power_of_2(i) + i + o = o * get_previous_power_of_2(i) + (i - get_previous_power_of_2(i)) return o ``` From c8d128aa59c5dc3c9ae419ce30b20a028444af8f Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 2 Aug 2019 09:46:59 -0400 Subject: [PATCH 046/250] Update specs/light_client/merkle_proofs.md Co-Authored-By: Diederik Loerakker --- 
specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 038990709..2e00806ee 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -259,7 +259,7 @@ def verify_merkle_multiproof(leaves: Sequence[Hash], proof: Sequence[Hash], indi while pos < len(keys): k = keys[pos] if k in objects and k ^ 1 in objects and k // 2 not in objects: - objects[k // 2] = hash(objects[k & -2] + objects[k | 1]) + objects[k // 2] = hash(objects[(k | 1) ^ 1] + objects[k | 1]) keys.append(k // 2) pos += 1 return objects[1] == root From 6138edc5bebd99c87759161e277c4d57823ad9a0 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 2 Aug 2019 09:49:35 -0400 Subject: [PATCH 047/250] log -> log2 --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 2e00806ee..469f347ac 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -153,7 +153,7 @@ def get_generalized_index_length(index: GeneralizedIndex) -> int: """ Returns the length of a path represented by a generalized index. 
""" - return log(index) + return log2(index) ``` #### `get_generalized_index_bit` From 5237ac4954382e3b9f5a17dd9bf9fd6df0637876 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 2 Aug 2019 09:57:32 -0400 Subject: [PATCH 048/250] Update specs/light_client/merkle_proofs.md --- specs/light_client/merkle_proofs.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 469f347ac..e0be4f070 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -132,6 +132,8 @@ def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> Generalized ### Helpers for generalized indices +_Usage note: functions outside this section should manipulate generalized indices using only functions inside this section. This is to make it easier for developers to implement generalized indices with underlying representations other than bigints._ + #### `concat_generalized_indices` ```python From 514ff5814f233836eba8b45d9bb5d94b97d7a60e Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 2 Aug 2019 21:12:40 +0200 Subject: [PATCH 049/250] Updates * constants -> configurations * constant name updates * initial validation requirement for attestations * allow aggregated attestations to be published * move discv5 down a bit * additional rationale --- specs/networking/p2p-interface.md | 143 +++++++++++++++++------------- 1 file changed, 80 insertions(+), 63 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 2661ecdb0..ed2047190 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -22,10 +22,10 @@ It consists of four main sections: - [Protocol Negotiation](#protocol-negotiation) - [Multiplexing](#multiplexing) - [ETH2 network interaction domains](#eth2-network-interaction-domains) - - [Constants](#constants) + - [Configuration](#configuration) - [The gossip domain: 
gossipsub](#the-gossip-domain-gossipsub) - - [The discovery domain: discv5](#the-discovery-domain-discv5) - [The Req/Resp domain](#the-reqresp-domain) + - [The discovery domain: discv5](#the-discovery-domain-discv5) - [Design Decision Rationale](#design-decision-rationale) - [Transport](#transport-1) - [Multiplexing](#multiplexing-1) @@ -89,6 +89,8 @@ Noise support will presumably include IX, IK and XX handshake patterns, and may ## Protocol Negotiation +Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers. + #### Interop Connection-level and stream-level (see the rationale section below for explanations) protocol negotiation MUST be conducted using [multistream-select v1.0](https://github.com/multiformats/multistream-select/). Its protocol ID is: `/multistream/1.0.0`. @@ -107,16 +109,15 @@ Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) # ETH2 network interaction domains -## Constants +## Configuration This section outlines constants that are used in this spec. -- `RQRP_MAX_SIZE`: The max size of uncompressed req/resp messages that clients will allow. - Value: TBD -- `GOSSIP_MAX_SIZE`: The max size of uncompressed gossip messages - Value: 1MB (estimated from expected largest uncompressed block size). -- `SHARD_SUBNET_COUNT`: The number of shard subnets used in the gossipsub protocol. - Value: TBD +| `REQ_RESP_MAX_SIZE` | `TODO` | The max size of uncompressed req/resp messages that clients will allow. | +| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The max size of uncompressed gossip messages | +| `SHARD_SUBNET_COUNT` | `TODO` | The number of shard subnets used in the gossipsub protocol. 
| +| `TTFB_TIMEOUT` | `5s` | Maximum time to wait for first byte of request response (time-to-first-byte) | +| `RESP_TIMEOUT` | `10s` | Maximum time for complete response transfer | ## The gossip domain: gossipsub @@ -128,7 +129,7 @@ Clients MUST support the [gossipsub](https://github.com/libp2p/specs/tree/master *Note: Parameters listed here are subject to a large-scale network feasibility study.* -The following gossipsub parameters will be used: +The following gossipsub [parameters](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub#meshsub-an-overlay-mesh-router) will be used: - `D` (topic stable mesh target count): 6 - `D_low` (topic stable mesh low watermark): 4 @@ -147,8 +148,8 @@ Topic strings have form: `/eth2/TopicName/TopicEncoding`. This defines both the There are two main topics used to propagate attestations and beacon blocks to all nodes on the network. Their `TopicName`'s are: -- `beacon_block` - This topic is used solely for propagating new beacon blocks to all nodes on the networks. Blocks are sent in their entirety. Clients who receive a block on this topic MUST validate the block proposer signature before forwarding it across the network. -- `beacon_attestation` - This topic is used to propagate aggregated attestations (in their entirety) to subscribing nodes (typically block proposers) to be included in future blocks. Similarly to beacon blocks, clients will be expected to perform some sort of validation before forwarding, but the precise mechanism is still TBD. +- `beacon_block` - This topic is used solely for propagating new beacon blocks to all nodes on the networks. Blocks are sent in their entirety. Clients MUST validate the block proposer signature before forwarding it across the network. +- `beacon_attestation` - This topic is used to propagate aggregated attestations (in their entirety) to subscribing nodes (typically block proposers) to be included in future blocks. 
Clients MUST validate that the block being voted for passes validation before forwarding the attestation on the network (TODO: [additional validations](https://github.com/ethereum/eth2.0-specs/issues/1332)). Additional topics are used to propagate lower frequency validator messages. Their `TopicName`’s are: @@ -158,12 +159,14 @@ Additional topics are used to propagate lower frequency validator messages. Thei #### Interop -Unaggregated attestations from all shards are sent to the `beacon_attestation` topic. +Unaggregated and aggregated attestations from all shards are sent to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them. #### Mainnet Shards are grouped into their own subnets (defined by a shard topic). The number of shard subnets is defined via `SHARD_SUBNET_COUNT` and the shard `shard_number % SHARD_SUBNET_COUNT` is assigned to the topic: `shard{shard_number % SHARD_SUBNET_COUNT}_beacon_attestation`. Unaggregated attestations are sent to the subnet topic. Aggregated attestations are sent to the `beacon_attestation` topic. +TODO: [aggregation strategy](https://github.com/ethereum/eth2.0-specs/issues/1331) + ### Messages Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. @@ -200,51 +203,6 @@ Topics are post-fixed with an encoding. Encodings define how the payload of a go Implementations MUST use a single encoding. Changing an encoding will require coordination between participating implementations. -## The discovery domain: discv5 - -Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery, both in the interoperability testnet and mainnet. - -`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only. 
`discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context. - -### Integration into libp2p stacks - -`discv5` SHOULD be integrated into the client’s libp2p stack by implementing an adaptor to make it conform to the [service discovery](https://github.com/libp2p/go-libp2p-core/blob/master/discovery/discovery.go) and [peer routing](https://github.com/libp2p/go-libp2p-core/blob/master/routing/routing.go#L36-L44) abstractions and interfaces (go-libp2p links provided). - -Inputs to operations include peer IDs (when locating a specific peer), or capabilities (when searching for peers with a specific capability), and the outputs will be multiaddrs converted from the ENR records returned by the discv5 backend. - -This integration enables the libp2p stack to subsequently form connections and streams with discovered peers. - -### ENR structure - -The Ethereum Node Record (ENR) for an Ethereum 2.0 client MUST contain the following entries (exclusive of the sequence number and signature, which MUST be present in an ENR): - -- The compressed secp256k1 publickey, 33 bytes (`secp256k1` field). -- An IPv4 address (`ip` field) and/or IPv6 address (`ip6` field). -- A TCP port (`tcp` field) representing the local libp2p listening port. -- A UDP port (`udp` field) representing the local discv5 listening port. - -Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778). - -#### Interop - -In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry ETH2 capability information, as it would be superfluous. - -Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an ETH2 peer, in order to eschew connecting to ETH1 peers. 
- -#### Mainnet - -On mainnet, ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability. - -### Topic advertisement - -#### Interop - -This feature will not be used in the interoperability testnet. - -#### Mainnet - -In mainnet, we plan to use discv5’s topic advertisement feature as a rendezvous facility for peers on shards (thus subscribing to the relevant gossipsub topics). - ## The Req/Resp domain ### Protocol identification @@ -288,7 +246,7 @@ Once a new stream with the protocol ID for the request type has been negotiated, The requester MUST close the write side of the stream once it finishes writing the request message - at this point, the stream will be half-closed. -The requester MUST wait a maximum of **5 seconds** for the first response byte to arrive (time to first byte – or TTFB – timeout). On that happening, the requester will allow further **10 seconds** to receive the full response. +The requester MUST wait a maximum of `TTFB_TIMEOUT` for the first response byte to arrive (time to first byte – or TTFB – timeout). On that happening, the requester will allow further `RESP_TIMEOUT` to receive the full response. If any of these timeouts fire, the requester SHOULD reset the stream and deem the req/resp operation to have failed. @@ -306,11 +264,11 @@ The responder MUST: If steps (1), (2) or (3) fail due to invalid, malformed or inconsistent data, the responder MUST respond in error. Clients tracking peer reputation MAY record such failures, as well as unexpected events, e.g. early stream resets. -The entire request should be read in no more than **5 seconds**. Upon a timeout, the responder SHOULD reset the stream. +The entire request should be read in no more than `RESP_TIMEOUT`. Upon a timeout, the responder SHOULD reset the stream. 
The responder SHOULD send a response promptly, starting with a **single-byte** response code which determines the contents of the response (`result` particle in the BNF grammar above). -It can have one of the following values: +It can have one of the following values, encoded as a single unsigned byte: - 0: **Success** -- a normal response follows, with contents matching the expected message schema and encoding specified in the request. - 1: **InvalidRequest** -- the contents of the request are semantically invalid, or the payload is malformed, or could not be understood. The response payload adheres to the `ErrorMessage` schema (described below). @@ -461,6 +419,53 @@ Requests blocks by their block roots. The response is a list of `BeaconBlock` wi Clients MUST support requesting blocks since the latest finalized epoch. +## The discovery domain: discv5 + +Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery, both in the interoperability testnet and mainnet. + +`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only. `discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context. + +:warning: Under construction. :warning: + +### Integration into libp2p stacks + +`discv5` SHOULD be integrated into the client’s libp2p stack by implementing an adaptor to make it conform to the [service discovery](https://github.com/libp2p/go-libp2p-core/blob/master/discovery/discovery.go) and [peer routing](https://github.com/libp2p/go-libp2p-core/blob/master/routing/routing.go#L36-L44) abstractions and interfaces (go-libp2p links provided). + +Inputs to operations include peer IDs (when locating a specific peer), or capabilities (when searching for peers with a specific capability), and the outputs will be multiaddrs converted from the ENR records returned by the discv5 backend. 
+ +This integration enables the libp2p stack to subsequently form connections and streams with discovered peers. + +### ENR structure + +The Ethereum Node Record (ENR) for an Ethereum 2.0 client MUST contain the following entries (exclusive of the sequence number and signature, which MUST be present in an ENR): + +- The compressed secp256k1 publickey, 33 bytes (`secp256k1` field). +- An IPv4 address (`ip` field) and/or IPv6 address (`ip6` field). +- A TCP port (`tcp` field) representing the local libp2p listening port. +- A UDP port (`udp` field) representing the local discv5 listening port. + +Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778). + +#### Interop + +In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry ETH2 capability information, as it would be superfluous. + +Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an ETH2 peer, in order to eschew connecting to ETH1 peers. + +#### Mainnet + +On mainnet, ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability. + +### Topic advertisement + +#### Interop + +This feature will not be used in the interoperability testnet. + +#### Mainnet + +In mainnet, we plan to use discv5’s topic advertisement feature as a rendezvous facility for peers on shards (thus subscribing to the relevant gossipsub topics). + # Design Decision Rationale ## Transport @@ -601,7 +606,19 @@ For future extensibility with almost zero overhead now (besides the extra bytes ### How do we upgrade gossip channels (e.g. changes in encoding, compression)? 
-Such upgrades lead to fragmentation, so they’ll need to be carried out in a coordinated manner most likely during a hard fork. +Changing gossipsub / broadcasts requires a coordinated upgrade where all clients start publishing to the new topic together, for example during a hard fork. + +One can envision a two-phase deployment as well where clients start listening to the new topic in a first phase then start publishing some time later, letting the traffic naturally move over to the new topic. + +### Why must all clients use the same gossip topic instead of one negotiated between each peer pair? + +Supporting multiple topics / encodings would require the presence of relayers to translate between encodings and topics so as to avoid network fragmentation where participants have diverging views on the gossiped state, making the protocol more complicated and fragile. + +Gossip protocols typically remember what messages they've seen for a finite period of time based on message identity - if you publish the same message again after that time has passed, it will be re-broadcast - adding a relay delay also makes this scenario more likely. + +One can imagine that in a complicated upgrade scenario, we might have peers publishing the same message on two topics/encodings, but the price here is pretty high in terms of overhead - both computational and networking, so we'd rather avoid that. + +It is permitted for clients to publish data on alternative topics as long as they also publish on the network-wide mandatory topic. ### Why are the topics strings and not hashes? @@ -625,7 +642,7 @@ The prohibition of unverified-block-gossiping extends to nodes that cannot verif ### How are we going to discover peers in a gossipsub topic? -Via discv5 topics. ENRs should not be used for this purpose, as they store identity, location and capability info, not volatile advertisements. +Via discv5 topics. 
ENRs should not be used for this purpose, as they store identity, location and capability info, not volatile [advertisements](#topic-advertisement). In the interoperability testnet, all peers will be subscribed to all global beacon chain topics, so discovering peers in specific shard topics will be unnecessary. From cf1d49a1dec7f7ed8d612b039b2dccc566883536 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Sat, 3 Aug 2019 09:27:49 +0200 Subject: [PATCH 050/250] cleanups --- specs/networking/p2p-interface.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index ed2047190..eaa767216 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -51,9 +51,7 @@ Even though libp2p is a multi-transport stack (designed to listen on multiple si #### Interop -All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). - -The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously). Clients SHOULD allow the operator to configure the listen IP addresses and ports, including the addressing schemes (IPv4, IPv6). +All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously). To facilitate connectivity, and avert possible IPv6 routability/support issues, clients participating in the interoperability testnet MUST expose at least ONE IPv4 endpoint. @@ -236,9 +234,9 @@ result ::= “0” | “1” | “2” | [“128” ... ”255”] The encoding-dependent header may carry metadata or assertions such as the encoded payload length, for integrity and attack proofing purposes. 
It is not strictly necessary to length-prefix payloads, because req/resp streams are single-use, and stream closures implicitly delimit the boundaries, but certain encodings like SSZ do, for added security. -`encoded-payload` has a maximum byte size of `RQRP_MAX_SIZE`. +`encoded-payload` has a maximum byte size of `REQ_RESP_MAX_SIZE`. -Clients MUST ensure the payload size is less than or equal to `RQRP_MAX_SIZE`, if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance. +Clients MUST ensure the payload size is less than or equal to `REQ_RESP_MAX_SIZE`, if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance. #### Requesting side @@ -286,7 +284,7 @@ The `ErrorMessage` schema is: ) ``` -*Note that the String type is encoded as UTF-8 bytes when SSZ-encoded.* +*Note that the String type is encoded as UTF-8 bytes without NULL terminator when SSZ-encoded.* A response therefore has the form: ``` From d09d56bec8cec98e3d77a286c48972521dd2be96 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Sun, 4 Aug 2019 20:56:41 +0200 Subject: [PATCH 051/250] discuss length-prefixing pro/con, consider for removal, add link --- specs/networking/p2p-interface.md | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index eaa767216..6f79b5d49 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -664,11 +664,23 @@ CAVEAT: the protocol negotiation component in the current version of libp2p is c ### Why are messages length-prefixed with a protobuf varint in the SSZ encoding? -In stream-oriented protocols, we need to delimit messages from one another, so that the reader knows where one message ends and the next one starts. 
Length-prefixing is an effective solution. Alternatively, one could set a delimiter char/string, but this can readily cause ambiguity if the message itself may contain the delimiter. It also introduces another set of edge cases to model for, thus causing unnecessary complexity, especially if messages are to be compressed (and thus mutated beyond our control). +We are using single-use streams where each stream is closed at the end of the message - thus libp2p transparently handles message delimiting in the underlying stream. libp2p streams are full-duplex, and each party is responsible for closing their write side (like in TCP). We can therefore use stream closure to mark the end of the request and response independently. -That said, in our case, streams are single-use. libp2p streams are full-duplex, and each party is responsible for closing their write side (like in TCP). We therefore use stream closure to mark the end of a request. +Nevertheless, messages are still length-prefixed - this is now being considered for removal. -Nevertheless, messages are still length-prefixed to prevent DOS attacks where malicious actors send large amounts of data disguised as a request. A length prefix allows clients to set a maximum limit, and once that limit is read, the client can cease reading and disconnect the stream. This allows a client to determine the exact length of the packet being sent, and it capacitates it to reset the stream early if the other party expresses they intend to send too much data. 
+
+Advantages of length-prefixing include:
+
+* Reader can prepare a correctly sized buffer before reading message
+* Alignment with protocols like gRPC over HTTP/2 that prefix with length
+* Sanity checking of stream closure / message length
+
+Disadvantages include:
+
+* Redundant methods of message delimiting - both stream end marker and length prefix
+* Harder to stream as length must be known up-front
+* Additional code path required to verify length
+
+In some protocols, adding a length prefix serves as a form of DoS protection against very long messages, allowing the client to abort if an overlong message is about to be sent. In this protocol, we are globally limiting message sizes using `REQ_RESP_MAX_SIZE`, thus the length prefix does not afford any additional protection.
 
 [Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length ints. Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte.
 
@@ -728,7 +740,7 @@ SSZ has well defined schema’s for consensus objects (typically sent across the
 
 We compress on the wire to achieve smaller payloads per-message, which, in aggregate, result in higher efficiency, better utilisation of available bandwidth, and overall reduction in network-wide traffic overhead.
 
-At this time, libp2p does not have an out-of-the-box compression feature that can be dynamically negotiated and layered atop connections and streams, but this will be raised in the libp2p community for consideration.
+At this time, libp2p does not have an out-of-the-box compression feature that can be dynamically negotiated and layered atop connections and streams, but is [being considered](https://github.com/libp2p/libp2p/issues/81).
This is a non-trivial feature because the behaviour of network IO loops, kernel buffers, chunking, packet fragmentation, amongst others, need to be taken into account. libp2p streams are unbounded streams, whereas compression algorithms work best on bounded byte streams of which we have some prior knowledge. From 499e187382798060b3c880782b5563e4253c0bcf Mon Sep 17 00:00:00 2001 From: Ben Edgington Date: Mon, 5 Aug 2019 12:19:32 +0100 Subject: [PATCH 052/250] Fix constants table Tables need header rows in Markdown. --- specs/networking/p2p-interface.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 6f79b5d49..6fca087da 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -111,6 +111,8 @@ Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) This section outlines constants that are used in this spec. +| Name | Value | Description | +|---|---|---| | `REQ_RESP_MAX_SIZE` | `TODO` | The max size of uncompressed req/resp messages that clients will allow. | | `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The max size of uncompressed gossip messages | | `SHARD_SUBNET_COUNT` | `TODO` | The number of shard subnets used in the gossipsub protocol. | From cb92aa91ddaedb9dc6cff67718803577bf82ee03 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Mon, 5 Aug 2019 14:37:38 -0400 Subject: [PATCH 053/250] Include state root blocks in crosslink data in non-block slots Also adds `total_bytes` to state. The goal is to facilitate easier fraud proofs, so that one needs to simply check two adjacent headers in a crosslink and their respective bodies to verify a fraud proof. 
--- specs/core/1_shard-data-chains.md | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 317011716..b82ae5732 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -181,6 +181,7 @@ class ShardState(Container): shard: Shard most_recent_block_core: ShardBlockCore receipt_root: Hash + total_bytes: uint64 ``` ### `ShardReceiptDelta` @@ -531,7 +532,9 @@ def shard_block_transition(state: ShardState, )) # Check total bytes - assert block.core.total_bytes == state.most_recent_block_core.total_bytes + len(block.core.data) + state.total_bytes += len(block.core.data) + assert block.core.total_bytes == state.total_bytes + # Update in-state block header state.most_recent_block_core = ShardBlockCore( @@ -554,14 +557,16 @@ def shard_block_transition(state: ShardState, Let: - `shard` be a valid `Shard` -- `shard_blocks` be the `ShardBlock` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `shard` at slot `slot` +- `pre_state` is the `ShardState` before processing any blocks +- `shard_blocks_or_state_roots` be the `Union[ShardBlock, Hash]` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `shard` at slot `slot` if a block exists, or the post-state-root of processing state up to and including that slot if a block does not exist. 
- `beacon_state` be the canonical `BeaconState` - `valid_attestations` be the set of valid `Attestation` objects, recursively defined - `candidate` be a candidate `Attestation` which is valid under Phase 0 rules, and for which validity is to be determined under Phase 1 rules by running `is_valid_beacon_attestation` ```python def is_valid_beacon_attestation(shard: Shard, - shard_blocks: Sequence[ShardBlock], + pre_state: ShardState, + shard_blocks_or_state_roots: Sequence[Union[ShardBlock, Hash]], beacon_state: BeaconState, valid_attestations: Set[Attestation], candidate: Attestation) -> bool: @@ -588,7 +593,14 @@ def is_valid_beacon_attestation(shard: Shard, start_epoch + MAX_EPOCHS_PER_CROSSLINK) blocks = [] for slot in range(start_epoch * SLOTS_PER_EPOCH, end_epoch * SLOTS_PER_EPOCH): - blocks.append(shard_blocks[slot]) + if isinstance(shard_blocks_or_state_roots[slot], ShardBlock): + blocks.append(shard_blocks_or_state_roots[slot]) + else: + blocks.append(ShardBlockHeader(ShardBlockCore( + slot=slot, + state_root=shard_blocks_or_state_roots[slot], + total_bytes=state.total_bytes + ), ShardBlockSignatures())) assert candidate.data.crosslink.data_root == compute_crosslink_data_root(blocks) return True From 095cfe6633f9dbe62e8d4745665bcf4361da3a4c Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 7 Aug 2019 19:29:24 +0800 Subject: [PATCH 054/250] Fix build_spec and typo --- scripts/build_spec.py | 2 +- specs/core/1_shard-data-chains.md | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 96866cc8a..52642c8f4 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -37,7 +37,7 @@ from eth2spec.utils.bls import ( from eth2spec.utils.hash_function import hash ''' PHASE1_IMPORTS = '''from typing import ( - Any, Dict, Optional, Set, Sequence, MutableSequence, Tuple, + Any, Dict, Optional, Set, Sequence, MutableSequence, Tuple, Union, ) from dataclasses import ( diff --git 
a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index b82ae5732..283c1a9ca 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -535,7 +535,6 @@ def shard_block_transition(state: ShardState, state.total_bytes += len(block.core.data) assert block.core.total_bytes == state.total_bytes - # Update in-state block header state.most_recent_block_core = ShardBlockCore( slot=block.core.slot, @@ -599,7 +598,7 @@ def is_valid_beacon_attestation(shard: Shard, blocks.append(ShardBlockHeader(ShardBlockCore( slot=slot, state_root=shard_blocks_or_state_roots[slot], - total_bytes=state.total_bytes + total_bytes=pre_state.total_bytes ), ShardBlockSignatures())) assert candidate.data.crosslink.data_root == compute_crosslink_data_root(blocks) From 2a2c9967a803dae41472578da9faf6bd62d8daba Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 9 Aug 2019 12:16:07 +1000 Subject: [PATCH 055/250] Minor corrections and clarifications to the network specification --- specs/networking/p2p-interface.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 6fca087da..84ad45022 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -114,6 +114,7 @@ This section outlines constants that are used in this spec. | Name | Value | Description | |---|---|---| | `REQ_RESP_MAX_SIZE` | `TODO` | The max size of uncompressed req/resp messages that clients will allow. | +| `SSZ_MAX_LIST_SIZE` | `TODO` | The max size of SSZ-encoded variable lists. | | `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The max size of uncompressed gossip messages | | `SHARD_SUBNET_COUNT` | `TODO` | The number of shard subnets used in the gossipsub protocol. | | `TTFB_TIMEOUT` | `5s` | Maximum time to wait for first byte of request response (time-to-first-byte) | @@ -195,11 +196,11 @@ Topics are post-fixed with an encoding. 
Encodings define how the payload of a go #### Interop -- `ssz` - All objects are SSZ-encoded. Example: The beacon block topic string is: `/beacon_block/ssz` and the data field of a gossipsub message is an ssz-encoded `BeaconBlock`. +- `ssz` - All objects are SSZ-encoded. Example: The beacon block topic string is: `/eth2/beacon_block/ssz` and the data field of a gossipsub message is an ssz-encoded `BeaconBlock`. #### Mainnet -- `ssz_snappy` - All objects are ssz-encoded and then compressed with snappy. Example: The beacon attestation topic string is: `/beacon_attestation/ssz_snappy` and the data field of a gossipsub message is an `Attestation` that has been ssz-encoded then compressed with snappy. +- `ssz_snappy` - All objects are ssz-encoded and then compressed with snappy. Example: The beacon attestation topic string is: `/eth2/beacon_attestation/ssz_snappy` and the data field of a gossipsub message is an `Attestation` that has been ssz-encoded then compressed with snappy. Implementations MUST use a single encoding. Changing an encoding will require coordination between participating implementations. @@ -286,7 +287,7 @@ The `ErrorMessage` schema is: ) ``` -*Note that the String type is encoded as UTF-8 bytes without NULL terminator when SSZ-encoded.* +*Note that the String type is encoded as UTF-8 bytes without NULL terminator when SSZ-encoded. As the `ErrorMessage` is not an SSZ-container, only the UTF-8 bytes will be sent when SSZ-encoded.* A response therefore has the form: ``` @@ -300,7 +301,8 @@ Here `result` represents the 1-byte response code. The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time: -- `ssz`: the contents are [SSZ](https://github.com/ethereum/eth2.0-specs/blob/192442be51a8a6907d6401dffbf5c73cb220b760/specs/networking/libp2p-standardization.md#ssz-encoding) encoded. This encoding type MUST be supported by all clients. 
+- `ssz`: the contents are [SSZ](https://github.com/ethereum/eth2.0-specs/blob/192442be51a8a6907d6401dffbf5c73cb220b760/specs/networking/libp2p-standardization.md#ssz-encoding) encoded. This encoding type MUST be supported by all clients. + For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocks` response would be an SSZ-encoded list of `BeaconBlock`s. All SSZ-Lists in the Req/Resp domain will have a max-list size of `SSZ_MAX_LIST_SIZE`. - `ssz_snappy`: the contents are SSZ encoded, and subsequently compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; and MUST be supported in mainnet. #### SSZ encoding strategy (with or without Snappy) From 5808ab3ce7d5d6385ac3082b362df8894cf168e9 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Fri, 9 Aug 2019 00:30:05 -0500 Subject: [PATCH 056/250] doc standardization for networking spec (#1338) * Update p2p-interface.md * Update p2p-interface.md * Update p2p-interface.md * Update specs/networking/p2p-interface.md Co-Authored-By: Hsiao-Wei Wang * Update specs/networking/p2p-interface.md Co-Authored-By: Hsiao-Wei Wang * Update specs/networking/p2p-interface.md Co-Authored-By: Hsiao-Wei Wang * Update specs/networking/p2p-interface.md Co-Authored-By: Hsiao-Wei Wang * Update specs/networking/p2p-interface.md Co-Authored-By: Hsiao-Wei Wang * Update specs/networking/p2p-interface.md Co-Authored-By: Hsiao-Wei Wang * Update specs/networking/p2p-interface.md Co-Authored-By: Hsiao-Wei Wang * Update specs/networking/p2p-interface.md Co-Authored-By: Hsiao-Wei Wang * Update specs/networking/p2p-interface.md Co-Authored-By: Hsiao-Wei Wang * Update specs/networking/p2p-interface.md Co-Authored-By: Hsiao-Wei Wang --- specs/networking/p2p-interface.md | 227 +++++++++++++++--------------- 1 file changed, 113 insertions(+), 114 deletions(-) diff --git 
a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 6fca087da..53e203ca6 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -1,45 +1,45 @@ -# Overview +# Ethereum 2.0 networking specification -This document contains the network specification for Ethereum 2.0 clients. +This document contains the networking specification for Ethereum 2.0 clients. It consists of four main sections: -1. A specification of the network fundamentals detailing the two network configurations: interoperability test network, and mainnet launch. -2. A specification of the three network interaction _domains_ of ETH2.0: (a) the gossip domain, (b) the discovery domain, \(c\) the Req/Resp domain. +1. A specification of the network fundamentals detailing the two network configurations: interoperability test network and mainnet launch. +2. A specification of the three network interaction *domains* of Eth 2.0: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain. 3. The rationale and further explanation for the design choices made in the previous two sections. -4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which ETH 2.0 clients are being developed. +4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which Eth 2.0 clients are being developed. 
-## Table of Contents +## Table of contents -- [Network Fundamentals](#network-fundamentals) +- [Network fundamentals](#network-fundamentals) - [Transport](#transport) - [Encryption and identification](#encryption-and-identification) - - [Protocol Negotiation](#protocol-negotiation) + - [Protocol negotiation](#protocol-negotiation) - [Multiplexing](#multiplexing) -- [ETH2 network interaction domains](#eth2-network-interaction-domains) +- [Eth 2.0 network interaction domains](#eth-20-network-interaction-domains) - [Configuration](#configuration) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [The Req/Resp domain](#the-reqresp-domain) - [The discovery domain: discv5](#the-discovery-domain-discv5) -- [Design Decision Rationale](#design-decision-rationale) +- [Design decision rationale](#design-decision-rationale) - [Transport](#transport-1) - [Multiplexing](#multiplexing-1) - - [Protocol Negotiation](#protocol-negotiation-1) + - [Protocol negotiation](#protocol-negotiation-1) - [Encryption](#encryption) - [Gossipsub](#gossipsub) - [Req/Resp](#reqresp) - [Discovery](#discovery) - [Compression/Encoding](#compressionencoding) -- [libp2p Implementations Matrix](#libp2p-implementations-matrix) +- [libp2p implementations matrix](#libp2p-implementations-matrix) -# Network Fundamentals +# Network fundamentals This section outlines the specification for the networking stack in Ethereum 2.0 clients. @@ -53,9 +53,9 @@ Even though libp2p is a multi-transport stack (designed to listen on multiple si All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously). -To facilitate connectivity, and avert possible IPv6 routability/support issues, clients participating in the interoperability testnet MUST expose at least ONE IPv4 endpoint. 
+To facilitate connectivity and avert possible IPv6 routability/support issues, clients participating in the interoperability testnet MUST expose at least ONE IPv4 endpoint. -All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT or AutoRelay facilities. +All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT, or AutoRelay facilities. Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.), MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint. @@ -65,7 +65,7 @@ All requirements from the interoperability testnet apply, except for the IPv4 ad At this stage, clients are licensed to drop IPv4 support if they wish to do so, cognizant of the potential disadvantages in terms of Internet-wide routability/support. Clients MAY choose to listen only on IPv6, but MUST retain capability to dial both IPv4 and IPv6 addresses. -Usage of circuit relay, AutoNAT or AutoRelay will be specifically re-examined closer to the time. +Usage of circuit relay, AutoNAT, or AutoRelay will be specifically re-examined closer to the time. ## Encryption and identification @@ -81,9 +81,9 @@ The following SecIO parameters MUST be supported by all stacks: #### Mainnet -[Noise Framework](http://www.noiseprotocol.org/) handshakes will be used for mainnet. libp2p Noise support [is in the process of being standardised](https://github.com/libp2p/specs/issues/195) in the libp2p project. +[Noise Framework](http://www.noiseprotocol.org/) handshakes will be used for mainnet. libp2p Noise support [is in the process of being standardized](https://github.com/libp2p/specs/issues/195) in the libp2p project. -Noise support will presumably include IX, IK and XX handshake patterns, and may rely on Curve25519 keys, ChaCha20 and Poly1305 ciphers, and SHA-256 as a hash function. 
These aspects are being actively debated in the referenced issue [ETH 2.0 implementers are welcome to comment and contribute to the discussion.] +Noise support will presumably include IX, IK, and XX handshake patterns, and may rely on Curve25519 keys, ChaCha20 and Poly1305 ciphers, and SHA-256 as a hash function. These aspects are being actively debated in the referenced issue (Eth 2.0 implementers are welcome to comment and contribute to the discussion). ## Protocol Negotiation @@ -91,7 +91,7 @@ Clients MUST use exact equality when negotiating protocol versions to use and MA #### Interop -Connection-level and stream-level (see the rationale section below for explanations) protocol negotiation MUST be conducted using [multistream-select v1.0](https://github.com/multiformats/multistream-select/). Its protocol ID is: `/multistream/1.0.0`. +Connection-level and stream-level (see the [Rationale](#design-decision-rationale) section below for explanations) protocol negotiation MUST be conducted using [multistream-select v1.0](https://github.com/multiformats/multistream-select/). Its protocol ID is: `/multistream/1.0.0`. #### Mainnet @@ -103,9 +103,9 @@ During connection bootstrapping, libp2p dynamically negotiates a mutually suppor Two multiplexers are commonplace in libp2p implementations: [mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). Their protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`. -Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux must take precedence during negotiation. See the Rationale section of this document for tradeoffs. +Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). 
If both are supported by the client, yamux must take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs. -# ETH2 network interaction domains +# Eth 2.0 network interaction domains ## Configuration @@ -113,11 +113,11 @@ This section outlines constants that are used in this spec. | Name | Value | Description | |---|---|---| -| `REQ_RESP_MAX_SIZE` | `TODO` | The max size of uncompressed req/resp messages that clients will allow. | -| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The max size of uncompressed gossip messages | +| `REQ_RESP_MAX_SIZE` | `TODO` | The maximum size of uncompressed req/resp messages that clients will allow. | +| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum size of uncompressed gossip messages. | | `SHARD_SUBNET_COUNT` | `TODO` | The number of shard subnets used in the gossipsub protocol. | -| `TTFB_TIMEOUT` | `5s` | Maximum time to wait for first byte of request response (time-to-first-byte) | -| `RESP_TIMEOUT` | `10s` | Maximum time for complete response transfer | +| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). | +| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. | ## The gossip domain: gossipsub @@ -127,7 +127,7 @@ Clients MUST support the [gossipsub](https://github.com/libp2p/specs/tree/master **Gossipsub Parameters** -*Note: Parameters listed here are subject to a large-scale network feasibility study.* +*Note*: Parameters listed here are subject to a large-scale network feasibility study. 
The following gossipsub [parameters](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub#meshsub-an-overlay-mesh-router) will be used: @@ -142,16 +142,16 @@ The following gossipsub [parameters](https://github.com/libp2p/specs/tree/master ### Topics -Topics are plain UTF-8 strings, and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages). +Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages). Topic strings have form: `/eth2/TopicName/TopicEncoding`. This defines both the type of data being sent on the topic and how the data field of the message is encoded. (Further details can be found in [Messages](#Messages)). -There are two main topics used to propagate attestations and beacon blocks to all nodes on the network. Their `TopicName`'s are: +There are two main topics used to propagate attestations and beacon blocks to all nodes on the network. Their `TopicName`s are: - `beacon_block` - This topic is used solely for propagating new beacon blocks to all nodes on the networks. Blocks are sent in their entirety. Clients MUST validate the block proposer signature before forwarding it across the network. - `beacon_attestation` - This topic is used to propagate aggregated attestations (in their entirety) to subscribing nodes (typically block proposers) to be included in future blocks. Clients MUST validate that the block being voted for passes validation before forwarding the attestation on the network (TODO: [additional validations](https://github.com/ethereum/eth2.0-specs/issues/1332)). -Additional topics are used to propagate lower frequency validator messages. Their `TopicName`’s are: +Additional topics are used to propagate lower frequency validator messages. Their `TopicName`s are: - `voluntary_exit` - This topic is used solely for propagating voluntary validator exits to proposers on the network. 
Voluntary exits are sent in their entirety. Clients who receive a voluntary exit on this topic MUST validate the conditions within `process_voluntary_exit` before forwarding it across the network. - `proposer_slashing` - This topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. Clients who receive a proposer slashing on this topic MUST validate the conditions within `process_proposer_slashing` before forwarding it across the network. @@ -195,11 +195,11 @@ Topics are post-fixed with an encoding. Encodings define how the payload of a go #### Interop -- `ssz` - All objects are SSZ-encoded. Example: The beacon block topic string is: `/beacon_block/ssz` and the data field of a gossipsub message is an ssz-encoded `BeaconBlock`. +- `ssz` - All objects are [SSZ-encoded](#ssz-encoding). Example: The beacon block topic string is `/beacon_block/ssz`, and the data field of a gossipsub message is an ssz-encoded `BeaconBlock`. #### Mainnet -- `ssz_snappy` - All objects are ssz-encoded and then compressed with snappy. Example: The beacon attestation topic string is: `/beacon_attestation/ssz_snappy` and the data field of a gossipsub message is an `Attestation` that has been ssz-encoded then compressed with snappy. +- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). Example: The beacon attestation topic string is `/beacon_attestation/ssz_snappy`, and the data field of a gossipsub message is an `Attestation` that has been SSZ-encoded and then compressed with Snappy. Implementations MUST use a single encoding. Changing an encoding will require coordination between participating implementations. @@ -217,16 +217,16 @@ With: - `ProtocolPrefix` - messages are grouped into families identified by a shared libp2p protocol name prefix. In this case, we use `/eth2/beacon_chain/req`. 
- `MessageName` - each request is identified by a name consisting of English alphabet, digits and underscores (`_`). -- `SchemaVersion` - an ordinal version number (e.g. 1, 2, 3…) Each schema is versioned to facilitate backward and forward-compatibility when possible. -- `Encoding` - while the schema defines the data types in more abstract terms, the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encoding-strategies) section, for further details. +- `SchemaVersion` - an ordinal version number (e.g. 1, 2, 3…). Each schema is versioned to facilitate backward and forward-compatibility when possible. +- `Encoding` - while the schema defines the data types in more abstract terms, the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encoding-strategies) section for further details. -This protocol segregation allows libp2p `multistream-select 1.0` / `multiselect 2.0` to handle the request type, version and encoding negotiation before establishing the underlying streams. +This protocol segregation allows libp2p `multistream-select 1.0` / `multiselect 2.0` to handle the request type, version, and encoding negotiation before establishing the underlying streams. ### Req/Resp interaction We use ONE stream PER request/response interaction. Streams are closed when the interaction finishes, whether in success or in error. -Request/response messages MUST adhere to the encoding specified in the protocol name, and follow this structure (relaxed BNF grammar): +Request/response messages MUST adhere to the encoding specified in the protocol name and follow this structure (relaxed BNF grammar): ``` request ::= | @@ -234,19 +234,19 @@ response ::= | | result ::= “0” | “1” | “2” | [“128” ... 
”255”] ``` -The encoding-dependent header may carry metadata or assertions such as the encoded payload length, for integrity and attack proofing purposes. It is not strictly necessary to length-prefix payloads, because req/resp streams are single-use, and stream closures implicitly delimit the boundaries, but certain encodings like SSZ do, for added security. +The encoding-dependent header may carry metadata or assertions such as the encoded payload length, for integrity and attack proofing purposes. Because req/resp streams are single-use and stream closures implicitly delimit the boundaries, it is not strictly necessary to length-prefix payloads; however, certain encodings like SSZ do, for added security. `encoded-payload` has a maximum byte size of `REQ_RESP_MAX_SIZE`. -Clients MUST ensure the payload size is less than or equal to `REQ_RESP_MAX_SIZE`, if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance. +Clients MUST ensure the payload size is less than or equal to `REQ_RESP_MAX_SIZE`; if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance. #### Requesting side Once a new stream with the protocol ID for the request type has been negotiated, the full request message should be sent immediately. It should be encoded according to the encoding strategy. -The requester MUST close the write side of the stream once it finishes writing the request message - at this point, the stream will be half-closed. +The requester MUST close the write side of the stream once it finishes writing the request message—at this point, the stream will be half-closed. -The requester MUST wait a maximum of `TTFB_TIMEOUT` for the first response byte to arrive (time to first byte – or TTFB – timeout). 
On that happening, the requester will allow further `RESP_TIMEOUT` to receive the full response. +The requester MUST wait a maximum of `TTFB_TIMEOUT` for the first response byte to arrive (time to first byte—or TTFB—timeout). On that happening, the requester will allow further `RESP_TIMEOUT` to receive the full response. If any of these timeouts fire, the requester SHOULD reset the stream and deem the req/resp operation to have failed. @@ -257,12 +257,12 @@ Once a new stream with the protocol ID for the request type has been negotiated, The responder MUST: 1. Use the encoding strategy to read the optional header. -2. If there are any length assertions for length `N`, it should read exactly `N` bytes from the stream, at which point an EOF should arise (no more bytes). Should this is not the case, it should be treated as a failure. +2. If there are any length assertions for length `N`, it should read exactly `N` bytes from the stream, at which point an EOF should arise (no more bytes). Should this not be the case, it should be treated as a failure. 3. Deserialize the expected type, and process the request. 4. Write the response (result, optional header, payload). 5. Close their write side of the stream. At this point, the stream will be fully closed. -If steps (1), (2) or (3) fail due to invalid, malformed or inconsistent data, the responder MUST respond in error. Clients tracking peer reputation MAY record such failures, as well as unexpected events, e.g. early stream resets. +If steps (1), (2), or (3) fail due to invalid, malformed, or inconsistent data, the responder MUST respond in error. Clients tracking peer reputation MAY record such failures, as well as unexpected events, e.g. early stream resets. The entire request should be read in no more than `RESP_TIMEOUT`. Upon a timeout, the responder SHOULD reset the stream. 
@@ -276,7 +276,7 @@ It can have one of the following values, encoded as a single unsigned byte: Clients MAY use response codes above `128` to indicate alternative, erroneous request-specific responses. -The range `[3, 127]` is RESERVED for future usages, and should be treated as error if not recognised expressly. +The range `[3, 127]` is RESERVED for future usages, and should be treated as error if not recognized expressly. The `ErrorMessage` schema is: @@ -286,7 +286,7 @@ The `ErrorMessage` schema is: ) ``` -*Note that the String type is encoded as UTF-8 bytes without NULL terminator when SSZ-encoded.* +*Note*: The String type is encoded as UTF-8 bytes without NULL terminator when SSZ-encoded. A response therefore has the form: ``` @@ -294,22 +294,22 @@ A response therefore has the form: | result | header (opt) | encoded_response | +--------+--------+--------+--------+--------+--------+ ``` -Here `result` represents the 1-byte response code. +Here, `result` represents the 1-byte response code. ### Encoding strategies The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time: -- `ssz`: the contents are [SSZ](https://github.com/ethereum/eth2.0-specs/blob/192442be51a8a6907d6401dffbf5c73cb220b760/specs/networking/libp2p-standardization.md#ssz-encoding) encoded. This encoding type MUST be supported by all clients. -- `ssz_snappy`: the contents are SSZ encoded, and subsequently compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; and MUST be supported in mainnet. +- `ssz`: The contents are [SSZ-encoded](#ssz-encoding). This encoding type MUST be supported by all clients. +- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; MUST be supported in mainnet. 
-#### SSZ encoding strategy (with or without Snappy) +#### SSZ-encoding strategy (with or without Snappy) -The [SimpleSerialize (SSZ) specification](https://github.com/ethereum/eth2.0-specs/blob/192442be51a8a6907d6401dffbf5c73cb220b760/specs/simple-serialize.md) outlines how objects are SSZ-encoded. If the Snappy variant is selected, we feed the serialised form to the Snappy compressor on encoding. The inverse happens on decoding. +The [SimpleSerialize (SSZ) specification](../simple-serialize.md) outlines how objects are SSZ-encoded. If the Snappy variant is selected, we feed the serialized form to the Snappy compressor on encoding. The inverse happens on decoding. **Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST prefix all encoded and compressed (if applicable) payloads with an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). -Note that parameters defined as `[]VariableName` are SSZ-encoded containerless vectors. +*Note*: Parameters defined as `[]VariableName` are SSZ-encoded containerless vectors. ### Messages @@ -329,10 +329,10 @@ Note that parameters defined as `[]VariableName` are SSZ-encoded containerless v ``` The fields are: -- `fork_version`: The beacon_state `Fork` version -- `finalized_root`: The latest finalized root the node knows about -- `finalized_epoch`: The latest finalized epoch the node knows about -- `head_root`: The block hash tree root corresponding to the head of the chain as seen by the sending node +- `fork_version`: The beacon_state `Fork` version. +- `finalized_root`: The latest finalized root the node knows about. +- `finalized_epoch`: The latest finalized epoch the node knows about. +- `head_root`: The block hash tree root corresponding to the head of the chain as seen by the sending node. - `head_slot`: The slot corresponding to the `head_root`. Clients exchange hello messages upon connection, forming a two-phase handshake. 
The first message the initiating client sends MUST be the hello message. In response, the receiving client MUST respond with its own hello message. @@ -415,7 +415,7 @@ Response Content: Requests blocks by their block roots. The response is a list of `BeaconBlock` with the same length as the request. Blocks are returned in order of the request and any missing/unknown blocks are left empty (SSZ null `BeaconBlock`). -`RecentBeaconBlocks` is primarily used to recover recent blocks, for example when receiving a block or attestation whose parent is unknown. +`RecentBeaconBlocks` is primarily used to recover recent blocks (ex. when receiving a block or attestation whose parent is unknown). Clients MUST support requesting blocks since the latest finalized epoch. @@ -448,9 +448,9 @@ Specifications of these parameters can be found in the [ENR Specification](http: #### Interop -In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry ETH2 capability information, as it would be superfluous. +In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry Eth 2.0 capability information, as it would be superfluous. -Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an ETH2 peer, in order to eschew connecting to ETH1 peers. +Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an Eth 2.0 peer, in order to eschew connecting to Eth 1.0 peers. #### Mainnet @@ -466,15 +466,15 @@ This feature will not be used in the interoperability testnet. In mainnet, we plan to use discv5’s topic advertisement feature as a rendezvous facility for peers on shards (thus subscribing to the relevant gossipsub topics). 
-# Design Decision Rationale +# Design decision rationale ## Transport ### Why are we defining specific transports? -libp2p peers can listen on multiple transports concurrently, and these can change over time. multiaddrs not only encode the address, but also the transport to be used to dial. +libp2p peers can listen on multiple transports concurrently, and these can change over time. Multiaddrs encode not only the address but also the transport to be used to dial. -Due to this dynamic nature, agreeing on specific transports like TCP, QUIC or WebSockets on paper becomes irrelevant. +Due to this dynamic nature, agreeing on specific transports like TCP, QUIC, or WebSockets on paper becomes irrelevant. However, it is useful to define a minimum baseline for interoperability purposes. @@ -482,34 +482,34 @@ However, it is useful to define a minimum baseline for interoperability purposes Clients may support other transports such as libp2p QUIC, WebSockets, and WebRTC transports, if available in the language of choice. While interoperability shall not be harmed by lack of such support, the advantages are desirable: -- better latency, performance and other QoS characteristics (QUIC). -- paving the way for interfacing with future light clients (WebSockets, WebRTC). +- Better latency, performance, and other QoS characteristics (QUIC). +- Paving the way for interfacing with future light clients (WebSockets, WebRTC). -The libp2p QUIC transport inherently relies on TLS 1.3 per requirement in section 7 of the [QUIC protocol specification](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7), and the accompanying [QUIC-TLS document](https://tools.ietf.org/html/draft-ietf-quic-tls-22). +The libp2p QUIC transport inherently relies on TLS 1.3 per requirement in section 7 of the [QUIC protocol specification](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7) and the accompanying [QUIC-TLS document](https://tools.ietf.org/html/draft-ietf-quic-tls-22). 
-The usage of one handshake procedure or the other shall be transparent to the ETH 2.0 application layer, once the libp2p Host/Node object has been configured appropriately. +The usage of one handshake procedure or the other shall be transparent to the Eth 2.0 application layer, once the libp2p Host/Node object has been configured appropriately. -### What are advantages of using TCP/QUIC/Websockets? +### What are the advantages of using TCP/QUIC/Websockets? -TCP is a reliable, ordered, full-duplex, congestion controlled network protocol that powers much of the Internet as we know it today. HTTP/1.1 and HTTP/2 run atop TCP. +TCP is a reliable, ordered, full-duplex, congestion-controlled network protocol that powers much of the Internet as we know it today. HTTP/1.1 and HTTP/2 run atop TCP. -QUIC is a new protocol that’s in the final stages of specification by the IETF QUIC WG. It emerged from Google’s SPDY experiment. The QUIC transport is undoubtedly promising. It’s UDP based yet reliable, ordered, reduces latency vs. TCP, is multiplexed, natively secure (TLS 1.3), offers stream-level and connection-level congestion control (thus removing head-of-line blocking), 0-RTT connection establishment, and endpoint migration, amongst other features. UDP also has better NAT traversal properties than TCP -- something we desperately pursue in peer-to-peer networks. +QUIC is a new protocol that’s in the final stages of specification by the IETF QUIC WG. It emerged from Google’s SPDY experiment. The QUIC transport is undoubtedly promising. It’s UDP-based yet reliable, ordered, multiplexed, natively secure (TLS 1.3), reduces latency vs. TCP, and offers stream-level and connection-level congestion control (thus removing head-of-line blocking), 0-RTT connection establishment, and endpoint migration, amongst other features. UDP also has better NAT traversal properties than TCP—something we desperately pursue in peer-to-peer networks. 
-QUIC is being adopted as the underlying protocol for HTTP/3. This has the potential to award us censorship resistance via deep packet inspection for free. Provided that we use the same port numbers and encryption mechanisms as HTTP/3, our traffic may be indistinguishable from standard web traffic, and we may only become subject to standard IP-based firewall filtering -- something we can counteract via other mechanisms. +QUIC is being adopted as the underlying protocol for HTTP/3. This has the potential to award us censorship resistance via deep packet inspection for free. Provided that we use the same port numbers and encryption mechanisms as HTTP/3, our traffic may be indistinguishable from standard web traffic, and we may only become subject to standard IP-based firewall filtering—something we can counteract via other mechanisms. -WebSockets and/or WebRTC transports are necessary for interaction with browsers, and will become increasingly important as we incorporate browser-based light clients to the ETH2 network. +WebSockets and/or WebRTC transports are necessary for interaction with browsers, and will become increasingly important as we incorporate browser-based light clients to the Eth 2.0 network. ### Why do we not just support a single transport? Networks evolve. Hardcoding design decisions leads to ossification, preventing the evolution of networks alongside the state of the art. Introducing changes on an ossified protocol is very costly, and sometimes, downright impracticable without causing undesirable breakage. -Modelling for upgradeability and dynamic transport selection from the get-go lays the foundation for a future-proof stack. +Modeling for upgradeability and dynamic transport selection from the get-go lays the foundation for a future-proof stack. -Clients can adopt new transports without breaking old ones; and the multi-transport ability enables constrained and sandboxed environments (e.g. 
browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), without the need for proxying or trust delegation to servers. +Clients can adopt new transports without breaking old ones, and the multi-transport ability enables constrained and sandboxed environments (e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), without the need for proxying or trust delegation to servers. ### Why are we not using QUIC for mainnet from the start? -The QUIC standard is still not finalised (at working draft 22 at the time of writing), and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). One remarkable example is node.js, where the QUIC implementation is [in early development](https://github.com/nodejs/quic). +The QUIC standard is still not finalized (at working draft 22 at the time of writing), and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). One remarkable example is node.js, where the QUIC implementation is [in early development](https://github.com/nodejs/quic). ## Multiplexing @@ -517,17 +517,17 @@ The QUIC standard is still not finalised (at working draft 22 at the time of wri [Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) is a multiplexer invented by Hashicorp that supports stream-level congestion control. Implementations exist in a limited set of languages, and it’s not a trivial piece to develop. -Conscious of that, the libp2p community conceptualised [mplex](https://github.com/libp2p/specs/blob/master/mplex/README.md) as a simple, minimal multiplexer for usage with libp2p. It does not support stream-level congestion control, and is subject to head-of-line blocking. 
+Conscious of that, the libp2p community conceptualized [mplex](https://github.com/libp2p/specs/blob/master/mplex/README.md) as a simple, minimal multiplexer for usage with libp2p. It does not support stream-level congestion control and is subject to head-of-line blocking. -Overlay multiplexers are not necessary with QUIC, as the protocol provides native multiplexing, but they need to be layered atop TCP, WebSockets, and other transports that lack such support. +Overlay multiplexers are not necessary with QUIC since the protocol provides native multiplexing, but they need to be layered atop TCP, WebSockets, and other transports that lack such support. -## Protocol Negotiation +## Protocol negotiation ### When is multiselect 2.0 due and why are we using it for mainnet? -multiselect 2.0 is currently being conceptualised. Debate started [on this issue](https://github.com/libp2p/specs/pull/95), but it got overloaded – as it tends to happen with large conceptual OSS discussions that touch the heart and core of a system. +multiselect 2.0 is currently being conceptualized. The debate started [on this issue](https://github.com/libp2p/specs/pull/95), but it got overloaded—as it tends to happen with large conceptual OSS discussions that touch the heart and core of a system. -In the following weeks (August 2019), there will be a renewed initiative to first define the requirements, constraints, assumptions and features, in order to lock in basic consensus upfront, to subsequently build on that consensus by submitting a specification for implementation. +In the following weeks (August 2019), there will be a renewed initiative to first define the requirements, constraints, assumptions, and features, in order to lock in basic consensus upfront and subsequently build on that consensus by submitting a specification for implementation. We plan to use multiselect 2.0 for mainnet because it will: @@ -563,35 +563,34 @@ SecIO is not considered secure for the purposes of this spec. 
### Why are we using Noise/TLS 1.3 for mainnet? -Copied from the Noise Protocol Framework website: +Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org): > Noise is a framework for building crypto protocols. Noise protocols support mutual and optional authentication, identity hiding, forward secrecy, zero round-trip encryption, and other advanced features. Noise in itself does not specify a single handshake procedure, but provides a framework to build secure handshakes based on Diffie-Hellman key agreement with a variety of tradeoffs and guarantees. -Noise handshakes are lightweight and simple to understand, and are used in major cryptographic-centric projects like WireGuard, I2P, Lightning. [Various](https://www.wireguard.com/papers/kobeissi-bhargavan-noise-explorer-2018.pdf) [studies](https://eprint.iacr.org/2019/436.pdf) have assessed the stated security goals of several Noise handshakes with positive results. +Noise handshakes are lightweight and simple to understand, and are used in major cryptographic-centric projects like WireGuard, I2P, and Lightning. [Various](https://www.wireguard.com/papers/kobeissi-bhargavan-noise-explorer-2018.pdf) [studies](https://eprint.iacr.org/2019/436.pdf) have assessed the stated security goals of several Noise handshakes with positive results. On the other hand, TLS 1.3 is the newest, simplified iteration of TLS. Old, insecure, obsolete ciphers and algorithms have been removed, adopting Ed25519 as the sole ECDH key agreement function. Handshakes are faster, 1-RTT data is supported, and session resumption is a reality, amongst other features. -Note that [TLS 1.3 is a prerequisite of the QUIC transport](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7), although an experiment exists to integrate Noise as the QUIC crypto layer: [nQUIC](https://eprint.iacr.org/2019/028). 
+*Note*: [TLS 1.3 is a prerequisite of the QUIC transport](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7), although an experiment exists to integrate Noise as the QUIC crypto layer: [nQUIC](https://eprint.iacr.org/2019/028).
 
 ### Why are we using encryption at all?
 
 Transport level encryption secures message exchange and provides properties that are useful for privacy, safety, and censorship resistance. These properties are derived from the following security guarantees that apply to the entire communication between two peers:
 
-- Peer authentication: the peer I’m talking to is really who they claim to be, and who I expect them to be.
+- Peer authentication: the peer I’m talking to is really who they claim to be and who I expect them to be.
 - Confidentiality: no observer can eavesdrop on the content of our messages.
 - Integrity: the data has not been tampered with by a third-party while in transit.
 - Non-repudiation: the originating peer cannot dispute that they sent the message.
 - Depending on the chosen algorithms and mechanisms (e.g. continuous HMAC), we may obtain additional guarantees, such as non-replayability (this byte could’ve only been sent *now;* e.g. by using continuous HMACs), or perfect forward secrecy (in the case that a peer key is compromised, the content of a past conversation will not be compromised).
 
-Note that transport-level encryption is not exclusive of application-level encryption or cryptography. Transport-level encryption secures the communication itself, while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.)
+Note that transport-level encryption is not exclusive of application-level encryption or cryptography. Transport-level encryption secures the communication itself, while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.).
 
 ### Will mainnet networking be untested when it launches?
Before launching mainnet, the testnet will be switched over to mainnet networking parameters, including Noise handshakes, and other new protocols. This gives us an opportunity to drill coordinated network upgrades and verify that there are no significant upgradeability gaps.
 
-
 ## Gossipsub
 
 ### Why are we using a pub/sub algorithm for block and attestation propagation?
 
@@ -606,27 +605,27 @@ For future extensibility with almost zero overhead now (besides the extra bytes
 
 ### How do we upgrade gossip channels (e.g. changes in encoding, compression)?
 
-Changing gossipsub / broadcasts requires a coordinated upgrade where all clients start publishing to the new topic together, for example during a hard fork.
+Changing gossipsub/broadcasts requires a coordinated upgrade where all clients start publishing to the new topic together, for example during a hard fork.
 
-One can envision a two-phase deployment as well where clients start listening to the new topic in a first phase then start publishing some time later, letting the traffic naturally move over to the new topic.
+One can envision a two-phase deployment as well where clients start listening to the new topic in the first phase then start publishing some time later, letting the traffic naturally move over to the new topic.
 
 ### Why must all clients use the same gossip topic instead of one negotiated between each peer pair?
 
-Supporting multiple topics / encodings would require the presence of relayers to translate between encodings and topics so as to avoid network fragmentation where participants have diverging views on the gossiped state, making the protocol more complicated and fragile.
+Supporting multiple topics/encodings would require the presence of relayers to translate between encodings and topics so as to avoid network fragmentation where participants have diverging views on the gossiped state, making the protocol more complicated and fragile.
-Gossip protocols typically remember what messages they've seen for a finite period of time based on message identity - if you publish the same message again after that time has passed, it will be re-broadcast - adding a relay delay also makes this scenario more likely.
+Gossip protocols typically remember what messages they've seen for a finite period of time based on message identity—if you publish the same message again after that time has passed, it will be re-broadcast—adding a relay delay also makes this scenario more likely.
 
-One can imagine that in a complicated upgrade scenario, we might have peers publishing the same message on two topics/encodings, but the price here is pretty high in terms of overhead - both computational and networking, so we'd rather avoid that.
+One can imagine that in a complicated upgrade scenario, we might have peers publishing the same message on two topics/encodings, but the price here is pretty high in terms of overhead—both computational and networking—so we'd rather avoid that.
 
 It is permitted for clients to publish data on alternative topics as long as they also publish on the network-wide mandatory topic.
 
 ### Why are the topics strings and not hashes?
 
-Topics names have a hierarchical structure. In the future, gossipsub may support wildcard subscriptions (e.g. subscribe to all children topics under a root prefix) by way of prefix matching. Enforcing hashes for topic names would preclude us from leveraging such features going forward.
+Topic names have a hierarchical structure. In the future, gossipsub may support wildcard subscriptions (e.g. subscribe to all children topics under a root prefix) by way of prefix matching. Enforcing hashes for topic names would preclude us from leveraging such features going forward.
 
 No security or privacy guarantees are lost as a result of choosing plaintext topic names, since the domain is finite anyway, and calculating a digest's preimage would be trivial.
-Furthermore, the ETH2 topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. +Furthermore, the Eth 2.0 topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. ### Why are there `SHARD_SUBNET_COUNT` subnets, and why is this not defined? @@ -642,7 +641,7 @@ The prohibition of unverified-block-gossiping extends to nodes that cannot verif ### How are we going to discover peers in a gossipsub topic? -Via discv5 topics. ENRs should not be used for this purpose, as they store identity, location and capability info, not volatile [advertisements](#topic-advertisement). +Via discv5 topics. ENRs should not be used for this purpose, as they store identity, location, and capability information, not volatile [advertisements](#topic-advertisement). In the interoperability testnet, all peers will be subscribed to all global beacon chain topics, so discovering peers in specific shard topics will be unnecessary. @@ -652,23 +651,23 @@ In the interoperability testnet, all peers will be subscribed to all global beac Requests are segregated by protocol ID to: -1. Leverage protocol routing in libp2p, such that the libp2p stack will route the incoming stream to the appropriate handler. This allows each the handler function for each request type to be self-contained. For an analogy, think about how you attach HTTP handlers to a REST API server. +1. Leverage protocol routing in libp2p, such that the libp2p stack will route the incoming stream to the appropriate handler. This allows the handler function for each request type to be self-contained. For an analogy, think about how you attach HTTP handlers to a REST API server. 2. Version requests independently. In a coarser-grained umbrella protocol, the entire protocol would have to be versioned even if just one field in a single message changed. 3. 
Enable clients to select the individual requests/versions they support. It would no longer be a strict requirement to support all requests, and clients, in principle, could support a subset of requests and variety of versions. 4. Enable flexibility and agility for clients adopting spec changes that impact the request, by signalling to peers exactly which subset of new/old requests they support. 5. Enable clients to explicitly choose backwards compatibility at the request granularity. Without this, clients would be forced to support entire versions of the coarser request protocol. -6. Parallelise RFCs (or ETH2 EIPs). By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently without relying on a synchronisation point to version the general top-level protocol. +6. Parallelise RFCs (or Eth 2.0 EIPs). By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently without relying on a synchronization point to version the general top-level protocol. 1. This has the benefit that clients can explicitly choose which RFCs to deploy without buying into all other RFCs that may be included in that top-level version. 2. Affording this level of granularity with a top-level protocol would imply creating as many variants (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2). 7. Allow us to simplify the payload of requests. Request-id’s and method-ids no longer need to be sent. The encoding/request type and version can all be handled by the framework. -CAVEAT: the protocol negotiation component in the current version of libp2p is called multistream-select 1.0. It is somewhat naïve and introduces overhead on every request when negotiating streams, although implementation-specific optimizations are possible to save this cost. 
Multiselect 2.0 will remove this overhead by memoizing previously selected protocols, and modelling shared protocol tables. Fortunately this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to hinder interop testing. More info is to be released from the libp2p community in the coming weeks. +**Caveat**: The protocol negotiation component in the current version of libp2p is called multistream-select 1.0. It is somewhat naïve and introduces overhead on every request when negotiating streams, although implementation-specific optimizations are possible to save this cost. Multiselect 2.0 will remove this overhead by memoizing previously selected protocols, and modeling shared protocol tables. Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to hinder interop testing. More info is to be released from the libp2p community in the coming weeks. -### Why are messages length-prefixed with a protobuf varint in the SSZ encoding? +### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding? -We are using single-use streams where each stream is closed at the end of the message - thus libp2p transparently handles message delimiting in the underlying stream. libp2p streams are full-duplex, and each party is responsible for closing their write side (like in TCP). We can therefore use stream closure to mark the end of the request and response independently. +We are using single-use streams where each stream is closed at the end of the message. Thus, libp2p transparently handles message delimiting in the underlying stream. libp2p streams are full-duplex, and each party is responsible for closing their write side (like in TCP). We can therefore use stream closure to mark the end of the request and response independently. -Nevertheless, messages are still length-prefixed - this is now being considered for removal. 
+Nevertheless, messages are still length-prefixed—this is now being considered for removal. Advantages of length-prefixing include: @@ -678,17 +677,17 @@ Advantages of length-prefixing include: Disadvantages include: -* Redundant methods of message delimiting - both stream end marker and length prefix +* Redundant methods of message delimiting—both stream end marker and length prefix * Harder to stream as length must be known up-front * Additional code path required to verify length -In some protocols, adding a length prefix serves as a form of DoS protection against very long messages, allowing the client to abort if an overlong message is about to be sent. In this protocol, we are globally limiting message sizes using `REQ_RESP_MAX_SIZE`, thus an the length prefix does not afford any additional protection. +In some protocols, adding a length prefix serves as a form of DoS protection against very long messages, allowing the client to abort if an overlong message is about to be sent. In this protocol, we are globally limiting message sizes using `REQ_RESP_MAX_SIZE`, thus the length prefix does not afford any additional protection. [Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length ints. Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte. ### Why do we version protocol strings with ordinals instead of semver? -Using semver for network protocols is confusing. It is never clear what a change in a field, even if backwards compatible on deserialisation, actually implies. Network protocol agreement should be explicit. Imagine two peers: +Using semver for network protocols is confusing. It is never clear what a change in a field, even if backwards compatible on deserialization, actually implies. Network protocol agreement should be explicit. 
Imagine two peers: - Peer A supporting v1.1.1 of protocol X. - Peer B supporting v1.1.2 of protocol X. @@ -697,9 +696,9 @@ These two peers should never speak to each other because the results can be unpr For this reason, we rely on negotiation of explicit, verbatim protocols. In the above case, peer B would provide backwards compatibility by supporting and advertising both v1.1.1 and v1.1.2 of the protocol. -Therefore, semver would be relegated to convey expectations at the human level, and it wouldn't do a good job there either, because it's unclear if "backwards-compatibility" and "breaking change" apply only to wire schema level, to behaviour, etc. +Therefore, semver would be relegated to convey expectations at the human level, and it wouldn't do a good job there either, because it's unclear if "backwards compatibility" and "breaking change" apply only to wire schema level, to behavior, etc. -For this reason, we remove semver out of the picture and replace it with ordinals that require explicit agreement and do not mandate a specific policy for changes. +For this reason, we remove and replace semver with ordinals that require explicit agreement and do not mandate a specific policy for changes. ### Why is it called Req/Resp and not RPC? @@ -713,7 +712,7 @@ discv5 is a standalone protocol, running on UDP on a dedicated port, meant for p On the other hand, libp2p Kademlia DHT is a fully-fledged DHT protocol/implementation with content routing and storage capabilities, both of which are irrelevant in this context. -We assume that ETH1 nodes will evolve to support discv5. By sharing the discovery network between ETH1 and ETH2, we benefit from the additive effect on network size that enhances resilience and resistance against certain attacks, to which smaller networks are more vulnerable. It should also assist light clients of both networks find nodes with specific capabilities. +We assume that Eth 1.0 nodes will evolve to support discv5. 
By sharing the discovery network between Eth 1.0 and 2.0, we benefit from the additive effect on network size that enhances resilience and resistance against certain attacks, to which smaller networks are more vulnerable. It should also help light clients of both networks find nodes with specific capabilities. discv5 is in the process of being audited. @@ -723,41 +722,41 @@ Ethereum Node Records are self-certified node records. Nodes craft and dissemina ENRs are key-value records with string-indexed ASCII keys. They can store arbitrary information, but EIP-778 specifies a pre-defined dictionary, including IPv4 and IPv6 addresses, secp256k1 public keys, etc. -Comparing ENRs and multiaddrs is like comparing apples and bananas. ENRs are self-certified containers of identity, addresses, and metadata about a node. Multiaddrs are address strings with the peculiarity that they’re self-describing, composable and future-proof. An ENR can contain multiaddrs, and multiaddrs can be derived securely from the fields of an authenticated ENR. +Comparing ENRs and multiaddrs is like comparing apples and oranges. ENRs are self-certified containers of identity, addresses, and metadata about a node. Multiaddrs are address strings with the peculiarity that they’re self-describing, composable and future-proof. An ENR can contain multiaddrs, and multiaddrs can be derived securely from the fields of an authenticated ENR. discv5 uses ENRs and we will presumably need to: 1. Add `multiaddr` to the dictionary, so that nodes can advertise their multiaddr under a reserved namespace in ENRs. – and/or – -2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR (ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. ETH1 nodes). +2. 
Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR (ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. Eth 1.0 nodes). ## Compression/Encoding ### Why are we using SSZ for encoding? -SSZ is used at the consensus layer and all implementations should have support for ssz encoding/decoding requiring no further dependencies to be added to client implementations. This is a natural choice for serializing objects to be sent across the wire. The actual data in most protocols will be further compressed for efficiency. +SSZ is used at the consensus layer, and all implementations should have support for SSZ-encoding/decoding, requiring no further dependencies to be added to client implementations. This is a natural choice for serializing objects to be sent across the wire. The actual data in most protocols will be further compressed for efficiency. -SSZ has well defined schema’s for consensus objects (typically sent across the wire) reducing any serialization schema data that needs to be sent. It also has defined all required types that are required for this network specification. +SSZ has well-defined schemas for consensus objects (typically sent across the wire) reducing any serialization schema data that needs to be sent. It also has defined all required types that are required for this network specification. ### Why are we compressing, and at which layers? -We compress on the wire to achieve smaller payloads per-message, which, in aggregate, result in higher efficiency, better utilisation of available bandwidth, and overall reduction in network-wide traffic overhead. +We compress on the wire to achieve smaller payloads per-message, which, in aggregate, result in higher efficiency, better utilization of available bandwidth, and overall reduction in network-wide traffic overhead. 
-At this time, libp2p does not have an out-of-the-box compression feature that can be dynamically negotiated and layered atop connections and streams, but is [being considered](https://github.com/libp2p/libp2p/issues/81). +At this time, libp2p does not have an out-of-the-box compression feature that can be dynamically negotiated and layered atop connections and streams, but it is [being considered](https://github.com/libp2p/libp2p/issues/81). -This is a non-trivial feature because the behaviour of network IO loops, kernel buffers, chunking, packet fragmentation, amongst others, need to be taken into account. libp2p streams are unbounded streams, whereas compression algorithms work best on bounded byte streams of which we have some prior knowledge. +This is a non-trivial feature because the behavior of network IO loops, kernel buffers, chunking, and packet fragmentation, amongst others, need to be taken into account. libp2p streams are unbounded streams, whereas compression algorithms work best on bounded byte streams of which we have some prior knowledge. -Compression tends not to be a one-size-fits-all problem. Lots of variables need careful evaluation, and generic approaches/choices lead to poor size shavings, which may even be counterproductive when factoring in the CPU and memory tradeoff. +Compression tends not to be a one-size-fits-all problem. A lot of variables need careful evaluation, and generic approaches/choices lead to poor size shavings, which may even be counterproductive when factoring in the CPU and memory tradeoff. For all these reasons, generically negotiating compression algorithms may be treated as a research problem at the libp2p community, one we’re happy to tackle in the medium-term. At this stage, the wisest choice is to consider libp2p a messenger of bytes, and to make application layer participate in compressing those bytes. 
This looks different depending on the interaction layer: -- Gossip domain: since gossipsub has a framing protocol and exposes an API, we compress the payload (when dictated by the encoding token in the topic name) prior to publishing the message via the API. No length prefixing is necessary because protobuf takes care of bounding the field in the serialised form. +- Gossip domain: since gossipsub has a framing protocol and exposes an API, we compress the payload (when dictated by the encoding token in the topic name) prior to publishing the message via the API. No length prefixing is necessary because protobuf takes care of bounding the field in the serialized form. - Req/Resp domain: since we define custom protocols that operate on byte streams, implementers are encouraged to encapsulate the encoding and compression logic behind MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams. ### Why are using Snappy for compression? -Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks and can calculate the size of the uncompressed object without inflating it in memory. This prevents DOS vectors where large uncompressed data is sent. +Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks, and can calculate the size of the uncompressed object without inflating it in memory. This prevents DOS vectors where large uncompressed data is sent. ### Can I get access to unencrypted bytes on the wire for debugging purposes? @@ -767,6 +766,6 @@ If your libp2p library relies on frameworks/runtimes such as Netty (jvm) or Node For specific ad-hoc testing scenarios, you can use the [plaintext/2.0.0 secure channel](https://github.com/libp2p/specs/blob/master/plaintext/README.md) (which is essentially no-op encryption or message authentication), in combination with tcpdump or Wireshark to inspect the wire. 
-# libp2p Implementations Matrix +# libp2p implementations matrix -This section will soon contain a matrix showing the maturity/state of the libp2p features required by this spec across the languages in which ETH 2.0 clients are being developed. +This section will soon contain a matrix showing the maturity/state of the libp2p features required by this spec across the languages in which Eth 2.0 clients are being developed. From 0e7287eda5ea7601707a5a4e2167f98fab699644 Mon Sep 17 00:00:00 2001 From: Jim McDonald Date: Fri, 9 Aug 2019 19:09:04 +0100 Subject: [PATCH 057/250] Add link from DepositData reference to definition --- specs/core/0_deposit-contract.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_deposit-contract.md b/specs/core/0_deposit-contract.md index af81c6bec..ade1006a0 100644 --- a/specs/core/0_deposit-contract.md +++ b/specs/core/0_deposit-contract.md @@ -38,7 +38,7 @@ The initial deployment phases of Ethereum 2.0 are implemented without consensus ### `deposit` function -The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96]` corresponding to a `DepositData` object. +The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96]` corresponding to a [`DepositData`](./0_beacon-chain.md#depositdata) object. 
#### Deposit amount From 5290b62465379f33ff15361a12eb2811d41d4832 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 11 Aug 2019 22:21:58 +0800 Subject: [PATCH 058/250] Fix + refactor `is_valid_beacon_attestation` and add basic test --- specs/core/1_shard-data-chains.md | 21 ++++---- .../test_beacon_attestation.py | 48 +++++++++++++++++++ 2 files changed, 59 insertions(+), 10 deletions(-) create mode 100644 test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_beacon_attestation.py diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 283c1a9ca..079c0b4b7 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -555,16 +555,14 @@ def shard_block_transition(state: ShardState, Let: -- `shard` be a valid `Shard` - `pre_state` is the `ShardState` before processing any blocks -- `shard_blocks_or_state_roots` be the `Union[ShardBlock, Hash]` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `shard` at slot `slot` if a block exists, or the post-state-root of processing state up to and including that slot if a block does not exist. +- `shard_blocks_or_state_roots` be the `Union[ShardBlock, Hash]` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `pre_state.shard` at slot `slot` if a block exists, or the post-state-root of processing state up to and including that slot if a block does not exist. 
- `beacon_state` be the canonical `BeaconState` - `valid_attestations` be the set of valid `Attestation` objects, recursively defined - `candidate` be a candidate `Attestation` which is valid under Phase 0 rules, and for which validity is to be determined under Phase 1 rules by running `is_valid_beacon_attestation` ```python -def is_valid_beacon_attestation(shard: Shard, - pre_state: ShardState, +def is_valid_beacon_attestation(pre_state: ShardState, shard_blocks_or_state_roots: Sequence[Union[ShardBlock, Hash]], beacon_state: BeaconState, valid_attestations: Set[Attestation], @@ -587,7 +585,7 @@ def is_valid_beacon_attestation(shard: Shard, assert candidate.data.previous_attestation.epoch < compute_epoch_of_slot(candidate.data.slot) # Check crosslink data root - start_epoch = beacon_state.crosslinks[shard].epoch + start_epoch = beacon_state.crosslinks[pre_state.shard].epoch end_epoch = min(compute_epoch_of_slot(candidate.data.slot) - CROSSLINK_LOOKBACK, start_epoch + MAX_EPOCHS_PER_CROSSLINK) blocks = [] @@ -595,11 +593,14 @@ def is_valid_beacon_attestation(shard: Shard, if isinstance(shard_blocks_or_state_roots[slot], ShardBlock): blocks.append(shard_blocks_or_state_roots[slot]) else: - blocks.append(ShardBlockHeader(ShardBlockCore( - slot=slot, - state_root=shard_blocks_or_state_roots[slot], - total_bytes=pre_state.total_bytes - ), ShardBlockSignatures())) + blocks.append(ShardBlock( + core=ExtendedShardBlockCore( + slot=slot, + state_root=shard_blocks_or_state_roots[slot], + total_bytes=pre_state.total_bytes, + ), + signatures=ShardBlockSignatures(), + )) assert candidate.data.crosslink.data_root == compute_crosslink_data_root(blocks) return True diff --git a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_beacon_attestation.py b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_beacon_attestation.py new file mode 100644 index 000000000..aface905b --- /dev/null +++ 
b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_beacon_attestation.py @@ -0,0 +1,48 @@ +from eth2spec.test.context import ( + with_all_phases_except, + spec_state_test, + always_bls, +) +from eth2spec.test.helpers.phase1.shard_block import ( + build_empty_shard_block, +) +from eth2spec.test.helpers.attestations import get_valid_attestation + + +@with_all_phases_except(['phase0']) +@always_bls +@spec_state_test +def test_process_empty_shard_block(spec, state): + beacon_state = state + + shard_slot = spec.PHASE_1_FORK_SLOT + beacon_state.slot = spec.Slot(spec.PHASE_1_FORK_EPOCH * spec.SLOTS_PER_EPOCH) + shard_state = spec.get_default_shard_state(beacon_state, shard=spec.Shard(0)) + shard_state.slot = shard_slot + + block = build_empty_shard_block( + spec, + shard_state, + beacon_state, + slot=shard_slot + 1, + parent_root=spec.Hash(), + signed=True, + full_attestation=True, + ) + + yield 'pre', shard_state + yield 'beacon_state', beacon_state + yield 'block', block + + beacon_attestation = get_valid_attestation(spec, beacon_state, signed=True) + yield 'beacon_attestation', beacon_attestation + + is_valid_beacon_attestation = spec.is_valid_beacon_attestation( + pre_state=shard_state, + shard_blocks_or_state_roots=(block,), + beacon_state=beacon_state, + valid_attestations=set([beacon_attestation]), + candidate=beacon_attestation, + ) + assert is_valid_beacon_attestation + yield 'is_valid_beacon_attestation', is_valid_beacon_attestation From 3dc7430ae98d8f540dae3bbb4e9a2dcb31a2dbf2 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sat, 27 Jul 2019 08:17:06 -0400 Subject: [PATCH 059/250] Starting on phase 1 misc beacon changes --- specs/core/1_beacon_chain_misc.md | 75 +++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 specs/core/1_beacon_chain_misc.md diff --git a/specs/core/1_beacon_chain_misc.md b/specs/core/1_beacon_chain_misc.md new file mode 100644 index 000000000..09e3c2c61 --- /dev/null +++ 
b/specs/core/1_beacon_chain_misc.md @@ -0,0 +1,75 @@ +# Phase 1 miscellaneous beacon chain changes + +## Table of contents + + + +- [Helpers](#helpers) + - [pack_compact_validator](#pack_compact_validator) + - [unpack_compact_validator](#unpack_compact_validator) + - [committee_to_compact_committee](#committee_to_compact_committee) +- [Changes](#changes) + - [Persistent committees](#persistent-committees) + + + +## Helpers + +#### `pack_compact_validator` + +```python +def pack_compact_validator(index: uint64, slashed: bool, balance_in_increments: uint64) -> uint64: + """ + Creates a compact validator object representing index, slashed status, and compressed balance. + Takes as input balance-in-increments (// EFFECTIVE_BALANCE_INCREMENT) to preserve symmetry with + the unpacking function. + """ + return (index << 16) + (slashed << 15) + balance_in_increments +``` + +### `unpack_compact_validator` + +```python +def unpack_compact_validator(compact_validator: uint64) -> Tuple[uint64, bool, uint64]: + """ + Returns validator index, slashed, balance // EFFECTIVE_BALANCE_INCREMENT + """ + return compact_validator >> 16, (compact_validator >> 15) % 2, compact_validator & (2**15 - 1) +``` + +#### `committee_to_compact_committee` + +```python +def committee_to_compact_committee(state: BeaconState, committee: Sequence[ValidatorIndex]) -> CompactCommittee: + validators = [state.validators[i] for i in committee] + compact_validators = [ + pack_compact_validator(i, v.slashed, v.effective_balance // EFFECTIVE_BALANCE_INCREMENT) + for i, v in zip(committee, validators) + ] + pubkeys = [v.pubkey for v in validators] + return CompactCommittee(pubkeys=pubkeys, compact_validators=compact_validators) +``` + +## Changes + +### Persistent committees + +Add to the beacon state the following fields: + +* `previous_persistent_committee_root: Hash` +* `current_persistent_committee_root: Hash` +* `next_persistent_committee_root: Hash` + +Process the following function before 
`process_final_updates`: + +```python +def update_persistent_committee(state: BeaconState): + if (get_current_epoch(state) + 1) % EPOCHS_PER_SHARD_PERIOD == 0: + state.previous_persistent_committee_root = state.current_persistent_committee_root + state.current_persistent_committee_root = state.next_persistent_committee_root + committees = Vector[CompactCommittee, SHARD_COUNT]([ + committee_to_compact_committee(state, get_period_committee(state, get_current_epoch(state) + 1, i)) + for i in range(SHARD_COUNT) + ]) + state.next_persistent_committee_root = hash_tree_root(committees) +``` From fe9fe8a1518ca890df861210f163c82be6c22852 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 30 Jul 2019 10:01:34 -0400 Subject: [PATCH 060/250] [WIP] add receipt processing to phase 1 beacon chain --- specs/core/1_beacon_chain_misc.md | 60 +++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/specs/core/1_beacon_chain_misc.md b/specs/core/1_beacon_chain_misc.md index 09e3c2c61..b5b76a9a9 100644 --- a/specs/core/1_beacon_chain_misc.md +++ b/specs/core/1_beacon_chain_misc.md @@ -50,6 +50,65 @@ def committee_to_compact_committee(state: BeaconState, committee: Sequence[Valid return CompactCommittee(pubkeys=pubkeys, compact_validators=compact_validators) ``` +#### `get_previous_power_of_2` + +```python +def get_previous_power_of_2(x: int) -> int: + return x if x <= 2 else 2 * get_previous_power_of_2(x // 2) +``` + + +#### `concat_generalized_indices` + +```python +def concat_generalized_indices(*indices: Sequence[GeneralizedIndex]) -> GeneralizedIndex: + o = GeneralizedIndex(1) + for i in indices: + o = o * get_previous_power_of_2(i) + i + return o +``` + +#### `compute_historical_state_generalized_index` + +```python +def compute_historical_state_generalized_index(frm: ShardSlot, to: ShardSlot) -> GeneralizedIndex: + o = GeneralizedIndex(1) + for i in range(63, -1, -1): + if (to-1) & 2**i > (frm-1) & 2**i: + to = to - ((to-1) % 2**i) - 1 + o = 
concat_generalized_indices(o, get_generalized_index(ShardState, 'history_acc', i)) + return o +``` + +#### `get_generalized_index_of_crosslink_header` + +```python +def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex: + MAX_CROSSLINK_SIZE = SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK + assert MAX_CROSSLINK_SIZE == get_previous_power_of_2(MAX_CROSSLINK_SIZE) + return GeneralizedIndex(MAX_CROSSLINK_SIZE // SHARD_HEADER_SIZE + index) +``` + +#### `process_shard_receipt` + +```python +def process_shard_receipt(state: BeaconState, shard: Shard, proof: List[Hash, PLACEHOLDER], receipt: List[ShardReceiptDelta, PLACEHOLDER]): + receipt_slot = state.next_shard_receipt_period[shard] * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD + first_slot_in_last_crosslink = state.current_crosslinks[shard].start_epoch * SLOTS_PER_EPOCH + gindex = concat_generalized_indices( + get_generalized_index_of_crosslink_header(0), + get_generalized_index(ShardBlockHeader, 'state_root') + compute_historical_state_generalized_index(receipt_slot, first_slot_in_last_crosslink) + get_generalized_index(ShardState, 'receipt_root') + ) + assert verify_merkle_proof( + leaf=hash_tree_root(receipt), + proof=proof, + index=gindex, + root=state.current_crosslinks[shard].data_root + ) +``` + ## Changes ### Persistent committees @@ -59,6 +118,7 @@ Add to the beacon state the following fields: * `previous_persistent_committee_root: Hash` * `current_persistent_committee_root: Hash` * `next_persistent_committee_root: Hash` +* `next_shard_receipt_period: Vector[uint, SHARD_COUNT]`, values initialized to `PHASE_1_FORK_SLOT // SLOTS_PER_EPOCH // EPOCHS_PER_SHARD_PERIOD` Process the following function before `process_final_updates`: From caadc0d2349b464588b9c57a168bb58e913d2b12 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 30 Jul 2019 10:15:18 -0400 Subject: [PATCH 061/250] Update 1_beacon_chain_misc.md --- 
specs/core/1_beacon_chain_misc.md | 36 +++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/specs/core/1_beacon_chain_misc.md b/specs/core/1_beacon_chain_misc.md index b5b76a9a9..9e81acadf 100644 --- a/specs/core/1_beacon_chain_misc.md +++ b/specs/core/1_beacon_chain_misc.md @@ -13,6 +13,17 @@ +## Classes + +#### `ShardReceiptProof` + +```python +class ShardReceiptProof(Container): + shard: Shard + proof: List[Hash, PLACEHOLDER] + receipt: List[ShardReceiptDelta, PLACEHOLDER] +``` + ## Helpers #### `pack_compact_validator` @@ -27,7 +38,7 @@ def pack_compact_validator(index: uint64, slashed: bool, balance_in_increments: return (index << 16) + (slashed << 15) + balance_in_increments ``` -### `unpack_compact_validator` +#### `unpack_compact_validator` ```python def unpack_compact_validator(compact_validator: uint64) -> Tuple[uint64, bool, uint64]: @@ -92,9 +103,9 @@ def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex: #### `process_shard_receipt` ```python -def process_shard_receipt(state: BeaconState, shard: Shard, proof: List[Hash, PLACEHOLDER], receipt: List[ShardReceiptDelta, PLACEHOLDER]): - receipt_slot = state.next_shard_receipt_period[shard] * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD - first_slot_in_last_crosslink = state.current_crosslinks[shard].start_epoch * SLOTS_PER_EPOCH +def process_shard_receipt(state: BeaconState, receipt_proof: ShardReceiptProof): + receipt_slot = state.next_shard_receipt_period[receipt_proof.shard] * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD + first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SLOTS_PER_EPOCH gindex = concat_generalized_indices( get_generalized_index_of_crosslink_header(0), get_generalized_index(ShardBlockHeader, 'state_root') @@ -102,11 +113,16 @@ def process_shard_receipt(state: BeaconState, shard: Shard, proof: List[Hash, PL get_generalized_index(ShardState, 'receipt_root') ) assert verify_merkle_proof( - 
leaf=hash_tree_root(receipt), - proof=proof, + leaf=hash_tree_root(receipt_proof.receipt), + proof=receipt_proof.proof, index=gindex, root=state.current_crosslinks[shard].data_root ) + for delta in receipt_proof.receipt: + increase_balance(state, delta.index, state.validators[delta.index].effective_balance * delta.reward_coefficient // REWARD_COEFFICIENT_BASE) + decrease_balance(state, delta.index, delta.block_fee) + state.next_shard_receipt_period[receipt_proof.shard] += 1 + increase_balance(state, get_beacon_proposer_index(state), MICRO_REWARD) ``` ## Changes @@ -133,3 +149,11 @@ def update_persistent_committee(state: BeaconState): ]) state.next_persistent_committee_root = hash_tree_root(committees) ``` + +### Shard receipt processing + +Add to the beacon block body the following object: + +* `shard_receipts: List[ShardReceipt, MAX_SHARD_RECEIPTS]` + +Use `process_shard_receipt` to process each receipt. From 3f20aca65ee4759f72c865cfc6a5338a48189bf9 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 30 Jul 2019 11:55:58 -0400 Subject: [PATCH 062/250] Added comments and ToC --- specs/core/1_beacon_chain_misc.md | 49 +++++++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/specs/core/1_beacon_chain_misc.md b/specs/core/1_beacon_chain_misc.md index 9e81acadf..ceb514878 100644 --- a/specs/core/1_beacon_chain_misc.md +++ b/specs/core/1_beacon_chain_misc.md @@ -4,12 +4,21 @@ +- [Classes](#classes) + - [ShardReceiptProof](#shardreceiptproof) - [Helpers](#helpers) - [pack_compact_validator](#pack_compact_validator) - [unpack_compact_validator](#unpack_compact_validator) - [committee_to_compact_committee](#committee_to_compact_committee) + - [get_previous_power_of_2](#get_previous_power_of_2) + - [verify_merkle_proof](#verify_merkle_proof) + - [concat_generalized_indices](#concat_generalized_indices) + - [compute_historical_state_generalized_index](#compute_historical_state_generalized_index) + - 
[get_generalized_index_of_crosslink_header](#get_generalized_index_of_crosslink_header) + - [process_shard_receipt](#process_shard_receipt) - [Changes](#changes) - [Persistent committees](#persistent-committees) + - [Shard receipt processing](#shard-receipt-processing) @@ -52,6 +61,9 @@ def unpack_compact_validator(compact_validator: uint64) -> Tuple[uint64, bool, u ```python def committee_to_compact_committee(state: BeaconState, committee: Sequence[ValidatorIndex]) -> CompactCommittee: + """ + Given a state and a list of validator indices, outputs the CompactCommittee representing them. + """ validators = [state.validators[i] for i in committee] compact_validators = [ pack_compact_validator(i, v.slashed, v.effective_balance // EFFECTIVE_BALANCE_INCREMENT) @@ -68,11 +80,27 @@ def get_previous_power_of_2(x: int) -> int: return x if x <= 2 else 2 * get_previous_power_of_2(x // 2) ``` +#### `verify_merkle_proof` + +```python +def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool: + assert len(proof) == log2(index) + for i, h in enumerate(proof): + if index & 2**i: + leaf = hash(h + leaf) + else: + leaf = hash(leaf + h) + return leaf == root +``` #### `concat_generalized_indices` ```python def concat_generalized_indices(*indices: Sequence[GeneralizedIndex]) -> GeneralizedIndex: + """ + Given generalized indices i1 for A -> B, i2 for B -> C .... i_n for Y -> Z, returns + the generalized index for A -> Z. 
+ """ o = GeneralizedIndex(1) for i in indices: o = o * get_previous_power_of_2(i) + i @@ -82,11 +110,17 @@ def concat_generalized_indices(*indices: Sequence[GeneralizedIndex]) -> Generali #### `compute_historical_state_generalized_index` ```python -def compute_historical_state_generalized_index(frm: ShardSlot, to: ShardSlot) -> GeneralizedIndex: +def compute_historical_state_generalized_index(earlier: ShardSlot, later: ShardSlot) -> GeneralizedIndex: + """ + Computes the generalized index of the state root of slot `frm` based on the state root of slot `to`. + Relies on the `history_acc` in the `ShardState`, where `history_acc[i]` maintains the most recent 2**i'th + slot state. Works by tracing a `log(later-earlier)` step path from `later` to `earlier` through intermediate + blocks at the next available multiples of descending powers of two. + """ o = GeneralizedIndex(1) for i in range(63, -1, -1): - if (to-1) & 2**i > (frm-1) & 2**i: - to = to - ((to-1) % 2**i) - 1 + if (later-1) & 2**i > (earlier-1) & 2**i: + later = later - ((later-1) % 2**i) - 1 o = concat_generalized_indices(o, get_generalized_index(ShardState, 'history_acc', i)) return o ``` @@ -95,6 +129,9 @@ def compute_historical_state_generalized_index(frm: ShardSlot, to: ShardSlot) -> ```python def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex: + """ + Gets the generalized index for the root of the index'th header in a crosslink. + """ MAX_CROSSLINK_SIZE = SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK assert MAX_CROSSLINK_SIZE == get_previous_power_of_2(MAX_CROSSLINK_SIZE) return GeneralizedIndex(MAX_CROSSLINK_SIZE // SHARD_HEADER_SIZE + index) @@ -104,6 +141,9 @@ def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex: ```python def process_shard_receipt(state: BeaconState, receipt_proof: ShardReceiptProof): + """ + Processes a ShardReceipt object. 
+ """ receipt_slot = state.next_shard_receipt_period[receipt_proof.shard] * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SLOTS_PER_EPOCH gindex = concat_generalized_indices( @@ -140,6 +180,9 @@ Process the following function before `process_final_updates`: ```python def update_persistent_committee(state: BeaconState): + """ + Updates persistent committee roots at boundary blocks. + """ if (get_current_epoch(state) + 1) % EPOCHS_PER_SHARD_PERIOD == 0: state.previous_persistent_committee_root = state.current_persistent_committee_root state.current_persistent_committee_root = state.next_persistent_committee_root From 5dad213457636a9a6d17002820a3559c6015076b Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 30 Jul 2019 11:58:33 -0400 Subject: [PATCH 063/250] Fixed up ToC --- specs/core/1_beacon_chain_misc.md | 32 ++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/specs/core/1_beacon_chain_misc.md b/specs/core/1_beacon_chain_misc.md index ceb514878..3798d1d40 100644 --- a/specs/core/1_beacon_chain_misc.md +++ b/specs/core/1_beacon_chain_misc.md @@ -4,21 +4,23 @@ -- [Classes](#classes) - - [ShardReceiptProof](#shardreceiptproof) -- [Helpers](#helpers) - - [pack_compact_validator](#pack_compact_validator) - - [unpack_compact_validator](#unpack_compact_validator) - - [committee_to_compact_committee](#committee_to_compact_committee) - - [get_previous_power_of_2](#get_previous_power_of_2) - - [verify_merkle_proof](#verify_merkle_proof) - - [concat_generalized_indices](#concat_generalized_indices) - - [compute_historical_state_generalized_index](#compute_historical_state_generalized_index) - - [get_generalized_index_of_crosslink_header](#get_generalized_index_of_crosslink_header) - - [process_shard_receipt](#process_shard_receipt) -- [Changes](#changes) - - [Persistent committees](#persistent-committees) - - [Shard receipt 
processing](#shard-receipt-processing) +- [Phase 1 miscellaneous beacon chain changes](#phase-1-miscellaneous-beacon-chain-changes) + - [Table of contents](#table-of-contents) + - [Classes](#classes) + - [ShardReceiptProof](#shardreceiptproof) + - [Helpers](#helpers) + - [pack_compact_validator](#pack_compact_validator) + - [unpack_compact_validator](#unpack_compact_validator) + - [committee_to_compact_committee](#committee_to_compact_committee) + - [get_previous_power_of_2](#get_previous_power_of_2) + - [verify_merkle_proof](#verify_merkle_proof) + - [concat_generalized_indices](#concat_generalized_indices) + - [compute_historical_state_generalized_index](#compute_historical_state_generalized_index) + - [get_generalized_index_of_crosslink_header](#get_generalized_index_of_crosslink_header) + - [process_shard_receipt](#process_shard_receipt) + - [Changes](#changes) + - [Persistent committees](#persistent-committees) + - [Shard receipt processing](#shard-receipt-processing) From fab37e747a44d79f75b08d869b35fda972421bba Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 30 Jul 2019 11:59:12 -0400 Subject: [PATCH 064/250] Fixed position of Classes --- specs/core/1_beacon_chain_misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon_chain_misc.md b/specs/core/1_beacon_chain_misc.md index 3798d1d40..673f99ce4 100644 --- a/specs/core/1_beacon_chain_misc.md +++ b/specs/core/1_beacon_chain_misc.md @@ -6,7 +6,7 @@ - [Phase 1 miscellaneous beacon chain changes](#phase-1-miscellaneous-beacon-chain-changes) - [Table of contents](#table-of-contents) - - [Classes](#classes) + - [Classes](#classes) - [ShardReceiptProof](#shardreceiptproof) - [Helpers](#helpers) - [pack_compact_validator](#pack_compact_validator) From b9fddfe3103a77fb7502af40f2c0755aea65431d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 1 Aug 2019 10:58:15 -0400 Subject: [PATCH 065/250] Made code work with #1186 --- specs/core/1_beacon_chain_misc.md | 18 
++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/specs/core/1_beacon_chain_misc.md b/specs/core/1_beacon_chain_misc.md index 673f99ce4..4aa1796b6 100644 --- a/specs/core/1_beacon_chain_misc.md +++ b/specs/core/1_beacon_chain_misc.md @@ -86,29 +86,15 @@ def get_previous_power_of_2(x: int) -> int: ```python def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool: - assert len(proof) == log2(index) + assert len(proof) == get_generalized_index_length(index) for i, h in enumerate(proof): - if index & 2**i: + if get_generalized_index_bit(index, i): leaf = hash(h + leaf) else: leaf = hash(leaf + h) return leaf == root ``` -#### `concat_generalized_indices` - -```python -def concat_generalized_indices(*indices: Sequence[GeneralizedIndex]) -> GeneralizedIndex: - """ - Given generalized indices i1 for A -> B, i2 for B -> C .... i_n for Y -> Z, returns - the generalized index for A -> Z. - """ - o = GeneralizedIndex(1) - for i in indices: - o = o * get_previous_power_of_2(i) + i - return o -``` - #### `compute_historical_state_generalized_index` ```python From a273d9e09d27238f9617394ef3e3572915af0b39 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 1 Aug 2019 09:13:09 -0600 Subject: [PATCH 066/250] minor rename of beacon chain misc to conform to other files --- specs/core/{1_beacon_chain_misc.md => 1_beacon-chain-misc.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename specs/core/{1_beacon_chain_misc.md => 1_beacon-chain-misc.md} (100%) diff --git a/specs/core/1_beacon_chain_misc.md b/specs/core/1_beacon-chain-misc.md similarity index 100% rename from specs/core/1_beacon_chain_misc.md rename to specs/core/1_beacon-chain-misc.md From 2ae7323183fca93557fee81ed8bb5ad290585d2a Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 1 Aug 2019 09:42:39 -0600 Subject: [PATCH 067/250] use codeblock syntax for misc beacon updates --- specs/core/0_beacon-chain.md | 5 +++++ 
specs/core/1_beacon-chain-misc.md | 34 +++++++++++++++++++++++-------- 2 files changed, 31 insertions(+), 8 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f0169f1d2..16d8048aa 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -480,6 +480,7 @@ class BeaconBlockBody(Container): deposits: List[Deposit, MAX_DEPOSITS] voluntary_exits: List[VoluntaryExit, MAX_VOLUNTARY_EXITS] transfers: List[Transfer, MAX_TRANSFERS] + # @shard_receipts ``` #### `BeaconBlock` @@ -533,6 +534,8 @@ class BeaconState(Container): previous_justified_checkpoint: Checkpoint # Previous epoch snapshot current_justified_checkpoint: Checkpoint finalized_checkpoint: Checkpoint + + # @persistent_committee_fields ``` ## Helper functions @@ -1237,6 +1240,7 @@ def process_epoch(state: BeaconState) -> None: # @process_reveal_deadlines # @process_challenge_deadlines process_slashings(state) + # @update_persistent_committee process_final_updates(state) # @after_process_final_updates ``` @@ -1598,6 +1602,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: (body.deposits, process_deposit), (body.voluntary_exits, process_voluntary_exit), (body.transfers, process_transfer), + # @process_shard_receipts ): for operation in operations: function(state, operation) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 4aa1796b6..f816c6259 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -159,15 +159,23 @@ def process_shard_receipt(state: BeaconState, receipt_proof: ShardReceiptProof): Add to the beacon state the following fields: -* `previous_persistent_committee_root: Hash` -* `current_persistent_committee_root: Hash` -* `next_persistent_committee_root: Hash` -* `next_shard_receipt_period: Vector[uint, SHARD_COUNT]`, values initialized to `PHASE_1_FORK_SLOT // SLOTS_PER_EPOCH // EPOCHS_PER_SHARD_PERIOD` +```python +# begin insert 
@persistent_committee_fields + previous_persistent_committee_root: Hash + current_persistent_committee_root: Hash + next_persistent_committee_root: Hash + next_shard_receipt_period: Vector[uint, SHARD_COUNT] +# end insert @persistent_committee_fields +``` +`next_shard_receipt_period` values initialized to `PHASE_1_FORK_SLOT // SLOTS_PER_EPOCH // EPOCHS_PER_SHARD_PERIOD` -Process the following function before `process_final_updates`: +Run `update_persistent_committee` immediately before `process_final_updates`: ```python -def update_persistent_committee(state: BeaconState): +# begin insert @update_persistent_committee + update_persistent_committee(state) +# end insert @update_persistent_committee +def update_persistent_committee(state: BeaconState) -> None: """ Updates persistent committee roots at boundary blocks. """ @@ -183,8 +191,18 @@ def update_persistent_committee(state: BeaconState): ### Shard receipt processing -Add to the beacon block body the following object: +Add the `shard_receipts` operation to `BeaconBlockBody`: -* `shard_receipts: List[ShardReceipt, MAX_SHARD_RECEIPTS]` +```python +# begin insert @shard_receipts + shard_receipts: List[ShardReceipt, MAX_SHARD_RECEIPTS] +# end insert @shard_receipts +``` Use `process_shard_receipt` to process each receipt. 
+ +```python +# begin insert @process_shard_receipts + (body.shard_receipts, process_shard_receipts), +# end insert @process_shard_receipts +``` From 6560bc42d2fc877cddf044c0a993efde74ee7b72 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 1 Aug 2019 13:39:32 -0600 Subject: [PATCH 068/250] Apply suggestions from code review Co-Authored-By: Carl Beekhuizen --- specs/core/1_beacon-chain-misc.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index f816c6259..d1ea48dba 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -30,9 +30,9 @@ ```python class ShardReceiptProof(Container): - shard: Shard - proof: List[Hash, PLACEHOLDER] - receipt: List[ShardReceiptDelta, PLACEHOLDER] + shard: Shard + proof: List[Hash, PLACEHOLDER] + receipt: List[ShardReceiptDelta, PLACEHOLDER] ``` ## Helpers @@ -107,9 +107,9 @@ def compute_historical_state_generalized_index(earlier: ShardSlot, later: ShardS """ o = GeneralizedIndex(1) for i in range(63, -1, -1): - if (later-1) & 2**i > (earlier-1) & 2**i: - later = later - ((later-1) % 2**i) - 1 - o = concat_generalized_indices(o, get_generalized_index(ShardState, 'history_acc', i)) + if (later - 1) & 2**i > (earlier - 1) & 2**i: + later = later - ((later - 1) % 2**i) - 1 + o = concat_generalized_indices(o, get_generalized_index(ShardState, 'history_acc', i)) return o ``` @@ -136,8 +136,8 @@ def process_shard_receipt(state: BeaconState, receipt_proof: ShardReceiptProof): first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SLOTS_PER_EPOCH gindex = concat_generalized_indices( get_generalized_index_of_crosslink_header(0), - get_generalized_index(ShardBlockHeader, 'state_root') - compute_historical_state_generalized_index(receipt_slot, first_slot_in_last_crosslink) + get_generalized_index(ShardBlockHeader, 'state_root'), + 
compute_historical_state_generalized_index(receipt_slot, first_slot_in_last_crosslink), get_generalized_index(ShardState, 'receipt_root') ) assert verify_merkle_proof( From 0fa4491c412040ff3feaa9c2c76c5c07d5f63c69 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 1 Aug 2019 13:45:07 -0600 Subject: [PATCH 069/250] lint --- specs/core/0_beacon-chain.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 16d8048aa..5ddceebc1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -534,7 +534,6 @@ class BeaconState(Container): previous_justified_checkpoint: Checkpoint # Previous epoch snapshot current_justified_checkpoint: Checkpoint finalized_checkpoint: Checkpoint - # @persistent_committee_fields ``` From 7132778a6993d12175c8870c933bcc7f53240848 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 6 Aug 2019 10:14:45 -0400 Subject: [PATCH 070/250] Added compact committee class --- specs/core/1_beacon-chain-misc.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index d1ea48dba..a8768d8b5 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -7,6 +7,7 @@ - [Phase 1 miscellaneous beacon chain changes](#phase-1-miscellaneous-beacon-chain-changes) - [Table of contents](#table-of-contents) - [Classes](#classes) + - [CompactCommittee](#compactcommittee) - [ShardReceiptProof](#shardreceiptproof) - [Helpers](#helpers) - [pack_compact_validator](#pack_compact_validator) @@ -26,6 +27,14 @@ ## Classes +#### `CompactCommittee` + +```python +class CompactCommittee(Container): + pubkeys: List[BLSPubkey, MAX_VALIDATORS_PER_COMMITTEE] + compact_validators: List[uint64, MAX_VALIDATORS_PER_COMMITTEE] +``` + #### `ShardReceiptProof` ```python From 36a22830526fed478a143c85c6cb2e4277b957b2 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 7 Aug 2019 12:13:02 -0400 Subject: [PATCH 
071/250] Shard receipts cannot penalize withdrawn validators --- specs/core/1_beacon-chain-misc.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index a8768d8b5..c1d6283ca 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -156,8 +156,9 @@ def process_shard_receipt(state: BeaconState, receipt_proof: ShardReceiptProof): root=state.current_crosslinks[shard].data_root ) for delta in receipt_proof.receipt: - increase_balance(state, delta.index, state.validators[delta.index].effective_balance * delta.reward_coefficient // REWARD_COEFFICIENT_BASE) - decrease_balance(state, delta.index, delta.block_fee) + if get_current_epoch(state) < state.validators[delta.index].withdrawable_epoch: + increase_balance(state, delta.index, state.validators[delta.index].effective_balance * delta.reward_coefficient // REWARD_COEFFICIENT_BASE) + decrease_balance(state, delta.index, delta.block_fee) state.next_shard_receipt_period[receipt_proof.shard] += 1 increase_balance(state, get_beacon_proposer_index(state), MICRO_REWARD) ``` From c5acddc071fd64603a0ed5ca3442b25442fb203b Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 11 Aug 2019 22:47:23 +0800 Subject: [PATCH 072/250] Enable it in CI --- Makefile | 2 +- scripts/build_spec.py | 27 +++++++++++++++------------ 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index 318056689..3f6cf9816 100644 --- a/Makefile +++ b/Makefile @@ -86,7 +86,7 @@ $(PY_SPEC_PHASE_0_TARGETS): $(PY_SPEC_PHASE_0_DEPS) python3 $(SCRIPT_DIR)/build_spec.py -p0 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/validator/0_beacon-chain-validator.md $@ $(PY_SPEC_DIR)/eth2spec/phase1/spec.py: $(PY_SPEC_PHASE_1_DEPS) - python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md 
$(SPEC_DIR)/core/0_fork-choice.md $@ + python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/1_beacon-chain-misc.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $(SPEC_DIR)/core/0_fork-choice.md $@ CURRENT_DIR = ${CURDIR} diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 96866cc8a..5c353ede0 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -263,16 +263,18 @@ def build_phase0_spec(phase0_sourcefile: str, fork_choice_sourcefile: str, def build_phase1_spec(phase0_sourcefile: str, + phase1_phase1_shard_misc_source_file: str, phase1_custody_sourcefile: str, phase1_shard_sourcefile: str, fork_choice_sourcefile: str, outfile: str=None) -> Optional[str]: phase0_spec = get_spec(phase0_sourcefile) + phase1_shard_misc = get_spec(phase1_phase1_shard_misc_source_file) phase1_custody = get_spec(phase1_custody_sourcefile) phase1_shard_data = get_spec(phase1_shard_sourcefile) fork_choice_spec = get_spec(fork_choice_sourcefile) spec_objects = phase0_spec - for value in [phase1_custody, phase1_shard_data, fork_choice_spec]: + for value in [phase1_shard_misc, phase1_custody, phase1_shard_data, fork_choice_spec]: spec_objects = combine_spec_objects(spec_objects, value) spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS) if outfile is not None: @@ -285,17 +287,18 @@ if __name__ == '__main__': description = ''' Build the specs from the md docs. 
If building phase 0: - 1st argument is input spec.md - 2nd argument is input fork_choice.md - 3rd argument is input validator_guide.md + 1st argument is input 0_beacon-chain.md + 2nd argument is input 0_fork-choice.md + 3rd argument is input 0_beacon-chain-validator.md 4th argument is output spec.py If building phase 1: - 1st argument is input spec_phase0.md - 2nd argument is input spec_phase1_custody.md - 3rd argument is input spec_phase1_shard_data.md - 4th argument is input fork_choice.md - 5th argument is output spec.py + 1st argument is input 0_beacon-chain.md + 2nd argument is input 1_shard-chain-misc.md + 3rd argument is input 1_custody-game.md + 4th argument is input 1_shard-data-chains.md + 5th argument is input 0_fork-choice.md + 6th argument is output spec.py ''' parser = ArgumentParser(description=description) parser.add_argument("-p", "--phase", dest="phase", type=int, default=0, help="Build for phase #") @@ -308,10 +311,10 @@ If building phase 1: else: print(" Phase 0 requires spec, forkchoice, and v-guide inputs as well as an output file.") elif args.phase == 1: - if len(args.files) == 5: + if len(args.files) == 6: build_phase1_spec(*args.files) else: - print(" Phase 1 requires 4 input files as well as an output file: " - + "(phase0.md and phase1.md, phase1.md, fork_choice.md, output.py)") + print(" Phase 1 requires 6 input files as well as an output file: " + + "(0_fork-choice.md, 0_beacon-chain.md and 1_shard-chain-misc.md, 1_custody-game.md, 1_shard-data-chains.md, output.py)") else: print("Invalid phase: {0}".format(args.phase)) From e4e6c4d8eef1839394a4a6aba183c4cf4fd77d5d Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Mon, 12 Aug 2019 00:45:33 +0800 Subject: [PATCH 073/250] Fix the order of build spec --- Makefile | 2 +- scripts/build_spec.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 3f6cf9816..f2d953f9a 100644 --- a/Makefile +++ b/Makefile @@ -86,7 +86,7 @@ 
$(PY_SPEC_PHASE_0_TARGETS): $(PY_SPEC_PHASE_0_DEPS) python3 $(SCRIPT_DIR)/build_spec.py -p0 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/validator/0_beacon-chain-validator.md $@ $(PY_SPEC_DIR)/eth2spec/phase1/spec.py: $(PY_SPEC_PHASE_1_DEPS) - python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/1_beacon-chain-misc.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $(SPEC_DIR)/core/0_fork-choice.md $@ + python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $(SPEC_DIR)/core/1_beacon-chain-misc.md $(SPEC_DIR)/core/0_fork-choice.md $@ CURRENT_DIR = ${CURDIR} diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 6f0f160ac..44e73224f 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -269,12 +269,12 @@ def build_phase1_spec(phase0_sourcefile: str, fork_choice_sourcefile: str, outfile: str=None) -> Optional[str]: phase0_spec = get_spec(phase0_sourcefile) - phase1_shard_misc = get_spec(phase1_phase1_shard_misc_source_file) phase1_custody = get_spec(phase1_custody_sourcefile) phase1_shard_data = get_spec(phase1_shard_sourcefile) + phase1_shard_misc = get_spec(phase1_phase1_shard_misc_source_file) fork_choice_spec = get_spec(fork_choice_sourcefile) spec_objects = phase0_spec - for value in [phase1_shard_misc, phase1_custody, phase1_shard_data, fork_choice_spec]: + for value in [phase1_custody, phase1_shard_data, phase1_shard_misc, fork_choice_spec]: spec_objects = combine_spec_objects(spec_objects, value) spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS) if outfile is not None: @@ -294,9 +294,9 @@ If building phase 0: If building phase 1: 1st argument is input 0_beacon-chain.md - 2nd argument is input 1_shard-chain-misc.md - 3rd argument is input 1_custody-game.md - 4th argument is input 1_shard-data-chains.md + 2nd argument is input 
1_custody-game.md + 3rd argument is input 1_shard-data-chains.md + 4th argument is input 1_shard-chain-misc.md 5th argument is input 0_fork-choice.md 6th argument is output spec.py ''' @@ -315,6 +315,6 @@ If building phase 1: build_phase1_spec(*args.files) else: print(" Phase 1 requires 6 input files as well as an output file: " - + "(0_fork-choice.md, 0_beacon-chain.md and 1_shard-chain-misc.md, 1_custody-game.md, 1_shard-data-chains.md, output.py)") + + "(0_fork-choice.md, 0_beacon-chain.md and 1_custody-game.md, 1_shard-data-chains.md, 1_shard-chain-misc.md output.py)") else: print("Invalid phase: {0}".format(args.phase)) From b345dc0f5fc3aa9b7ded17e04c004f357fbf6389 Mon Sep 17 00:00:00 2001 From: dankrad Date: Sun, 11 Aug 2019 10:05:17 -0700 Subject: [PATCH 074/250] Legendre custody construction (#1305) * Stab at Legendre custody construction + some tests * Fix some problems and fix function puller to remove phase0 only lines in phase1 * Pass the linter * Add headings * Fix domain for BLS stub * Change Jacobi to Legendre * n -> q to clarify notation * Headings * Another missing heading * Custody subchunks via padding * Fix max_reveal_lateness stuff * Better names for reveal period functions * Better parametrization of max_reveal_lateness computation and tests for custody reveal processing * Fix linter * Allow challenging for one period after the custody reveal, shorter periods for minimal tests * Fix lint * Fix linter error --- configs/constant_presets/minimal.yaml | 4 + scripts/build_spec.py | 21 +- specs/core/0_beacon-chain.md | 9 +- specs/core/1_custody-game.md | 176 ++++++--- .../test/fork_choice/test_on_attestation.py | 4 +- .../pyspec/eth2spec/test/helpers/custody.py | 132 ++++++- .../pyspec/eth2spec/test/helpers/deposits.py | 2 +- .../test_process_attestation.py | 2 +- .../test_process_bit_challenge.py | 347 ++++++++++++++++++ .../test_process_custody_key_reveal.py | 118 ++++++ ...est_process_early_derived_secret_reveal.py | 18 +- 
test_libs/pyspec/eth2spec/utils/bls.py | 6 + .../pyspec/eth2spec/utils/merkle_minimal.py | 11 +- 13 files changed, 777 insertions(+), 73 deletions(-) create mode 100644 test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_bit_challenge.py create mode 100644 test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index ab8aab3c4..15b749b9d 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -74,6 +74,10 @@ MAX_EPOCHS_PER_CROSSLINK: 4 MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 # [customized] 2**12 (= 4,096) epochs EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 4096 +# 2**2 (= 4) epochs +EPOCHS_PER_CUSTODY_PERIOD: 4 +# 2**2 (= 4) epochs +CUSTODY_PERIOD_TO_RANDAO_PADDING: 4 # State vector lengths diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 52642c8f4..133834429 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -58,10 +58,15 @@ from eth2spec.utils.bls import ( bls_aggregate_pubkeys, bls_verify, bls_verify_multiple, + bls_signature_to_G2, ) from eth2spec.utils.hash_function import hash ''' +SUNDRY_CONSTANTS_FUNCTIONS = ''' +def ceillog2(x: uint64) -> int: + return (x - 1).bit_length() +''' SUNDRY_FUNCTIONS = ''' # Monkey patch hash cache _hash = hash @@ -111,6 +116,13 @@ def apply_constants_preset(preset: Dict[str, Any]) -> None: ''' +def remove_for_phase1(functions: Dict[str, str]): + for key, value in functions.items(): + lines = value.split("\n") + lines = filter(lambda s: "[to be removed in phase 1]" not in s, lines) + functions[key] = "\n".join(lines) + + def strip_comments(raw: str) -> str: comment_line_regex = re.compile(r'^\s+# ') lines = raw.split('\n') @@ -141,10 +153,15 @@ def objects_to_spec(functions: Dict[str, str], ] ) ) + for k in list(functions): + if "ceillog2" in k: + del functions[k] functions_spec = '\n\n'.join(functions.values()) for k 
in list(constants.keys()): if k.startswith('DOMAIN_'): constants[k] = f"DomainType(({constants[k]}).to_bytes(length=4, byteorder='little'))" + if k == "BLS12_381_Q": + constants[k] += " # noqa: E501" constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, constants[x]), constants)) ssz_objects_instantiation_spec = '\n\n'.join(ssz_objects.values()) ssz_objects_reinitialization_spec = ( @@ -157,6 +174,7 @@ def objects_to_spec(functions: Dict[str, str], spec = ( imports + '\n\n' + new_type_definitions + + '\n' + SUNDRY_CONSTANTS_FUNCTIONS + '\n\n' + constants_spec + '\n\n\n' + ssz_objects_instantiation_spec + '\n\n' + functions_spec @@ -186,7 +204,7 @@ ignored_dependencies = [ 'bit', 'boolean', 'Vector', 'List', 'Container', 'Hash', 'BLSPubkey', 'BLSSignature', 'Bytes', 'BytesN' 'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256', - 'bytes' # to be removed after updating spec doc + 'bytes', 'byte', 'BytesN' # to be removed after updating spec doc ] @@ -268,6 +286,7 @@ def build_phase1_spec(phase0_sourcefile: str, fork_choice_sourcefile: str, outfile: str=None) -> Optional[str]: phase0_spec = get_spec(phase0_sourcefile) + remove_for_phase1(phase0_spec[0]) phase1_custody = get_spec(phase1_custody_sourcefile) phase1_shard_data = get_spec(phase1_shard_sourcefile) fork_choice_spec = get_spec(fork_choice_sourcefile) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f0169f1d2..1eddae58e 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -660,8 +660,8 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe bit_1_indices = indexed_attestation.custody_bit_1_indices # Verify no index has custody bit equal to 1 [to be removed in phase 1] - if not len(bit_1_indices) == 0: - return False + if not len(bit_1_indices) == 0: # [to be removed in phase 1] + return False # [to be removed in phase 1] # Verify max number 
of indices if not len(bit_0_indices) + len(bit_1_indices) <= MAX_VALIDATORS_PER_COMMITTEE: return False @@ -1661,6 +1661,11 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: proposer_index=get_beacon_proposer_index(state), ) + # Check bitlist lengths + committee_size = get_committee_count(state, attestation.data.target.epoch) + assert len(attestation.aggregation_bits) == committee_size + assert len(attestation.custody_bits) == committee_size + if data.target.epoch == get_current_epoch(state): assert data.source == state.current_justified_checkpoint parent_crosslink = state.current_crosslinks[data.crosslink.shard] diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index f79977442..3e0a38102 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -12,6 +12,7 @@ - [Terminology](#terminology) - [Constants](#constants) - [Misc](#misc) + - [Custody game parameters](#custody-game-parameters) - [Time parameters](#time-parameters) - [Max operations per block](#max-operations-per-block) - [Reward and penalty quotients](#reward-and-penalty-quotients) @@ -33,12 +34,14 @@ - [`BeaconBlockBody`](#beaconblockbody) - [Helpers](#helpers) - [`ceillog2`](#ceillog2) + - [`is_valid_merkle_branch_with_mixin`](#is_valid_merkle_branch_with_mixin) - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count) - - [`get_bit`](#get_bit) + - [`legendre_bit`](#legendre_bit) + - [`custody_subchunkify`](#custody_subchunkify) - [`get_custody_chunk_bit`](#get_custody_chunk_bit) - [`get_chunk_bits_root`](#get_chunk_bits_root) - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period) - - [`get_reveal_period`](#get_reveal_period) + - [`get_custody_period_for_validator`](#get_custody_period_for_validator) - [`replace_empty_or_append`](#replace_empty_or_append) - [Per-block processing](#per-block-processing) - [Operations](#operations) @@ -75,11 +78,20 @@ This document details the beacon chain additions and 
changes in Phase 1 of Ether ### Misc +| `BLS12_381_Q` | `4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787` | +| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) | + +### Custody game parameters + | Name | Value | | - | - | | `BYTES_PER_SHARD_BLOCK` | `2**14` (= 16,384) | | `BYTES_PER_CUSTODY_CHUNK` | `2**9` (= 512) | -| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) | +| `BYTES_PER_CUSTODY_SUBCHUNK` | `48` | +| `CHUNKS_PER_EPOCH` | `2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK` | +| `MAX_CUSTODY_CHUNKS` | `MAX_EPOCHS_PER_CROSSLINK * CHUNKS_PER_EPOCH` | +| `CUSTODY_DATA_DEPTH` | `ceillog2(MAX_CUSTODY_CHUNKS) + 1` | +| `CUSTODY_CHUNK_BIT_DEPTH` | `ceillog2(MAX_EPOCHS_PER_CROSSLINK * CHUNKS_PER_EPOCH // 256) + 2` | ### Time parameters @@ -144,7 +156,7 @@ class CustodyBitChallenge(Container): attestation: Attestation challenger_index: ValidatorIndex responder_key: BLSSignature - chunk_bits: Bytes[PLACEHOLDER] + chunk_bits: Bitlist[MAX_CUSTODY_CHUNKS] signature: BLSSignature ``` @@ -181,10 +193,10 @@ class CustodyBitChallengeRecord(Container): class CustodyResponse(Container): challenge_index: uint64 chunk_index: uint64 - chunk: Vector[Bytes[PLACEHOLDER], BYTES_PER_CUSTODY_CHUNK] - data_branch: List[Hash, PLACEHOLDER] - chunk_bits_branch: List[Hash, PLACEHOLDER] - chunk_bits_leaf: Hash + chunk: BytesN[BYTES_PER_CUSTODY_CHUNK] + data_branch: List[Hash, CUSTODY_DATA_DEPTH] + chunk_bits_branch: List[Hash, CUSTODY_CHUNK_BIT_DEPTH] + chunk_bits_leaf: Bitvector[256] ``` ### New beacon operations @@ -225,11 +237,11 @@ Add the following fields to the end of the specified container objects. Fields w ```python class Validator(Container): - # next_custody_reveal_period is initialised to the custody period + # next_custody_secret_to_reveal is initialised to the custody period # (of the particular validator) in which the validator is activated - # = get_reveal_period(...) 
- next_custody_reveal_period: uint64 - max_reveal_lateness: uint64 + # = get_custody_period_for_validator(...) + next_custody_secret_to_reveal: uint64 + max_reveal_lateness: Epoch ``` #### `BeaconState` @@ -263,7 +275,26 @@ class BeaconBlockBody(Container): ```python def ceillog2(x: uint64) -> int: - return x.bit_length() + return (x - 1).bit_length() +``` + +### `is_valid_merkle_branch_with_mixin` + +```python +def is_valid_merkle_branch_with_mixin(leaf: Hash, + branch: Sequence[Hash], + depth: uint64, + index: uint64, + root: Hash, + mixin: uint64) -> bool: + value = leaf + for i in range(depth): + if index // (2**i) % 2: + value = hash(branch[i] + value) + else: + value = hash(value + branch[i]) + value = hash(value + mixin.to_bytes(32, "little")) + return value == root ``` ### `get_crosslink_chunk_count` @@ -271,37 +302,69 @@ def ceillog2(x: uint64) -> int: ```python def get_custody_chunk_count(crosslink: Crosslink) -> int: crosslink_length = min(MAX_EPOCHS_PER_CROSSLINK, crosslink.end_epoch - crosslink.start_epoch) - chunks_per_epoch = 2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK - return crosslink_length * chunks_per_epoch + return crosslink_length * CHUNKS_PER_EPOCH ``` -### `get_bit` +### `legendre_bit` + +Returns the Legendre symbol `(a/q)` normalizes as a bit (i.e. `((a/q) + 1) // 2`). In a production implementation, a well-optimized library (e.g. GMP) should be used for this. ```python -def get_bit(serialization: bytes, i: uint64) -> int: - """ - Extract the bit in ``serialization`` at position ``i``. 
- """ - return (serialization[i // 8] >> (i % 8)) % 2 +def legendre_bit(a: int, q: int) -> int: + if a >= q: + return legendre_bit(a % q, q) + if a == 0: + return 0 + assert(q > a > 0 and q % 2 == 1) + t = 1 + n = q + while a != 0: + while a % 2 == 0: + a //= 2 + r = n % 8 + if r == 3 or r == 5: + t = -t + a, n = n, a + if a % 4 == n % 4 == 3: + t = -t + a %= n + if n == 1: + return (t + 1) // 2 + else: + return 0 +``` + +### ```custody_subchunkify``` + +Given one proof of custody chunk, returns the proof of custody subchunks of the correct sizes. + +```python +def custody_subchunkify(bytez: bytes) -> list: + bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_SUBCHUNK) + return [bytez[i:i + BYTES_PER_CUSTODY_SUBCHUNK] + for i in range(0, len(bytez), BYTES_PER_CUSTODY_SUBCHUNK)] ``` ### `get_custody_chunk_bit` ```python def get_custody_chunk_bit(key: BLSSignature, chunk: bytes) -> bool: - # TODO: Replace with something MPC-friendly, e.g. the Legendre symbol - return bool(get_bit(hash(key + chunk), 0)) + full_G2_element = bls_signature_to_G2(key) + s = full_G2_element[0].coeffs + bits = [legendre_bit((i + 1) * s[i % 2] + int.from_bytes(subchunk, "little"), BLS12_381_Q) + for i, subchunk in enumerate(custody_subchunkify(chunk))] + + return bool(sum(bits) % 2) ``` ### `get_chunk_bits_root` ```python -def get_chunk_bits_root(chunk_bits: bytes) -> Hash: - aggregated_bits = bytearray([0] * 32) - for i in range(0, len(chunk_bits), 32): - for j in range(32): - aggregated_bits[j] ^= chunk_bits[i + j] - return hash(aggregated_bits) +def get_chunk_bits_root(chunk_bits: Bitlist[MAX_CUSTODY_CHUNKS]) -> bit: + aggregated_bits = 0 + for i, b in enumerate(chunk_bits): + aggregated_bits += 2**i * b + return legendre_bit(aggregated_bits, BLS12_381_Q) ``` ### `get_randao_epoch_for_custody_period` @@ -312,10 +375,10 @@ def get_randao_epoch_for_custody_period(period: uint64, validator_index: Validat return Epoch(next_period_start + CUSTODY_PERIOD_TO_RANDAO_PADDING) ``` -### 
`get_reveal_period` +### `get_custody_period_for_validator` ```python -def get_reveal_period(state: BeaconState, validator_index: ValidatorIndex, epoch: Epoch=None) -> int: +def get_custody_period_for_validator(state: BeaconState, validator_index: ValidatorIndex, epoch: Epoch=None) -> int: ''' Return the reveal period for a given validator. ''' @@ -354,9 +417,9 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) -> Note that this function mutates ``state``. """ revealer = state.validators[reveal.revealer_index] - epoch_to_sign = get_randao_epoch_for_custody_period(revealer.next_custody_reveal_period, reveal.revealed_index) + epoch_to_sign = get_randao_epoch_for_custody_period(revealer.next_custody_secret_to_reveal, reveal.revealer_index) - assert revealer.next_custody_reveal_period < get_reveal_period(state, reveal.revealed_index) + assert revealer.next_custody_secret_to_reveal < get_custody_period_for_validator(state, reveal.revealer_index) # Revealed validator is active or exited, but not withdrawn assert is_slashable_validator(revealer, get_current_epoch(state)) @@ -374,15 +437,19 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) -> ) # Decrement max reveal lateness if response is timely - if revealer.next_custody_reveal_period == get_reveal_period(state, reveal.revealer_index) - 2: - revealer.max_reveal_lateness -= MAX_REVEAL_LATENESS_DECREMENT - revealer.max_reveal_lateness = max( - revealer.max_reveal_lateness, - get_reveal_period(state, reveal.revealed_index) - revealer.next_custody_reveal_period - ) + if epoch_to_sign + EPOCHS_PER_CUSTODY_PERIOD >= get_current_epoch(state): + if revealer.max_reveal_lateness >= MAX_REVEAL_LATENESS_DECREMENT: + revealer.max_reveal_lateness -= MAX_REVEAL_LATENESS_DECREMENT + else: + revealer.max_reveal_lateness = 0 + else: + revealer.max_reveal_lateness = max( + revealer.max_reveal_lateness, + get_current_epoch(state) - epoch_to_sign - EPOCHS_PER_CUSTODY_PERIOD + ) # 
Process reveal - revealer.next_custody_reveal_period += 1 + revealer.next_custody_secret_to_reveal += 1 # Reward Block Preposer proposer_index = get_beacon_proposer_index(state) @@ -520,7 +587,7 @@ For each `challenge` in `block.body.custody_bit_challenges`, run the following f ```python def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> None: attestation = challenge.attestation - epoch = compute_epoch_of_slot(attestation.data.slot) + epoch = attestation.data.target.epoch shard = attestation.data.crosslink.shard # Verify challenge signature @@ -533,7 +600,10 @@ def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) # Verify attestation is eligible for challenging responder = state.validators[challenge.responder_index] - assert epoch + responder.max_reveal_lateness <= get_reveal_period(state, challenge.responder_index) + assert get_current_epoch(state) <= get_randao_epoch_for_custody_period( + get_custody_period_for_validator(state, challenge.responder_index, epoch), + challenge.responder_index + ) + 2 * EPOCHS_PER_CUSTODY_PERIOD + responder.max_reveal_lateness # Verify the responder participated in the attestation attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bits) @@ -543,17 +613,18 @@ def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> assert record.challenger_index != challenge.challenger_index # Verify the responder custody key epoch_to_sign = get_randao_epoch_for_custody_period( - get_reveal_period(state, challenge.responder_index, epoch), + get_custody_period_for_validator(state, challenge.responder_index, epoch), challenge.responder_index, ) domain = get_domain(state, DOMAIN_RANDAO, epoch_to_sign) assert bls_verify(responder.pubkey, hash_tree_root(epoch_to_sign), challenge.responder_key, domain) # Verify the chunk count chunk_count = 
get_custody_chunk_count(attestation.data.crosslink) - # Verify the first bit of the hash of the chunk bits does not equal the custody bit + assert chunk_count == len(challenge.chunk_bits) + # Verify custody bit is incorrect committee = get_crosslink_committee(state, epoch, shard) custody_bit = attestation.custody_bits[committee.index(challenge.responder_index)] - assert custody_bit != get_bit(get_chunk_bits_root(challenge.chunk_bits), 0) + assert custody_bit != get_chunk_bits_root(challenge.chunk_bits) # Add new bit challenge record new_record = CustodyBitChallengeRecord( challenge_index=state.custody_challenge_index, @@ -636,16 +707,17 @@ def process_bit_challenge_response(state: BeaconState, root=challenge.data_root, ) # Verify the chunk bit leaf matches the challenge data - assert is_valid_merkle_branch( - leaf=response.chunk_bits_leaf, + assert is_valid_merkle_branch_with_mixin( + leaf=hash_tree_root(response.chunk_bits_leaf), branch=response.chunk_bits_branch, - depth=ceillog2(challenge.chunk_count) >> 8, + depth=ceillog2(MAX_CUSTODY_CHUNKS // 256), index=response.chunk_index // 256, - root=challenge.chunk_bits_merkle_root + root=challenge.chunk_bits_merkle_root, + mixin=challenge.chunk_count, ) # Verify the chunk bit does not match the challenge chunk bit assert (get_custody_chunk_bit(challenge.responder_key, response.chunk) - != get_bit(challenge.chunk_bits_leaf, response.chunk_index % 256)) + != response.chunk_bits_leaf[response.chunk_index % 256]) # Clear the challenge records = state.custody_bit_challenge_records records[records.index(challenge)] = CustodyBitChallengeRecord() @@ -665,8 +737,8 @@ Run `process_reveal_deadlines(state)` immediately after `process_registry_update # end insert @process_reveal_deadlines def process_reveal_deadlines(state: BeaconState) -> None: for index, validator in enumerate(state.validators): - deadline = validator.next_custody_reveal_period + (CUSTODY_RESPONSE_DEADLINE // EPOCHS_PER_CUSTODY_PERIOD) - if 
get_reveal_period(state, ValidatorIndex(index)) > deadline: + deadline = validator.next_custody_secret_to_reveal + (CUSTODY_RESPONSE_DEADLINE // EPOCHS_PER_CUSTODY_PERIOD) + if get_custody_period_for_validator(state, ValidatorIndex(index)) > deadline: slash_validator(state, ValidatorIndex(index)) ``` diff --git a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py index 400675888..ee1c04219 100644 --- a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py +++ b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py @@ -1,4 +1,4 @@ -from eth2spec.test.context import with_all_phases, with_state, bls_switch +from eth2spec.test.context import with_all_phases, with_state, bls_switch, with_phases from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.attestations import get_valid_attestation @@ -103,7 +103,7 @@ def test_on_attestation_same_slot(spec, state): run_on_attestation(spec, state, store, attestation, False) -@with_all_phases +@with_phases(['phase0']) @with_state @bls_switch def test_on_attestation_invalid_attestation(spec, state): diff --git a/test_libs/pyspec/eth2spec/test/helpers/custody.py b/test_libs/pyspec/eth2spec/test/helpers/custody.py index 36f23ad1c..4b7c8c97b 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/custody.py +++ b/test_libs/pyspec/eth2spec/test/helpers/custody.py @@ -1,6 +1,11 @@ from eth2spec.test.helpers.keys import privkeys from eth2spec.utils.bls import bls_sign, bls_aggregate_signatures from eth2spec.utils.hash_function import hash +from eth2spec.utils.ssz.ssz_typing import Bitlist, BytesN, Bitvector +from eth2spec.utils.ssz.ssz_impl import chunkify, pack, hash_tree_root +from eth2spec.utils.merkle_minimal import get_merkle_tree, get_merkle_proof + +BYTES_PER_CHUNK = 32 def get_valid_early_derived_secret_reveal(spec, state, epoch=None): @@ -13,7 +18,7 @@ def 
get_valid_early_derived_secret_reveal(spec, state, epoch=None): # Generate the secret that is being revealed reveal = bls_sign( - message_hash=spec.hash_tree_root(spec.Epoch(epoch)), + message_hash=hash_tree_root(spec.Epoch(epoch)), privkey=privkeys[revealed_index], domain=spec.get_domain( state=state, @@ -42,3 +47,128 @@ def get_valid_early_derived_secret_reveal(spec, state, epoch=None): masker_index=masker_index, mask=mask, ) + + +def get_valid_custody_key_reveal(spec, state, period=None): + current_epoch = spec.get_current_epoch(state) + revealer_index = spec.get_active_validator_indices(state, current_epoch)[0] + revealer = state.validators[revealer_index] + + if period is None: + period = revealer.next_custody_secret_to_reveal + + epoch_to_sign = spec.get_randao_epoch_for_custody_period(period, revealer_index) + + # Generate the secret that is being revealed + reveal = bls_sign( + message_hash=hash_tree_root(spec.Epoch(epoch_to_sign)), + privkey=privkeys[revealer_index], + domain=spec.get_domain( + state=state, + domain_type=spec.DOMAIN_RANDAO, + message_epoch=epoch_to_sign, + ), + ) + return spec.CustodyKeyReveal( + revealer_index=revealer_index, + reveal=reveal, + ) + + +def bitlist_from_int(max_len, num_bits, n): + return Bitlist[max_len](*[(n >> i) & 0b1 for i in range(num_bits)]) + + +def get_valid_bit_challenge(spec, state, attestation, invalid_custody_bit=False): + crosslink_committee = spec.get_crosslink_committee( + state, + attestation.data.target.epoch, + attestation.data.crosslink.shard, + ) + responder_index = crosslink_committee[0] + challenger_index = crosslink_committee[-1] + + epoch = spec.get_randao_epoch_for_custody_period(attestation.data.target.epoch, + responder_index) + + # Generate the responder key + responder_key = bls_sign( + message_hash=hash_tree_root(spec.Epoch(epoch)), + privkey=privkeys[responder_index], + domain=spec.get_domain( + state=state, + domain_type=spec.DOMAIN_RANDAO, + message_epoch=epoch, + ), + ) + + chunk_count = 
spec.get_custody_chunk_count(attestation.data.crosslink) + + chunk_bits = bitlist_from_int(spec.MAX_CUSTODY_CHUNKS, chunk_count, 0) + + n = 0 + while spec.get_chunk_bits_root(chunk_bits) == attestation.custody_bits[0] ^ invalid_custody_bit: + chunk_bits = bitlist_from_int(spec.MAX_CUSTODY_CHUNKS, chunk_count, n) + n += 1 + + return spec.CustodyBitChallenge( + responder_index=responder_index, + attestation=attestation, + challenger_index=challenger_index, + responder_key=responder_key, + chunk_bits=chunk_bits, + ) + + +def custody_chunkify(spec, x): + chunks = [bytes(x[i:i + spec.BYTES_PER_CUSTODY_CHUNK]) for i in range(0, len(x), spec.BYTES_PER_CUSTODY_CHUNK)] + chunks[-1] = chunks[-1].ljust(spec.BYTES_PER_CUSTODY_CHUNK, b"\0") + return chunks + + +def get_valid_custody_response(spec, state, bit_challenge, custody_data, challenge_index, invalid_chunk_bit=False): + chunks = custody_chunkify(spec, custody_data) + + chunk_index = len(chunks) - 1 + chunk_bit = spec.get_custody_chunk_bit(bit_challenge.responder_key, chunks[chunk_index]) + + while chunk_bit == bit_challenge.chunk_bits[chunk_index] ^ invalid_chunk_bit: + chunk_index -= 1 + chunk_bit = spec.get_custody_chunk_bit(bit_challenge.responder_key, chunks[chunk_index]) + + chunks_hash_tree_roots = [hash_tree_root(BytesN[spec.BYTES_PER_CUSTODY_CHUNK](chunk)) for chunk in chunks] + chunks_hash_tree_roots += [ + hash_tree_root(BytesN[spec.BYTES_PER_CUSTODY_CHUNK](b"\0" * spec.BYTES_PER_CUSTODY_CHUNK)) + for i in range(2 ** spec.ceillog2(len(chunks)) - len(chunks))] + data_tree = get_merkle_tree(chunks_hash_tree_roots) + + data_branch = get_merkle_proof(data_tree, chunk_index) + + bitlist_chunk_index = chunk_index // BYTES_PER_CHUNK + bitlist_chunks = chunkify(pack(bit_challenge.chunk_bits)) + bitlist_tree = get_merkle_tree(bitlist_chunks, pad_to=spec.MAX_CUSTODY_CHUNKS // 256) + bitlist_chunk_branch = get_merkle_proof(bitlist_tree, chunk_index // 256) + \ + [len(bit_challenge.chunk_bits).to_bytes(32, "little")] + + 
bitlist_chunk_index = chunk_index // 256 + + chunk_bits_leaf = Bitvector[256](bit_challenge.chunk_bits[bitlist_chunk_index * 256: + (bitlist_chunk_index + 1) * 256]) + + return spec.CustodyResponse( + challenge_index=challenge_index, + chunk_index=chunk_index, + chunk=BytesN[spec.BYTES_PER_CUSTODY_CHUNK](chunks[chunk_index]), + data_branch=data_branch, + chunk_bits_branch=bitlist_chunk_branch, + chunk_bits_leaf=chunk_bits_leaf, + ) + + +def get_custody_test_vector(bytelength): + ints = bytelength // 4 + return b"".join(i.to_bytes(4, "little") for i in range(ints)) + + +def get_custody_merkle_root(data): + return get_merkle_tree(chunkify(data))[-1][0] diff --git a/test_libs/pyspec/eth2spec/test/helpers/deposits.py b/test_libs/pyspec/eth2spec/test/helpers/deposits.py index 8dc6b3b58..89574c977 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/deposits.py +++ b/test_libs/pyspec/eth2spec/test/helpers/deposits.py @@ -47,7 +47,7 @@ def build_deposit(spec, deposit_data_list.append(deposit_data) root = hash_tree_root(List[spec.DepositData, 2**spec.DEPOSIT_CONTRACT_TREE_DEPTH](*deposit_data_list)) tree = calc_merkle_tree_from_leaves(tuple([d.hash_tree_root() for d in deposit_data_list])) - proof = list(get_merkle_proof(tree, item_index=index)) + [(index + 1).to_bytes(32, 'little')] + proof = list(get_merkle_proof(tree, item_index=index, tree_len=32)) + [(index + 1).to_bytes(32, 'little')] leaf = deposit_data.hash_tree_root() assert spec.is_valid_merkle_branch(leaf, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH + 1, index, root) deposit = spec.Deposit(proof=proof, data=deposit_data) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py index ab46a0d8c..84cb95ba0 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py @@ 
-363,7 +363,7 @@ def test_inconsistent_bits(spec, state): attestation = get_valid_attestation(spec, state) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - custody_bits = attestation.aggregation_bits[:] + custody_bits = attestation.custody_bits[:] custody_bits.append(False) attestation.custody_bits = custody_bits diff --git a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_bit_challenge.py b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_bit_challenge.py new file mode 100644 index 000000000..e4880555a --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_bit_challenge.py @@ -0,0 +1,347 @@ +from eth2spec.test.helpers.custody import ( + get_valid_bit_challenge, + get_valid_custody_response, + get_custody_test_vector, + get_custody_merkle_root +) +from eth2spec.test.helpers.attestations import ( + get_valid_attestation, +) +from eth2spec.utils.ssz.ssz_impl import hash_tree_root +from eth2spec.test.helpers.state import next_epoch, get_balance +from eth2spec.test.helpers.block import apply_empty_block +from eth2spec.test.context import ( + with_all_phases_except, + spec_state_test, + expect_assertion_error, +) +from eth2spec.test.phase_0.block_processing.test_process_attestation import run_attestation_processing + + +def run_bit_challenge_processing(spec, state, custody_bit_challenge, valid=True): + """ + Run ``process_bit_challenge``, yielding: + - pre-state ('pre') + - CustodyBitChallenge ('custody_bit_challenge') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + yield 'pre', state + yield 'custody_bit_challenge', custody_bit_challenge + + if not valid: + expect_assertion_error(lambda: spec.process_bit_challenge(state, custody_bit_challenge)) + yield 'post', None + return + + spec.process_bit_challenge(state, custody_bit_challenge) + + assert state.custody_bit_challenge_records[state.custody_challenge_index - 1].chunk_bits_merkle_root == \ + hash_tree_root(custody_bit_challenge.chunk_bits) + assert state.custody_bit_challenge_records[state.custody_challenge_index - 1].challenger_index == \ + custody_bit_challenge.challenger_index + assert state.custody_bit_challenge_records[state.custody_challenge_index - 1].responder_index == \ + custody_bit_challenge.responder_index + + yield 'post', state + + +def run_custody_response_processing(spec, state, custody_response, valid=True): + """ + Run ``process_bit_challenge_response``, yielding: + - pre-state ('pre') + - CustodyResponse ('custody_response') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + yield 'pre', state + yield 'custody_response', custody_response + + if not valid: + expect_assertion_error(lambda: spec.process_custody_response(state, custody_response)) + yield 'post', None + return + + # TODO: Add capability to also process chunk challenges, not only bit challenges + challenge = state.custody_bit_challenge_records[custody_response.challenge_index] + pre_slashed_balance = get_balance(state, challenge.challenger_index) + + spec.process_custody_response(state, custody_response) + + slashed_validator = state.validators[challenge.challenger_index] + + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + + assert get_balance(state, challenge.challenger_index) < pre_slashed_balance + yield 'post', state + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_challenge_appended(spec, state): + state.slot = spec.SLOTS_PER_EPOCH + attestation = get_valid_attestation(spec, state, signed=True) + + test_vector = get_custody_test_vector( + spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK) + shard_root = get_custody_merkle_root(test_vector) + attestation.data.crosslink.data_root = shard_root + attestation.custody_bits[0] = 0 + + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + _, _, _ = run_attestation_processing(spec, state, attestation) + + state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD + + challenge = get_valid_bit_challenge(spec, state, attestation) + + yield from run_bit_challenge_processing(spec, state, challenge) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_multiple_epochs_custody(spec, state): + state.slot = spec.SLOTS_PER_EPOCH * 3 + attestation = get_valid_attestation(spec, state, signed=True) + + test_vector = get_custody_test_vector( + 
spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK) + shard_root = get_custody_merkle_root(test_vector) + attestation.data.crosslink.data_root = shard_root + attestation.custody_bits[0] = 0 + + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + _, _, _ = run_attestation_processing(spec, state, attestation) + + state.slot += spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1) + + challenge = get_valid_bit_challenge(spec, state, attestation) + + yield from run_bit_challenge_processing(spec, state, challenge) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_many_epochs_custody(spec, state): + state.slot = spec.SLOTS_PER_EPOCH * 100 + attestation = get_valid_attestation(spec, state, signed=True) + + test_vector = get_custody_test_vector( + spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK) + shard_root = get_custody_merkle_root(test_vector) + attestation.data.crosslink.data_root = shard_root + attestation.custody_bits[0] = 0 + + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + _, _, _ = run_attestation_processing(spec, state, attestation) + + state.slot += spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1) + + challenge = get_valid_bit_challenge(spec, state, attestation) + + yield from run_bit_challenge_processing(spec, state, challenge) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_off_chain_attestation(spec, state): + state.slot = spec.SLOTS_PER_EPOCH + attestation = get_valid_attestation(spec, state, signed=True) + + test_vector = get_custody_test_vector( + spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK) + shard_root = get_custody_merkle_root(test_vector) + attestation.data.crosslink.data_root = shard_root + attestation.custody_bits[0] = 0 + + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD + + challenge = 
get_valid_bit_challenge(spec, state, attestation) + + yield from run_bit_challenge_processing(spec, state, challenge) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_invalid_custody_bit_challenge(spec, state): + state.slot = spec.SLOTS_PER_EPOCH + attestation = get_valid_attestation(spec, state, signed=True) + + test_vector = get_custody_test_vector( + spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK) + shard_root = get_custody_merkle_root(test_vector) + attestation.data.crosslink.data_root = shard_root + attestation.custody_bits[0] = 0 + + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + _, _, _ = run_attestation_processing(spec, state, attestation) + + state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD + + challenge = get_valid_bit_challenge(spec, state, attestation, invalid_custody_bit=True) + + yield from run_bit_challenge_processing(spec, state, challenge, valid=False) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_max_reveal_lateness_1(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + + test_vector = get_custody_test_vector( + spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK) + shard_root = get_custody_merkle_root(test_vector) + attestation.data.crosslink.data_root = shard_root + attestation.custody_bits[0] = 0 + + next_epoch(spec, state) + apply_empty_block(spec, state) + + _, _, _ = run_attestation_processing(spec, state, attestation) + + challenge = get_valid_bit_challenge(spec, state, attestation) + + responder_index = challenge.responder_index + + state.validators[responder_index].max_reveal_lateness = 3 + + for i in range(spec.get_randao_epoch_for_custody_period( + spec.get_custody_period_for_validator(state, responder_index), + responder_index + ) + 2 * spec.EPOCHS_PER_CUSTODY_PERIOD + 
state.validators[responder_index].max_reveal_lateness - 2): + next_epoch(spec, state) + apply_empty_block(spec, state) + + yield from run_bit_challenge_processing(spec, state, challenge) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_max_reveal_lateness_2(spec, state): + next_epoch(spec, state) + apply_empty_block(spec, state) + + attestation = get_valid_attestation(spec, state, signed=True) + + test_vector = get_custody_test_vector( + spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK) + shard_root = get_custody_merkle_root(test_vector) + attestation.data.crosslink.data_root = shard_root + attestation.custody_bits[0] = 0 + + next_epoch(spec, state) + apply_empty_block(spec, state) + + _, _, _ = run_attestation_processing(spec, state, attestation) + + challenge = get_valid_bit_challenge(spec, state, attestation) + + responder_index = challenge.responder_index + + state.validators[responder_index].max_reveal_lateness = 3 + + for i in range(spec.get_randao_epoch_for_custody_period( + spec.get_custody_period_for_validator(state, responder_index), + responder_index + ) + 2 * spec.EPOCHS_PER_CUSTODY_PERIOD + state.validators[responder_index].max_reveal_lateness - 1): + next_epoch(spec, state) + apply_empty_block(spec, state) + + yield from run_bit_challenge_processing(spec, state, challenge, False) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_custody_response(spec, state): + state.slot = spec.SLOTS_PER_EPOCH + attestation = get_valid_attestation(spec, state, signed=True) + + test_vector = get_custody_test_vector( + spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK) + shard_root = get_custody_merkle_root(test_vector) + attestation.data.crosslink.data_root = shard_root + attestation.custody_bits[0] = 0 + + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + _, _, _ = run_attestation_processing(spec, state, attestation) + + state.slot += 
spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD + + challenge = get_valid_bit_challenge(spec, state, attestation) + + _, _, _ = run_bit_challenge_processing(spec, state, challenge) + + bit_challenge_index = state.custody_challenge_index - 1 + + custody_response = get_valid_custody_response(spec, state, challenge, test_vector, bit_challenge_index) + + yield from run_custody_response_processing(spec, state, custody_response) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_custody_response_multiple_epochs(spec, state): + state.slot = spec.SLOTS_PER_EPOCH * 3 + attestation = get_valid_attestation(spec, state, signed=True) + + test_vector = get_custody_test_vector( + spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK) + shard_root = get_custody_merkle_root(test_vector) + attestation.data.crosslink.data_root = shard_root + attestation.custody_bits[0] = 0 + + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + _, _, _ = run_attestation_processing(spec, state, attestation) + + state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD + + challenge = get_valid_bit_challenge(spec, state, attestation) + + _, _, _ = run_bit_challenge_processing(spec, state, challenge) + + bit_challenge_index = state.custody_challenge_index - 1 + + custody_response = get_valid_custody_response(spec, state, challenge, test_vector, bit_challenge_index) + + yield from run_custody_response_processing(spec, state, custody_response) + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_custody_response_many_epochs(spec, state): + state.slot = spec.SLOTS_PER_EPOCH * 100 + attestation = get_valid_attestation(spec, state, signed=True) + + test_vector = get_custody_test_vector( + spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK) + shard_root = get_custody_merkle_root(test_vector) + attestation.data.crosslink.data_root = shard_root + attestation.custody_bits[0] = 0 + + 
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + _, _, _ = run_attestation_processing(spec, state, attestation) + + state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD + + challenge = get_valid_bit_challenge(spec, state, attestation) + + _, _, _ = run_bit_challenge_processing(spec, state, challenge) + + bit_challenge_index = state.custody_challenge_index - 1 + + custody_response = get_valid_custody_response(spec, state, challenge, test_vector, bit_challenge_index) + + yield from run_custody_response_processing(spec, state, custody_response) diff --git a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py new file mode 100644 index 000000000..f8860cf87 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py @@ -0,0 +1,118 @@ +from eth2spec.test.helpers.custody import get_valid_custody_key_reveal +from eth2spec.test.context import ( + with_all_phases_except, + spec_state_test, + expect_assertion_error, + always_bls, +) + + +def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=True): + """ + Run ``process_custody_key_reveal``, yielding: + - pre-state ('pre') + - custody_key_reveal ('custody_key_reveal') + - post-state ('post'). 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + yield 'pre', state + yield 'custody_key_reveal', custody_key_reveal + + if not valid: + expect_assertion_error(lambda: spec.process_custody_key_reveal(state, custody_key_reveal)) + yield 'post', None + return + + revealer_index = custody_key_reveal.revealer_index + + pre_next_custody_secret_to_reveal = \ + state.validators[revealer_index].next_custody_secret_to_reveal + pre_reveal_lateness = state.validators[revealer_index].max_reveal_lateness + + spec.process_custody_key_reveal(state, custody_key_reveal) + + post_next_custody_secret_to_reveal = \ + state.validators[revealer_index].next_custody_secret_to_reveal + post_reveal_lateness = state.validators[revealer_index].max_reveal_lateness + + assert post_next_custody_secret_to_reveal == pre_next_custody_secret_to_reveal + 1 + + if spec.get_current_epoch(state) > spec.get_randao_epoch_for_custody_period( + pre_next_custody_secret_to_reveal, + revealer_index + ) + spec.EPOCHS_PER_CUSTODY_PERIOD: + assert post_reveal_lateness > 0 + if pre_reveal_lateness == 0: + assert post_reveal_lateness == spec.get_current_epoch(state) - spec.get_randao_epoch_for_custody_period( + pre_next_custody_secret_to_reveal, + revealer_index + ) - spec.EPOCHS_PER_CUSTODY_PERIOD + else: + if pre_reveal_lateness > 0: + assert post_reveal_lateness < pre_reveal_lateness + + yield 'post', state + + +@with_all_phases_except(['phase0']) +@always_bls +@spec_state_test +def test_success(spec, state): + state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH + custody_key_reveal = get_valid_custody_key_reveal(spec, state) + + yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal) + + +@with_all_phases_except(['phase0']) +@always_bls +@spec_state_test +def test_reveal_too_early(spec, state): + custody_key_reveal = get_valid_custody_key_reveal(spec, state) + + yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) + + 
+@with_all_phases_except(['phase0']) +@always_bls +@spec_state_test +def test_wrong_period(spec, state): + custody_key_reveal = get_valid_custody_key_reveal(spec, state, period=5) + + yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) + + +@with_all_phases_except(['phase0']) +@always_bls +@spec_state_test +def test_late_reveal(spec, state): + state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 3 + 150 + custody_key_reveal = get_valid_custody_key_reveal(spec, state) + + yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal) + + +@with_all_phases_except(['phase0']) +@always_bls +@spec_state_test +def test_double_reveal(spec, state): + state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 2 + custody_key_reveal = get_valid_custody_key_reveal(spec, state) + + _, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal) + + yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) + + +@with_all_phases_except(['phase0']) +@always_bls +@spec_state_test +def test_max_decrement(spec, state): + state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 3 + 150 + custody_key_reveal = get_valid_custody_key_reveal(spec, state) + + _, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal) + + custody_key_reveal2 = get_valid_custody_key_reveal(spec, state) + + yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal2) diff --git a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py index 831ad35a5..63f4721b9 100644 --- a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py +++ b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py @@ -98,25 +98,21 @@ def 
test_reveal_with_custody_padding_minus_one(spec, state): @never_bls @spec_state_test def test_double_reveal(spec, state): + epoch = spec.get_current_epoch(state) + spec.RANDAO_PENALTY_EPOCHS randao_key_reveal1 = get_valid_early_derived_secret_reveal( spec, state, - spec.get_current_epoch(state) + spec.RANDAO_PENALTY_EPOCHS + 1, + epoch, ) - res = dict(run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal1)) - pre_state = res['pre'] - yield 'pre', pre_state - intermediate_state = res['post'] + _, _, _ = dict(run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal1)) randao_key_reveal2 = get_valid_early_derived_secret_reveal( spec, - intermediate_state, - spec.get_current_epoch(pre_state) + spec.RANDAO_PENALTY_EPOCHS + 1, + state, + epoch, ) - res = dict(run_early_derived_secret_reveal_processing(spec, intermediate_state, randao_key_reveal2, False)) - post_state = res['post'] - yield 'randao_key_reveal', [randao_key_reveal1, randao_key_reveal2] - yield 'post', post_state + + yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False) @with_all_phases_except(['phase0']) diff --git a/test_libs/pyspec/eth2spec/utils/bls.py b/test_libs/pyspec/eth2spec/utils/bls.py index d8a9ab5be..f40e5ab04 100644 --- a/test_libs/pyspec/eth2spec/utils/bls.py +++ b/test_libs/pyspec/eth2spec/utils/bls.py @@ -5,6 +5,7 @@ bls_active = True STUB_SIGNATURE = b'\x11' * 96 STUB_PUBKEY = b'\x22' * 48 +STUB_COORDINATES = bls.api.signature_to_G2(bls.sign(b"", 0, b"\0" * 8)) def only_with_bls(alt_return=None): @@ -47,3 +48,8 @@ def bls_aggregate_signatures(signatures): def bls_sign(message_hash, privkey, domain): return bls.sign(message_hash=message_hash, privkey=privkey, domain=domain) + + +@only_with_bls(alt_return=STUB_COORDINATES) +def bls_signature_to_G2(signature): + return bls.api.signature_to_G2(signature) diff --git a/test_libs/pyspec/eth2spec/utils/merkle_minimal.py b/test_libs/pyspec/eth2spec/utils/merkle_minimal.py 
index 9d7138d7d..aae7ff5c0 100644 --- a/test_libs/pyspec/eth2spec/utils/merkle_minimal.py +++ b/test_libs/pyspec/eth2spec/utils/merkle_minimal.py @@ -20,6 +20,13 @@ def calc_merkle_tree_from_leaves(values, layer_count=32): return tree +def get_merkle_tree(values, pad_to=None): + layer_count = (len(values) - 1).bit_length() if pad_to is None else (pad_to - 1).bit_length() + if len(values) == 0: + return zerohashes[layer_count] + return calc_merkle_tree_from_leaves(values, layer_count) + + def get_merkle_root(values, pad_to=1): if pad_to == 0: return zerohashes[0] @@ -29,9 +36,9 @@ def get_merkle_root(values, pad_to=1): return calc_merkle_tree_from_leaves(values, layer_count)[-1][0] -def get_merkle_proof(tree, item_index): +def get_merkle_proof(tree, item_index, tree_len=None): proof = [] - for i in range(32): + for i in range(tree_len if tree_len is not None else len(tree)): subindex = (item_index // 2**i) ^ 1 proof.append(tree[i][subindex] if subindex < len(tree[i]) else zerohashes[i]) return proof From e23b37842ea21f0ab0e6caa5141a59846ecbd62d Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 13 Aug 2019 08:09:44 +1000 Subject: [PATCH 075/250] Update specs/networking/p2p-interface.md Co-Authored-By: Danny Ryan --- specs/networking/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 84ad45022..0b2e3562a 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -301,7 +301,7 @@ Here `result` represents the 1-byte response code. The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time: -- `ssz`: the contents are [SSZ](https://github.com/ethereum/eth2.0-specs/blob/192442be51a8a6907d6401dffbf5c73cb220b760/specs/networking/libp2p-standardization.md#ssz-encoding) encoded. This encoding type MUST be supported by all clients. 
+- `ssz`: the contents are [SSZ](#ssz-encoding) encoded. This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocks` response would be an SSZ-encoded list of `BeaconBlock`s. All SSZ-Lists in the Req/Resp domain will have a max-list size of `SSZ_MAX_LIST_SIZE`. - `ssz_snappy`: the contents are SSZ encoded, and subsequently compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; and MUST be supported in mainnet. From c224af999e4895983d0257403fb9d85dd679b6b7 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 12 Aug 2019 19:05:16 -0600 Subject: [PATCH 076/250] MAX_PERSISTENT_COMMITTEE_SIZE -> TARGET_PERSISTENT_COMMITTEE_SIZE --- specs/core/1_shard-data-chains.md | 18 +++++++++--------- .../test/helpers/phase1/shard_block.py | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 079c0b4b7..8e1532f17 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -66,7 +66,7 @@ We define the following Python custom types for type hinting and readability: | Name | Value | | - | - | | `SHARD_SLOTS_PER_BEACON_SLOT` | `2**1` (= 2) | -| `MAX_PERSISTENT_COMMITTEE_SIZE` | `2**7` (= 128) | +| `TARGET_PERSISTENT_COMMITTEE_SIZE` | `2**7` (= 128) | | `SHARD_HEADER_SIZE` | `2**9` (= 512) | | `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) | | `SHARD_BLOCK_SIZE_LIMIT` | `2**16` (= 65,536) | @@ -151,7 +151,7 @@ class ShardBlockCore(Container): data_root: Hash state_root: Hash total_bytes: uint64 - attester_bitfield: Bitvector[MAX_PERSISTENT_COMMITTEE_SIZE * 2] + attester_bitfield: Bitvector[TARGET_PERSISTENT_COMMITTEE_SIZE * 2] ``` ### `ExtendedShardBlockCore` @@ -164,7 +164,7 @@ class ExtendedShardBlockCore(Container): data: Bytes[SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] state_root: 
Hash total_bytes: uint64 - attester_bitfield: Bitvector[MAX_PERSISTENT_COMMITTEE_SIZE * 2] + attester_bitfield: Bitvector[TARGET_PERSISTENT_COMMITTEE_SIZE * 2] ``` ### `ShardState` @@ -172,10 +172,10 @@ class ExtendedShardBlockCore(Container): ```python class ShardState(Container): history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_VECTOR] - earlier_committee_rewards: List[uint64, MAX_PERSISTENT_COMMITTEE_SIZE] - later_committee_rewards: List[uint64, MAX_PERSISTENT_COMMITTEE_SIZE] - earlier_committee_fees: List[Gwei, MAX_PERSISTENT_COMMITTEE_SIZE] - later_committee_fees: List[Gwei, MAX_PERSISTENT_COMMITTEE_SIZE] + earlier_committee_rewards: List[uint64, TARGET_PERSISTENT_COMMITTEE_SIZE] + later_committee_rewards: List[uint64, TARGET_PERSISTENT_COMMITTEE_SIZE] + earlier_committee_fees: List[Gwei, TARGET_PERSISTENT_COMMITTEE_SIZE] + later_committee_fees: List[Gwei, TARGET_PERSISTENT_COMMITTEE_SIZE] basefee: Gwei slot: ShardSlot shard: Shard @@ -230,7 +230,7 @@ def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> Sequ count=SHARD_COUNT, ) - return full_committee[:MAX_PERSISTENT_COMMITTEE_SIZE] + return full_committee[:TARGET_PERSISTENT_COMMITTEE_SIZE] ``` ### `get_persistent_committee` @@ -495,7 +495,7 @@ def shard_block_transition(state: ShardState, add_reward(state, beacon_state, validator_index, base_reward) attestations += 1 - for i in range(len(attester_committee), MAX_PERSISTENT_COMMITTEE_SIZE): + for i in range(len(attester_committee), TARGET_PERSISTENT_COMMITTEE_SIZE): assert block.core.attester_bitfield[i] is False or block.core.attester_bitfield[i] == 0 # TODO: FIX Bitvector assert bls_verify( diff --git a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py index 42e2765ea..b9c388a3f 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py @@ -57,7 +57,7 @@ def 
build_empty_shard_block(spec, attester_committee = spec.get_persistent_committee(beacon_state, shard_state.shard, block.core.slot) block.core.attester_bitfield = list( (True,) * len(attester_committee) + - (False,) * (spec.MAX_PERSISTENT_COMMITTEE_SIZE * 2 - len(attester_committee)) + (False,) * (spec.TARGET_PERSISTENT_COMMITTEE_SIZE * 2 - len(attester_committee)) ) block.signatures.attestation_signature = sign_shard_attestation( spec, From cb3e0f2146387920089309bc059dc6c91306531d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 13 Aug 2019 06:12:51 -0400 Subject: [PATCH 077/250] Update specs/core/1_beacon-chain-misc.md Co-Authored-By: Hsiao-Wei Wang --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index c1d6283ca..e82eb2a30 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -49,7 +49,7 @@ class ShardReceiptProof(Container): #### `pack_compact_validator` ```python -def pack_compact_validator(index: uint64, slashed: bool, balance_in_increments: uint64) -> uint64: +def pack_compact_validator(index: int, slashed: bool, balance_in_increments: int) -> int: """ Creates a compact validator object representing index, slashed status, and compressed balance. 
Takes as input balance-in-increments (// EFFECTIVE_BALANCE_INCREMENT) to preserve symmetry with From f9849ca562c2e4827ab2d6af3d2eecbfc582a8fe Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 13 Aug 2019 06:14:51 -0400 Subject: [PATCH 078/250] Update specs/core/1_beacon-chain-misc.md Co-Authored-By: Hsiao-Wei Wang --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index e82eb2a30..8c1cee900 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -61,7 +61,7 @@ def pack_compact_validator(index: int, slashed: bool, balance_in_increments: int #### `unpack_compact_validator` ```python -def unpack_compact_validator(compact_validator: uint64) -> Tuple[uint64, bool, uint64]: +def unpack_compact_validator(compact_validator: int) -> Tuple[int, bool, int]: """ Returns validator index, slashed, balance // EFFECTIVE_BALANCE_INCREMENT """ From 84558e0c4c72dca4f347e49b22741363157038be Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 14 Aug 2019 15:48:30 +0200 Subject: [PATCH 079/250] Add summaries and expansions to simple-serialize.md --- specs/simple-serialize.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 119022248..ecef6ddfc 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -217,6 +217,12 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signing_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`. 
+## Summaries and expansions + +Let `A` be an object derived from another object `B` by replacing some of the (possibly nested) values of `B` by their `hash_tree_root`. We say `A` is a "summary" of `B`, and that `B` is an "expansion" of `A`. Notice `hash_tree_root(A) == hash_tree_root(B)`. + +We similarly define "summary types" and "expansion types". For example, [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#beaconblock) is an expansion type of [`BeaconBlockHeader`](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#beaconblockheader). Notice that objects expand to at most one object of a given expansion type. For example, `BeaconBlockHeader` objects uniquely expand to `BeaconBlock` objects. + ## Implementations | Language | Project | Maintainer | Implementation | From 5d8c31cfb17feff2edffbba1830437569611bc42 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 14 Aug 2019 11:07:03 -0600 Subject: [PATCH 080/250] Update specs/simple-serialize.md --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index ecef6ddfc..50d091c07 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -221,7 +221,7 @@ Let `value` be a self-signed container object. The convention is that the signat Let `A` be an object derived from another object `B` by replacing some of the (possibly nested) values of `B` by their `hash_tree_root`. We say `A` is a "summary" of `B`, and that `B` is an "expansion" of `A`. Notice `hash_tree_root(A) == hash_tree_root(B)`. -We similarly define "summary types" and "expansion types". For example, [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#beaconblock) is an expansion type of [`BeaconBlockHeader`](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#beaconblockheader). 
Notice that objects expand to at most one object of a given expansion type. For example, `BeaconBlockHeader` objects uniquely expand to `BeaconBlock` objects. +We similarly define "summary types" and "expansion types". For example, [`BeaconBlock`](./core/0_beacon-chain.md#beaconblock) is an expansion type of [`BeaconBlockHeader`](./core/0_beacon-chain.md#beaconblockheader). Notice that objects expand to at most one object of a given expansion type. For example, `BeaconBlockHeader` objects uniquely expand to `BeaconBlock` objects. ## Implementations From 0cf454547b8b15bf88a73f466a5fbf6065ba13f4 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 14 Aug 2019 23:27:25 +0200 Subject: [PATCH 081/250] Update specs/core/1_beacon-chain-misc.md --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 8c1cee900..56f9a223f 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -205,7 +205,7 @@ Add the `shard_receipts` operation to `BeaconBlockBody`: ```python # begin insert @shard_receipts - shard_receipts: List[ShardReceipt, MAX_SHARD_RECEIPTS] + shard_receipts: List[ShardReceiptProof, MAX_SHARD_RECEIPTS] # end insert @shard_receipts ``` From 4f92e7fe3024bc1b545c96ac48ea7fd55cf71cb2 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 14 Aug 2019 23:31:48 +0200 Subject: [PATCH 082/250] Update specs/core/1_beacon-chain-misc.md --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 56f9a223f..8524ad51b 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -213,6 +213,6 @@ Use `process_shard_receipt` to process each receipt. 
```python # begin insert @process_shard_receipts - (body.shard_receipts, process_shard_receipts), + (body.shard_receipt_proofs, process_shard_receipt_proofs), # end insert @process_shard_receipts ``` From 49a008df605faf3ba94350b0d2a7a04fadb089b3 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 14 Aug 2019 23:31:55 +0200 Subject: [PATCH 083/250] Update specs/core/1_beacon-chain-misc.md --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 8524ad51b..3bb1fa201 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -209,7 +209,7 @@ Add the `shard_receipts` operation to `BeaconBlockBody`: # end insert @shard_receipts ``` -Use `process_shard_receipt` to process each receipt. +Use `process_shard_receipt_proof` to process each receipt. ```python # begin insert @process_shard_receipts From d424863fc0bd2166ad02707679ad80739da8a859 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 14 Aug 2019 23:32:01 +0200 Subject: [PATCH 084/250] Update specs/core/1_beacon-chain-misc.md --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 3bb1fa201..3b32f39e6 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -205,7 +205,7 @@ Add the `shard_receipts` operation to `BeaconBlockBody`: ```python # begin insert @shard_receipts - shard_receipts: List[ShardReceiptProof, MAX_SHARD_RECEIPTS] + shard_receipt_proofs: List[ShardReceiptProof, MAX_SHARD_RECEIPTS] # end insert @shard_receipts ``` From fd24308d19b36a4343185047e233a23fdaacd62b Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 14 Aug 2019 23:32:10 +0200 Subject: [PATCH 085/250] Update specs/core/1_beacon-chain-misc.md --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 3b32f39e6..be376a6ae 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -201,7 +201,7 @@ def update_persistent_committee(state: BeaconState) -> None: ### Shard receipt processing -Add the `shard_receipts` operation to `BeaconBlockBody`: +Add the `shard_receipt_proofs` operation to `BeaconBlockBody`: ```python # begin insert @shard_receipts From 8255091e409968844d24e8e3ceed37c2aee3e729 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 14 Aug 2019 23:35:26 +0200 Subject: [PATCH 086/250] ShardReceipt -> ShardReceiptProof --- specs/core/1_beacon-chain-misc.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index be376a6ae..701b38db0 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -18,7 +18,7 @@ - [concat_generalized_indices](#concat_generalized_indices) - [compute_historical_state_generalized_index](#compute_historical_state_generalized_index) - [get_generalized_index_of_crosslink_header](#get_generalized_index_of_crosslink_header) - - [process_shard_receipt](#process_shard_receipt) + - [process_shard_receipt_proof](#process_shard_receipt_proof) - [Changes](#changes) - [Persistent committees](#persistent-committees) - [Shard receipt processing](#shard-receipt-processing) @@ -134,10 +134,10 @@ def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex: return GeneralizedIndex(MAX_CROSSLINK_SIZE // SHARD_HEADER_SIZE + index) ``` -#### `process_shard_receipt` +#### `process_shard_receipt_proof` ```python -def process_shard_receipt(state: BeaconState, receipt_proof: ShardReceiptProof): +def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptProof): """ Processes a ShardReceipt object. 
""" From c7af2496ef0f6cf113b52508e6b352378ea14746 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 14 Aug 2019 23:44:19 +0200 Subject: [PATCH 087/250] Update specs/light_client/merkle_proofs.md Co-Authored-By: Diederik Loerakker --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index e0be4f070..698dce4e6 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -229,7 +229,7 @@ def get_helper_indices(indices: List[GeneralizedIndex]) -> List[GeneralizedIndex x for x in all_indices if not (generalized_index_child(x, 0) in all_indices and generalized_index_child(x, 1) in all_indices) and not (x in indices) - ])[::-1] + ], reverse=True) ``` Now we provide the Merkle proof verification functions. First, for single item proofs: From 72103e9deb0aeec8ead093913265b920c3452cbe Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 14 Aug 2019 23:44:26 +0200 Subject: [PATCH 088/250] Update specs/light_client/merkle_proofs.md Co-Authored-By: Diederik Loerakker --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 698dce4e6..21115dd27 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -256,7 +256,7 @@ def verify_merkle_multiproof(leaves: Sequence[Hash], proof: Sequence[Hash], indi **{index:node for index, node in zip(indices, leaves)}, **{index:node for index, node in zip(helper_indices, proof)} } - keys = sorted(objects.keys())[::-1] + keys = sorted(objects.keys(), reverse=True) pos = 0 while pos < len(keys): k = keys[pos] From 722a69467fadc3fea659e2943bbda9cf976c5d31 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 15 Aug 2019 15:07:44 +0800 Subject: [PATCH 089/250] Add `light_client/merkle_proofs.md` to executable stack. 
Errors revealed. --- Makefile | 2 +- scripts/build_spec.py | 50 +++++++++++++++++++++++++++---------------- 2 files changed, 32 insertions(+), 20 deletions(-) diff --git a/Makefile b/Makefile index eeaed8898..fb93908cc 100644 --- a/Makefile +++ b/Makefile @@ -89,7 +89,7 @@ $(PY_SPEC_PHASE_0_TARGETS): $(PY_SPEC_PHASE_0_DEPS) python3 $(SCRIPT_DIR)/build_spec.py -p0 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/validator/0_beacon-chain-validator.md $@ $(PY_SPEC_DIR)/eth2spec/phase1/spec.py: $(PY_SPEC_PHASE_1_DEPS) - python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $(SPEC_DIR)/core/0_fork-choice.md $@ + python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $(SPEC_DIR)/light_client/merkle_proofs.md $@ CURRENT_DIR = ${CURDIR} diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 88c3d46fb..9c5263399 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -281,17 +281,23 @@ def build_phase0_spec(phase0_sourcefile: str, fork_choice_sourcefile: str, def build_phase1_spec(phase0_sourcefile: str, + fork_choice_sourcefile: str, phase1_custody_sourcefile: str, phase1_shard_sourcefile: str, - fork_choice_sourcefile: str, + merkle_proofs_sourcefile: str, outfile: str=None) -> Optional[str]: - phase0_spec = get_spec(phase0_sourcefile) - remove_for_phase1(phase0_spec[0]) - phase1_custody = get_spec(phase1_custody_sourcefile) - phase1_shard_data = get_spec(phase1_shard_sourcefile) - fork_choice_spec = get_spec(fork_choice_sourcefile) - spec_objects = phase0_spec - for value in [phase1_custody, phase1_shard_data, fork_choice_spec]: + all_sourcefiles = ( + phase0_sourcefile, + fork_choice_sourcefile, + phase1_custody_sourcefile, + phase1_shard_sourcefile, + merkle_proofs_sourcefile, + ) + all_spescs = 
[get_spec(spec) for spec in all_sourcefiles] + for spec in all_spescs: + remove_for_phase1(spec[0]) + spec_objects = all_spescs[0] + for value in all_spescs[1:]: spec_objects = combine_spec_objects(spec_objects, value) spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS) if outfile is not None: @@ -304,17 +310,18 @@ if __name__ == '__main__': description = ''' Build the specs from the md docs. If building phase 0: - 1st argument is input spec.md - 2nd argument is input fork_choice.md - 3rd argument is input validator_guide.md + 1st argument is input /core/0_beacon-chain.md + 2nd argument is input /core/0_fork-choice.md + 3rd argument is input /core/0_beacon-chain-validator.md 4th argument is output spec.py If building phase 1: - 1st argument is input spec_phase0.md - 2nd argument is input spec_phase1_custody.md - 3rd argument is input spec_phase1_shard_data.md - 4th argument is input fork_choice.md - 5th argument is output spec.py + 1st argument is input /core/0_beacon-chain.md + 2nd argument is input /core/0_fork-choice.md + 3rd argument is input /core/1_custody-game.md + 4th argument is input /core/1_shard-data-chains.md + 5th argument is input /light_client/merkle_proofs.md + 6th argument is output spec.py ''' parser = ArgumentParser(description=description) parser.add_argument("-p", "--phase", dest="phase", type=int, default=0, help="Build for phase #") @@ -327,10 +334,15 @@ If building phase 1: else: print(" Phase 0 requires spec, forkchoice, and v-guide inputs as well as an output file.") elif args.phase == 1: - if len(args.files) == 5: + if len(args.files) == 6: build_phase1_spec(*args.files) else: - print(" Phase 1 requires 4 input files as well as an output file: " - + "(phase0.md and phase1.md, phase1.md, fork_choice.md, output.py)") + print( + " Phase 1 requires input files as well as an output file:\n" + "\t core/phase_0: (0_beacon-chain.md, 0_fork-choice.md)\n" + "\t core/phase_1: (1_custody-game.md, 1_shard-data-chains.md)\n" + "\t light_client: 
(merkle_proofs.md)\n" + "\t and output.py" + ) else: print("Invalid phase: {0}".format(args.phase)) From dc933914213895e1eda337d8408a552e0aeb0548 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 15 Aug 2019 15:30:01 +0800 Subject: [PATCH 090/250] Make flake8 check pass --- scripts/build_spec.py | 9 +- specs/light_client/merkle_proofs.md | 130 +++++++++++++++++++--------- 2 files changed, 96 insertions(+), 43 deletions(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 9c5263399..410db2f21 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -39,6 +39,9 @@ from eth2spec.utils.hash_function import hash PHASE1_IMPORTS = '''from typing import ( Any, Dict, Optional, Set, Sequence, MutableSequence, Tuple, Union, ) +from math import ( + log2, +) from dataclasses import ( dataclass, @@ -51,8 +54,10 @@ from eth2spec.utils.ssz.ssz_impl import ( is_zero, ) from eth2spec.utils.ssz.ssz_typing import ( - uint64, bit, boolean, Container, List, Vector, Bytes, BytesN, - Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector, + BasicValue, Elements, BaseList, SSZType, + Container, List, Vector, Bytes, BytesN, Bitlist, Bitvector, Bits, + Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, + uint64, bit, boolean, ) from eth2spec.utils.bls import ( bls_aggregate_pubkeys, diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 21115dd27..009f5a66f 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -6,17 +6,54 @@ - [Merkle proof formats](#merkle-proof-formats) - - [Table of contents](#table-of-contents) - - [Constants](#constants) - - [Generalized Merkle tree index](#generalized-merkle-tree-index) - - [SSZ object to index](#ssz-object-to-index) - - [Merkle multiproofs](#merkle-multiproofs) - - [MerklePartial](#merklepartial) - - [`SSZMerklePartial`](#sszmerklepartial) - - [Proofs for execution](#proofs-for-execution) + - [Table of 
contents](#table-of-contents) + - [Custom types](#custom-types) + - [Helpers](#helpers) + - [Generalized Merkle tree index](#generalized-merkle-tree-index) + - [SSZ object to index](#ssz-object-to-index) + - [Helpers for generalized indices](#helpers-for-generalized-indices) + - [`concat_generalized_indices`](#concat_generalized_indices) + - [`get_generalized_index_length`](#get_generalized_index_length) + - [`get_generalized_index_bit`](#get_generalized_index_bit) + - [`generalized_index_sibling`](#generalized_index_sibling) + - [`generalized_index_child`](#generalized_index_child) + - [`generalized_index_parent`](#generalized_index_parent) + - [Merkle multiproofs](#merkle-multiproofs) +## Custom types + +We define the following Python custom types for type hinting and readability: + +| Name | SSZ equivalent | Description | +| - | - | - | +| `GeneralizedIndex` | `uint64` | the index of a node in a binary Merkle tree | + +## Helpers + +```python +def get_next_power_of_two(x: int) -> int: + """ + Get next power of 2 >= the input. + """ + if x <= 2: + return x + elif x % 2 == 0: + return 2 * get_next_power_of_two(x // 2) + else: + return 2 * get_next_power_of_two((x + 1) // 2) +``` + +```python +def get_previous_power_of_two(x: int) -> int: + """ + Get the previous power of 2 >= the input. + """ + assert x >= 2 + return get_next_power_of_two(x) // 2 +``` + ## Generalized Merkle tree index In a binary Merkle tree, we define a "generalized index" of a node as `2**depth + index`. 
Visually, this looks as follows: @@ -32,8 +69,8 @@ Note that the generalized index has the convenient property that the two childre ```python def merkle_tree(leaves: List[Bytes32]) -> List[Bytes32]: - padded_length = next_power_of_2(len(leaves)) - o = [ZERO_HASH] * padded_length + leaves + [ZERO_HASH] * (padded_length - len(leaves)) + padded_length = get_next_power_of_two(len(leaves)) + o = [Hash()] * padded_length + leaves + [Hash()] * (padded_length - len(leaves)) for i in range(len(leaves) - 1, 0, -1): o[i] = hash(o[i * 2] + o[i * 2 + 1]) return o @@ -61,25 +98,27 @@ We can now define a concept of a "path", a way of describing a function that tak ```python def item_length(typ: SSZType) -> int: """ - Returns the number of bytes in a basic type, or 32 (a full hash) for compound types. + Return the number of bytes in a basic type, or 32 (a full hash) for compound types. """ if issubclass(typ, BasicValue): return typ.byte_len else: return 32 - - -def get_elem_type(typ: ComplexType, index: Union[int, str]) -> Type: +``` + +```python +def get_elem_type(typ: Union[BaseList, Container], index: Union[int, str]) -> SSZType: """ - Returns the type of the element of an object of the given type with the given index + Return the type of the element of an object of the given type with the given index or member variable name (eg. `7` for `x[7]`, `"foo"` for `x.foo`) """ - return typ.get_fields()[index] if issubclass(typ, Container) else typ.elem_type - + return typ.get_fields()[index] if issubclass(typ, Container) else typ.elem_type +``` +```python def chunk_count(typ: SSZType) -> int: """ - Returns the number of hashes needed to represent the top-level elements in the given type + Return the number of hashes needed to represent the top-level elements in the given type (eg. `x.foo` or `x[7]` but not `x[7].bar` or `x.foo.baz`). In all cases except lists/vectors of basic types, this is simply the number of top-level elements, as each element gets one hash. 
For lists/vectors of basic types, it is often fewer because multiple basic elements @@ -96,13 +135,16 @@ def chunk_count(typ: SSZType) -> int: return len(typ.get_fields()) else: raise Exception(f"Type not supported: {typ}") +``` - +```python def get_item_position(typ: SSZType, index: Union[int, str]) -> Tuple[int, int, int]: """ - Returns three variables: (i) the index of the chunk in which the given element of the item is - represented, (ii) the starting byte position within the chunk, (iii) the ending byte position within the chunk. For example for - a 6-item list of uint64 values, index=2 will return (0, 16, 24), index=5 will return (1, 8, 16) + Return three variables: + (i) the index of the chunk in which the given element of the item is represented; + (ii) the starting byte position within the chunk; + (iii) the ending byte position within the chunk. + For example: for a 6-item list of uint64 values, index=2 will return (0, 16, 24), index=5 will return (1, 8, 16) """ if issubclass(typ, Elements): start = index * item_length(typ.elem_type) @@ -111,9 +153,10 @@ def get_item_position(typ: SSZType, index: Union[int, str]) -> Tuple[int, int, i return typ.get_field_names().index(index), 0, item_length(get_elem_type(typ, index)) else: raise Exception("Only lists/vectors/containers supported") +``` - -def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> GeneralizedIndex: +```python +def get_generalized_index(typ: SSZType, path: List[Union[int, str]]) -> GeneralizedIndex: """ Converts a path (eg. `[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree. 
@@ -125,7 +168,7 @@ def get_generalized_index(typ: Type, path: List[Union[int, str]]) -> Generalized typ, root = uint64, root * 2 + 1 if issubclass(typ, (List, Bytes)) else None else: pos, _, _ = get_item_position(typ, p) - root = root * (2 if issubclass(typ, (List, Bytes)) else 1) * next_power_of_two(chunk_count(typ)) + pos + root = root * (2 if issubclass(typ, (List, Bytes)) else 1) * get_next_power_of_two(chunk_count(typ)) + pos typ = get_elem_type(typ, p) return root ``` @@ -144,7 +187,7 @@ def concat_generalized_indices(*indices: Sequence[GeneralizedIndex]) -> Generali """ o = GeneralizedIndex(1) for i in indices: - o = o * get_previous_power_of_2(i) + (i - get_previous_power_of_2(i)) + o = o * get_previous_power_of_two(i) + (i - get_previous_power_of_two(i)) return o ``` @@ -152,41 +195,41 @@ def concat_generalized_indices(*indices: Sequence[GeneralizedIndex]) -> Generali ```python def get_generalized_index_length(index: GeneralizedIndex) -> int: - """ - Returns the length of a path represented by a generalized index. - """ - return log2(index) + """ + Return the length of a path represented by a generalized index. + """ + return log2(index) ``` #### `get_generalized_index_bit` ```python def get_generalized_index_bit(index: GeneralizedIndex, position: int) -> bool: - """ - Returns the given bit of a generalized index. - """ - return (index & (1 << position)) > 0 + """ + Return the given bit of a generalized index. 
+ """ + return (index & (1 << position)) > 0 ``` #### `generalized_index_sibling` ```python def generalized_index_sibling(index: GeneralizedIndex) -> GeneralizedIndex: - return index ^ 1 + return index ^ 1 ``` #### `generalized_index_child` ```python def generalized_index_child(index: GeneralizedIndex, right_side: bool) -> GeneralizedIndex: - return index * 2 + right_side + return index * 2 + right_side ``` #### `generalized_index_parent` ```python def generalized_index_parent(index: GeneralizedIndex) -> GeneralizedIndex: - return index // 2 + return index // 2 ``` ## Merkle multiproofs @@ -214,7 +257,9 @@ def get_branch_indices(tree_index: GeneralizedIndex) -> List[GeneralizedIndex]: while o[-1] > 1: o.append(generalized_index_sibling(generalized_index_parent(o[-1]))) return o[:-1] +``` +```python def get_helper_indices(indices: List[GeneralizedIndex]) -> List[GeneralizedIndex]: """ Get the generalized indices of all "extra" chunks in the tree needed to prove the chunks with the given @@ -224,7 +269,7 @@ def get_helper_indices(indices: List[GeneralizedIndex]) -> List[GeneralizedIndex all_indices = set() for index in indices: all_indices = all_indices.union(set(get_branch_indices(index) + [index])) - + return sorted([ x for x in all_indices if not (generalized_index_child(x, 0) in all_indices and generalized_index_child(x, 1) in all_indices) and not @@ -248,13 +293,16 @@ def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedInd Now for multi-item proofs: ```python -def verify_merkle_multiproof(leaves: Sequence[Hash], proof: Sequence[Hash], indices: Sequence[GeneralizedIndex], root: Hash) -> bool: +def verify_merkle_multiproof(leaves: Sequence[Hash], + proof: Sequence[Hash], + indices: Sequence[GeneralizedIndex], + root: Hash) -> bool: assert len(leaves) == len(indices) helper_indices = get_helper_indices(indices) assert len(proof) == len(helper_indices) objects = { - **{index:node for index, node in zip(indices, leaves)}, - **{index:node for 
index, node in zip(helper_indices, proof)} + **{index: node for index, node in zip(indices, leaves)}, + **{index: node for index, node in zip(helper_indices, proof)} } keys = sorted(objects.keys(), reverse=True) pos = 0 From d88a83d48265656be5542a51cc6ddc5d444ceffc Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 15 Aug 2019 16:01:53 +0800 Subject: [PATCH 091/250] Fix most mypy errors --- specs/light_client/merkle_proofs.md | 39 +++++++++++++++++------------ 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 009f5a66f..9d530f7c2 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -156,19 +156,20 @@ def get_item_position(typ: SSZType, index: Union[int, str]) -> Tuple[int, int, i ``` ```python -def get_generalized_index(typ: SSZType, path: List[Union[int, str]]) -> GeneralizedIndex: +def get_generalized_index(typ: SSZType, path: List[Union[int, str]]) -> Optional[GeneralizedIndex]: """ Converts a path (eg. `[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree. 
""" - root = 1 + root: Optional[GeneralizedIndex] = GeneralizedIndex(1) for p in path: assert not issubclass(typ, BasicValue) # If we descend to a basic type, the path cannot continue further if p == '__len__': typ, root = uint64, root * 2 + 1 if issubclass(typ, (List, Bytes)) else None else: pos, _, _ = get_item_position(typ, p) - root = root * (2 if issubclass(typ, (List, Bytes)) else 1) * get_next_power_of_two(chunk_count(typ)) + pos + base_index = (GeneralizedIndex(2) if issubclass(typ, (List, Bytes)) else GeneralizedIndex(1)) + root = root * base_index * get_next_power_of_two(chunk_count(typ)) + pos typ = get_elem_type(typ, p) return root ``` @@ -180,14 +181,14 @@ _Usage note: functions outside this section should manipulate generalized indice #### `concat_generalized_indices` ```python -def concat_generalized_indices(*indices: Sequence[GeneralizedIndex]) -> GeneralizedIndex: +def concat_generalized_indices(indices: Sequence[GeneralizedIndex]) -> GeneralizedIndex: """ Given generalized indices i1 for A -> B, i2 for B -> C .... i_n for Y -> Z, returns the generalized index for A -> Z. """ o = GeneralizedIndex(1) for i in indices: - o = o * get_previous_power_of_two(i) + (i - get_previous_power_of_two(i)) + o = GeneralizedIndex(o * get_previous_power_of_two(i) + (i - get_previous_power_of_two(i))) return o ``` @@ -198,7 +199,7 @@ def get_generalized_index_length(index: GeneralizedIndex) -> int: """ Return the length of a path represented by a generalized index. 
""" - return log2(index) + return int(log2(index)) ``` #### `get_generalized_index_bit` @@ -215,21 +216,21 @@ def get_generalized_index_bit(index: GeneralizedIndex, position: int) -> bool: ```python def generalized_index_sibling(index: GeneralizedIndex) -> GeneralizedIndex: - return index ^ 1 + return GeneralizedIndex(index ^ 1) ``` #### `generalized_index_child` ```python def generalized_index_child(index: GeneralizedIndex, right_side: bool) -> GeneralizedIndex: - return index * 2 + right_side + return GeneralizedIndex(index * 2 + right_side) ``` #### `generalized_index_parent` ```python def generalized_index_parent(index: GeneralizedIndex) -> GeneralizedIndex: - return index // 2 + return GeneralizedIndex(index // 2) ``` ## Merkle multiproofs @@ -266,14 +267,17 @@ def get_helper_indices(indices: List[GeneralizedIndex]) -> List[GeneralizedIndex generalized indices. Note that the decreasing order is chosen deliberately to ensure equivalence to the order of hashes in a regular single-item Merkle proof in the single-item case. 
""" - all_indices = set() + all_indices: Set[GeneralizedIndex] = set() for index in indices: all_indices = all_indices.union(set(get_branch_indices(index) + [index])) return sorted([ - x for x in all_indices if not - (generalized_index_child(x, 0) in all_indices and generalized_index_child(x, 1) in all_indices) and not - (x in indices) + x for x in all_indices if ( + not ( + generalized_index_child(x, GeneralizedIndex(0)) in all_indices and + generalized_index_child(x, GeneralizedIndex(1)) in all_indices + ) and not (x in indices) + ) ], reverse=True) ``` @@ -309,10 +313,13 @@ def verify_merkle_multiproof(leaves: Sequence[Hash], while pos < len(keys): k = keys[pos] if k in objects and k ^ 1 in objects and k // 2 not in objects: - objects[k // 2] = hash(objects[(k | 1) ^ 1] + objects[k | 1]) - keys.append(k // 2) + objects[GeneralizedIndex(k // 2)] = hash( + objects[GeneralizedIndex((k | 1) ^ 1)] + + objects[GeneralizedIndex(k | 1)] + ) + keys.append(GeneralizedIndex(k // 2)) pos += 1 - return objects[1] == root + return objects[GeneralizedIndex(1)] == root ``` Note that the single-item proof is a special case of a multi-item proof; a valid single-item proof verifies correctly when put into the multi-item verification function (making the natural trivial changes to input arguments, `index -> [index]` and `leaf -> [leaf]`). From 0f52d460a5649805296e497bb7820eb55b22caef Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 15 Aug 2019 16:14:07 +0800 Subject: [PATCH 092/250] Use the `get_previous_power_of_2` function in ethereum/eth2.0-specs#1323 --- specs/light_client/merkle_proofs.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 9d530f7c2..faad40c45 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -50,8 +50,7 @@ def get_previous_power_of_two(x: int) -> int: """ Get the previous power of 2 >= the input. 
""" - assert x >= 2 - return get_next_power_of_two(x) // 2 + return x if x <= 2 else 2 * get_previous_power_of_2(x // 2) ``` ## Generalized Merkle tree index From 2741a5f33dfa0a19fb2185705e5bd1cc4435baa0 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 15 Aug 2019 18:26:22 +0800 Subject: [PATCH 093/250] Minor fixes --- scripts/build_spec.py | 2 +- specs/light_client/merkle_proofs.md | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 410db2f21..07306af8a 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -343,7 +343,7 @@ If building phase 1: build_phase1_spec(*args.files) else: print( - " Phase 1 requires input files as well as an output file:\n" + " Phase 1 requires input files as well as an output file:\n" "\t core/phase_0: (0_beacon-chain.md, 0_fork-choice.md)\n" "\t core/phase_1: (1_custody-game.md, 1_shard-data-chains.md)\n" "\t light_client: (merkle_proofs.md)\n" diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index faad40c45..d6c6dee62 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -8,7 +8,7 @@ - [Merkle proof formats](#merkle-proof-formats) - [Table of contents](#table-of-contents) - [Custom types](#custom-types) - - [Helpers](#helpers) + - [Helper functions](#helper-functions) - [Generalized Merkle tree index](#generalized-merkle-tree-index) - [SSZ object to index](#ssz-object-to-index) - [Helpers for generalized indices](#helpers-for-generalized-indices) @@ -30,7 +30,7 @@ We define the following Python custom types for type hinting and readability: | - | - | - | | `GeneralizedIndex` | `uint64` | the index of a node in a binary Merkle tree | -## Helpers +## Helper functions ```python def get_next_power_of_two(x: int) -> int: @@ -67,7 +67,7 @@ In a binary Merkle tree, we define a "generalized index" of a node as `2**depth Note that the generalized index has the convenient 
property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function: ```python -def merkle_tree(leaves: List[Bytes32]) -> List[Bytes32]: +def merkle_tree(leaves: Squence[Hash]) -> Squence[Hash]: padded_length = get_next_power_of_two(len(leaves)) o = [Hash()] * padded_length + leaves + [Hash()] * (padded_length - len(leaves)) for i in range(len(leaves) - 1, 0, -1): From e4a18f6fa416e854c5a5337df7b5da8ff812301d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 15 Aug 2019 12:51:11 +0200 Subject: [PATCH 094/250] Made persistent committee roots a vector --- specs/core/1_beacon-chain-misc.md | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 701b38db0..0c23e41b7 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -6,6 +6,7 @@ - [Phase 1 miscellaneous beacon chain changes](#phase-1-miscellaneous-beacon-chain-changes) - [Table of contents](#table-of-contents) + - [Configuration](#configuration) - [Classes](#classes) - [CompactCommittee](#compactcommittee) - [ShardReceiptProof](#shardreceiptproof) @@ -25,6 +26,13 @@ +## Configuration + +| Name | Value | Unit | Duration +| - | - | - | - | +| `MAX_SHARD_RECEIPT_PROOFS` | `2**0` (= 1) | - | - | +| `PERSISTENT_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months | + ## Classes #### `CompactCommittee` @@ -171,9 +179,7 @@ Add to the beacon state the following fields: ```python # begin insert @persistent_committee_fields - previous_persistent_committee_root: Hash - current_persistent_committee_root: Hash - next_persistent_committee_root: Hash + persistent_committee_roots: Vector[Hash, PERSISTENT_COMMITTEE_ROOT_LENGTH] next_shard_receipt_period: Vector[uint, SHARD_COUNT] # end insert @persistent_committee_fields ``` @@ -190,13 +196,12 @@ def 
update_persistent_committee(state: BeaconState) -> None: Updates persistent committee roots at boundary blocks. """ if (get_current_epoch(state) + 1) % EPOCHS_PER_SHARD_PERIOD == 0: - state.previous_persistent_committee_root = state.current_persistent_committee_root - state.current_persistent_committee_root = state.next_persistent_committee_root + period = (get_current_epoch(state) + 1) // EPOCHS_PER_SHARD_PERIOD committees = Vector[CompactCommittee, SHARD_COUNT]([ committee_to_compact_committee(state, get_period_committee(state, get_current_epoch(state) + 1, i)) for i in range(SHARD_COUNT) ]) - state.next_persistent_committee_root = hash_tree_root(committees) + state.persistent_committee_roots[period % PERSISTENT_COMMITTEE_ROOT_LENGTH] = hash_tree_root(committees) ``` ### Shard receipt processing @@ -205,7 +210,7 @@ Add the `shard_receipt_proofs` operation to `BeaconBlockBody`: ```python # begin insert @shard_receipts - shard_receipt_proofs: List[ShardReceiptProof, MAX_SHARD_RECEIPTS] + shard_receipt_proofs: List[ShardReceiptProof, MAX_SHARD_RECEIPT_PROOFS] # end insert @shard_receipts ``` From 8e1333aad198025cb43640b07f52e7e8deeaa76b Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 15 Aug 2019 19:01:40 +0800 Subject: [PATCH 095/250] Add `SSZVariableName` custom type --- scripts/build_spec.py | 1 + test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 07306af8a..10e6034f2 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -55,6 +55,7 @@ from eth2spec.utils.ssz.ssz_impl import ( ) from eth2spec.utils.ssz.ssz_typing import ( BasicValue, Elements, BaseList, SSZType, + SSZVariableName, Container, List, Vector, Bytes, BytesN, Bitlist, Bitvector, Bits, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, uint64, bit, boolean, diff --git a/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py 
b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py index 1f199e6e1..bcccb91b2 100644 --- a/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py +++ b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py @@ -1,8 +1,11 @@ -from typing import Dict, Iterator +from typing import Dict, Iterator, NewType import copy from types import GeneratorType +SSZVariableName = NewType('SSZVariableName', str) + + class DefaultingTypeMeta(type): def default(cls): raise Exception("Not implemented") From bb0b5b09cc8b343f2b3a25fd18db5ba1b8a11a6b Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 15 Aug 2019 19:02:21 +0800 Subject: [PATCH 096/250] Use `SSZVariableName` instead of `str`, and fix some mypy errors --- specs/light_client/merkle_proofs.md | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index d6c6dee62..73c4c603d 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -26,7 +26,6 @@ We define the following Python custom types for type hinting and readability: -| Name | SSZ equivalent | Description | | - | - | - | | `GeneralizedIndex` | `uint64` | the index of a node in a binary Merkle tree | @@ -50,7 +49,7 @@ def get_previous_power_of_two(x: int) -> int: """ Get the previous power of 2 >= the input. 
""" - return x if x <= 2 else 2 * get_previous_power_of_2(x // 2) + return x if x <= 2 else 2 * get_previous_power_of_two(x // 2) ``` ## Generalized Merkle tree index @@ -67,9 +66,9 @@ In a binary Merkle tree, we define a "generalized index" of a node as `2**depth Note that the generalized index has the convenient property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function: ```python -def merkle_tree(leaves: Squence[Hash]) -> Squence[Hash]: +def merkle_tree(leaves: Sequence[Hash]) -> Sequence[Hash]: padded_length = get_next_power_of_two(len(leaves)) - o = [Hash()] * padded_length + leaves + [Hash()] * (padded_length - len(leaves)) + o = [Hash()] * padded_length + list(leaves) + [Hash()] * (padded_length - len(leaves)) for i in range(len(leaves) - 1, 0, -1): o[i] = hash(o[i * 2] + o[i * 2 + 1]) return o @@ -106,12 +105,12 @@ def item_length(typ: SSZType) -> int: ``` ```python -def get_elem_type(typ: Union[BaseList, Container], index: Union[int, str]) -> SSZType: +def get_elem_type(typ: Union[BaseList, Container], index_or_variable_name: Union[int, SSZVariableName]) -> SSZType: """ Return the type of the element of an object of the given type with the given index or member variable name (eg. 
`7` for `x[7]`, `"foo"` for `x.foo`) """ - return typ.get_fields()[index] if issubclass(typ, Container) else typ.elem_type + return typ.get_fields()[index_or_variable_name] if issubclass(typ, Container) else typ.elem_type ``` ```python @@ -137,7 +136,7 @@ def chunk_count(typ: SSZType) -> int: ``` ```python -def get_item_position(typ: SSZType, index: Union[int, str]) -> Tuple[int, int, int]: +def get_item_position(typ: SSZType, index_or_variable_name: Union[int, SSZVariableName]) -> Tuple[int, int, int]: """ Return three variables: (i) the index of the chunk in which the given element of the item is represented; @@ -146,16 +145,18 @@ def get_item_position(typ: SSZType, index: Union[int, str]) -> Tuple[int, int, i For example: for a 6-item list of uint64 values, index=2 will return (0, 16, 24), index=5 will return (1, 8, 16) """ if issubclass(typ, Elements): + index = int(index_or_variable_name) start = index * item_length(typ.elem_type) return start // 32, start % 32, start % 32 + item_length(typ.elem_type) elif issubclass(typ, Container): - return typ.get_field_names().index(index), 0, item_length(get_elem_type(typ, index)) + variable_name = int(index_or_variable_name) + return typ.get_field_names().index(variable_name), 0, item_length(get_elem_type(typ, variable_name)) else: raise Exception("Only lists/vectors/containers supported") ``` ```python -def get_generalized_index(typ: SSZType, path: List[Union[int, str]]) -> Optional[GeneralizedIndex]: +def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableName]]) -> Optional[GeneralizedIndex]: """ Converts a path (eg. `[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree. @@ -248,7 +249,7 @@ x x . . . . 
x * First, we provide a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: ```python -def get_branch_indices(tree_index: GeneralizedIndex) -> List[GeneralizedIndex]: +def get_branch_indices(tree_index: GeneralizedIndex) -> Sequence[GeneralizedIndex]: """ Get the generalized indices of the sister chunks along the path from the chunk with the given tree index to the root. @@ -260,7 +261,7 @@ def get_branch_indices(tree_index: GeneralizedIndex) -> List[GeneralizedIndex]: ``` ```python -def get_helper_indices(indices: List[GeneralizedIndex]) -> List[GeneralizedIndex]: +def get_helper_indices(indices: Sequence[GeneralizedIndex]) -> Sequence[GeneralizedIndex]: """ Get the generalized indices of all "extra" chunks in the tree needed to prove the chunks with the given generalized indices. Note that the decreasing order is chosen deliberately to ensure equivalence to the @@ -268,7 +269,7 @@ def get_helper_indices(indices: List[GeneralizedIndex]) -> List[GeneralizedIndex """ all_indices: Set[GeneralizedIndex] = set() for index in indices: - all_indices = all_indices.union(set(get_branch_indices(index) + [index])) + all_indices = all_indices.union(set(list(get_branch_indices(index)) + [index])) return sorted([ x for x in all_indices if ( From 24e583d5d9e2f8fbcd1a7ceca7e6d2672d222027 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 16 Aug 2019 11:01:48 -0600 Subject: [PATCH 097/250] add discord and add some external resources --- README.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0ae6156e7..acc60a0cf 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Ethereum 2.0 Specifications -[![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Join the chat at 
https://discord.gg/hpFs23p](https://img.shields.io/badge/chat-on%20discord-blue.svg)](https://discord.gg/hpFs23p) [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) To learn more about sharding and Ethereum 2.0 (Serenity), see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQ) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm). @@ -47,8 +47,15 @@ The following are the broad design goals for Ethereum 2.0: * to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain) +## Useful external resources + +* [Design Rationale](https://notes.ethereum.org/s/rkhCgQteN#) +* [Phase 0 Onboarding Document](https://notes.ethereum.org/s/Bkn3zpwxB) + + ## For spec contributors + Documentation on the different components used during spec writing can be found here: * [YAML Test Generators](test_generators/README.md) * [Executable Python Spec, with Py-tests](test_libs/pyspec/README.md) From 83566afa1babef3a9d5fd641438285bcc90b7334 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 16 Aug 2019 13:35:23 -0600 Subject: [PATCH 098/250] remove APIs and add links to outside spec/standards repos --- README.md | 8 + .../validator/0_beacon-node-validator-api.md | 27 - specs/validator/beacon_node_oapi.yaml | 641 ------------------ 3 files changed, 8 insertions(+), 668 deletions(-) diff --git a/README.md b/README.md index 0ae6156e7..8de68afbd 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,14 @@ See the [Eth 2.0 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) fo * [Beacon node API for validator](specs/validator/0_beacon-node-validator-api.md) +## Additional
specifications for client implementers + +Additional specifications and standards outside of core consensus can be found in the following repos: + +* [Eth2.0 APIs](https://github.com/ethereum/eth2.0-apis) +* [Eth2.0 Metrics](https://github.com/ethereum/eth2.0-metrics/) +* [Interop Standards in Eth2.0-pm](https://github.com/ethereum/eth2.0-pm/tree/master/interop) + ## Design goals The following are the broad design goals for Ethereum 2.0: diff --git a/specs/validator/0_beacon-node-validator-api.md b/specs/validator/0_beacon-node-validator-api.md deleted file mode 100644 index e87d36701..000000000 --- a/specs/validator/0_beacon-node-validator-api.md +++ /dev/null @@ -1,27 +0,0 @@ -# Ethereum 2.0 Phase 0 -- Beacon Node API for Validator - -**Notice**: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- Honest Validator](0_beacon-chain-validator.md) that describes an API exposed by the beacon node, which enables the validator client to participate in the Ethereum 2.0 protocol. - -## Outline - -This document outlines a minimal application programming interface (API) which is exposed by a beacon node for use by a validator client implementation which aims to facilitate [Phase 0](../../README.md#phase-0) of Ethereum 2.0. - -The API is a REST interface, accessed via HTTP, designed for use as a local communications protocol between binaries. Currently, the only supported return data type is JSON. - -## Background - -The beacon node maintains the state of the beacon chain by communicating with other beacon nodes in the Ethereum 2.0 network. Conceptually, it does not maintain keypairs that participate with the beacon chain. - -The validator client is a conceptually separate entity which utilizes private keys to perform validator related tasks, called "duties", on the beacon chain. These duties include the production of beacon blocks and signing of attestations. 
- -Since it is recommended to separate these concerns in the client implementations, we must clearly define the communication between them. - -The goal of this specification is to promote interoperability between beacon nodes and validator clients derived from different projects and to encourage innovation in validator client implementations, independently from beacon node development. For example, the validator client from [Lighthouse](https://github.com/sigp/lighthouse) could communicate with a running instance of the beacon node from [Prysm](https://github.com/prysmaticlabs/prysm), or a staking pool might create a decentrally managed validator client which utilizes the same API. - -This specification is derived from a proposal and discussion on Issues [#1011](https://github.com/ethereum/eth2.0-specs/issues/1011) and [#1012](https://github.com/ethereum/eth2.0-specs/issues/1012). - -## Specification - -The API specification has been written in [OpenAPI 3.0](https://swagger.io/docs/specification/about/) and is provided in the [beacon_node_oapi.yaml](beacon_node_oapi.yaml) file alongside this document. - -For convenience, this specification has been uploaded to SwaggerHub [here](https://app.swaggerhub.com/apis/spble/beacon_node_api_for_validator). diff --git a/specs/validator/beacon_node_oapi.yaml b/specs/validator/beacon_node_oapi.yaml deleted file mode 100644 index 4da8f7933..000000000 --- a/specs/validator/beacon_node_oapi.yaml +++ /dev/null @@ -1,641 +0,0 @@ -openapi: "3.0.2" -info: - title: "Minimal Beacon Node API for Validator" - description: "A minimal API specification for the beacon node, which enables a validator to connect and perform its obligations on the Ethereum 2.0 phase 0 beacon chain." - version: "0.2.0" - license: - name: "Apache 2.0" - url: "https://www.apache.org/licenses/LICENSE-2.0.html" -tags: - - name: MinimalSet - description: The minimal set of endpoints to enable a working validator implementation. 
- - name: OptionalSet - description: Extra endpoints which are nice-to-haves. -paths: - /node/version: - get: - tags: - - MinimalSet - summary: "Get version string of the running beacon node." - description: "Requests that the beacon node identify information about its implementation in a format similar to a [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) field." - responses: - 200: - description: Request successful - content: - application/json: - schema: - $ref: '#/components/schemas/version' - 500: - $ref: '#/components/responses/InternalError' - /node/genesis_time: - get: - tags: - - MinimalSet - summary: "Get the genesis_time parameter from beacon node configuration." - description: "Requests the genesis_time parameter from the beacon node, which should be consistent across all beacon nodes that follow the same beacon chain." - responses: - 200: - description: Request successful - content: - application/json: - schema: - $ref: '#/components/schemas/genesis_time' - 500: - $ref: '#/components/responses/InternalError' - - /node/syncing: - get: - tags: - - MinimalSet - summary: "Poll to see if the the beacon node is syncing." - description: "Requests the beacon node to describe if it's currently syncing or not, and if it is, what block it is up to. This is modelled after the Eth1.0 JSON-RPC eth_syncing call.." - responses: - 200: - description: Request successful - content: - application/json: - schema: - type: object - properties: - is_syncing: - type: boolean - description: "A boolean of whether the node is currently syncing or not." - sync_status: - $ref: '#/components/schemas/SyncingStatus' - 500: - $ref: '#/components/responses/InternalError' - /node/fork: - get: - tags: - - OptionalSet - summary: "Get fork information from running beacon node." - description: "Requests the beacon node to provide which fork version it is currently on." 
- responses: - 200: - description: Request successful - content: - application/json: - schema: - type: object - properties: - fork: - $ref: '#/components/schemas/Fork' - chain_id: - type: integer - format: uint64 - description: "Sometimes called the network id, this number discerns the active chain for the beacon node. Analogous to Eth1.0 JSON-RPC net_version." - 500: - $ref: '#/components/responses/InternalError' - - /validator/duties: - get: - tags: - - MinimalSet - summary: "Get validator duties for the requested validators." - description: "Requests the beacon node to provide a set of _duties_, which are actions that should be performed by validators, for a particular epoch. Duties should only need to be checked once per epoch, however a chain reorganization (of > MIN_SEED_LOOKAHEAD epochs) could occur, resulting in a change of duties. For full safety, this API call should be polled at every slot to ensure that chain reorganizations are recognized, and to ensure that the beacon node is properly synchronized." - parameters: - - name: validator_pubkeys - in: query - required: true - description: "An array of hex-encoded BLS public keys" - schema: - type: array - items: - $ref: '#/components/schemas/pubkey' - minItems: 1 - - name: epoch - in: query - required: false - schema: - type: integer - responses: - 200: - description: Success response - content: - application/json: - schema: - type: array - items: - $ref: '#/components/schemas/ValidatorDuty' - 400: - $ref: '#/components/responses/InvalidRequest' - 406: - description: "Duties cannot be provided for the requested epoch." - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - - /validator/block: - get: - tags: - - MinimalSet - summary: "Produce a new block, without signature." - description: "Requests a beacon node to produce a valid block, which can then be signed by a validator." 
- parameters: - - name: slot - in: query - required: true - description: "The slot for which the block should be proposed." - schema: - type: integer - format: uint64 - - name: randao_reveal - in: query - required: true - description: "The validator's randao reveal value." - schema: - type: string - format: byte - responses: - 200: - description: Success response - content: - application/json: - schema: - $ref: '#/components/schemas/BeaconBlock' - 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - post: - tags: - - MinimalSet - summary: "Publish a signed block." - description: "Instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be included in the beacon chain. The beacon node is not required to validate the signed `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new block into its state, and therefore validate the block internally, however blocks which fail the validation are still broadcast but a different status code is returned (202)" - parameters: - - name: beacon_block - in: query - required: true - description: "The `BeaconBlock` object, as sent from the beacon node originally, but now with the signature field completed." - schema: - $ref: '#/components/schemas/BeaconBlock' - responses: - 200: - description: "The block was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." - 202: - description: "The block failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." 
- 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - - /validator/attestation: - get: - tags: - - MinimalSet - summary: "Produce an attestation, without signature." - description: "Requests that the beacon node produce an IndexedAttestation, with a blank signature field, which the validator will then sign." - parameters: - - name: validator_pubkey - in: query - required: true - description: "Uniquely identifying which validator this attestation is to be produced for." - schema: - $ref: '#/components/schemas/pubkey' - - name: poc_bit - in: query - required: true - description: "The proof-of-custody bit that is to be reported by the requesting validator. This bit will be inserted into the appropriate location in the returned `IndexedAttestation`." - schema: - type: integer - format: uint32 - minimum: 0 - maximum: 1 - - name: slot - in: query - required: true - description: "The slot for which the attestation should be proposed." - schema: - type: integer - - name: shard - in: query - required: true - description: "The shard number for which the attestation is to be proposed." - schema: - type: integer - responses: - 200: - description: Success response - content: - application/json: - schema: - $ref: '#/components/schemas/IndexedAttestation' - 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - post: - tags: - - MinimalSet - summary: "Publish a signed attestation." - description: "Instructs the beacon node to broadcast a newly signed IndexedAttestation object to the intended shard subnet. The beacon node is not required to validate the signed IndexedAttestation, and a successful response (20X) only indicates that the broadcast has been successful. 
The beacon node is expected to integrate the new attestation into its state, and therefore validate the attestation internally, however attestations which fail the validation are still broadcast but a different status code is returned (202)" - parameters: - - name: attestation - in: query - required: true - description: "An `IndexedAttestation` structure, as originally provided by the beacon node, but now with the signature field completed." - schema: - $ref: '#/components/schemas/IndexedAttestation' - responses: - 200: - description: "The attestation was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." - 202: - description: "The attestation failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." - 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - -components: - schemas: - pubkey: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{96}$" - description: "The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._" - example: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc" - version: - type: string - description: "A string which uniquely identifies the client implementation and its version; similar to [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3)." - example: "Lighthouse / v0.1.5 (Linux x86_64)" - genesis_time: - type: integer - format: uint64 - description: "The genesis_time configured for the beacon node, which is the unix time at which the Eth2.0 chain began." 
- example: 1557716289 - ValidatorDuty: - type: object - properties: - validator_pubkey: - $ref: '#/components/schemas/pubkey' - attestation_slot: - type: integer - format: uint64 - description: "The slot at which the validator must attest." - attestation_shard: - type: integer - format: uint64 - description: "The shard in which the validator must attest." - block_proposal_slot: - type: integer - format: uint64 - nullable: true - description: "The slot in which a validator must propose a block, or `null` if block production is not required." - SyncingStatus: - type: object - nullable: true - properties: - starting_slot: - type: integer - format: uint64 - description: "The slot at which syncing started (will only be reset after the sync reached its head)" - current_slot: - type: integer - format: uint64 - description: "The most recent slot sync'd by the beacon node." - highest_slot: - type: integer - format: uint64 - description: "Globally, the estimated most recent slot number, or current target slot number." - - BeaconBlock: - description: "The [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) object from the Eth2.0 spec." - allOf: - - $ref: '#/components/schemas/BeaconBlockCommon' - - type: object - properties: - body: - $ref: '#/components/schemas/BeaconBlockBody' - BeaconBlockHeader: - description: "The [`BeaconBlockHeader`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblockheader) object from the Eth2.0 spec." 
- allOf: - - $ref: '#/components/schemas/BeaconBlockCommon' - - type: object - properties: - body_root: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The tree hash merkle root of the `BeaconBlockBody` for the `BeaconBlock`" - BeaconBlockCommon: - # An abstract object to collect the common fields between the BeaconBlockHeader and the BeaconBlock objects - type: object - properties: - slot: - type: integer - format: uint64 - description: "The slot to which this block corresponds." - parent_root: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The signing merkle root of the parent `BeaconBlock`." - state_root: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The tree hash merkle root of the `BeaconState` for the `BeaconBlock`." - signature: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{192}$" - example: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" - description: "The BLS signature of the `BeaconBlock` made by the validator of the block." - BeaconBlockBody: - type: object - description: "The [`BeaconBlockBody`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblockbody) object from the Eth2.0 spec." - properties: - randao_reveal: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{192}$" - description: "The RanDAO reveal value provided by the validator." - eth1_data: - title: Eth1Data - type: object - description: "The [`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#eth1data) object from the Eth2.0 spec." - properties: - deposit_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Root of the deposit tree." 
- deposit_count: - type: integer - format: uint64 - description: "Total number of deposits." - block_hash: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Ethereum 1.x block hash." - graffiti: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - proposer_slashings: - type: array - items: - title: ProposerSlashings - type: object - description: "The [`ProposerSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposerslashing) object from the Eth2.0 spec." - properties: - proposer_index: - type: integer - format: uint64 - description: "The index of the proposer to be slashed." - header_1: - $ref: '#/components/schemas/BeaconBlockHeader' - header_2: - $ref: '#/components/schemas/BeaconBlockHeader' - attester_slashings: - type: array - items: - title: AttesterSlashings - type: object - description: "The [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) object from the Eth2.0 spec." - properties: - attestation_1: - $ref: '#/components/schemas/IndexedAttestation' - attestation_2: - $ref: '#/components/schemas/IndexedAttestation' - attestations: - type: array - items: - title: Attestation - type: object - description: "The [`Attestation`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestation) object from the Eth2.0 spec." - properties: - aggregation_bits: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]+$" - description: "Attester aggregation bits." - custody_bits: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]+$" - description: "Custody bits." - signature: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{192}$" - description: "BLS aggregate signature." 
- data: - $ref: '#/components/schemas/AttestationData' - deposits: - type: array - items: - title: Deposit - type: object - description: "The [`Deposit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) object from the Eth2.0 spec." - properties: - proof: - type: array - description: "Branch in the deposit tree." - items: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - minItems: 32 - maxItems: 32 - index: - type: integer - format: uint64 - description: "Index in the deposit tree." - data: - title: DepositData - type: object - description: "The [`DepositData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#depositdata) object from the Eth2.0 spec." - properties: - pubkey: - $ref: '#/components/schemas/pubkey' - withdrawal_credentials: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The withdrawal credentials." - amount: - type: integer - format: uint64 - description: "Amount in Gwei." - signature: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{192}$" - description: "Container self-signature." - voluntary_exits: - type: array - items: - title: VoluntaryExit - type: object - description: "The [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) object from the Eth2.0 spec." - properties: - epoch: - type: integer - format: uint64 - description: "Minimum epoch for processing exit." - validator_index: - type: integer - format: uint64 - description: "Index of the exiting validator." - signature: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{192}$" - description: "Validator signature." - transfers: - type: array - items: - title: Transfer - type: object - description: "The [`Transfer`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#transfer) object from the Eth2.0 spec." 
- properties: - sender: - type: integer - format: uint64 - description: "Sender index." - recipient: - type: integer - format: uint64 - description: "Recipient index." - amount: - type: integer - format: uint64 - description: "Amount in Gwei." - fee: - type: integer - format: uint64 - description: "Fee in Gwei for block producer." - slot: - type: integer - format: uint64 - description: "Inclusion slot." - pubkey: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{96}$" - description: "Sender withdrawal public key." - signature: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{192}$" - description: "Sender signature." - - Fork: - type: object - description: "The [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#Fork) object from the Eth2.0 spec." - properties: - previous_version: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{8}$" - description: "Previous fork version." - current_version: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{8}$" - description: "Current fork version." - epoch: - type: integer - format: uint64 - description: "Fork epoch number." - IndexedAttestation: - type: object - description: "The [`IndexedAttestation`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#indexedattestation) object from the Eth2.0 spec." - properties: - custody_bit_0_indices: - type: array - description: "Validator indices for 0 bits." - items: - type: integer - format: uint64 - custody_bit_1_indices: - type: array - description: "Validator indices for 1 bits." 
- items: - type: integer - format: uint64 - signature: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{192}$" - example: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" - description: "The BLS signature of the `IndexedAttestation`, created by the validator of the attestation." - data: - $ref: '#/components/schemas/AttestationData' - AttestationData: - type: object - description: "The [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) object from the Eth2.0 spec." - properties: - beacon_block_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "LMD GHOST vote." - source_epoch: - type: integer - format: uint64 - description: "Source epoch from FFG vote." - source_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Source root from FFG vote." - target_epoch: - type: integer - format: uint64 - description: "Target epoch from FFG vote." - target_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Target root from FFG vote." - crosslink: - title: CrossLink - type: object - description: "The [`Crosslink`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#crosslink) object from the Eth2.0 spec, contains data from epochs [`start_epoch`, `end_epoch`)." - properties: - shard: - type: integer - format: uint64 - description: "The shard number." - start_epoch: - type: integer - format: uint64 - description: "The first epoch which the crosslinking data references." - end_epoch: - type: integer - format: uint64 - description: "The 'end' epoch referred to by the crosslinking data; no data in this Crosslink should refer to the `end_epoch` since it is not included in the crosslinking data interval." 
- parent_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Root of the previous crosslink." - data_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Root of the crosslinked shard data since the previous crosslink." - - responses: - Success: - description: "Request successful." - InvalidRequest: - description: "Invalid request syntax." - InternalError: - description: "Beacon node internal error." - CurrentlySyncing: - description: "Beacon node is currently syncing, try again later." - NotFound: - description: "The requested API endpoint does not exist." From 35d89e2706a3f482a1594cfdf08123093edcbf32 Mon Sep 17 00:00:00 2001 From: Martin Lundfall Date: Mon, 19 Aug 2019 13:03:51 +0200 Subject: [PATCH 099/250] merkle_proofs, simple-serialize, test_gen/README: update ToC --- specs/light_client/merkle_proofs.md | 19 +++++++++++-------- specs/simple-serialize.md | 1 + test_generators/README.md | 18 ++++++++++++++++++ 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 21115dd27..345435133 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -6,14 +6,17 @@ - [Merkle proof formats](#merkle-proof-formats) - - [Table of contents](#table-of-contents) - - [Constants](#constants) - - [Generalized Merkle tree index](#generalized-merkle-tree-index) - - [SSZ object to index](#ssz-object-to-index) - - [Merkle multiproofs](#merkle-multiproofs) - - [MerklePartial](#merklepartial) - - [`SSZMerklePartial`](#sszmerklepartial) - - [Proofs for execution](#proofs-for-execution) + - [Table of contents](#table-of-contents) + - [Generalized Merkle tree index](#generalized-merkle-tree-index) + - [SSZ object to index](#ssz-object-to-index) + - [Helpers for generalized indices](#helpers-for-generalized-indices) + - [`concat_generalized_indices`](#concat_generalized_indices) + - 
[`get_generalized_index_length`](#get_generalized_index_length) + - [`get_generalized_index_bit`](#get_generalized_index_bit) + - [`generalized_index_sibling`](#generalized_index_sibling) + - [`generalized_index_child`](#generalized_index_child) + - [`generalized_index_parent`](#generalized_index_parent) + - [Merkle multiproofs](#merkle-multiproofs) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 50d091c07..5b8e5e8f4 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -26,6 +26,7 @@ - [Deserialization](#deserialization) - [Merkleization](#merkleization) - [Self-signed containers](#self-signed-containers) + - [Summaries and expansions](#summaries-and-expansions) - [Implementations](#implementations) diff --git a/test_generators/README.md b/test_generators/README.md index 7a4a5c536..abcb8a1ee 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -9,6 +9,24 @@ On releases, test generators are run by the release manager. Test-generation of An automated nightly tests release system, with a config filter applied, is being considered as implementation needs mature. 
+## Table of contents + + + + + +- [How to run generators](#how-to-run-generators) + - [Cleaning](#cleaning) + - [Running all test generators](#running-all-test-generators) + - [Running a single generator](#running-a-single-generator) +- [Developing a generator](#developing-a-generator) +- [How to add a new test generator](#how-to-add-a-new-test-generator) +- [How to remove a test generator](#how-to-remove-a-test-generator) + + + + + ## How to run generators Prerequisites: From 62d37593fbd3ed53fd45e6ba80b0ba2f2130b676 Mon Sep 17 00:00:00 2001 From: Martin Lundfall Date: Mon, 19 Aug 2019 13:05:44 +0200 Subject: [PATCH 100/250] Correct various typos --- scripts/build_spec.py | 2 +- test_libs/pyspec/eth2spec/test/context.py | 2 +- test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py | 2 +- test_libs/pyspec/eth2spec/utils/ssz/test_ssz_typing.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 88c3d46fb..e6a95e028 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -210,7 +210,7 @@ ignored_dependencies = [ def dependency_order_ssz_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None: """ - Determines which SSZ Object is depenedent on which other and orders them appropriately + Determines which SSZ Object is dependent on which other and orders them appropriately """ items = list(objects.items()) for key, value in items: diff --git a/test_libs/pyspec/eth2spec/test/context.py b/test_libs/pyspec/eth2spec/test/context.py index 5a0ddb59d..5cc42c510 100644 --- a/test_libs/pyspec/eth2spec/test/context.py +++ b/test_libs/pyspec/eth2spec/test/context.py @@ -101,7 +101,7 @@ all_phases = ['phase0', 'phase1'] def with_all_phases(fn): """ - A decorator for running a test wil every phase + A decorator for running a test with every phase """ return with_phases(all_phases)(fn) diff --git a/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py 
b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py index 1f199e6e1..891633afe 100644 --- a/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py +++ b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py @@ -344,7 +344,7 @@ class BaseList(list, Elements): return super().__iter__() def last(self): - # be explict about getting the last item, for the non-python readers, and negative-index safety + # be explicit about getting the last item, for the non-python readers, and negative-index safety return self[len(self) - 1] diff --git a/test_libs/pyspec/eth2spec/utils/ssz/test_ssz_typing.py b/test_libs/pyspec/eth2spec/utils/ssz/test_ssz_typing.py index f746a29c9..d5a53c5fa 100644 --- a/test_libs/pyspec/eth2spec/utils/ssz/test_ssz_typing.py +++ b/test_libs/pyspec/eth2spec/utils/ssz/test_ssz_typing.py @@ -222,7 +222,7 @@ def test_bytesn_subclass(): def test_uint_math(): - assert uint8(0) + uint8(uint32(16)) == uint8(16) # allow explict casting to make invalid addition valid + assert uint8(0) + uint8(uint32(16)) == uint8(16) # allow explicit casting to make invalid addition valid expect_value_error(lambda: uint8(0) - uint8(1), "no underflows allowed") expect_value_error(lambda: uint8(1) + uint8(255), "no overflows allowed") From 6722608978677fa0991875748e61afe0dbedb9ea Mon Sep 17 00:00:00 2001 From: Martin Lundfall Date: Mon, 19 Aug 2019 13:06:21 +0200 Subject: [PATCH 101/250] Add codespell whitelist --- .codespell-whitelist | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .codespell-whitelist diff --git a/.codespell-whitelist b/.codespell-whitelist new file mode 100644 index 000000000..ff694e380 --- /dev/null +++ b/.codespell-whitelist @@ -0,0 +1,2 @@ +uint +byteorder \ No newline at end of file From a843e9aeeba3312a3cc2b738fbc7dfeb21a24fa5 Mon Sep 17 00:00:00 2001 From: Martin Lundfall Date: Mon, 19 Aug 2019 13:47:09 +0200 Subject: [PATCH 102/250] specs/ fix links --- specs/core/0_beacon-chain.md | 2 +- specs/networking/p2p-interface.md | 2 +- specs/simple-serialize.md | 
2 +- specs/validator/0_beacon-chain-validator.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a42465ad4..7ed3226e1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -169,7 +169,7 @@ The following values are (non-configurable) constants used throughout the specif ## Configuration -*Note*: The default mainnet configuration values are included here for spec-design purposes. The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs/constant_presets) directory. These configurations are updated for releases and may be out of sync during `dev` changes. +*Note*: The default mainnet configuration values are included here for spec-design purposes. The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs) directory. These configurations are updated for releases and may be out of sync during `dev` changes. ### Misc diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index cdad92085..b3e0db50e 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -301,7 +301,7 @@ Here, `result` represents the 1-byte response code. The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time: -- `ssz`: The contents are [SSZ-encoded](#ssz-encoding). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocks` response would be an SSZ-encoded list of `BeaconBlock`s. All SSZ-Lists in the Req/Resp domain will have a maximum list size of `SSZ_MAX_LIST_SIZE`. +- `ssz`: The contents are [SSZ-encoded](../simple-serialize.md). 
This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocks` response would be an SSZ-encoded list of `BeaconBlock`s. All SSZ-Lists in the Req/Resp domain will have a maximum list size of `SSZ_MAX_LIST_SIZE`. - `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; MUST be supported in mainnet. #### SSZ-encoding strategy (with or without Snappy) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 5b8e5e8f4..588200f20 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -231,7 +231,7 @@ We similarly define "summary types" and "expansion types". For example, [`Beacon | Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) | | Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz](https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz) | | Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) | -| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/utils/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) | +| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/utils/ssz](https://github.com/paritytech/shasper/tree/master/utils/ssz) | | TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz-js) | | Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) | | Go | Prysm | Prysmatic Labs | 
[https://github.com/prysmaticlabs/go-ssz](https://github.com/prysmaticlabs/go-ssz) | diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 188a6a291..ef5ad4415 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -266,7 +266,7 @@ Up to `MAX_ATTESTATIONS`, aggregate attestations can be included in the `block`. ##### Deposits -If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. `state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth 1.0 deposit contract](../core/0_deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits). +If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. `state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth 1.0 deposit contract](../core/0_deposit-contract.md) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits). The `proof` for each deposit must be constructed against the deposit root contained in `state.eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. 
This entails storing a full deposit merkle tree locally and computing updated proofs against the `eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation. From 75f56380593e51fc4f4bcd0fcef9199f795d559d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 19 Aug 2019 11:06:37 -0600 Subject: [PATCH 103/250] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8de68afbd..09333f422 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ See the [Eth 2.0 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) fo ## Additional specifications for client implementers -Additional specifications and standards outside of core consensus can be found in the following repos: +Additional specifications and standards outside of requisite client functionality can be found in the following repos: * [Eth2.0 APIs](https://github.com/ethereum/eth2.0-apis) * [Eth2.0 Metrics](https://github.com/ethereum/eth2.0-metrics/) From df6e531d7401a9388c3a69d92bf542a18c7532d5 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Tue, 20 Aug 2019 11:37:27 +0200 Subject: [PATCH 104/250] Improve beacon proposer selection logic --- specs/core/0_beacon-chain.md | 14 +++++--------- .../eth2spec/test/helpers/proposer_slashings.py | 1 - 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 7ed3226e1..f27d016cf 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -934,15 +934,12 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: Return the beacon proposer index at the current slot. 
""" epoch = get_current_epoch(state) - committees_per_slot = get_committee_count(state, epoch) // SLOTS_PER_EPOCH - offset = committees_per_slot * (state.slot % SLOTS_PER_EPOCH) - shard = Shard((get_start_shard(state, epoch) + offset) % SHARD_COUNT) - first_committee = get_crosslink_committee(state, epoch, shard) + indices = get_active_validator_indices(state, epoch) + seed = hash(get_seed(state, epoch) + int_to_bytes(state.slot, length=8)) MAX_RANDOM_BYTE = 2**8 - 1 - seed = get_seed(state, epoch) i = 0 while True: - candidate_index = first_committee[(epoch + i) % len(first_committee)] + candidate_index = indices[compute_shuffled_index(ValidatorIndex(i % len(indices)), len(indices), seed)] random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32] effective_balance = state.validators[candidate_index].effective_balance if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: @@ -1608,9 +1605,8 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: ```python def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None: proposer = state.validators[proposer_slashing.proposer_index] - # Verify that the epoch is the same - assert (compute_epoch_of_slot(proposer_slashing.header_1.slot) - == compute_epoch_of_slot(proposer_slashing.header_2.slot)) + # Verify slots match + assert proposer_slashing.header_1.slot == proposer_slashing.header_2.slot # But the headers are different assert proposer_slashing.header_1 != proposer_slashing.header_2 # Check proposer is slashable diff --git a/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py index d5b7f7b7f..ce53c5931 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py +++ b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py @@ -18,7 +18,6 @@ def get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False): ) header_2 = 
deepcopy(header_1) header_2.parent_root = b'\x99' * 32 - header_2.slot = slot + 1 if signed_1: sign_block_header(spec, state, header_1, privkey) From 663d43d07f6bd4f263e4b7f5831747a28c6ac943 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 20 Aug 2019 18:55:30 +0800 Subject: [PATCH 105/250] PR feedback, fix type hinting, add missing `Container.get_field_names()` method --- scripts/build_spec.py | 4 +++- specs/light_client/merkle_proofs.md | 19 ++++++++++++------- .../pyspec/eth2spec/utils/ssz/ssz_typing.py | 11 +++++++---- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 10e6034f2..28022a752 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -55,7 +55,6 @@ from eth2spec.utils.ssz.ssz_impl import ( ) from eth2spec.utils.ssz.ssz_typing import ( BasicValue, Elements, BaseList, SSZType, - SSZVariableName, Container, List, Vector, Bytes, BytesN, Bitlist, Bitvector, Bits, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, uint64, bit, boolean, @@ -68,6 +67,9 @@ from eth2spec.utils.bls import ( ) from eth2spec.utils.hash_function import hash + + +SSZVariableName = str ''' SUNDRY_CONSTANTS_FUNCTIONS = ''' def ceillog2(x: uint64) -> int: diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 73c4c603d..2a4e100d6 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -38,8 +38,6 @@ def get_next_power_of_two(x: int) -> int: """ if x <= 2: return x - elif x % 2 == 0: - return 2 * get_next_power_of_two(x // 2) else: return 2 * get_next_power_of_two((x + 1) // 2) ``` @@ -49,7 +47,10 @@ def get_previous_power_of_two(x: int) -> int: """ Get the previous power of 2 >= the input. """ - return x if x <= 2 else 2 * get_previous_power_of_two(x // 2) + if x <= 2: + return x + else: + return 2 * get_previous_power_of_two(x // 2) ``` ## Generalized Merkle tree index @@ -91,7 +92,7 @@ y_data_root len(y) ....... 
``` -We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo.y[5].w`. We'll describe paths as lists, which can have two representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` values, in these cases (assuming the fields of `foo` in order are `x` then `y`, and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`. +We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo.y[5].w`. We'll describe paths as lists, which can have two representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` values, in these cases (assuming the fields of `foo` in order are `x` then `y`, and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`. We define `SSZVariableName` as the member variable name string, i.e., a path is presented as a sequence of integers and `SSZVariableName`. 
```python def item_length(typ: SSZType) -> int: @@ -149,7 +150,7 @@ def get_item_position(typ: SSZType, index_or_variable_name: Union[int, SSZVariab start = index * item_length(typ.elem_type) return start // 32, start % 32, start % 32 + item_length(typ.elem_type) elif issubclass(typ, Container): - variable_name = int(index_or_variable_name) + variable_name = index_or_variable_name return typ.get_field_names().index(variable_name), 0, item_length(get_elem_type(typ, variable_name)) else: raise Exception("Only lists/vectors/containers supported") @@ -161,11 +162,15 @@ def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableNam Converts a path (eg. `[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree. """ - root: Optional[GeneralizedIndex] = GeneralizedIndex(1) + root = GeneralizedIndex(1) for p in path: assert not issubclass(typ, BasicValue) # If we descend to a basic type, the path cannot continue further if p == '__len__': - typ, root = uint64, root * 2 + 1 if issubclass(typ, (List, Bytes)) else None + typ = uint64 + if issubclass(typ, (List, Bytes)): + root = GeneralizedIndex(root * 2 + 1) + else: + return None else: pos, _, _ = get_item_position(typ, p) base_index = (GeneralizedIndex(2) if issubclass(typ, (List, Bytes)) else GeneralizedIndex(1)) diff --git a/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py index bcccb91b2..ff942b84d 100644 --- a/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py +++ b/test_libs/pyspec/eth2spec/utils/ssz/ssz_typing.py @@ -1,11 +1,8 @@ -from typing import Dict, Iterator, NewType +from typing import Dict, Iterator, Iterable import copy from types import GeneratorType -SSZVariableName = NewType('SSZVariableName', str) - - class DefaultingTypeMeta(type): def default(cls): raise Exception("Not implemented") @@ -198,6 +195,12 @@ class Container(Series, 
metaclass=SSZType): return {} return dict(cls.__annotations__) + @classmethod + def get_field_names(cls) -> Iterable[SSZType]: + if not hasattr(cls, '__annotations__'): # no container fields + return () + return list(cls.__annotations__.keys()) + @classmethod def default(cls): return cls(**{f: t.default() for f, t in cls.get_fields().items()}) From b22caeb2463477b9ce5402a258f92782c915834d Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 20 Aug 2019 19:09:21 +0800 Subject: [PATCH 106/250] Add basic merkle proofs tests --- .../eth2spec/test/merkle_proofs/__init__.py | 0 .../test/merkle_proofs/test_merkle_proofs.py | 98 +++++++++++++++++++ 2 files changed, 98 insertions(+) create mode 100644 test_libs/pyspec/eth2spec/test/merkle_proofs/__init__.py create mode 100644 test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py diff --git a/test_libs/pyspec/eth2spec/test/merkle_proofs/__init__.py b/test_libs/pyspec/eth2spec/test/merkle_proofs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py b/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py new file mode 100644 index 000000000..5e2c4046b --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py @@ -0,0 +1,98 @@ + +import re +from eth_utils import ( + to_tuple, +) + +from eth2spec.test.context import ( + spec_state_test, + with_all_phases_except, +) +from eth2spec.utils.ssz.ssz_typing import ( + Bytes32, + Container, + List, + uint64, +) + + +class Foo(Container): + x: uint64 + y: List[Bytes32, 2] + +# Tree +# root +# / \ +# x y_root +# / \ +# y_data_root len(y) +# / \ +# / \ / \ +# +# Generalized indices +# 1 +# / \ +# 2 (x) 3 (y_root) +# / \ +# 6 7 +# / \ +# 12 13 + + +@to_tuple +def ssz_object_to_path(start, end): + is_len = False + len_findall = re.findall(r"(?<=len\().*(?=\))", end) + if len_findall: + is_len = True + end = len_findall[0] + + route = '' + if 
end.startswith(start): + route = end[len(start):] + + segments = route.split('.') + for word in segments: + index_match = re.match(r"(\w+)\[(\d+)]", word) + if index_match: + yield from index_match.groups() + elif len(word): + yield word + if is_len: + yield '__len__' + + +to_path_test_cases = [ + ('foo', 'foo.x', ('x',)), + ('foo', 'foo.x[100].y', ('x', '100', 'y')), + ('foo', 'foo.x[100].y[1].z[2]', ('x', '100', 'y', '1', 'z', '2')), + ('foo', 'len(foo.x[100].y[1].z[2])', ('x', '100', 'y', '1', 'z', '2', '__len__')), +] + + +def test_to_path(): + for test_case in to_path_test_cases: + start, end, expected = test_case + assert ssz_object_to_path(start, end) == expected + + +generalized_index_cases = [ + (Foo, ('x',), 2), + (Foo, ('y',), 3), + (Foo, ('y', 0), 12), + (Foo, ('y', 1), 13), + (Foo, ('y', '__len__'), None), +] + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_get_generalized_index(spec, state): + for typ, path, generalized_index in generalized_index_cases: + assert spec.get_generalized_index( + typ=typ, + path=path, + ) == generalized_index + yield 'typ', typ + yield 'path', path + yield 'generalized_index', generalized_index From d6bbd9bfa10204e7f0ec2a97f575ed55072b8cdc Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 20 Aug 2019 19:21:12 +0800 Subject: [PATCH 107/250] Add `BaseBytes` to cover `Bytes` and `BytesN` --- scripts/build_spec.py | 2 +- specs/light_client/merkle_proofs.md | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 202801d09..0a5171e8f 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -54,7 +54,7 @@ from eth2spec.utils.ssz.ssz_impl import ( is_zero, ) from eth2spec.utils.ssz.ssz_typing import ( - BasicValue, Elements, BaseList, SSZType, + BasicValue, Elements, BaseBytes, BaseList, SSZType, Container, List, Vector, Bytes, BytesN, Bitlist, Bitvector, Bits, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, uint64, bit, 
boolean, diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 2a4e100d6..d7f0ab382 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -106,7 +106,8 @@ def item_length(typ: SSZType) -> int: ``` ```python -def get_elem_type(typ: Union[BaseList, Container], index_or_variable_name: Union[int, SSZVariableName]) -> SSZType: +def get_elem_type(typ: Union[BaseBytes, BaseList, Container], + index_or_variable_name: Union[int, SSZVariableName]) -> SSZType: """ Return the type of the element of an object of the given type with the given index or member variable name (eg. `7` for `x[7]`, `"foo"` for `x.foo`) From 5fcfcac75e093af2c39ec860e3d35088a96a2d5c Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 20 Aug 2019 14:33:29 +0200 Subject: [PATCH 108/250] Updated light client sync for newer committees (#1316) --- specs/light_client/sync_protocol.md | 300 +++++++++++++--------------- 1 file changed, 144 insertions(+), 156 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 580b669f2..944abf8c1 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -1,199 +1,187 @@ -# Beacon Chain Light Client Syncing +# Minimal Light Client Design -**Notice**: This document is a work-in-progress for researchers and implementers. One of the design goals of the Eth 2.0 beacon chain is light-client friendliness, not only to allow low-resource clients (mobile phones, IoT, etc.) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the Eth 2.0 beacon chain and other chains. +**Notice**: This document is a work-in-progress for researchers and implementers. 
## Table of contents -- [Beacon Chain Light Client Syncing](#beacon-chain-light-client-syncing) +- [Minimal Light Client Design](#minimal-light-client-design) - [Table of contents](#table-of-contents) - - [Preliminaries](#preliminaries) - - [Expansions](#expansions) - - [`get_active_validator_indices`](#get_active_validator_indices) - - [`MerklePartial`](#merklepartial) - - [`PeriodData`](#perioddata) - - [`get_earlier_start_epoch`](#get_earlier_start_epoch) - - [`get_later_start_epoch`](#get_later_start_epoch) - - [`get_period_data`](#get_period_data) - - [Light client state](#light-client-state) - - [Updating the shuffled committee](#updating-the-shuffled-committee) - - [Computing the current committee](#computing-the-current-committee) - - [Verifying blocks](#verifying-blocks) + - [Introduction](#introduction) + - [Custom types](#custom-types) + - [Constants](#constants) + - [Containers](#containers) + - [`LightClientUpdate`](#lightclientupdate) + - [Helpers](#helpers) + - [`LightClientMemory`](#lightclientmemory) + - [`unpack_compact_validator`](#unpack_compact_validator) + - [`get_persistent_committee_pubkeys_and_balances`](#get_persistent_committee_pubkeys_and_balances) + - [Light client state updates](#light-client-state-updates) + - [Data overhead](#data-overhead) -## Preliminaries +## Introduction -### Expansions +Ethereum 2.0 is designed to be light client friendly. This allows low-resource clients such as mobile phones to access Ethereum 2.0 with reasonable safety and liveness. It also facilitates the development of "bridges" to external blockchains. This document suggests a minimal light client design for the beacon chain. -We define an "expansion" of an object as an object where a field in an object that is meant to represent the `hash_tree_root` of another object is replaced by the object. Note that defining expansions is not a consensus-layer-change; it is merely a "re-interpretation" of the object. 
Particularly, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (e.g. `BeaconBlockHeader` is a summary of `BeaconBlock`). +## Custom types -We define two expansions: +We define the following Python custom types for type hinting and readability: -* `ExtendedBeaconState`, which is identical to a `BeaconState` except `compact_committees_roots: List[Bytes32]` is replaced by `active_indices: List[List[ValidatorIndex]]`, where `BeaconState.compact_committees_roots[i] = hash_tree_root(ExtendedBeaconState.active_indices[i])`. -* `ExtendedBeaconBlock`, which is identical to a `BeaconBlock` except `state_root` is replaced with the corresponding `state: ExtendedBeaconState`. +| Name | SSZ equivalent | Description | +| - | - | - | +| `CompactValidator` | `uint64` | compact representation of a validator for light clients | -### `get_active_validator_indices` +## Constants -Note that there is now a new way to compute `get_active_validator_indices`: +| Name | Value | +| - | - | +| `BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH` | `4` | +| `BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_INDEX` | **TBD** | +| `PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH` | `5` | +| `PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX` | **TBD** | + +## Containers + +### `LightClientUpdate` ```python -def get_active_validator_indices(state: ExtendedBeaconState, epoch: Epoch) -> List[ValidatorIndex]: - return state.active_indices[epoch % EPOCHS_PER_HISTORICAL_VECTOR] +class LightClientUpdate(container): + # Shard block root (and authenticating signature data) + shard_block_root: Hash + fork_version: Version + aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] + signature: BLSSignature + # Updated beacon header (and authenticating branch) + header: BeaconBlockHeader + 
header_branch: Vector[Hash, BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH] + # Updated persistent committee (and authenticating branch) + committee: CompactCommittee + committee_branch: Vector[Hash, PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)] ``` -Note that it takes `state` instead of `state.validators` as an argument. This does not affect its use in `get_shuffled_committee`, because `get_shuffled_committee` has access to the full `state` as one of its arguments. +## Helpers - -### `MerklePartial` - -A `MerklePartial(f, *args)` is an object that contains a minimal Merkle proof needed to compute `f(*args)`. A `MerklePartial` can be used in place of a regular SSZ object, though a computation would return an error if it attempts to access part of the object that is not contained in the proof. - -### `PeriodData` +### `LightClientMemory` ```python -{ - 'validator_count': 'uint64', - 'seed': 'bytes32', - 'committee': [Validator], -} +@dataclass +class LightClientMemory(object): + shard: Shard # Randomly initialized and retained forever + header: BeaconBlockHeader # Beacon header which is not expected to revert + # Persistent committees corresponding to the beacon header + previous_committee: CompactCommittee + current_committee: CompactCommittee + next_committee: CompactCommittee ``` -### `get_earlier_start_epoch` +### `unpack_compact_validator` ```python -def get_earlier_start_epoch(slot: Slot) -> int: - return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD * 2 -``` - -### `get_later_start_epoch` - -```python -def get_later_start_epoch(slot: Slot) -> int: - return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD -``` - -### `get_period_data` - -```python -def get_period_data(block: ExtendedBeaconBlock, shard_id: Shard, later: bool) -> PeriodData: - period_start = get_later_start_epoch(header.slot) if later else get_earlier_start_epoch(header.slot) - validator_count = 
len(get_active_validator_indices(state, period_start)) - committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 - indices = get_period_committee(block.state, shard_id, period_start, 0, committee_count) - return PeriodData( - validator_count, - get_seed(block.state, period_start), - [block.state.validators[i] for i in indices], +def unpack_compact_validator(compact_validator: CompactValidator) -> Tuple[ValidatorIndex, bool, uint64]: + """ + Return the index, slashed, effective_balance // EFFECTIVE_BALANCE_INCREMENT of ``compact_validator``. + """ + return ( + ValidatorIndex(compact_validator >> 16), + (compact_validator >> 15) % 2, + uint64(compact_validator & (2**15 - 1)), ) ``` -### Light client state - -A light client will keep track of: - -* A random `shard_id` in `[0...SHARD_COUNT-1]` (selected once and retained forever) -* A block header that they consider to be finalized (`finalized_header`) and do not expect to revert. -* `later_period_data = get_period_data(finalized_header, shard_id, later=True)` -* `earlier_period_data = get_period_data(finalized_header, shard_id, later=False)` - -We use the struct `ValidatorMemory` to keep track of these variables. - -### Updating the shuffled committee - -If a client's `validator_memory.finalized_header` changes so that `header.slot // PERSISTENT_COMMITTEE_PERIOD` increases, then the client can ask the network for a `new_committee_proof = MerklePartial(get_period_data, validator_memory.finalized_header, shard_id, later=True)`. It can then compute: +### `get_persistent_committee_pubkeys_and_balances` ```python -earlier_period_data = later_period_data -later_period_data = get_period_data(new_committee_proof, finalized_header, shard_id, later=True) +def get_persistent_committee_pubkeys_and_balances(memory: LightClientMemory, + epoch: Epoch) -> Tuple[Sequence[BLSPubkey], Sequence[uint64]]: + """ + Return pubkeys and balances for the persistent committee at ``epoch``. 
+ """ + current_period = compute_epoch_of_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD + next_period = epoch // EPOCHS_PER_SHARD_PERIOD + assert next_period in (current_period, current_period + 1) + if next_period == current_period: + earlier_committee, later_committee = memory.previous_committee, memory.current_committee + else: + earlier_committee, later_committee = memory.current_committee, memory.next_committee + + pubkeys = [] + balances = [] + for pubkey, compact_validator in zip(earlier_committee.pubkeys, earlier_committee.compact_validators): + index, slashed, balance = unpack_compact_validator(compact_validator) + if epoch % EPOCHS_PER_SHARD_PERIOD < index % EPOCHS_PER_SHARD_PERIOD: + pubkeys.append(pubkey) + balances.append(balance) + for pubkey, compact_validator in zip(later_committee.pubkeys, later_committee.compact_validators): + index, slashed, balance = unpack_compact_validator(compact_validator) + if epoch % EPOCHS_PER_SHARD_PERIOD >= index % EPOCHS_PER_SHARD_PERIOD: + pubkeys.append(pubkey) + balances.append(balance) + return pubkeys, balances ``` -The maximum size of a proof is `128 * ((22-7) * 32 + 110) = 75520` bytes for validator records and `(22-7) * 32 + 128 * 8 = 1504` for the active index proof (much smaller because the relevant active indices are all beside each other in the Merkle tree). This needs to be done once per `PERSISTENT_COMMITTEE_PERIOD` epochs (2048 epochs / 9 days), or ~38 bytes per epoch. +## Light client state updates -## Computing the current committee - -Here is a helper to compute the committee at a slot given the maximal earlier and later committees: +The state of a light client is stored in a `memory` object of type `LightClientMemory`. To advance its state a light client requests an `update` object of type `LightClientUpdate` from the network by sending a request containing `(memory.shard, memory.header.slot, slot_range_end)` and calls `update_memory(memory, update)`. 
```python -def compute_committee(header: BeaconBlockHeader, - validator_memory: ValidatorMemory) -> List[ValidatorIndex]: - earlier_validator_count = validator_memory.earlier_period_data.validator_count - later_validator_count = validator_memory.later_period_data.validator_count - maximal_earlier_committee = validator_memory.earlier_period_data.committee - maximal_later_committee = validator_memory.later_period_data.committee - earlier_start_epoch = get_earlier_start_epoch(header.slot) - later_start_epoch = get_later_start_epoch(header.slot) - epoch = compute_epoch_of_slot(header.slot) +def update_memory(memory: LightClientMemory, update: LightClientUpdate) -> None: + # Verify the update does not skip a period + current_period = compute_epoch_of_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD + next_epoch = compute_epoch_of_shard_slot(update.header.slot) + next_period = next_epoch // EPOCHS_PER_SHARD_PERIOD + assert next_period in (current_period, current_period + 1) - committee_count = max( - earlier_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), - later_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), - ) + 1 + # Verify update header against shard block root and header branch + assert is_valid_merkle_branch( + leaf=hash_tree_root(update.header), + branch=update.header_branch, + depth=BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH, + index=BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_INDEX, + root=update.shard_block_root, + ) - def get_offset(count: int, end: bool) -> int: - return get_split_offset( - count, - SHARD_COUNT * committee_count, - validator_memory.shard_id * committee_count + (1 if end else 0), - ) + # Verify persistent committee votes pass 2/3 threshold + pubkeys, balances = get_persistent_committee_pubkeys_and_balances(memory, next_epoch) + assert 3 * sum(filter(lambda i: update.aggregation_bits[i], balances)) > 2 * sum(balances) - actual_earlier_committee = maximal_earlier_committee[ - 0:get_offset(earlier_validator_count, True) - 
get_offset(earlier_validator_count, False) - ] - actual_later_committee = maximal_later_committee[ - 0:get_offset(later_validator_count, True) - get_offset(later_validator_count, False) - ] - def get_switchover_epoch(index): - return ( - bytes_to_int(hash(validator_memory.earlier_period_data.seed + int_to_bytes(index, length=3))[0:8]) % - PERSISTENT_COMMITTEE_PERIOD - ) - - # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from - # later committee; return a sorted list of the union of the two, deduplicated - return sorted(list(set( - [i for i in actual_earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] - + [i for i in actual_later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] - ))) -``` - -Note that this method makes use of the fact that the committee for any given shard always starts and ends at the same validator index independently of the committee count (this is because the validator set is split into `SHARD_COUNT * committee_count` slices but the first slice of a shard is a multiple `committee_count * i`, so the start of the slice is `n * committee_count * i // (SHARD_COUNT * committee_count) = n * i // SHARD_COUNT`, using the slightly nontrivial algebraic identity `(x * a) // ab == x // b`). 
- -## Verifying blocks - -If a client wants to update its `finalized_header` it asks the network for a `BlockValidityProof`, which is simply: - -```python -{ - 'header': BeaconBlockHeader, - 'shard_aggregate_signature': BLSSignature, - 'shard_bits': Bitlist[PLACEHOLDER], - 'shard_parent_block': ShardBlock, -} -``` - -The verification procedure is as follows: - -```python -def verify_block_validity_proof(proof: BlockValidityProof, validator_memory: ValidatorMemory) -> bool: - assert proof.shard_parent_block.beacon_chain_root == hash_tree_root(proof.header) - committee = compute_committee(proof.header, validator_memory) - # Verify that we have >=50% support - support_balance = sum([v.effective_balance for i, v in enumerate(committee) if proof.shard_bits[i]]) - total_balance = sum([v.effective_balance for i, v in enumerate(committee)]) - assert support_balance * 2 > total_balance # Verify shard attestations - group_public_key = bls_aggregate_pubkeys([ - v.pubkey for v, index in enumerate(committee) - if proof.shard_bits[index] - ]) - assert bls_verify( - pubkey=group_public_key, - message_hash=hash_tree_root(shard_parent_block), - signature=proof.shard_aggregate_signature, - domain=get_domain(state, compute_epoch_of_slot(shard_block.slot), DOMAIN_SHARD_ATTESTER), - ) + pubkey = bls_aggregate_pubkeys(filter(lambda i: update.aggregation_bits[i], pubkeys)) + domain = compute_domain(DOMAIN_SHARD_ATTESTER, update.fork_version) + assert bls_verify(pubkey, update.shard_block_root, update.signature, domain) + + # Update persistent committees if entering a new period + if next_period == current_period + 1: + assert is_valid_merkle_branch( + leaf=hash_tree_root(update.committee), + branch=update.committee_branch, + depth=PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT), + index=PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX << log_2(SHARD_COUNT) + memory.shard, + root=hash_tree_root(update.header), + ) + memory.previous_committee = memory.current_committee 
+ memory.current_committee = memory.next_committee + memory.next_committee = update.committee + + # Update header + memory.header = update.header ``` -The size of this proof is only 200 (header) + 96 (signature) + 16 (bits) + 352 (shard block) = 664 bytes. It can be reduced further by replacing `ShardBlock` with `MerklePartial(lambda x: x.beacon_chain_root, ShardBlock)`, which would cut off ~220 bytes. +## Data overhead + +Once every `EPOCHS_PER_SHARD_PERIOD` epochs (~27 hours) a light client downloads a `LightClientUpdate` object: + +* `shard_block_root`: 32 bytes +* `fork_version`: 4 bytes +* `aggregation_bits`: 16 bytes +* `signature`: 96 bytes +* `header`: 8 + 32 + 32 + 32 + 96 = 200 bytes +* `header_branch`: 4 * 32 = 128 bytes +* `committee`: 128 * (48 + 8) = 7,168 bytes +* `committee_branch`: (5 + 10) * 32 = 480 bytes + +The total overhead is 8,124 bytes, or ~0.083 bytes per second. The Bitcoin SPV equivalent is 80 bytes per ~560 seconds, or ~0.143 bytes per second. Various compression optimisations (similar to [these](https://github.com/RCasatta/compressedheaders)) are possible. + +A light client can choose to update the header (without updating the committee) more frequently than once every `EPOCHS_PER_SHARD_PERIOD` epochs at a cost of 32 + 4 + 16 + 96 + 200 + 128 = 476 bytes per update. 
From 7409b5ae829aa5cfa8be58e4fc3adaa61c69c39a Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 20 Aug 2019 20:57:37 +0800 Subject: [PATCH 109/250] Add basic `test_verify_merkle_proof` and `test_verify_merkle_multiproof` tests --- .../test/merkle_proofs/test_merkle_proofs.py | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py b/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py index 5e2c4046b..91c861de3 100644 --- a/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py +++ b/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py @@ -96,3 +96,53 @@ def test_get_generalized_index(spec, state): yield 'typ', typ yield 'path', path yield 'generalized_index', generalized_index + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_verify_merkle_proof(spec, state): + h = spec.hash + a = b'\x11' * 32 + b = b'\x22' * 32 + c = b'\x33' * 32 + d = b'\x44' * 32 + root = h(h(a + b) + h(c + d)) + leaf = a + generalized_index = 4 + proof = [b, h(c + d)] + + is_valid = spec.verify_merkle_proof( + leaf=leaf, + proof=proof, + index=generalized_index, + root=root, + ) + assert is_valid + + yield 'proof', proof + yield 'is_valid', is_valid + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_verify_merkle_multiproof(spec, state): + h = spec.hash + a = b'\x11' * 32 + b = b'\x22' * 32 + c = b'\x33' * 32 + d = b'\x44' * 32 + root = h(h(a + b) + h(c + d)) + leaves = [a, d] + generalized_indices = [4, 7] + proof = [c, b] # helper_indices = [6, 5] + + is_valid = spec.verify_merkle_multiproof( + leaves=leaves, + proof=proof, + indices=generalized_indices, + root=root, + ) + assert is_valid + + yield 'proof', proof + yield 'is_valid', is_valid From c1f2e92ad1af9e1bd88c6f85c7087beebfcbe7b7 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 23 Aug 2019 12:33:25 +0200 Subject: [PATCH 110/250] Update specs/core/1_beacon-chain-misc.md 
Co-Authored-By: Danny Ryan --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 0c23e41b7..4539be6b0 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -180,7 +180,7 @@ Add to the beacon state the following fields: ```python # begin insert @persistent_committee_fields persistent_committee_roots: Vector[Hash, PERSISTENT_COMMITTEE_ROOT_LENGTH] - next_shard_receipt_period: Vector[uint, SHARD_COUNT] + next_shard_receipt_period: Vector[uint64, SHARD_COUNT] # end insert @persistent_committee_fields ``` `next_shard_receipt_period` values initialized to `PHASE_1_FORK_SLOT // SLOTS_PER_EPOCH // EPOCHS_PER_SHARD_PERIOD` From bbaa238742a93e2aa0524baaf6fd9049d2943d59 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 23 Aug 2019 20:16:46 +0800 Subject: [PATCH 111/250] Fix the definition of `GeneralizedIndex` --- scripts/build_spec.py | 3 ++- specs/light_client/merkle_proofs.md | 16 +++++----------- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 0a5171e8f..83f9a2145 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -37,7 +37,7 @@ from eth2spec.utils.bls import ( from eth2spec.utils.hash_function import hash ''' PHASE1_IMPORTS = '''from typing import ( - Any, Dict, Optional, Set, Sequence, MutableSequence, Tuple, Union, + Any, Dict, Optional, Set, Sequence, MutableSequence, NewType, Tuple, Union, ) from math import ( log2, @@ -70,6 +70,7 @@ from eth2spec.utils.hash_function import hash SSZVariableName = str +GeneralizedIndex = NewType('GeneralizedIndex', int) ''' SUNDRY_CONSTANTS_FUNCTIONS = ''' def ceillog2(x: uint64) -> int: diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index d7f0ab382..ce7dc647c 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md 
@@ -7,7 +7,6 @@ - [Merkle proof formats](#merkle-proof-formats) - [Table of contents](#table-of-contents) - - [Custom types](#custom-types) - [Helper functions](#helper-functions) - [Generalized Merkle tree index](#generalized-merkle-tree-index) - [SSZ object to index](#ssz-object-to-index) @@ -22,13 +21,6 @@ -## Custom types - -We define the following Python custom types for type hinting and readability: - -| - | - | - | -| `GeneralizedIndex` | `uint64` | the index of a node in a binary Merkle tree | - ## Helper functions ```python @@ -75,6 +67,8 @@ def merkle_tree(leaves: Sequence[Hash]) -> Sequence[Hash]: return o ``` +We define a custom type `GeneralizedIndex` as a Python integer type in this document. It can be represented as a Bitvector/Bitlist object as well. + We will define Merkle proofs in terms of generalized indices. ## SSZ object to index @@ -175,7 +169,7 @@ def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableNam else: pos, _, _ = get_item_position(typ, p) base_index = (GeneralizedIndex(2) if issubclass(typ, (List, Bytes)) else GeneralizedIndex(1)) - root = root * base_index * get_next_power_of_two(chunk_count(typ)) + pos + root = GeneralizedIndex(root * base_index * get_next_power_of_two(chunk_count(typ)) + pos) typ = get_elem_type(typ, p) return root ``` @@ -280,8 +274,8 @@ def get_helper_indices(indices: Sequence[GeneralizedIndex]) -> Sequence[Generali return sorted([ x for x in all_indices if ( not ( - generalized_index_child(x, GeneralizedIndex(0)) in all_indices and - generalized_index_child(x, GeneralizedIndex(1)) in all_indices + generalized_index_child(x, False) in all_indices and + generalized_index_child(x, True) in all_indices ) and not (x in indices) ) ], reverse=True) From 1392c931d0f208ab3f156ea56e3cb667644706de Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 23 Aug 2019 12:00:01 -0600 Subject: [PATCH 112/250] add GeneralizedIndex to custom types --- specs/core/1_beacon-chain-misc.md | 9 +++++++++ 1 file 
changed, 9 insertions(+) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 4539be6b0..b6f23a00e 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -6,6 +6,7 @@ - [Phase 1 miscellaneous beacon chain changes](#phase-1-miscellaneous-beacon-chain-changes) - [Table of contents](#table-of-contents) + - [Custom types](#custom-types) - [Configuration](#configuration) - [Classes](#classes) - [CompactCommittee](#compactcommittee) @@ -26,6 +27,14 @@ +## Custom types + +We define the following Python custom types for type hinting and readability: + +| Name | SSZ equivalent | Description | +| - | - | - | +| `GeneralizedIndex` | `uint64` | a generalized index into an SSZ merkle tree | + ## Configuration | Name | Value | Unit | Duration From 56954ec5089eaaa08423a453c6a44db3d8ce1457 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 23 Aug 2019 12:16:57 -0600 Subject: [PATCH 113/250] fix adding fields to phase 1 ssz objects --- specs/core/0_beacon-chain.md | 2 -- specs/core/1_beacon-chain-misc.md | 37 ++++++++++++++++++------------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 5ddceebc1..90c179107 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -480,7 +480,6 @@ class BeaconBlockBody(Container): deposits: List[Deposit, MAX_DEPOSITS] voluntary_exits: List[VoluntaryExit, MAX_VOLUNTARY_EXITS] transfers: List[Transfer, MAX_TRANSFERS] - # @shard_receipts ``` #### `BeaconBlock` @@ -534,7 +533,6 @@ class BeaconState(Container): previous_justified_checkpoint: Checkpoint # Previous epoch snapshot current_justified_checkpoint: Checkpoint finalized_checkpoint: Checkpoint - # @persistent_committee_fields ``` ## Helper functions diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index b6f23a00e..65c4cdbbc 100644 --- a/specs/core/1_beacon-chain-misc.md +++ 
b/specs/core/1_beacon-chain-misc.md @@ -182,17 +182,32 @@ def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptP ## Changes -### Persistent committees +### Phase 0 container updates -Add to the beacon state the following fields: +Add the following fields to the end of the specified container objects. + +#### `BeaconState` ```python -# begin insert @persistent_committee_fields +class BeaconState(Container): + # Persistent committees persistent_committee_roots: Vector[Hash, PERSISTENT_COMMITTEE_ROOT_LENGTH] next_shard_receipt_period: Vector[uint64, SHARD_COUNT] -# end insert @persistent_committee_fields ``` -`next_shard_receipt_period` values initialized to `PHASE_1_FORK_SLOT // SLOTS_PER_EPOCH // EPOCHS_PER_SHARD_PERIOD` + +`persistent_committee_roots` values are initialized to `Bytes32()` (empty bytes value). +`next_shard_receipt_period` values are initialized to `PHASE_1_FORK_SLOT // SLOTS_PER_EPOCH // EPOCHS_PER_SHARD_PERIOD`. + +#### `BeaconBlockBody` + +```python +class BeaconBlockBody(Container): + shard_receipt_proofs: List[ShardReceiptProof, MAX_SHARD_RECEIPT_PROOFS] +``` + +`shard_receipt_proofs` is initialized to `[]`. + +### Persistent committees Run `update_persistent_committee` immediately before `process_final_updates`: @@ -215,18 +230,10 @@ def update_persistent_committee(state: BeaconState) -> None: ### Shard receipt processing -Add the `shard_receipt_proofs` operation to `BeaconBlockBody`: - -```python -# begin insert @shard_receipts - shard_receipt_proofs: List[ShardReceiptProof, MAX_SHARD_RECEIPT_PROOFS] -# end insert @shard_receipts -``` - -Use `process_shard_receipt_proof` to process each receipt. +Run `process_shard_receipt_proof` on each `ShardReceiptProof` during block processing. 
```python # begin insert @process_shard_receipts - (body.shard_receipt_proofs, process_shard_receipt_proofs), + (body.shard_receipt_proofs, process_shard_receipt_proof), # end insert @process_shard_receipts ``` From bcdbf7dfc7881ede422f8e29eadaeb50caea1dd6 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 24 Aug 2019 02:44:22 +0800 Subject: [PATCH 114/250] Fix some flake8 errors --- specs/core/1_beacon-chain-misc.md | 53 +++++++++++++++---------------- 1 file changed, 26 insertions(+), 27 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 65c4cdbbc..b4a8d6dbe 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -6,43 +6,37 @@ - [Phase 1 miscellaneous beacon chain changes](#phase-1-miscellaneous-beacon-chain-changes) - [Table of contents](#table-of-contents) - - [Custom types](#custom-types) - [Configuration](#configuration) - - [Classes](#classes) - - [CompactCommittee](#compactcommittee) - - [ShardReceiptProof](#shardreceiptproof) - - [Helpers](#helpers) - - [pack_compact_validator](#pack_compact_validator) - - [unpack_compact_validator](#unpack_compact_validator) - - [committee_to_compact_committee](#committee_to_compact_committee) - - [get_previous_power_of_2](#get_previous_power_of_2) - - [verify_merkle_proof](#verify_merkle_proof) - - [concat_generalized_indices](#concat_generalized_indices) - - [compute_historical_state_generalized_index](#compute_historical_state_generalized_index) - - [get_generalized_index_of_crosslink_header](#get_generalized_index_of_crosslink_header) - - [process_shard_receipt_proof](#process_shard_receipt_proof) + - [Containers](#containers) + - [`CompactCommittee`](#compactcommittee) + - [`ShardReceiptProof`](#shardreceiptproof) + - [Helper functions](#helper-functions) + - [`pack_compact_validator`](#pack_compact_validator) + - [`unpack_compact_validator`](#unpack_compact_validator) + - 
[`committee_to_compact_committee`](#committee_to_compact_committee) + - [`get_previous_power_of_2`](#get_previous_power_of_2) + - [`verify_merkle_proof`](#verify_merkle_proof) + - [`compute_historical_state_generalized_index`](#compute_historical_state_generalized_index) + - [`get_generalized_index_of_crosslink_header`](#get_generalized_index_of_crosslink_header) + - [`process_shard_receipt_proof`](#process_shard_receipt_proof) - [Changes](#changes) + - [Phase 0 container updates](#phase-0-container-updates) + - [`BeaconState`](#beaconstate) + - [`BeaconBlockBody`](#beaconblockbody) - [Persistent committees](#persistent-committees) - [Shard receipt processing](#shard-receipt-processing) -## Custom types - -We define the following Python custom types for type hinting and readability: - -| Name | SSZ equivalent | Description | -| - | - | - | -| `GeneralizedIndex` | `uint64` | a generalized index into an SSZ merkle tree | - ## Configuration | Name | Value | Unit | Duration | - | - | - | - | | `MAX_SHARD_RECEIPT_PROOFS` | `2**0` (= 1) | - | - | | `PERSISTENT_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months | +| `MICRO_REWARD` | `Gwei(2**0)` (=1) | Gwei | - | -## Classes +## Containers #### `CompactCommittee` @@ -61,7 +55,7 @@ class ShardReceiptProof(Container): receipt: List[ShardReceiptDelta, PLACEHOLDER] ``` -## Helpers +## Helper functions #### `pack_compact_validator` @@ -146,7 +140,9 @@ def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex: """ Gets the generalized index for the root of the index'th header in a crosslink. 
""" - MAX_CROSSLINK_SIZE = SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK + MAX_CROSSLINK_SIZE = ( + SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK + ) assert MAX_CROSSLINK_SIZE == get_previous_power_of_2(MAX_CROSSLINK_SIZE) return GeneralizedIndex(MAX_CROSSLINK_SIZE // SHARD_HEADER_SIZE + index) ``` @@ -170,11 +166,14 @@ def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptP leaf=hash_tree_root(receipt_proof.receipt), proof=receipt_proof.proof, index=gindex, - root=state.current_crosslinks[shard].data_root + root=state.current_crosslinks[receipt_proof.shard].data_root ) for delta in receipt_proof.receipt: if get_current_epoch(state) < state.validators[delta.index].withdrawable_epoch: - increase_balance(state, delta.index, state.validators[delta.index].effective_balance * delta.reward_coefficient // REWARD_COEFFICIENT_BASE) + increase_amount = ( + state.validators[delta.index].effective_balance * delta.reward_coefficient // REWARD_COEFFICIENT_BASE + ) + increase_balance(state, delta.index, increase_amount) decrease_balance(state, delta.index, delta.block_fee) state.next_shard_receipt_period[receipt_proof.shard] += 1 increase_balance(state, get_beacon_proposer_index(state), MICRO_REWARD) From 17043891ff39af47cca4c4867c284b8ba14735e5 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 24 Aug 2019 03:03:00 +0800 Subject: [PATCH 115/250] Fix some mypy errors --- specs/core/1_beacon-chain-misc.md | 13 ++++++++----- specs/light_client/merkle_proofs.md | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index b4a8d6dbe..21b754156 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -76,7 +76,7 @@ def unpack_compact_validator(compact_validator: int) -> Tuple[int, bool, int]: """ Returns validator index, slashed, balance 
// EFFECTIVE_BALANCE_INCREMENT """ - return compact_validator >> 16, (compact_validator >> 15) % 2, compact_validator & (2**15 - 1) + return compact_validator >> 16, bool((compact_validator >> 15) % 2), compact_validator & (2**15 - 1) ``` #### `committee_to_compact_committee` @@ -129,7 +129,7 @@ def compute_historical_state_generalized_index(earlier: ShardSlot, later: ShardS for i in range(63, -1, -1): if (later - 1) & 2**i > (earlier - 1) & 2**i: later = later - ((later - 1) % 2**i) - 1 - o = concat_generalized_indices(o, get_generalized_index(ShardState, 'history_acc', i)) + o = concat_generalized_indices(o, get_generalized_index(ShardState, ['history_acc', i])) return o ``` @@ -150,7 +150,7 @@ def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex: #### `process_shard_receipt_proof` ```python -def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptProof): +def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptProof) -> None: """ Processes a ShardReceipt object. 
""" @@ -221,8 +221,11 @@ def update_persistent_committee(state: BeaconState) -> None: if (get_current_epoch(state) + 1) % EPOCHS_PER_SHARD_PERIOD == 0: period = (get_current_epoch(state) + 1) // EPOCHS_PER_SHARD_PERIOD committees = Vector[CompactCommittee, SHARD_COUNT]([ - committee_to_compact_committee(state, get_period_committee(state, get_current_epoch(state) + 1, i)) - for i in range(SHARD_COUNT) + committee_to_compact_committee( + state, + get_period_committee(state, Epoch(get_current_epoch(state) + 1), Shard(shard)), + ) + for shard in range(SHARD_COUNT) ]) state.persistent_committee_roots[period % PERSISTENT_COMMITTEE_ROOT_LENGTH] = hash_tree_root(committees) ``` diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index ce7dc647c..344f365b0 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -181,7 +181,7 @@ _Usage note: functions outside this section should manipulate generalized indice #### `concat_generalized_indices` ```python -def concat_generalized_indices(indices: Sequence[GeneralizedIndex]) -> GeneralizedIndex: +def concat_generalized_indices(*indices: GeneralizedIndex) -> GeneralizedIndex: """ Given generalized indices i1 for A -> B, i2 for B -> C .... i_n for Y -> Z, returns the generalized index for A -> Z. 
From 7c9f1aad61ef0877ea9a464af7a44d80f00109a8 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 24 Aug 2019 03:13:57 +0800 Subject: [PATCH 116/250] `test_compact_committees_root` will be moved to phase 1 --- .../test_process_final_updates.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py index 385cc289b..58882a44f 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py @@ -89,20 +89,3 @@ def test_historical_root_accumulator(spec, state): yield from run_process_final_updates(spec, state) assert len(state.historical_roots) == history_len + 1 - - -@with_all_phases -@spec_state_test -def test_compact_committees_root(spec, state): - assert spec.SLOTS_PER_ETH1_VOTING_PERIOD > spec.SLOTS_PER_EPOCH - # skip ahead to the end of the epoch - state.slot = spec.SLOTS_PER_EPOCH - 1 - - next_epoch = spec.get_current_epoch(state) + 1 - - # ensure that order in which items are processed in final_updates - # does not alter the expected_root - expected_root = spec.get_compact_committees_root(state, next_epoch) - yield from run_process_final_updates(spec, state) - - assert state.compact_committees_roots[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] == expected_root From 6923bdc46a1d8785092dcee7f61a65df81e2041f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 23 Aug 2019 14:31:26 -0600 Subject: [PATCH 117/250] remove Optional None from get_generalized_index. 
instead throw --- specs/core/1_beacon-chain-misc.md | 6 +++--- specs/light_client/merkle_proofs.md | 8 +++----- .../test/merkle_proofs/test_merkle_proofs.py | 14 +++++++++----- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 21b754156..bd7130f2c 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -129,7 +129,7 @@ def compute_historical_state_generalized_index(earlier: ShardSlot, later: ShardS for i in range(63, -1, -1): if (later - 1) & 2**i > (earlier - 1) & 2**i: later = later - ((later - 1) % 2**i) - 1 - o = concat_generalized_indices(o, get_generalized_index(ShardState, ['history_acc', i])) + o = concat_generalized_indices(o, GeneralizedIndex(get_generalized_index(ShardState, ['history_acc', i]))) return o ``` @@ -158,9 +158,9 @@ def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptP first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SLOTS_PER_EPOCH gindex = concat_generalized_indices( get_generalized_index_of_crosslink_header(0), - get_generalized_index(ShardBlockHeader, 'state_root'), + GeneralizedIndex(get_generalized_index(ShardBlockHeader, 'state_root')), compute_historical_state_generalized_index(receipt_slot, first_slot_in_last_crosslink), - get_generalized_index(ShardState, 'receipt_root') + GeneralizedIndex(get_generalized_index(ShardState, 'receipt_root')) ) assert verify_merkle_proof( leaf=hash_tree_root(receipt_proof.receipt), diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 344f365b0..bbd03d379 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -152,7 +152,7 @@ def get_item_position(typ: SSZType, index_or_variable_name: Union[int, SSZVariab ``` ```python -def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableName]]) -> Optional[GeneralizedIndex]: 
+def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex: """ Converts a path (eg. `[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree. @@ -162,10 +162,8 @@ def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableNam assert not issubclass(typ, BasicValue) # If we descend to a basic type, the path cannot continue further if p == '__len__': typ = uint64 - if issubclass(typ, (List, Bytes)): - root = GeneralizedIndex(root * 2 + 1) - else: - return None + assert issubclass(typ, (List, Bytes)) + root = GeneralizedIndex(root * 2 + 1) else: pos, _, _ = get_item_position(typ, p) base_index = (GeneralizedIndex(2) if issubclass(typ, (List, Bytes)) else GeneralizedIndex(1)) diff --git a/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py b/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py index 91c861de3..62a2f6379 100644 --- a/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py +++ b/test_libs/pyspec/eth2spec/test/merkle_proofs/test_merkle_proofs.py @@ -1,10 +1,10 @@ - import re from eth_utils import ( to_tuple, ) from eth2spec.test.context import ( + expect_assertion_error, spec_state_test, with_all_phases_except, ) @@ -89,10 +89,14 @@ generalized_index_cases = [ @spec_state_test def test_get_generalized_index(spec, state): for typ, path, generalized_index in generalized_index_cases: - assert spec.get_generalized_index( - typ=typ, - path=path, - ) == generalized_index + if generalized_index is not None: + assert spec.get_generalized_index( + typ=typ, + path=path, + ) == generalized_index + else: + expect_assertion_error(lambda: spec.get_generalized_index(typ=typ, path=path)) + yield 'typ', typ yield 'path', path yield 'generalized_index', generalized_index From b6d854de097cdfd7b63124a45b0f2b7070a56db9 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sat, 24 Aug 
2019 13:50:57 +0200 Subject: [PATCH 118/250] Fix ToC --- specs/core/1_beacon-chain-misc.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index bd7130f2c..720de4096 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -8,17 +8,17 @@ - [Table of contents](#table-of-contents) - [Configuration](#configuration) - [Containers](#containers) - - [`CompactCommittee`](#compactcommittee) - - [`ShardReceiptProof`](#shardreceiptproof) + - [`CompactCommittee`](#compactcommittee) + - [`ShardReceiptProof`](#shardreceiptproof) - [Helper functions](#helper-functions) - - [`pack_compact_validator`](#pack_compact_validator) - - [`unpack_compact_validator`](#unpack_compact_validator) - - [`committee_to_compact_committee`](#committee_to_compact_committee) - - [`get_previous_power_of_2`](#get_previous_power_of_2) - - [`verify_merkle_proof`](#verify_merkle_proof) - - [`compute_historical_state_generalized_index`](#compute_historical_state_generalized_index) - - [`get_generalized_index_of_crosslink_header`](#get_generalized_index_of_crosslink_header) - - [`process_shard_receipt_proof`](#process_shard_receipt_proof) + - [`pack_compact_validator`](#pack_compact_validator) + - [`unpack_compact_validator`](#unpack_compact_validator) + - [`committee_to_compact_committee`](#committee_to_compact_committee) + - [`get_previous_power_of_2`](#get_previous_power_of_2) + - [`verify_merkle_proof`](#verify_merkle_proof) + - [`compute_historical_state_generalized_index`](#compute_historical_state_generalized_index) + - [`get_generalized_index_of_crosslink_header`](#get_generalized_index_of_crosslink_header) + - [`process_shard_receipt_proof`](#process_shard_receipt_proof) - [Changes](#changes) - [Phase 0 container updates](#phase-0-container-updates) - [`BeaconState`](#beaconstate) From fb59160e6a32d437c032a33c00124a74de2327fb Mon Sep 17 00:00:00 2001 From: 
vbuterin Date: Sat, 24 Aug 2019 13:54:48 +0200 Subject: [PATCH 119/250] Persistent -> period, process_shard_receipt: add _proofs --- specs/core/1_beacon-chain-misc.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 720de4096..a105fc8a0 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -33,7 +33,7 @@ | Name | Value | Unit | Duration | - | - | - | - | | `MAX_SHARD_RECEIPT_PROOFS` | `2**0` (= 1) | - | - | -| `PERSISTENT_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months | +| `PERIOD_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months | | `MICRO_REWARD` | `Gwei(2**0)` (=1) | Gwei | - | ## Containers @@ -189,12 +189,12 @@ Add the following fields to the end of the specified container objects. ```python class BeaconState(Container): - # Persistent committees - persistent_committee_roots: Vector[Hash, PERSISTENT_COMMITTEE_ROOT_LENGTH] + # Period committees + period_committee_roots: Vector[Hash, PERIOD_COMMITTEE_ROOT_LENGTH] next_shard_receipt_period: Vector[uint64, SHARD_COUNT] ``` -`persistent_committee_roots` values are initialized to `Bytes32()` (empty bytes value). +`period_committee_roots` values are initialized to `Bytes32()` (empty bytes value). `next_shard_receipt_period` values are initialized to `PHASE_1_FORK_SLOT // SLOTS_PER_EPOCH // EPOCHS_PER_SHARD_PERIOD`. 
#### `BeaconBlockBody` @@ -208,15 +208,15 @@ class BeaconBlockBody(Container): ### Persistent committees -Run `update_persistent_committee` immediately before `process_final_updates`: +Run `update_period_committee` immediately before `process_final_updates`: ```python -# begin insert @update_persistent_committee - update_persistent_committee(state) -# end insert @update_persistent_committee -def update_persistent_committee(state: BeaconState) -> None: +# begin insert @update_period_committee + update_period_committee(state) +# end insert @update_period_committee +def update_period_committee(state: BeaconState) -> None: """ - Updates persistent committee roots at boundary blocks. + Updates period committee roots at boundary blocks. """ if (get_current_epoch(state) + 1) % EPOCHS_PER_SHARD_PERIOD == 0: period = (get_current_epoch(state) + 1) // EPOCHS_PER_SHARD_PERIOD @@ -227,7 +227,7 @@ def update_persistent_committee(state: BeaconState) -> None: ) for shard in range(SHARD_COUNT) ]) - state.persistent_committee_roots[period % PERSISTENT_COMMITTEE_ROOT_LENGTH] = hash_tree_root(committees) + state.period_committee_roots[period % PERIOD_COMMITTEE_ROOT_LENGTH] = hash_tree_root(committees) ``` ### Shard receipt processing @@ -235,7 +235,7 @@ def update_persistent_committee(state: BeaconState) -> None: Run `process_shard_receipt_proof` on each `ShardReceiptProof` during block processing. 
```python -# begin insert @process_shard_receipts +# begin insert @process_shard_receipt_proofs (body.shard_receipt_proofs, process_shard_receipt_proof), -# end insert @process_shard_receipts +# end insert @process_shard_receipt_proofs ``` From 7175ac55935a8460b24a0105ac3b39510fe26ba1 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sat, 24 Aug 2019 13:55:18 +0200 Subject: [PATCH 120/250] Update specs/core/0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 320bb8fd8..a26b4ae88 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1550,7 +1550,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: (body.deposits, process_deposit), (body.voluntary_exits, process_voluntary_exit), (body.transfers, process_transfer), - # @process_shard_receipts + # @process_shard_receipt_proofs ): for operation in operations: function(state, operation) From a509c68c302916cb75b7063065a9d8d59cbb4bb3 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sat, 24 Aug 2019 13:55:35 +0200 Subject: [PATCH 121/250] Update specs/core/0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a26b4ae88..102525d25 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1196,7 +1196,7 @@ def process_epoch(state: BeaconState) -> None: # @process_reveal_deadlines # @process_challenge_deadlines process_slashings(state) - # @update_persistent_committee + # @update_period_committee process_final_updates(state) # @after_process_final_updates ``` From 178dd23314f5db4ad15992bf761115df0f515ba3 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 24 Aug 2019 20:26:35 +0800 Subject: [PATCH 122/250] `MINOR_REWARD_QUOTIENT` for rewarding the proposer for including shard receipt proof 
Co-Authored-By: vbuterin --- specs/core/1_beacon-chain-misc.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index a105fc8a0..d07f1c217 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -34,7 +34,7 @@ | - | - | - | - | | `MAX_SHARD_RECEIPT_PROOFS` | `2**0` (= 1) | - | - | | `PERIOD_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months | -| `MICRO_REWARD` | `Gwei(2**0)` (=1) | Gwei | - | +| `MINOR_REWARD_QUOTIENT` | `2**8` (=256) | - | - | ## Containers @@ -176,7 +176,8 @@ def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptP increase_balance(state, delta.index, increase_amount) decrease_balance(state, delta.index, delta.block_fee) state.next_shard_receipt_period[receipt_proof.shard] += 1 - increase_balance(state, get_beacon_proposer_index(state), MICRO_REWARD) + proposer_index = get_beacon_proposer_index(state) + increase_balance(state, proposer_index, Gwei(get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT)) ``` ## Changes From 01af8e6297770f0599ac26efb49e89d6803efcc3 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 25 Aug 2019 17:19:19 +0800 Subject: [PATCH 123/250] Use `get_previous_power_of_two` from merkle proofs spec --- specs/core/1_beacon-chain-misc.md | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index d07f1c217..fcd004b95 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -14,7 +14,6 @@ - [`pack_compact_validator`](#pack_compact_validator) - [`unpack_compact_validator`](#unpack_compact_validator) - [`committee_to_compact_committee`](#committee_to_compact_committee) - - [`get_previous_power_of_2`](#get_previous_power_of_2) - [`verify_merkle_proof`](#verify_merkle_proof) - 
[`compute_historical_state_generalized_index`](#compute_historical_state_generalized_index) - [`get_generalized_index_of_crosslink_header`](#get_generalized_index_of_crosslink_header) @@ -95,13 +94,6 @@ def committee_to_compact_committee(state: BeaconState, committee: Sequence[Valid return CompactCommittee(pubkeys=pubkeys, compact_validators=compact_validators) ``` -#### `get_previous_power_of_2` - -```python -def get_previous_power_of_2(x: int) -> int: - return x if x <= 2 else 2 * get_previous_power_of_2(x // 2) -``` - #### `verify_merkle_proof` ```python @@ -143,7 +135,7 @@ def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex: MAX_CROSSLINK_SIZE = ( SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK ) - assert MAX_CROSSLINK_SIZE == get_previous_power_of_2(MAX_CROSSLINK_SIZE) + assert MAX_CROSSLINK_SIZE == get_previous_power_of_two(MAX_CROSSLINK_SIZE) return GeneralizedIndex(MAX_CROSSLINK_SIZE // SHARD_HEADER_SIZE + index) ``` From 9b3cb306e3ce5dca1e3d3df1584ca2c29d2ff9b6 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Mon, 26 Aug 2019 10:09:13 +0200 Subject: [PATCH 124/250] Update specs/core/1_beacon-chain-misc.md Co-Authored-By: John Adler --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index fcd004b95..ee63eca64 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -118,7 +118,7 @@ def compute_historical_state_generalized_index(earlier: ShardSlot, later: ShardS blocks at the next available multiples of descending powers of two. 
""" o = GeneralizedIndex(1) - for i in range(63, -1, -1): + for i in range(HISTORY_ACCUMULATOR_VECTOR - 1, -1, -1): if (later - 1) & 2**i > (earlier - 1) & 2**i: later = later - ((later - 1) % 2**i) - 1 o = concat_generalized_indices(o, GeneralizedIndex(get_generalized_index(ShardState, ['history_acc', i]))) From f1caa85aaf260fbd35774efff8eb057100ac3875 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Mon, 26 Aug 2019 10:09:43 +0200 Subject: [PATCH 125/250] Update specs/core/1_beacon-chain-misc.md Co-Authored-By: John Adler --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index ee63eca64..9e9bfddb9 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -147,7 +147,7 @@ def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptP Processes a ShardReceipt object. """ receipt_slot = state.next_shard_receipt_period[receipt_proof.shard] * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD - first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SLOTS_PER_EPOCH + first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH gindex = concat_generalized_indices( get_generalized_index_of_crosslink_header(0), GeneralizedIndex(get_generalized_index(ShardBlockHeader, 'state_root')), From ffdc36920e9acc2472a9ba7657b13219ab77279e Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 26 Aug 2019 08:55:50 -0600 Subject: [PATCH 126/250] lint --- specs/core/1_beacon-chain-misc.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 9e9bfddb9..c23b018f0 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -146,8 +146,9 @@ def process_shard_receipt_proof(state: BeaconState, 
receipt_proof: ShardReceiptP """ Processes a ShardReceipt object. """ + SHARD_SLOTS_PER_EPOCH = SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH receipt_slot = state.next_shard_receipt_period[receipt_proof.shard] * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD - first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH + first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SHARD_SLOTS_PER_EPOCH gindex = concat_generalized_indices( get_generalized_index_of_crosslink_header(0), GeneralizedIndex(get_generalized_index(ShardBlockHeader, 'state_root')), From 79e34a00e82518bdef23b265a15b5f7f9ef1c797 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Mon, 26 Aug 2019 17:37:18 -0700 Subject: [PATCH 127/250] Update sync_protocol.md --- specs/light_client/sync_protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 944abf8c1..feef1fcea 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -86,7 +86,7 @@ def unpack_compact_validator(compact_validator: CompactValidator) -> Tuple[Valid """ return ( ValidatorIndex(compact_validator >> 16), - (compact_validator >> 15) % 2, + (compact_validator >> 15) % 2 == 0, uint64(compact_validator & (2**15 - 1)), ) ``` From 4768ec89f680655574f68425802e5140fe5c8c6d Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 27 Aug 2019 11:45:17 +0100 Subject: [PATCH 128/250] SSZ clarifications on deserialization --- specs/simple-serialize.md | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 588200f20..6e250fd81 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -129,7 +129,7 @@ return bytes(array) ### `Bitlist[N]` -Note that from the offset coding, the length (in bytes) of the bitlist is 
known. An additional leading `1` bit is added so that the length in bits will also be known. +Note that from the offset coding, the length (in bytes) of the bitlist is known. An additional `1` bit is added at position `N` where `N` is the legnth of the bitlist so that the length in bits will also be known. ```python array = [0] * ((len(value) // 8) + 1) @@ -171,7 +171,15 @@ return serialized_type_index + serialized_bytes ## Deserialization -Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. Efficient algorithms for computing this object can be found in [the implementations](#implementations). +Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. + +Deserialization can be implemented using a recursive algorithm. The deserialization of basic objects is easy, and from there we can find a simple recursive algorithm for all fixed-size objects. For variable-size objects we have to do one of the following depending on what kind of object it is: + +* Vector/list of a variable-size object: The serialized data will start with offsets of all the serialized objects (`BYTES_PER_LENGTH_OFFSET` bytes each). + * Using the first offset, we can compute the length of the list (divide by `BYTES_PER_LENGTH_OFFSET`), as it gives us the total number of bytes in the offset data. + * The size of each object in the vector/list can be inferred from the difference of two offsets. To get the size of the last object, the total number of bytes has to be known (it is not generally possible to deserialize an SSZ object of unknown length) +* Containers follow the same principles as vectors, with the difference that there may be fixed-size objects in a container as well. 
This means the `fixed_parts` data will contain offsets as well as fixed-size objects. +* In the case of bitlists, the length in bits cannot be uniquely inferred from the number of bytes in the object. Because of this, they have a bit in position `N` where `N` is the length of the list that is always set. This bit has to be used to infer the size of the bitlist in bits. Note that deserialization requires hardening against invalid inputs. A non-exhaustive list: @@ -179,6 +187,8 @@ Note that deserialization requires hardening against invalid inputs. A non-exhau - Scope: Extra unused bytes, not aligned with element size. - More elements than a list limit allows. Part of enforcing consensus. +Efficient algorithms for computing this object can be found in [the implementations](#implementations). + ## Merkleization We first define helper functions: From ab4820ced6ed85effc871de712064cbb2a531ca3 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 27 Aug 2019 13:13:47 +0200 Subject: [PATCH 129/250] Update specs/core/1_beacon-chain-misc.md Co-Authored-By: John Adler --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index c23b018f0..41d0de233 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -147,7 +147,7 @@ def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptP Processes a ShardReceipt object. 
""" SHARD_SLOTS_PER_EPOCH = SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH - receipt_slot = state.next_shard_receipt_period[receipt_proof.shard] * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD + receipt_slot = state.next_shard_receipt_period[receipt_proof.shard] * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SHARD_SLOTS_PER_EPOCH gindex = concat_generalized_indices( get_generalized_index_of_crosslink_header(0), From 0b38ff0fe26f7291f771be9b65b6bf9353ccbf7e Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 27 Aug 2019 13:17:38 +0200 Subject: [PATCH 130/250] Update specs/core/1_beacon-chain-misc.md Co-Authored-By: Hsiao-Wei Wang --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 41d0de233..ddf06a1f0 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -189,7 +189,7 @@ class BeaconState(Container): ``` `period_committee_roots` values are initialized to `Bytes32()` (empty bytes value). -`next_shard_receipt_period` values are initialized to `PHASE_1_FORK_SLOT // SLOTS_PER_EPOCH // EPOCHS_PER_SHARD_PERIOD`. +`next_shard_receipt_period` values are initialized to `compute_epoch_of_shard_slot(PHASE_1_FORK_SLOT) // EPOCHS_PER_SHARD_PERIOD`. 
#### `BeaconBlockBody` From 0f2e814c63238f14339dd1a30723ced8336cf63e Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 27 Aug 2019 13:19:04 +0200 Subject: [PATCH 131/250] Shard slot -> slot for PHASE_1_FORK_SLOT --- specs/core/1_beacon-chain-misc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index ddf06a1f0..35a58e53b 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -189,7 +189,7 @@ class BeaconState(Container): ``` `period_committee_roots` values are initialized to `Bytes32()` (empty bytes value). -`next_shard_receipt_period` values are initialized to `compute_epoch_of_shard_slot(PHASE_1_FORK_SLOT) // EPOCHS_PER_SHARD_PERIOD`. +`next_shard_receipt_period` values are initialized to `compute_epoch_of_slot(PHASE_1_FORK_SLOT) // EPOCHS_PER_SHARD_PERIOD`. #### `BeaconBlockBody` From 17702e6d88515435a2a7c017d3d026d47acdbd8d Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 27 Aug 2019 20:10:39 +0800 Subject: [PATCH 132/250] Shard slot -> slot for PHASE_1_FORK_SLOT part2 --- configs/minimal.yaml | 4 ++-- specs/core/1_shard-data-chains.md | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 15b749b9d..be787ca3c 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -141,5 +141,5 @@ SHARD_SLOTS_PER_BEACON_SLOT: 2 EPOCHS_PER_SHARD_PERIOD: 4 # PHASE_1_FORK_EPOCH >= EPOCHS_PER_SHARD_PERIOD * 2 PHASE_1_FORK_EPOCH: 8 -# PHASE_1_FORK_SLOT = PHASE_1_FORK_EPOCH * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH -PHASE_1_FORK_SLOT: 128 +# PHASE_1_FORK_SLOT = PHASE_1_FORK_EPOCH * SLOTS_PER_EPOCH +PHASE_1_FORK_SLOT: 64 diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 8e1532f17..3dc549816 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -73,10 +73,10 @@ We define the following Python 
custom types for type hinting and readability: ### Initial values -| Name | Value | -| - | - | -| `PHASE_1_FORK_EPOCH` | **TBD** | -| `PHASE_1_FORK_SLOT` | **TBD** | +| Name | Value | Unit | +| - | - | - | +| `PHASE_1_FORK_EPOCH` | **TBD** | Epoch | +| `PHASE_1_FORK_SLOT` | **TBD** | Slot | ### Time parameters @@ -359,7 +359,7 @@ def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardSta return ShardState( basefee=1, shard=shard, - slot=PHASE_1_FORK_SLOT, + slot=PHASE_1_FORK_SLOT * SHARD_SLOTS_PER_BEACON_SLOT, earlier_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(earlier_committee))], later_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))], earlier_committee_fees=[Gwei(0) for _ in range(len(earlier_committee))], From 979fa38ae8a7e2ebdba82a993baa0e1cde3fb639 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 27 Aug 2019 20:10:55 +0800 Subject: [PATCH 133/250] fix linter error --- specs/core/1_beacon-chain-misc.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 35a58e53b..5bb0f6da0 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -147,7 +147,10 @@ def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptP Processes a ShardReceipt object. 
""" SHARD_SLOTS_PER_EPOCH = SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH - receipt_slot = state.next_shard_receipt_period[receipt_proof.shard] * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD + receipt_slot = ( + state.next_shard_receipt_period[receipt_proof.shard] * + SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD + ) first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SHARD_SLOTS_PER_EPOCH gindex = concat_generalized_indices( get_generalized_index_of_crosslink_header(0), From 334d6c6bc73eee2daf45f75c53a62a8cd5a4066a Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 27 Aug 2019 14:58:28 +0100 Subject: [PATCH 134/250] Revamp 1_shard-data-chains.md WIP! * Significant simplifications * A few bug fixes * Lots of cleanups and reorganising (making it consistent with `0_beacon-chain.md`) * Likely a few bugs introduced --- specs/core/1_shard-data-chains.md | 641 ++++++++++++++---------------- 1 file changed, 295 insertions(+), 346 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 8e1532f17..9335c1803 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -17,31 +17,37 @@ - [State list lengths](#state-list-lengths) - [Rewards and penalties](#rewards-and-penalties) - [Signature domain types](#signature-domain-types) - - [TODO PLACEHOLDER](#todo-placeholder) - - [Data structures](#data-structures) - - [`ShardBlockHeader`](#shardblockheader) - - [`ShardBlock`](#shardblock) + - [Containers](#containers) - [`ShardBlockSignatures`](#shardblocksignatures) - - [`ShardBlockCore`](#shardblockcore) - - [`ExtendedShardBlockCore`](#extendedshardblockcore) + - [`ShardBlock`](#shardblock) + - [`ShardBlockHeader`](#shardblockheader) - [`ShardState`](#shardstate) - [`ShardReceiptDelta`](#shardreceiptdelta) - [Helper functions](#helper-functions) - - [`compute_slot_of_shard_slot`](#compute_slot_of_shard_slot) - - 
[`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) - - [`get_shard_period_start_epoch`](#get_shard_period_start_epoch) - - [`get_period_committee`](#get_period_committee) - - [`get_persistent_committee`](#get_persistent_committee) - - [`get_shard_block_proposer_index`](#get_shard_block_proposer_index) - - [`get_shard_header`](#get_shard_header) - - [`pad`](#pad) - - [`flatten_shard_header`](#flatten_shard_header) - - [`compute_crosslink_data_root`](#compute_crosslink_data_root) - - [`get_default_shard_state`](#get_default_shard_state) + - [Misc](#misc-1) + - [`pad`](#pad) + - [`compute_slot_of_shard_slot`](#compute_slot_of_shard_slot) + - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) + - [`compute_period_start_epoch`](#compute_period_start_epoch) + - [`compute_flat_shard_header`](#compute_flat_shard_header) + - [`compute_crosslink_data_root`](#compute_crosslink_data_root) + - [State accessors](#state-accessors) + - [`get_period_committee`](#get_period_committee) + - [`get_persistent_committee`](#get_persistent_committee) + - [`get_shard_proposer_index`](#get_shard_proposer_index) + - [`get_default_shard_state`](#get_default_shard_state) + - [`get_shard_base_reward`](#get_shard_base_reward) + - [State mutators](#state-mutators) + - [`add_fee`](#add_fee) + - [`add_reward`](#add_reward) + - [Shard state transition function](#shard-state-transition-function) + - [Period processing](#period-processing) + - [Block processing](#block-processing) + - [Block header](#block-header) + - [Attestations](#attestations) + - [Block data fees](#block-data-fees) - [Object validity](#object-validity) - [Shard block validation: preliminary](#shard-block-validation-preliminary) - - [Shard state transition function helpers](#shard-state-transition-function-helpers) - - [Shard state transition function](#shard-state-transition-function) - [Beacon attestations](#beacon-attestations) - [Shard fork choice rule](#shard-fork-choice-rule) @@ -53,11 +59,9 @@ This 
document describes the shard data layer and the shard fork choice rule in P ## Custom types -We define the following Python custom types for type hinting and readability: - | Name | SSZ equivalent | Description | | - | - | - | -| `ShardSlot` | `uint64` | a slot number in shard chain | +| `ShardSlot` | `uint64` | a shard slot number | ## Configuration @@ -65,7 +69,7 @@ We define the following Python custom types for type hinting and readability: | Name | Value | | - | - | -| `SHARD_SLOTS_PER_BEACON_SLOT` | `2**1` (= 2) | +| `SHARD_SLOTS_PER_EPOCH` | `2**7` (= 128) | | `TARGET_PERSISTENT_COMMITTEE_SIZE` | `2**7` (= 128) | | `SHARD_HEADER_SIZE` | `2**9` (= 512) | | `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) | @@ -76,7 +80,6 @@ We define the following Python custom types for type hinting and readability: | Name | Value | | - | - | | `PHASE_1_FORK_EPOCH` | **TBD** | -| `PHASE_1_FORK_SLOT` | **TBD** | ### Time parameters @@ -96,75 +99,53 @@ We define the following Python custom types for type hinting and readability: | Name | Value | | - | - | | `BASEFEE_ADJUSTMENT_FACTOR` | `2**3` (= 8) | -| `REWARD_COEFFICIENT_BASE` | `2**20` ( = 1,048,576) | +| `REWARD_COEFFICIENT_BASE` | `2**20` (= 1,048,576) | ### Signature domain types -The following types are defined, mapping into `DomainType` (little endian): - | Name | Value | | - | - | | `DOMAIN_SHARD_PROPOSER` | `128` | | `DOMAIN_SHARD_ATTESTER` | `129` | -### TODO PLACEHOLDER +## Containers -| Name | Value | -| - | - | -| `PLACEHOLDER` | `2**3` | - -## Data structures - -_Note: the shard block header structure is carefully designed so that all of the values have the same depth in a hash tree implementation, so `hash_tree_root(SSZ_partial(x)) == hash_tree_root(x)` (using the "left-to-right leaves" scheme [here](https://github.com/ethereum/eth2.0-specs/issues/1303)), which allows shard block headers to look like an SSZ object when in the crosslink structure. 
This is done by balancing it so that 7 or 8 items are on the left side (the "core") and two 96-byte (ie. 3*2 = 6 chunk) items are on the right side. Change with care._ - -### `ShardBlockHeader` +### `ShardBlockSignatures` ```python -class ShardBlockHeader(Container): - core: ShardBlockCore - signatures: ShardBlockSignatures +class ShardBlockSignatures(Container): + attesters: BLSSignature + proposer: BLSSignature ``` ### `ShardBlock` ```python class ShardBlock(Container): - core: ExtendedShardBlockCore + slot: ShardSlot + beacon_chain_root: Hash + parent_root: Hash + state_root: Hash + aggregation_bits: Bitvector[TARGET_PERSISTENT_COMMITTEE_SIZE * 2] + total_bytes: uint64 + body: Bytes[SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] + padding: Bytes[32] signatures: ShardBlockSignatures ``` -### `ShardBlockSignatures` +### `ShardBlockHeader` ```python -class ShardBlockSignatures(Container): - attestation_signature: BLSSignature - proposer_signature: BLSSignature -``` - -### `ShardBlockCore` - -```python -class ShardBlockCore(Container): +class ShardBlockHeader(Container): slot: ShardSlot beacon_chain_root: Hash parent_root: Hash - data_root: Hash state_root: Hash + aggregation_bits: Bitvector[TARGET_PERSISTENT_COMMITTEE_SIZE * 2] total_bytes: uint64 - attester_bitfield: Bitvector[TARGET_PERSISTENT_COMMITTEE_SIZE * 2] -``` - -### `ExtendedShardBlockCore` - -```python -class ExtendedShardBlockCore(Container): - slot: ShardSlot - beacon_chain_root: Hash - parent_root: Hash - data: Bytes[SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] - state_root: Hash - total_bytes: uint64 - attester_bitfield: Bitvector[TARGET_PERSISTENT_COMMITTEE_SIZE * 2] + body_root: Hash + padding: Bytes[32] + signatures: ShardBlockSignatures ``` ### `ShardState` @@ -179,7 +160,7 @@ class ShardState(Container): basefee: Gwei slot: ShardSlot shard: Shard - most_recent_block_core: ShardBlockCore + latest_block_header: ShardBlockHeader receipt_root: Hash total_bytes: uint64 ``` @@ -195,34 +176,67 @@ class 
ShardReceiptDelta(Container): ## Helper functions -### `compute_slot_of_shard_slot` +### Misc + +#### `pad` ```python -def compute_slot_of_shard_slot(slot: ShardSlot) -> Epoch: - return Epoch(slot // SHARD_SLOTS_PER_BEACON_SLOT) +def pad(x: bytes, length: uint64) -> bytes: + assert len(x) <= length + return x + b'\x00' * (length - len(x)) ``` -### `compute_epoch_of_shard_slot` +#### `compute_epoch_of_shard_slot` ```python def compute_epoch_of_shard_slot(slot: ShardSlot) -> Epoch: - return Epoch(slot // SHARD_SLOTS_PER_BEACON_SLOT // SLOTS_PER_EPOCH) + return compute_epoch_of_slot(compute_slot_of_shard_slot(slot)) ``` -### `get_shard_period_start_epoch` +#### `compute_period_start_epoch` ```python -def get_shard_period_start_epoch(epoch: Epoch, lookback: int=0) -> Epoch: +def compute_period_start_epoch(epoch: Epoch, lookback: Epoch=0) -> Epoch: return Epoch(epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD) ``` -### `get_period_committee` +#### `compute_flat_shard_header` + +```python +def compute_flat_shard_header(block: ShardBlock) -> Bytes[SHARD_HEADER_SIZE]: + """ + Return a flat serialisation of the ``block`` header which preserves hash tree root. 
+ """ + return ( + pad(int_to_bytes(block.slot, length=8), 32) + + block.beacon_chain_root + + block.parent_root + + hash_tree_root(block.body) + + block.state_root + + pad(int_to_bytes(block.total_bytes, length=8), 32) + + bytes([sum([block.aggregation_bits[i + j] << j for j in range(8)]) for i in range(0, 256, 8)]) + + block.padding + + pad(block.signatures.attesters, 128) + + pad(block.signatures.proposer, 128) + ) +``` + +#### `compute_crosslink_data_root` + +```python +def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash: + headers = b''.join([compute_flat_shard_header(block) for block in blocks]) + bodies = b''.join([block.body for block in blocks]) + MAX_SIZE = SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK + return hash_tree_root(BytesN[MAX_SIZE](pad(headers + bodies, MAX_SIZE))) +``` + +### State accessors + +#### `get_period_committee` ```python def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]: - """ - Return committee for a period. Used to construct persistent committees. - """ full_committee = compute_committee( indices=get_active_validator_indices(state, epoch), seed=get_seed(state, epoch), @@ -233,19 +247,12 @@ def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> Sequ return full_committee[:TARGET_PERSISTENT_COMMITTEE_SIZE] ``` -### `get_persistent_committee` +#### `get_persistent_committee` ```python -def get_persistent_committee(state: BeaconState, - shard: Shard, - slot: ShardSlot) -> Sequence[ValidatorIndex]: - """ - Return the persistent committee for the given ``shard`` at the given ``slot``. 
- """ - epoch = compute_epoch_of_shard_slot(slot) - - earlier_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=2), shard) - later_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=1), shard) +def get_persistent_committee(state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]: + earlier_committee = get_period_committee(state, compute_period_start_epoch(epoch, lookback=2), shard) + later_committee = get_period_committee(state, compute_period_start_epoch(epoch, lookback=1), shard) # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from # later committee; return a sorted list of the union of the two, deduplicated @@ -255,22 +262,17 @@ def get_persistent_committee(state: BeaconState, )) ``` -### `get_shard_block_proposer_index` +#### `get_shard_proposer_index` ```python -def get_shard_block_proposer_index(state: BeaconState, - shard: Shard, - slot: ShardSlot) -> Optional[ValidatorIndex]: - # Randomly shift persistent committee - persistent_committee = list(get_persistent_committee(state, shard, slot)) - current_epoch = get_current_epoch(state) - - active_indices = [i for i in persistent_committee if is_active_validator(state.validators[i], current_epoch)] - if not any(active_indices): - return None +def get_shard_proposer_index(state: BeaconState, shard: Shard, slot: ShardSlot) -> ValidatorIndex: + epoch = get_current_epoch(state) + persistent_committee = list(get_persistent_committee(state, shard, epoch)) + active_indices = [i for i in persistent_committee if is_active_validator(state.validators[i], epoch)] + assert len(active_indices) > 0 MAX_RANDOM_BYTE = 2**8 - 1 - seed = hash(get_seed(state, current_epoch) + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8)) + seed = hash(get_seed(state, epoch) + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8)) i = 0 while True: candidate_index = active_indices[(slot + i) % 
len(active_indices)] @@ -281,68 +283,7 @@ def get_shard_block_proposer_index(state: BeaconState, i += 1 ``` -### `get_shard_header` - -```python -def get_shard_header(block: ShardBlock) -> ShardBlockHeader: - return ShardBlockHeader( - core=ShardBlockCore( - slot=block.core.slot, - beacon_chain_root=block.core.beacon_chain_root, - parent_root=block.core.parent_root, - data_root=hash_tree_root(block.core.data), - state_root=block.core.state_root, - total_bytes=block.core.total_bytes, - attester_bitfield=block.core.attester_bitfield, - ), - signatures=block.signatures, - ) -``` - -### `pad` - -```python -def pad(x: bytes, length: int) -> bytes: - assert len(x) <= length - return x + b'\x00' * (length - len(x)) -``` - -### `flatten_shard_header` - -```python -def flatten_shard_header(header: ShardBlockHeader) -> Bytes[SHARD_HEADER_SIZE]: - """ - Converts a shard block header into a flat object with the same hash tree root. Used - in the crosslink construction. - """ - committee_size = len(header.core.attester_bitfield) - attester_bits = [header.core.attester_bitfield[i] if i < committee_size else 0 for i in range(256)] - attester_bytes = bytes([sum([attester_bits[i + j] << j for j in range(8)]) for i in range(0, 256, 8)]) - return ( - pad(int_to_bytes(header.core.slot, length=8), 32) + - header.core.beacon_chain_root + - header.core.parent_root + - header.core.data_root + - header.core.state_root + - pad(int_to_bytes(header.core.total_bytes, length=8), 32) + - attester_bytes + - b'\x00' * 32 + - pad(header.signatures.attestation_signature, 128) + - pad(header.signatures.proposer_signature, 128) - ) -``` - -### `compute_crosslink_data_root` - -```python -def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash: - header = b''.join([flatten_shard_header(get_shard_header(block)) for block in blocks]) - footer = b''.join([block.core.data for block in blocks]) - MAX_SIZE = SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * 
MAX_EPOCHS_PER_CROSSLINK - return hash_tree_root(BytesN[MAX_SIZE](pad(header + footer, MAX_SIZE))) -``` - -### `get_default_shard_state` +#### `get_default_shard_state` ```python def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardState: @@ -359,7 +300,7 @@ def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardSta return ShardState( basefee=1, shard=shard, - slot=PHASE_1_FORK_SLOT, + slot=ShardSlot(PHASE_1_FORK_EPOCH * SHARD_SLOTS_PER_EPOCH), earlier_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(earlier_committee))], later_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))], earlier_committee_fees=[Gwei(0) for _ in range(len(earlier_committee))], @@ -367,195 +308,206 @@ def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardSta ) ``` +#### `get_shard_base_reward` + +```python +def get_shard_base_reward(beacon_state: BeaconState) -> Gwei: + total_balance_root = integer_squareroot(get_total_active_balance(beacon_state)) + return Gwei(REWARD_COEFFICIENT_BASE * BASE_REWARD_FACTOR // total_balance_root // BASE_REWARDS_PER_EPOCH) +``` + +### State mutators + +#### `add_reward` + +```python +def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: + epoch = compute_epoch_of_shard_slot(state.slot) + earlier_committee = get_period_committee(beacon_state, compute_period_start_epoch(epoch, lookback=2), state.shard) + later_committee = get_period_committee(beacon_state, compute_period_start_epoch(epoch, lookback=1), state.shard) + if index in earlier_committee: + state.earlier_committee_rewards[earlier_committee.index(index)] += delta + elif index in later_committee: + state.later_committee_rewards[later_committee.index(index)] += delta +``` + +#### `add_fee` + +```python +def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: + epoch = 
compute_epoch_of_shard_slot(state.slot) + earlier_committee = get_period_committee(beacon_state, compute_period_start_epoch(epoch, lookback=2), state.shard) + later_committee = get_period_committee(beacon_state, compute_period_start_epoch(epoch, lookback=1), state.shard) + if index in earlier_committee: + state.earlier_committee_fees[earlier_committee.index(index)] += delta + elif index in later_committee: + state.later_committee_fees[later_committee.index(index)] += delta +``` + +## Shard state transition function + +The post-state corresponding to a pre-state `state`, a beacon state `beacon_state`, and a block `block` is defined as `shard_state_transition(state, beacon_state, block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. + +```python +def shard_state_transition(state: ShardState, + beacon_state: BeaconState, + block: ShardBlock, + validate_state_root: bool=False) -> ShardState: + # Process slots (including those with no blocks) since block + process_shard_slots(state, beacon_state, block.slot) + # Process block + process_shard_block(state, beacon_state, block) + # Validate state root (`validate_state_root == True` in production) + if validate_state_root: + assert block.state_root == hash_tree_root(state) + # Return post-state + return state +``` + +```python +def process_shard_slots(state: ShardState, beacon_state: BeaconState, slot: ShardSlot) -> None: + assert state.slot <= slot + while state.slot < slot: + process_shard_slot(state) + # Process period on the start slot of the next period + if (state.slot + 1) % (SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: + process_shard_period(state) + state.slot += ShardSlot(1) +``` + +```python +def process_shard_slot(state: ShardState, beacon_state: BeaconState, slot: ShardSlot) -> None: + # Cache state root + if state.latest_block_header.state_root == Hash(): + state.latest_block_header.state_root = 
hash_tree_root(state) + # Save state roots in history accumulator + depth = 0 + state_root = hash_tree_root(state) + while state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_VECTOR: + state.history_accumulator[depth] = state_root + depth += 1 +``` + +### Period processing + +```python +def process_shard_period(state: ShardState, beacon_state: BeaconState) -> None: + epoch = compute_epoch_of_shard_slot(state.slot) + earlier_committee = get_period_committee( + beacon_state, + compute_period_start_epoch(epoch, lookback=2), + state.shard, + ) + later_committee = get_period_committee( + beacon_state, + compute_period_start_epoch(epoch, lookback=1), + state.shard, + ) + + state.receipt_root = hash_tree_root(List[ShardReceiptDelta, TARGET_PERSISTENT_COMMITTEE_SIZE]([ + ShardReceiptDelta(validator_index, state.earlier_committee_rewards[i], state.earlier_committee_fees[i]) + for i, validator_index in enumerate(earlier_committee) + ])) + state.earlier_committee_rewards = state.later_committee_rewards + state.earlier_committee_fees = state.later_committee_fees + state.later_committee_rewards = [REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))] + state.later_committee_fees = [Gwei(0) for _ in range(len(later_committee))] +``` + +### Block processing + +```python +def process_shard_block(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: + process_shard_block_header(state, beacon_state, block) + process_shard_attestations(state, beacon_state, block + process_shard_block_data_fees(state, beacon_state, block) +``` + +#### Block header + +```python +def process_shard_block_header(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: + # Verify that the slots match + assert block.slot == state.slot + # Verify that the parent matches + if block.parent_root != Hash(): + assert block.parent_root == signing_root(state.latest_block_header) + # Save current block as the new latest block + state.latest_block_header = 
ShardBlockHeader( + slot=block.slot, + beacon_chain_root=block.beacon_chain_root, + parent_root=block.parent_root, + # `state_root` is zeroed and overwritten in the next `process_shard_slot` call + aggregation_bits=block.aggregation_bits, + total_bytes=block.total_bytes, + body_root=hash_tree_root(block.body), + # `signatures` is zeroed + ) + # Verify proposer signature + proposer_index = get_shard_proposer_index(beacon_state, state.shard, block.slot) + pubkey = beacon_state.validators[proposer_index].pubkey + domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot)) + assert bls_verify(pubkey, signing_root(block), block.signatures.proposer, domain) + # Verify total bytes count + state.total_bytes += len(block.body) + assert block.total_bytes == state.total_bytes +``` + +#### Attestations + +```python +def process_shard_attestations(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: + persistent_committee = get_persistent_committee(beacon_state, state.shard, block.slot) + pubkeys = [] + attestation_count = 0 + base_reward = get_shard_base_reward(beacon_state) + for i, validator_index in enumerate(persistent_committee): + if block.aggregation_bits[i]: + pubkeys.append(beacon_state.validators[validator_index].pubkey) + add_reward(state, beacon_state, validator_index, base_reward) + attestation_count += 1 + for i in range(len(persistent_committee), TARGET_PERSISTENT_COMMITTEE_SIZE): + assert block.aggregation_bits[i] is False or block.aggregation_bits[i] == 0 # TODO: Fix Bitvector + # Verify aggregate signature + domain = get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.slot)) + assert bls_verify(bls_aggregate_pubkeys(pubkeys), block.parent_root, block.signatures.attesters, domain) + # Proposer micro-rewards + add_reward(state, beacon_state, proposer_index, attestation_count * get_shard_base_reward(beacon_state) // PROPOSER_REWARD_QUOTIENT) +``` + +#### Block data fees + 
+```python +def process_shard_block_data_fees(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: + base_reward = get_shard_base_reward(beacon_state) + add_fee(state, beacon_state, proposer_index, state.basefee * len(block.body) // SHARD_BLOCK_SIZE_LIMIT) + QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR + if len(block.body) > SHARD_BLOCK_SIZE_TARGET: + state.basefee += Gwei(max(1, state.basefee * (len(block.body) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) + elif len(block.body) < SHARD_BLOCK_SIZE_TARGET: + state.basefee -= Gwei(max(1, state.basefee * (len(block.body) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) + state.basefee = Gwei(max(1, min( EFFECTIVE_BALANCE_INCREMENT // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH, + state.basefee, + ))) +``` + ## Object validity ### Shard block validation: preliminary Accept a shard block `block` only if all of the following are correct: -* Either `block.core.parent_root == Hash()` or a block `parent` such that `hash_tree_root(parent.core) == block.core.parent_root` has already been accepted. -* `block.core.beacon_chain_root == get_block_root(head_beacon_state, compute_epoch_of_shard_slot(parent.core.slot))` where `head_beacon_state` is the current beacon chain head state. Alternatively phrased, a beacon chain block `beacon_ref` such that `signing_root(beacon_ref) == block.core.beacon_chain_root` has already been accepted and is part of the canonical chain, and no block with slot `beacon_ref.slot < slot <= compute_start_slot_of_epoch(compute_epoch_of_shard_slot(parent.core.slot))` is part of the canonical chain. -* Let `beacon_state` be the state where `beacon_ref.state_root == hash_tree_root(beacon_state)`. Let `prev_state` be the post-state of the `parent` if the `parent` exists, otherwise let it be `get_default_shard_state(beacon_state, shard)` (defined below). 
`block.core.state_root` must equal the `hash_tree_root` of the state after applying `shard_state_transition(prev_state, beacon_state, block)`. +* Either `block.parent_root == Hash()` or a block `parent` such that `signing_root(parent) == block.parent_root` has already been accepted. +* `block.beacon_chain_root == get_block_root(head_beacon_state, compute_epoch_of_shard_slot(parent.slot))` where `head_beacon_state` is the current beacon chain head state. Alternatively phrased, a beacon chain block `beacon_ref` such that `signing_root(beacon_ref) == block.beacon_chain_root` has already been accepted and is part of the canonical chain, and no block with slot `beacon_ref.slot < slot <= compute_start_slot_of_epoch(compute_epoch_of_shard_slot(parent.slot))` is part of the canonical chain. +* Let `beacon_state` be the state where `beacon_ref.state_root == hash_tree_root(beacon_state)`. Let `prev_state` be the post-state of the `parent` if the `parent` exists, otherwise let it be `get_default_shard_state(beacon_state, shard)` (defined below). `block.state_root` must equal the `hash_tree_root` of the state after applying `shard_state_transition(prev_state, beacon_state, block)`. Note that these acceptance conditions depend on the canonical beacon chain; when the canonical beacon chain reorganizes, the eligibility of shard blocks should be re-evaluated. 
-### Shard state transition function helpers - -```python -def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: int) -> None: - epoch = compute_epoch_of_shard_slot(state.slot) - earlier_committee = get_period_committee( - beacon_state, - get_shard_period_start_epoch(epoch, lookback=2), - state.shard, - ) - later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard) - if index in earlier_committee: - state.earlier_committee_rewards[earlier_committee.index(index)] += delta - elif index in later_committee: - state.later_committee_rewards[later_committee.index(index)] += delta - else: - raise Exception("Should never be here") -``` - -```python -def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: int) -> None: - epoch = compute_epoch_of_shard_slot(state.slot) - earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard) - later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard) - if index in earlier_committee: - state.earlier_committee_fees[earlier_committee.index(index)] += delta - elif index in later_committee: - state.later_committee_fees[later_committee.index(index)] += delta - else: - raise Exception("Should never be here") -``` - -### Shard state transition function - -```python -def shard_state_transition(state: ShardState, - beacon_state: BeaconState, - block: ShardBlock, - validate_state_root: bool=False) -> None: - assert block.core.slot > state.slot - for slot in range(state.slot, block.core.slot): - shard_slot_transition(state, beacon_state) - shard_block_transition(state, beacon_state, block, validate_state_root=validate_state_root) -``` - -```python -def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None: - # Correct saved state root - if state.most_recent_block_core.state_root == Hash(): - 
state.most_recent_block_core.state_root = hash_tree_root(state) - - # Save states in history accumulator - depth = 0 - h = hash_tree_root(state) - while state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_VECTOR: - state.history_accumulator[depth] = h - depth += 1 - - # Period transitions - if (state.slot + 1) % (SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: - epoch = compute_epoch_of_shard_slot(state.slot) - earlier_committee = get_period_committee( - beacon_state, - get_shard_period_start_epoch(epoch, lookback=2), - state.shard, - ) - later_committee = get_period_committee( - beacon_state, - get_shard_period_start_epoch(epoch, lookback=1), - state.shard, - ) - state.receipt_root = hash_tree_root(List[ShardReceiptDelta, PLACEHOLDER]([ - ShardReceiptDelta( - index=validator_index, - reward_coefficient=state.earlier_committee_rewards[i], - block_fee=state.earlier_committee_fees[i], - ) - for i, validator_index in enumerate(earlier_committee) - ])) - state.earlier_committee_rewards = state.later_committee_rewards - state.earlier_committee_fees = state.later_committee_fees - state.later_committee_rewards = [REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))], - state.later_committee_fees = [Gwei(0) for _ in range(len(later_committee))], - else: - state.receipt_root = Hash() - state.slot += ShardSlot(1) -``` - -```python -def shard_block_transition(state: ShardState, - beacon_state: BeaconState, - block: ShardBlock, - validate_state_root: bool) -> None: - # Check slot number - assert block.core.slot == state.slot - - # Check parent block - if block.core.parent_root != Hash(): - assert block.core.parent_root == hash_tree_root(state.most_recent_block_core) - - # Calculate base reward - total_balance = get_total_active_balance(beacon_state) - base_reward = ( - REWARD_COEFFICIENT_BASE * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH - ) - # Check attestations - attester_committee = 
get_persistent_committee(beacon_state, state.shard, block.core.slot) - pubkeys = [] - attestations = 0 - - for i, validator_index in enumerate(attester_committee): - if block.core.attester_bitfield[i]: - pubkeys.append(beacon_state.validators[validator_index].pubkey) - add_reward(state, beacon_state, validator_index, base_reward) - attestations += 1 - - for i in range(len(attester_committee), TARGET_PERSISTENT_COMMITTEE_SIZE): - assert block.core.attester_bitfield[i] is False or block.core.attester_bitfield[i] == 0 # TODO: FIX Bitvector - - assert bls_verify( - pubkey=bls_aggregate_pubkeys(pubkeys), - message_hash=block.core.parent_root, - signature=block.signatures.attestation_signature, - domain=get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.core.slot)) - ) - - # Check proposer - proposer_index = get_shard_block_proposer_index(beacon_state, state.shard, block.core.slot) - assert proposer_index is not None - add_reward(state, beacon_state, proposer_index, attestations * base_reward // PROPOSER_REWARD_QUOTIENT) - assert bls_verify( - pubkey=beacon_state.validators[proposer_index].pubkey, - message_hash=hash_tree_root(block.core), - signature=block.signatures.proposer_signature, - domain=get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.core.slot)), - ) - - # Process and update block data fees - add_fee(state, beacon_state, proposer_index, state.basefee * len(block.core.data) // SHARD_BLOCK_SIZE_LIMIT) - QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR - if len(block.core.data) > SHARD_BLOCK_SIZE_TARGET: - state.basefee += Gwei(max(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) - elif len(block.core.data) < SHARD_BLOCK_SIZE_TARGET: - state.basefee -= Gwei(max(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) - state.basefee = Gwei(max( - 1, - min( - EFFECTIVE_BALANCE_INCREMENT // EPOCHS_PER_SHARD_PERIOD // 
SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH, - state.basefee, - ) - )) - - # Check total bytes - state.total_bytes += len(block.core.data) - assert block.core.total_bytes == state.total_bytes - - # Update in-state block header - state.most_recent_block_core = ShardBlockCore( - slot=block.core.slot, - beacon_chain_root=block.core.beacon_chain_root, - parent_root=block.core.parent_root, - data_root=hash_tree_root(block.core.data), - state_root=Hash(), - total_bytes=block.core.total_bytes, - attester_bitfield=block.core.attester_bitfield, - ) - - # Check state root - if validate_state_root: - assert block.core.state_root == hash_tree_root(state) -``` - ### Beacon attestations Let: -- `pre_state` is the `ShardState` before processing any blocks +- `pre_state` be the `ShardState` before processing any blocks - `shard_blocks_or_state_roots` be the `Union[ShardBlock, Hash]` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `pre_state.shard` at slot `slot` if a block exists, or the post-state-root of processing state up to and including that slot if a block does not exist. 
- `beacon_state` be the canonical `BeaconState` - `valid_attestations` be the set of valid `Attestation` objects, recursively defined @@ -594,12 +546,9 @@ def is_valid_beacon_attestation(pre_state: ShardState, blocks.append(shard_blocks_or_state_roots[slot]) else: blocks.append(ShardBlock( - core=ExtendedShardBlockCore( - slot=slot, - state_root=shard_blocks_or_state_roots[slot], - total_bytes=pre_state.total_bytes, - ), - signatures=ShardBlockSignatures(), + slot=slot, + state_root=shard_blocks_or_state_roots[slot], + total_bytes=pre_state.total_bytes, )) assert candidate.data.crosslink.data_root == compute_crosslink_data_root(blocks) From d0b4dc2b0164c626df766b25ad6ecddd57de9a18 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 27 Aug 2019 09:16:02 -0600 Subject: [PATCH 135/250] remove bad length checks from process attestation; ensure committee count and committee size not equal --- specs/core/0_beacon-chain.md | 5 ----- test_libs/pyspec/eth2spec/test/context.py | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 01a6ca352..4970a3b00 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1615,11 +1615,6 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: proposer_index=get_beacon_proposer_index(state), ) - # Check bitlist lengths - committee_size = get_committee_count(state, attestation.data.target.epoch) - assert len(attestation.aggregation_bits) == committee_size - assert len(attestation.custody_bits) == committee_size - if data.target.epoch == get_current_epoch(state): assert data.source == state.current_justified_checkpoint parent_crosslink = state.current_crosslinks[data.crosslink.shard] diff --git a/test_libs/pyspec/eth2spec/test/context.py b/test_libs/pyspec/eth2spec/test/context.py index 5cc42c510..9f7fc41d4 100644 --- a/test_libs/pyspec/eth2spec/test/context.py +++ b/test_libs/pyspec/eth2spec/test/context.py @@ 
-10,7 +10,7 @@ from .utils import vector_test, with_meta_tags def with_state(fn): def entry(*args, **kw): try: - kw['state'] = create_genesis_state(spec=kw['spec'], num_validators=spec_phase0.SLOTS_PER_EPOCH * 8) + kw['state'] = create_genesis_state(spec=kw['spec'], num_validators=spec_phase0.SLOTS_PER_EPOCH * 9) except KeyError: raise TypeError('Spec decorator must come within state decorator to inject spec into state.') return fn(*args, **kw) From 82faaf101d50f33206094a99e60a3efa86662f9b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 27 Aug 2019 09:38:20 -0600 Subject: [PATCH 136/250] fix tests --- test_libs/pyspec/eth2spec/test/context.py | 2 +- .../block_processing/test_process_bit_challenge.py | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/test_libs/pyspec/eth2spec/test/context.py b/test_libs/pyspec/eth2spec/test/context.py index 9f7fc41d4..80edaba9b 100644 --- a/test_libs/pyspec/eth2spec/test/context.py +++ b/test_libs/pyspec/eth2spec/test/context.py @@ -10,7 +10,7 @@ from .utils import vector_test, with_meta_tags def with_state(fn): def entry(*args, **kw): try: - kw['state'] = create_genesis_state(spec=kw['spec'], num_validators=spec_phase0.SLOTS_PER_EPOCH * 9) + kw['state'] = create_genesis_state(spec=kw['spec'], num_validators=spec_phase0.SLOTS_PER_EPOCH * 10) except KeyError: raise TypeError('Spec decorator must come within state decorator to inject spec into state.') return fn(*args, **kw) diff --git a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_bit_challenge.py b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_bit_challenge.py index e4880555a..ae6ff258c 100644 --- a/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_bit_challenge.py +++ b/test_libs/pyspec/eth2spec/test/phase_1/block_processing/test_process_bit_challenge.py @@ -212,13 +212,16 @@ def test_max_reveal_lateness_1(spec, state): challenge = get_valid_bit_challenge(spec, state, attestation) 
responder_index = challenge.responder_index + target_epoch = attestation.data.target.epoch state.validators[responder_index].max_reveal_lateness = 3 - for i in range(spec.get_randao_epoch_for_custody_period( - spec.get_custody_period_for_validator(state, responder_index), + latest_reveal_epoch = spec.get_randao_epoch_for_custody_period( + spec.get_custody_period_for_validator(state, responder_index, target_epoch), responder_index - ) + 2 * spec.EPOCHS_PER_CUSTODY_PERIOD + state.validators[responder_index].max_reveal_lateness - 2): + ) + 2 * spec.EPOCHS_PER_CUSTODY_PERIOD + state.validators[responder_index].max_reveal_lateness + + while spec.get_current_epoch(state) < latest_reveal_epoch - 2: next_epoch(spec, state) apply_empty_block(spec, state) From 78a0e15e9c5f0087ae51e482a05c7e78c844bce0 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 27 Aug 2019 10:36:25 -0600 Subject: [PATCH 137/250] explicitly cast to bool --- specs/light_client/sync_protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index feef1fcea..207e0e63e 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -86,7 +86,7 @@ def unpack_compact_validator(compact_validator: CompactValidator) -> Tuple[Valid """ return ( ValidatorIndex(compact_validator >> 16), - (compact_validator >> 15) % 2 == 0, + bool((compact_validator >> 15) % 2), uint64(compact_validator & (2**15 - 1)), ) ``` From c8c47e39865567e4f9eb7a180bc32e99192f34e8 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Wed, 28 Aug 2019 10:22:34 +0100 Subject: [PATCH 138/250] Abstract away compute_proposer_index for phase 1 --- specs/core/0_beacon-chain.md | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f27d016cf..7398da312 100644 --- a/specs/core/0_beacon-chain.md +++ 
b/specs/core/0_beacon-chain.md @@ -68,6 +68,7 @@ - [`is_valid_merkle_branch`](#is_valid_merkle_branch) - [Misc](#misc-1) - [`compute_shuffled_index`](#compute_shuffled_index) + - [`compute_proposer_index`](#compute_proposer_index) - [`compute_committee`](#compute_committee) - [`compute_epoch_of_slot`](#compute_epoch_of_slot) - [`compute_start_slot_of_epoch`](#compute_start_slot_of_epoch) @@ -729,6 +730,25 @@ def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Has return ValidatorIndex(index) ``` +#### `compute_proposer_index` + +```python +def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Hash) -> ValidatorIndex: + """ + Return from ``indices`` a random index sampled by effective balance. + """ + assert len(indices) > 0 + MAX_RANDOM_BYTE = 2**8 - 1 + i = 0 + while True: + candidate_index = indices[compute_shuffled_index(ValidatorIndex(i % len(indices)), len(indices), seed)] + random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32] + effective_balance = state.validators[candidate_index].effective_balance + if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: + return ValidatorIndex(candidate_index) + i += 1 +``` + #### `compute_committee` ```python @@ -934,17 +954,9 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: Return the beacon proposer index at the current slot. 
""" epoch = get_current_epoch(state) - indices = get_active_validator_indices(state, epoch) seed = hash(get_seed(state, epoch) + int_to_bytes(state.slot, length=8)) - MAX_RANDOM_BYTE = 2**8 - 1 - i = 0 - while True: - candidate_index = indices[compute_shuffled_index(ValidatorIndex(i % len(indices)), len(indices), seed)] - random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32] - effective_balance = state.validators[candidate_index].effective_balance - if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: - return ValidatorIndex(candidate_index) - i += 1 + indices = get_active_validator_indices(state, epoch) + return compute_proposer_index(state, indices, seed) ``` #### `get_attestation_data_slot` From d7e628e08fefbd00a88cb204fa554548b96fbd23 Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 28 Aug 2019 22:57:24 +0100 Subject: [PATCH 139/250] WIP! --- specs/core/1_shard-data-chains.md | 550 +++++++++++++----------------- 1 file changed, 245 insertions(+), 305 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 9335c1803..367091c91 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -19,43 +19,43 @@ - [Signature domain types](#signature-domain-types) - [Containers](#containers) - [`ShardBlockSignatures`](#shardblocksignatures) + - [`ShardBlockData`](#shardblockdata) - [`ShardBlock`](#shardblock) + - [`ShardBlockHeaderData`](#shardblockheaderdata) - [`ShardBlockHeader`](#shardblockheader) - [`ShardState`](#shardstate) - - [`ShardReceiptDelta`](#shardreceiptdelta) + - [`ShardReceipt`](#shardreceipt) + - [`ShardCheckpoint`](#shardcheckpoint) - [Helper functions](#helper-functions) - [Misc](#misc-1) - - [`pad`](#pad) - - [`compute_slot_of_shard_slot`](#compute_slot_of_shard_slot) + - [`compute_padded_data`](#compute_padded_data) - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) - - 
[`compute_period_start_epoch`](#compute_period_start_epoch) + - [`compute_shard_period_start_epoch`](#compute_shard_period_start_epoch) - [`compute_flat_shard_header`](#compute_flat_shard_header) - [`compute_crosslink_data_root`](#compute_crosslink_data_root) - - [State accessors](#state-accessors) + - [Beacon state accessors](#beacon-state-accessors) - [`get_period_committee`](#get_period_committee) - - [`get_persistent_committee`](#get_persistent_committee) + - [`get_shard_committee`](#get_shard_committee) - [`get_shard_proposer_index`](#get_shard_proposer_index) - - [`get_default_shard_state`](#get_default_shard_state) - - [`get_shard_base_reward`](#get_shard_base_reward) - - [State mutators](#state-mutators) - - [`add_fee`](#add_fee) + - [Shard state mutators](#shard-state-mutators) - [`add_reward`](#add_reward) + - [`add_fee`](#add_fee) + - [Genesis](#genesis) + - [`get_genesis_shard_state`](#get_genesis_shard_state) + - [`get_genesis_shard_block`](#get_genesis_shard_block) - [Shard state transition function](#shard-state-transition-function) - [Period processing](#period-processing) - [Block processing](#block-processing) - [Block header](#block-header) - [Attestations](#attestations) - - [Block data fees](#block-data-fees) - - [Object validity](#object-validity) - - [Shard block validation: preliminary](#shard-block-validation-preliminary) - - [Beacon attestations](#beacon-attestations) + - [Block size fee](#block-size-fee) - [Shard fork choice rule](#shard-fork-choice-rule) ## Introduction -This document describes the shard data layer and the shard fork choice rule in Phase 1 of Ethereum 2.0. +This document describes the shard transition function (data layer only) and the shard fork choice rule as part of Phase 1 of Ethereum 2.0. 
## Custom types @@ -69,8 +69,8 @@ This document describes the shard data layer and the shard fork choice rule in P | Name | Value | | - | - | -| `SHARD_SLOTS_PER_EPOCH` | `2**7` (= 128) | -| `TARGET_PERSISTENT_COMMITTEE_SIZE` | `2**7` (= 128) | +| `MIN_BLOCK_SIZE_PRICE` | `2**0` (= 1) | +| `MAX_PERIOD_COMMITTEE_SIZE` | `2**7` (= 128) | | `SHARD_HEADER_SIZE` | `2**9` (= 512) | | `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) | | `SHARD_BLOCK_SIZE_LIMIT` | `2**16` (= 65,536) | @@ -79,13 +79,13 @@ This document describes the shard data layer and the shard fork choice rule in P | Name | Value | | - | - | -| `PHASE_1_FORK_EPOCH` | **TBD** | +| `SHARD_GENESIS_EPOCH` | **TBD** | ### Time parameters | Name | Value | Unit | Duration | | - | - | :-: | :-: | -| `CROSSLINK_LOOKBACK` | `2**0` (= 1) | epochs | 6.4 minutes | +| `SHARD_SLOTS_PER_EPOCH` | `2**7` (= 128) | shard slots | 6.4 minutes | | `EPOCHS_PER_SHARD_PERIOD` | `2**8` (= 256) | epochs | ~27 hours | ### State list lengths @@ -98,8 +98,7 @@ This document describes the shard data layer and the shard fork choice rule in P | Name | Value | | - | - | -| `BASEFEE_ADJUSTMENT_FACTOR` | `2**3` (= 8) | -| `REWARD_COEFFICIENT_BASE` | `2**20` (= 1,048,576) | +| `BLOCK_SIZE_PRICE_QUOTIENT` | `2**3` (= 8) | ### Signature domain types @@ -118,33 +117,45 @@ class ShardBlockSignatures(Container): proposer: BLSSignature ``` +### `ShardBlockData` + +```python +class ShardBlockData(Container): + slot: ShardSlot + beacon_block_root: Hash + parent_root: Hash + state_root: Hash + aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] + block_size_sum: uint64 + body: List[byte, SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] +``` + ### `ShardBlock` ```python class ShardBlock(Container): + data: ShardBlockData + signatures: ShardBlockSignatures +``` + +### `ShardBlockHeaderData` + +```python +class ShardBlockHeaderData(Container): slot: ShardSlot - beacon_chain_root: Hash + beacon_block_root: Hash parent_root: Hash state_root: Hash - 
aggregation_bits: Bitvector[TARGET_PERSISTENT_COMMITTEE_SIZE * 2] - total_bytes: uint64 - body: Bytes[SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] - padding: Bytes[32] - signatures: ShardBlockSignatures + aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] + block_size_sum: uint64 + body_root: Hash ``` ### `ShardBlockHeader` ```python class ShardBlockHeader(Container): - slot: ShardSlot - beacon_chain_root: Hash - parent_root: Hash - state_root: Hash - aggregation_bits: Bitvector[TARGET_PERSISTENT_COMMITTEE_SIZE * 2] - total_bytes: uint64 - body_root: Hash - padding: Bytes[32] + data: ShardBlockHeaderData signatures: ShardBlockSignatures ``` @@ -152,51 +163,60 @@ class ShardBlockHeader(Container): ```python class ShardState(Container): - history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_VECTOR] - earlier_committee_rewards: List[uint64, TARGET_PERSISTENT_COMMITTEE_SIZE] - later_committee_rewards: List[uint64, TARGET_PERSISTENT_COMMITTEE_SIZE] - earlier_committee_fees: List[Gwei, TARGET_PERSISTENT_COMMITTEE_SIZE] - later_committee_fees: List[Gwei, TARGET_PERSISTENT_COMMITTEE_SIZE] - basefee: Gwei - slot: ShardSlot shard: Shard - latest_block_header: ShardBlockHeader + slot: ShardSlot + history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_VECTOR] + latest_block_header_data: ShardBlockHeader receipt_root: Hash - total_bytes: uint64 + block_size_sum: uint64 + # Rewards and fees + block_size_price: Gwei + older_committee_rewards: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] + newer_committee_rewards: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] + older_committee_fees: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] + newer_committee_fees: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] ``` -### `ShardReceiptDelta` +### `ShardReceipt` ```python -class ShardReceiptDelta(Container): +class ShardReceipt(Container): index: ValidatorIndex - reward_coefficient: uint64 - block_fee: Gwei + rewards: Gwei + fees: Gwei +``` + +### `ShardCheckpoint` + +```python +class ShardCheckpoint(Container): + slot: 
ShardSlot + parent_root: Hash ``` ## Helper functions ### Misc -#### `pad` +#### `compute_padded_data` ```python -def pad(x: bytes, length: uint64) -> bytes: - assert len(x) <= length - return x + b'\x00' * (length - len(x)) +def compute_padded_data(data: bytes, length: uint64) -> bytes: + assert len(data) <= length + return data + b'\x00' * (length - len(data)) ``` #### `compute_epoch_of_shard_slot` ```python def compute_epoch_of_shard_slot(slot: ShardSlot) -> Epoch: - return compute_epoch_of_slot(compute_slot_of_shard_slot(slot)) + return compute_epoch_of_slot(slot // SHARD_SLOTS_PER_EPOCH) ``` -#### `compute_period_start_epoch` +#### `compute_shard_period_start_epoch` ```python -def compute_period_start_epoch(epoch: Epoch, lookback: Epoch=0) -> Epoch: +def compute_shard_period_start_epoch(epoch: Epoch, lookback: uint64) -> Epoch: return Epoch(epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD) ``` @@ -205,19 +225,22 @@ def compute_period_start_epoch(epoch: Epoch, lookback: Epoch=0) -> Epoch: ```python def compute_flat_shard_header(block: ShardBlock) -> Bytes[SHARD_HEADER_SIZE]: """ - Return a flat serialisation of the ``block`` header which preserves hash tree root. + Return a flat serialisation of the ``block`` header, preserving hash tree root. 
""" + data = block.data return ( - pad(int_to_bytes(block.slot, length=8), 32) + - block.beacon_chain_root + - block.parent_root + - hash_tree_root(block.body) + - block.state_root + - pad(int_to_bytes(block.total_bytes, length=8), 32) + - bytes([sum([block.aggregation_bits[i + j] << j for j in range(8)]) for i in range(0, 256, 8)]) + - block.padding + - pad(block.signatures.attesters, 128) + - pad(block.signatures.proposer, 128) + # Left half of the hash tree + compute_padded_data(int_to_bytes(data.slot, length=8), 32) + + data.beacon_block_root + + data.parent_root + + hash_tree_root(data.body) + + data.state_root + + compute_padded_data(int_to_bytes(data.block_size_sum, length=8), 32) + + bytes([sum([data.aggregation_bits[i + j] << j for j in range(8)]) for i in range(0, 256, 8)]) + + Bytes32() + # Padding + # Right half of the hash tree + compute_padded_data(block.signatures.attesters, 128) + + compute_padded_data(block.signatures.proposer, 128) ) ``` @@ -226,40 +249,32 @@ def compute_flat_shard_header(block: ShardBlock) -> Bytes[SHARD_HEADER_SIZE]: ```python def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash: headers = b''.join([compute_flat_shard_header(block) for block in blocks]) - bodies = b''.join([block.body for block in blocks]) - MAX_SIZE = SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK - return hash_tree_root(BytesN[MAX_SIZE](pad(headers + bodies, MAX_SIZE))) + bodies = b''.join([block.data.body for block in blocks]) + MAX_SIZE = MAX_EPOCHS_PER_CROSSLINK * SHARD_SLOTS_PER_EPOCH * SHARD_BLOCK_SIZE_LIMIT + return hash_tree_root(BytesN[MAX_SIZE](compute_padded_data(headers + bodies, MAX_SIZE))) ``` -### State accessors +### Beacon state accessors #### `get_period_committee` ```python -def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]: - full_committee = compute_committee( - indices=get_active_validator_indices(state, epoch), - seed=get_seed(state, epoch), - 
index=shard, - count=SHARD_COUNT, - ) - - return full_committee[:TARGET_PERSISTENT_COMMITTEE_SIZE] +def get_period_committee(state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]: + active_validator_indices = get_active_validator_indices(state, epoch) + seed = get_seed(state, epoch) + return compute_committee(active_validator_indices, seed, shard, SHARD_COUNT)[:MAX_PERIOD_COMMITTEE_SIZE] ``` -#### `get_persistent_committee` +#### `get_shard_committee` ```python -def get_persistent_committee(state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]: - earlier_committee = get_period_committee(state, compute_period_start_epoch(epoch, lookback=2), shard) - later_committee = get_period_committee(state, compute_period_start_epoch(epoch, lookback=1), shard) - - # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from - # later committee; return a sorted list of the union of the two, deduplicated - return sorted(set( - [i for i in earlier_committee if epoch % EPOCHS_PER_SHARD_PERIOD < i % EPOCHS_PER_SHARD_PERIOD] - + [i for i in later_committee if epoch % EPOCHS_PER_SHARD_PERIOD >= i % EPOCHS_PER_SHARD_PERIOD] - )) +def get_shard_committee(state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]: + older_committee = get_period_committee(state, shard, compute_shard_period_start_epoch(epoch, 2)) + newer_committee = get_period_committee(state, shard, compute_shard_period_start_epoch(epoch, 1)) + # Every epoch cycle out validators from the older committee and cycle in validators from the newer committee + older_subcommittee = [i for i in older_committee if i % EPOCHS_PER_SHARD_PERIOD > epoch % EPOCHS_PER_SHARD_PERIOD] + newer_subcommittee = [i for i in newer_committee if i % EPOCHS_PER_SHARD_PERIOD <= epoch % EPOCHS_PER_SHARD_PERIOD] + return older_subcommittee + newer_subcommittee ``` #### `get_shard_proposer_index` @@ -267,294 +282,219 @@ def get_persistent_committee(state: 
BeaconState, shard: Shard, epoch: Epoch) -> ```python def get_shard_proposer_index(state: BeaconState, shard: Shard, slot: ShardSlot) -> ValidatorIndex: epoch = get_current_epoch(state) - persistent_committee = list(get_persistent_committee(state, shard, epoch)) - active_indices = [i for i in persistent_committee if is_active_validator(state.validators[i], epoch)] - assert len(active_indices) > 0 - - MAX_RANDOM_BYTE = 2**8 - 1 - seed = hash(get_seed(state, epoch) + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8)) - i = 0 - while True: - candidate_index = active_indices[(slot + i) % len(active_indices)] - random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32] - effective_balance = state.validators[candidate_index].effective_balance - if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: - return ValidatorIndex(candidate_index) - i += 1 + active_indices = [i for i in get_shard_committee(state, shard, epoch) if is_active_validator(state.validators[i], epoch)] + seed = hash(get_seed(state, epoch) + int_to_bytes(slot, length=8) + int_to_bytes(shard, length=8)) + compute_proposer_index(state, active_indices, seed) ``` -#### `get_default_shard_state` - -```python -def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardState: - earlier_committee = get_period_committee( - beacon_state, - Epoch(PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD * 2), - shard, - ) - later_committee = get_period_committee( - beacon_state, - Epoch(PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD), - shard, - ) - return ShardState( - basefee=1, - shard=shard, - slot=ShardSlot(PHASE_1_FORK_EPOCH * SHARD_SLOTS_PER_EPOCH), - earlier_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(earlier_committee))], - later_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))], - earlier_committee_fees=[Gwei(0) for _ in range(len(earlier_committee))], - later_committee_fees=[Gwei(0) for _ in 
range(len(later_committee))], - ) -``` - -#### `get_shard_base_reward` - -```python -def get_shard_base_reward(beacon_state: BeaconState) -> Gwei: - total_balance_root = integer_squareroot(get_total_active_balance(beacon_state)) - return Gwei(REWARD_COEFFICIENT_BASE * BASE_REWARD_FACTOR // total_balance_root // BASE_REWARDS_PER_EPOCH) -``` - -### State mutators +### Shard state mutators #### `add_reward` ```python -def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: +def add_reward(state: BeaconState, shard_state: ShardState, index: ValidatorIndex, delta: Gwei) -> None: epoch = compute_epoch_of_shard_slot(state.slot) - earlier_committee = get_period_committee(beacon_state, compute_period_start_epoch(epoch, lookback=2), state.shard) - later_committee = get_period_committee(beacon_state, compute_period_start_epoch(epoch, lookback=1), state.shard) - if index in earlier_committee: - state.earlier_committee_rewards[earlier_committee.index(index)] += delta - elif index in later_committee: - state.later_committee_rewards[later_committee.index(index)] += delta + older_committee = get_period_committee(state, shard_state.shard, compute_shard_period_start_epoch(epoch, 2)) + newer_committee = get_period_committee(state, shard_state.shard, compute_shard_period_start_epoch(epoch, 1)) + if index in older_committee: + shard_state.older_committee_rewards[older_committee.index(index)] += delta + elif index in newer_committee: + shard_state.newer_committee_rewards[newer_committee.index(index)] += delta ``` #### `add_fee` ```python -def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: +def add_fee(state: BeaconState, shard_state: ShardState, index: ValidatorIndex, delta: Gwei) -> None: epoch = compute_epoch_of_shard_slot(state.slot) - earlier_committee = get_period_committee(beacon_state, compute_period_start_epoch(epoch, lookback=2), state.shard) - later_committee = 
get_period_committee(beacon_state, compute_period_start_epoch(epoch, lookback=1), state.shard) - if index in earlier_committee: - state.earlier_committee_fees[earlier_committee.index(index)] += delta - elif index in later_committee: - state.later_committee_fees[later_committee.index(index)] += delta + older_committee = get_period_committee(state, shard_state.shard, compute_shard_period_start_epoch(epoch, 2)) + newer_committee = get_period_committee(state, shard_state.shard, compute_shard_period_start_epoch(epoch, 1)) + if index in older_committee: + shard_state.older_committee_fees[older_committee.index(index)] += delta + elif index in newer_committee: + shard_state.newer_committee_fees[newer_committee.index(index)] += delta +``` + +## Genesis + +### `get_genesis_shard_state` + +```python +def get_genesis_shard_state(state: BeaconState, shard: Shard) -> ShardState: + older_committee = get_period_committee(state, shard, compute_shard_period_start_epoch(SHARD_GENESIS_EPOCH, 2)) + newer_committee = get_period_committee(state, shard, compute_shard_period_start_epoch(SHARD_GENESIS_EPOCH, 1)) + return ShardState( + shard=shard, + slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), + block_size_price=MIN_BLOCK_SIZE_PRICE, + older_committee_rewards=[Gwei(0) for _ in range(len(older_committee))], + newer_committee_rewards=[Gwei(0) for _ in range(len(newer_committee))], + older_committee_fees=[Gwei(0) for _ in range(len(older_committee))], + newer_committee_fees=[Gwei(0) for _ in range(len(newer_committee))], + ) +``` + +### `get_genesis_shard_block` + +```python +def get_genesis_shard_block(state: BeaconState, shard: Shard) -> ShardBlock: + genesis_state = get_genesis_shard_state(state, shard) + return ShardBlock(data=ShardBlockData( + shard=shard, + slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), + state_root=hash_tree_root(genesis_state), + )) ``` ## Shard state transition function -The post-state corresponding to a pre-state `state`, a beacon state 
`beacon_state`, and a block `block` is defined as `shard_state_transition(state, beacon_state, block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. - ```python -def shard_state_transition(state: ShardState, - beacon_state: BeaconState, +def shard_state_transition(state: BeaconState, + shard_state: ShardState, block: ShardBlock, validate_state_root: bool=False) -> ShardState: # Process slots (including those with no blocks) since block - process_shard_slots(state, beacon_state, block.slot) + process_shard_slots(state, shard_state, block.data.slot) # Process block - process_shard_block(state, beacon_state, block) + process_shard_block(state, shard_state, block) # Validate state root (`validate_state_root == True` in production) if validate_state_root: - assert block.state_root == hash_tree_root(state) + assert block.data.state_root == hash_tree_root(shard_state) # Return post-state - return state + return shard_state ``` ```python -def process_shard_slots(state: ShardState, beacon_state: BeaconState, slot: ShardSlot) -> None: - assert state.slot <= slot - while state.slot < slot: - process_shard_slot(state) +def process_shard_slots(state: BeaconState, shard_state: ShardState, slot: ShardSlot) -> None: + assert shard_state.slot <= slot + while shard_state.slot < slot: + process_shard_slot(state, shard_state) # Process period on the start slot of the next period - if (state.slot + 1) % (SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: - process_shard_period(state) - state.slot += ShardSlot(1) + if (shard_state.slot + 1) % (SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: + process_shard_period(state, shard_state) + shard_state.slot += ShardSlot(1) ``` ```python -def process_shard_slot(state: ShardState, beacon_state: BeaconState, slot: ShardSlot) -> None: +def process_shard_slot(state: BeaconState, shard_state: ShardState) -> None: # Cache state root - if 
state.latest_block_header.state_root == Hash(): - state.latest_block_header.state_root = hash_tree_root(state) - # Save state roots in history accumulator + previous_state_root = hash_tree_root(state) + if state.latest_block_header_data.state_root == Bytes32(): + state.latest_block_header_data.state_root = previous_state_root + # Cache state root in history accumulator depth = 0 - state_root = hash_tree_root(state) while state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_VECTOR: - state.history_accumulator[depth] = state_root + state.history_accumulator[depth] = previous_state_root depth += 1 ``` ### Period processing ```python -def process_shard_period(state: ShardState, beacon_state: BeaconState) -> None: +def process_shard_period(shard_state: ShardState, state: BeaconState) -> None: epoch = compute_epoch_of_shard_slot(state.slot) - earlier_committee = get_period_committee( - beacon_state, - compute_period_start_epoch(epoch, lookback=2), - state.shard, - ) - later_committee = get_period_committee( - beacon_state, - compute_period_start_epoch(epoch, lookback=1), - state.shard, - ) - - state.receipt_root = hash_tree_root(List[ShardReceiptDelta, TARGET_PERSISTENT_COMMITTEE_SIZE]([ - ShardReceiptDelta(validator_index, state.earlier_committee_rewards[i], state.earlier_committee_fees[i]) - for i, validator_index in enumerate(earlier_committee) + older_committee = get_period_committee(state, state.shard, compute_shard_period_start_epoch(epoch, 2)) + newer_committee = get_period_committee(state, state.shard, compute_shard_period_start_epoch(epoch, 1)) + # Compute receipt root for older committee + state.receipt_root = hash_tree_root(List[ShardReceipt, MAX_PERIOD_COMMITTEE_SIZE]([ + ShardReceipt(validator_index, state.older_committee_rewards[i], state.older_committee_fees[i]) + for i, validator_index in enumerate(older_committee) ])) - state.earlier_committee_rewards = state.later_committee_rewards - state.earlier_committee_fees = state.later_committee_fees - 
state.later_committee_rewards = [REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))] - state.later_committee_fees = [Gwei(0) for _ in range(len(later_committee))] + # Rotate rewards and fees + state.older_committee_rewards = state.newer_committee_rewards + state.newer_committee_rewards = [Gwei(0) for _ in range(len(newer_committee))] + state.older_committee_fees = state.newer_committee_fees + state.newer_committee_fees = [Gwei(0) for _ in range(len(newer_committee))] ``` ### Block processing ```python -def process_shard_block(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: - process_shard_block_header(state, beacon_state, block) - process_shard_attestations(state, beacon_state, block - process_shard_block_data_fees(state, beacon_state, block) +def process_shard_block(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: + process_shard_block_header(state, shard_state, block) + process_shard_attestations(state, shard_state, block) + process_shard_block_size_fee(state, shard_state, block) ``` #### Block header ```python -def process_shard_block_header(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: +def process_shard_block_header(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: # Verify that the slots match - assert block.slot == state.slot + data = block.data + assert data.slot == state.slot + # Verify that the beacon chain root matches + parent_epoch = compute_epoch_of_shard_slot(state.latest_block_header_data.slot) + assert data.beacon_block_root == get_block_root(state, parent_epoch) # Verify that the parent matches - if block.parent_root != Hash(): - assert block.parent_root == signing_root(state.latest_block_header) + assert data.parent_root == hash_tree_root(state.latest_block_header_data) # Save current block as the new latest block - state.latest_block_header = ShardBlockHeader( - slot=block.slot, - beacon_chain_root=block.beacon_chain_root, - 
parent_root=block.parent_root, + state.latest_block_header_data = ShardBlockHeaderData( + slot=data.slot, + beacon_block_root=data.beacon_block_root, + parent_root=data.parent_root, # `state_root` is zeroed and overwritten in the next `process_shard_slot` call - aggregation_bits=block.aggregation_bits, - total_bytes=block.total_bytes, - body_root=hash_tree_root(block.body), - # `signatures` is zeroed + aggregation_bits=data.aggregation_bits, + block_size_sum=data.block_size_sum, + body_root=hash_tree_root(data.body), ) # Verify proposer signature - proposer_index = get_shard_proposer_index(beacon_state, state.shard, block.slot) - pubkey = beacon_state.validators[proposer_index].pubkey - domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot)) - assert bls_verify(pubkey, signing_root(block), block.signatures.proposer, domain) - # Verify total bytes count - state.total_bytes += len(block.body) - assert block.total_bytes == state.total_bytes + proposer_index = get_shard_proposer_index(state, state.shard, data.slot) + pubkey = state.validators[proposer_index].pubkey + domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(data.slot)) + assert bls_verify(pubkey, hash_tree_root(block.data), block.signatures.proposer, domain) + # Verify total body bytes count + state.block_size_sum += SHARD_HEADER_SIZE + len(data.body) + assert data.block_size_sum == state.block_size_sum ``` #### Attestations ```python -def process_shard_attestations(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: - persistent_committee = get_persistent_committee(beacon_state, state.shard, block.slot) +def process_shard_attestations(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: + data = block.data pubkeys = [] attestation_count = 0 - base_reward = get_shard_base_reward(beacon_state) - for i, validator_index in enumerate(persistent_committee): - if block.aggregation_bits[i]: - 
pubkeys.append(beacon_state.validators[validator_index].pubkey) - add_reward(state, beacon_state, validator_index, base_reward) + shard_committee = get_shard_committee(state, state.shard, data.slot) + for i, validator_index in enumerate(shard_committee): + if data.aggregation_bits[i]: + pubkeys.append(state.validators[validator_index].pubkey) + add_reward(state, shard_state, validator_index, get_base_reward(state, validator_index)) attestation_count += 1 - for i in range(len(persistent_committee), TARGET_PERSISTENT_COMMITTEE_SIZE): - assert block.aggregation_bits[i] is False or block.aggregation_bits[i] == 0 # TODO: Fix Bitvector - # Verify aggregate signature - domain = get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.slot)) - assert bls_verify(bls_aggregate_pubkeys(pubkeys), block.parent_root, block.signatures.attesters, domain) - # Proposer micro-rewards - add_reward(state, beacon_state, proposer_index, attestation_count * get_shard_base_reward(beacon_state) // PROPOSER_REWARD_QUOTIENT) + # Verify there are no extraneous bits set beyond the shard committee + for i in range(len(shard_committee), 2 * MAX_PERIOD_COMMITTEE_SIZE): + assert data.aggregation_bits[i] == 0b0 + # Verify attester aggregate signature + domain = get_domain(state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(data.slot)) + message = hash_tree_root(ShardCheckpoint(shard_state.slot, data.parent_root)) + assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.signatures.attesters, domain) + # Proposer micro-reward + proposer_index = get_shard_proposer_index(state, state.shard, data.slot) + reward = attestation_count * get_base_reward(state, proposer_index) // PROPOSER_REWARD_QUOTIENT + add_reward(state, shard_state, proposer_index, reward) ``` -#### Block data fees +#### Block size fee ```python -def process_shard_block_data_fees(state: ShardState, beacon_state: BeaconState, block: ShardBlock) -> None: - base_reward = 
get_shard_base_reward(beacon_state) - add_fee(state, beacon_state, proposer_index, state.basefee * len(block.body) // SHARD_BLOCK_SIZE_LIMIT) - QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR - if len(block.body) > SHARD_BLOCK_SIZE_TARGET: - state.basefee += Gwei(max(1, state.basefee * (len(block.body) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) - elif len(block.body) < SHARD_BLOCK_SIZE_TARGET: - state.basefee -= Gwei(max(1, state.basefee * (len(block.body) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)) - state.basefee = Gwei(max(1, min( EFFECTIVE_BALANCE_INCREMENT // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH, - state.basefee, - ))) -``` - -## Object validity - -### Shard block validation: preliminary - -Accept a shard block `block` only if all of the following are correct: - -* Either `block.parent_root == Hash()` or a block `parent` such that `signing_root(parent) == block.parent_root` has already been accepted. -* `block.beacon_chain_root == get_block_root(head_beacon_state, compute_epoch_of_shard_slot(parent.slot))` where `head_beacon_state` is the current beacon chain head state. Alternatively phrased, a beacon chain block `beacon_ref` such that `signing_root(beacon_ref) == block.beacon_chain_root` has already been accepted and is part of the canonical chain, and no block with slot `beacon_ref.slot < slot <= compute_start_slot_of_epoch(compute_epoch_of_shard_slot(parent.slot))` is part of the canonical chain. -* Let `beacon_state` be the state where `beacon_ref.state_root == hash_tree_root(beacon_state)`. Let `prev_state` be the post-state of the `parent` if the `parent` exists, otherwise let it be `get_default_shard_state(beacon_state, shard)` (defined below). `block.state_root` must equal the `hash_tree_root` of the state after applying `shard_state_transition(prev_state, beacon_state, block)`. 
- -Note that these acceptance conditions depend on the canonical beacon chain; when the canonical beacon chain reorganizes, the eligibility of shard blocks should be re-evaluated. - -### Beacon attestations - -Let: - -- `pre_state` be the `ShardState` before processing any blocks -- `shard_blocks_or_state_roots` be the `Union[ShardBlock, Hash]` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `pre_state.shard` at slot `slot` if a block exists, or the post-state-root of processing state up to and including that slot if a block does not exist. -- `beacon_state` be the canonical `BeaconState` -- `valid_attestations` be the set of valid `Attestation` objects, recursively defined -- `candidate` be a candidate `Attestation` which is valid under Phase 0 rules, and for which validity is to be determined under Phase 1 rules by running `is_valid_beacon_attestation` - -```python -def is_valid_beacon_attestation(pre_state: ShardState, - shard_blocks_or_state_roots: Sequence[Union[ShardBlock, Hash]], - beacon_state: BeaconState, - valid_attestations: Set[Attestation], - candidate: Attestation) -> bool: - # Check if attestation is already determined valid - for attestation in valid_attestations: - if candidate == attestation: - return True - - # Check previous attestation - if candidate.data.previous_crosslink.epoch <= PHASE_1_FORK_EPOCH: - assert candidate.data.previous_crosslink.data_root == Hash() +def process_shard_block_size_fee(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: + # Charge proposer block size fee + proposer_index = get_shard_proposer_index(state, state.shard, block.data.slot) + block_size = SHARD_HEADER_SIZE + len(block.data.body) + add_fee(state, shard_state, proposer_index, state.block_size_price * block_size // SHARD_BLOCK_SIZE_LIMIT) + # Calculate new block size price + if block_size > SHARD_BLOCK_SIZE_TARGET: + size_delta = block_size - SHARD_BLOCK_SIZE_TARGET + price_delta = Gwei(state.block_size_price 
* size_delta // SHARD_BLOCK_SIZE_LIMIT // BLOCK_SIZE_PRICE_QUOTIENT) + # The maximum gas price caps the amount burnt on gas fees within a period to 32 ETH + MAX_BLOCK_SIZE_PRICE = MAX_EFFECTIVE_BALANCE // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH + state.block_size_price = min(MAX_BLOCK_SIZE_PRICE, state.block_size_price + price_delta) else: - previous_attestation = next( - (attestation for attestation in valid_attestations - if attestation.data.crosslink.data_root == candidate.data.previous_crosslink.data_root), - None, - ) - assert previous_attestation is not None - assert candidate.data.previous_attestation.epoch < compute_epoch_of_slot(candidate.data.slot) - - # Check crosslink data root - start_epoch = beacon_state.crosslinks[pre_state.shard].epoch - end_epoch = min(compute_epoch_of_slot(candidate.data.slot) - CROSSLINK_LOOKBACK, - start_epoch + MAX_EPOCHS_PER_CROSSLINK) - blocks = [] - for slot in range(start_epoch * SLOTS_PER_EPOCH, end_epoch * SLOTS_PER_EPOCH): - if isinstance(shard_blocks_or_state_roots[slot], ShardBlock): - blocks.append(shard_blocks_or_state_roots[slot]) - else: - blocks.append(ShardBlock( - slot=slot, - state_root=shard_blocks_or_state_roots[slot], - total_bytes=pre_state.total_bytes, - )) - assert candidate.data.crosslink.data_root == compute_crosslink_data_root(blocks) - - return True + size_delta = SHARD_BLOCK_SIZE_TARGET - block_size + price_delta = Gwei(state.block_size_price * size_delta // SHARD_BLOCK_SIZE_LIMIT // BLOCK_SIZE_PRICE_QUOTIENT) + state.block_size_price = max(MIN_BLOCK_SIZE_PRICE, state.block_size_price - price_delta) ``` ## Shard fork choice rule -The fork choice rule for any shard is LMD GHOST using the shard attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). 
Only blocks whose `beacon_chain_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot.) +The fork choice rule for any shard is LMD GHOST using the shard attestations of the shard committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_block_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot.) From 66c3c391d4714b03adce0ec82e7cc00c326f19eb Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 29 Aug 2019 16:36:13 +0100 Subject: [PATCH 140/250] Update 1_shard-data-chains.md --- specs/core/1_shard-data-chains.md | 79 ++++--------------------------- 1 file changed, 9 insertions(+), 70 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 367091c91..bd4b6d53c 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -24,15 +24,11 @@ - [`ShardBlockHeaderData`](#shardblockheaderdata) - [`ShardBlockHeader`](#shardblockheader) - [`ShardState`](#shardstate) - - [`ShardReceipt`](#shardreceipt) - [`ShardCheckpoint`](#shardcheckpoint) - [Helper functions](#helper-functions) - [Misc](#misc-1) - - [`compute_padded_data`](#compute_padded_data) - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) - [`compute_shard_period_start_epoch`](#compute_shard_period_start_epoch) - - [`compute_flat_shard_header`](#compute_flat_shard_header) - - 
[`compute_crosslink_data_root`](#compute_crosslink_data_root) - [Beacon state accessors](#beacon-state-accessors) - [`get_period_committee`](#get_period_committee) - [`get_shard_committee`](#get_shard_committee) @@ -167,23 +163,13 @@ class ShardState(Container): slot: ShardSlot history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_VECTOR] latest_block_header_data: ShardBlockHeader - receipt_root: Hash block_size_sum: uint64 - # Rewards and fees + # Fees and rewards block_size_price: Gwei - older_committee_rewards: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] - newer_committee_rewards: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] older_committee_fees: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] newer_committee_fees: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] -``` - -### `ShardReceipt` - -```python -class ShardReceipt(Container): - index: ValidatorIndex - rewards: Gwei - fees: Gwei + older_committee_rewards: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] + newer_committee_rewards: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] ``` ### `ShardCheckpoint` @@ -198,14 +184,6 @@ class ShardCheckpoint(Container): ### Misc -#### `compute_padded_data` - -```python -def compute_padded_data(data: bytes, length: uint64) -> bytes: - assert len(data) <= length - return data + b'\x00' * (length - len(data)) -``` - #### `compute_epoch_of_shard_slot` ```python @@ -220,40 +198,6 @@ def compute_shard_period_start_epoch(epoch: Epoch, lookback: uint64) -> Epoch: return Epoch(epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD) ``` -#### `compute_flat_shard_header` - -```python -def compute_flat_shard_header(block: ShardBlock) -> Bytes[SHARD_HEADER_SIZE]: - """ - Return a flat serialisation of the ``block`` header, preserving hash tree root. 
- """ - data = block.data - return ( - # Left half of the hash tree - compute_padded_data(int_to_bytes(data.slot, length=8), 32) + - data.beacon_block_root + - data.parent_root + - hash_tree_root(data.body) + - data.state_root + - compute_padded_data(int_to_bytes(data.block_size_sum, length=8), 32) + - bytes([sum([data.aggregation_bits[i + j] << j for j in range(8)]) for i in range(0, 256, 8)]) + - Bytes32() + # Padding - # Right half of the hash tree - compute_padded_data(block.signatures.attesters, 128) + - compute_padded_data(block.signatures.proposer, 128) - ) -``` - -#### `compute_crosslink_data_root` - -```python -def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash: - headers = b''.join([compute_flat_shard_header(block) for block in blocks]) - bodies = b''.join([block.data.body for block in blocks]) - MAX_SIZE = MAX_EPOCHS_PER_CROSSLINK * SHARD_SLOTS_PER_EPOCH * SHARD_BLOCK_SIZE_LIMIT - return hash_tree_root(BytesN[MAX_SIZE](compute_padded_data(headers + bodies, MAX_SIZE))) -``` - ### Beacon state accessors #### `get_period_committee` @@ -338,11 +282,10 @@ def get_genesis_shard_state(state: BeaconState, shard: Shard) -> ShardState: ```python def get_genesis_shard_block(state: BeaconState, shard: Shard) -> ShardBlock: - genesis_state = get_genesis_shard_state(state, shard) return ShardBlock(data=ShardBlockData( shard=shard, slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), - state_root=hash_tree_root(genesis_state), + state_root=hash_tree_root(get_genesis_shard_state(state, shard)), )) ``` @@ -392,15 +335,9 @@ def process_shard_slot(state: BeaconState, shard_state: ShardState) -> None: ```python def process_shard_period(shard_state: ShardState, state: BeaconState) -> None: - epoch = compute_epoch_of_shard_slot(state.slot) - older_committee = get_period_committee(state, state.shard, compute_shard_period_start_epoch(epoch, 2)) - newer_committee = get_period_committee(state, state.shard, compute_shard_period_start_epoch(epoch, 1)) - # 
Compute receipt root for older committee - state.receipt_root = hash_tree_root(List[ShardReceipt, MAX_PERIOD_COMMITTEE_SIZE]([ - ShardReceipt(validator_index, state.older_committee_rewards[i], state.older_committee_fees[i]) - for i, validator_index in enumerate(older_committee) - ])) # Rotate rewards and fees + epoch = compute_epoch_of_shard_slot(state.slot) + newer_committee = get_period_committee(state, state.shard, compute_shard_period_start_epoch(epoch, 1)) state.older_committee_rewards = state.newer_committee_rewards state.newer_committee_rewards = [Gwei(0) for _ in range(len(newer_committee))] state.older_committee_fees = state.newer_committee_fees @@ -443,7 +380,9 @@ def process_shard_block_header(state: BeaconState, shard_state: ShardState, bloc pubkey = state.validators[proposer_index].pubkey domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(data.slot)) assert bls_verify(pubkey, hash_tree_root(block.data), block.signatures.proposer, domain) - # Verify total body bytes count + # Verify body size is a multiple of the header size + assert len(data.body) % SHARD_HEADER_SIZE == 0 + # Verify the sum of the block sizes since genesis state.block_size_sum += SHARD_HEADER_SIZE + len(data.body) assert data.block_size_sum == state.block_size_sum ``` From 9a712ead6875c7c70edf081a71c95b10cc90b4b0 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Sun, 1 Sep 2019 10:24:46 -0700 Subject: [PATCH 141/250] Update merkle_proofs.md --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index ce7dc647c..e42de9f37 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -62,7 +62,7 @@ Note that the generalized index has the convenient property that the two childre def merkle_tree(leaves: Sequence[Hash]) -> Sequence[Hash]: padded_length = get_next_power_of_two(len(leaves)) o = [Hash()] * 
padded_length + list(leaves) + [Hash()] * (padded_length - len(leaves)) - for i in range(len(leaves) - 1, 0, -1): + for i in range(len(padded_length) - 1, 0, -1): o[i] = hash(o[i * 2] + o[i * 2 + 1]) return o ``` From 84965be2517a028056a16580d6663e830e481558 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Sun, 1 Sep 2019 11:07:44 -0700 Subject: [PATCH 142/250] Update merkle_proofs.md --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index e42de9f37..092f7142e 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -62,7 +62,7 @@ Note that the generalized index has the convenient property that the two childre def merkle_tree(leaves: Sequence[Hash]) -> Sequence[Hash]: padded_length = get_next_power_of_two(len(leaves)) o = [Hash()] * padded_length + list(leaves) + [Hash()] * (padded_length - len(leaves)) - for i in range(len(padded_length) - 1, 0, -1): + for i in range(padded_length - 1, 0, -1): o[i] = hash(o[i * 2] + o[i * 2 + 1]) return o ``` From b9390f09676e3457c419c97e4d3b16b4c6ec3f29 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Mon, 2 Sep 2019 14:00:14 +0800 Subject: [PATCH 143/250] import `byte` --- scripts/build_spec.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index 83f9a2145..5b1fa6fcf 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -57,7 +57,7 @@ from eth2spec.utils.ssz.ssz_typing import ( BasicValue, Elements, BaseBytes, BaseList, SSZType, Container, List, Vector, Bytes, BytesN, Bitlist, Bitvector, Bits, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, - uint64, bit, boolean, + uint64, bit, boolean, byte, ) from eth2spec.utils.bls import ( bls_aggregate_pubkeys, From ab2fac66967387d9a49b46775f839f7305637517 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 3 Sep 2019 07:14:04 +0100 Subject: 
[PATCH 144/250] Added misc beacon chain updates to ToC --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 6fbd38aa4..0de6cd9ef 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,7 @@ Core specifications for Eth 2.0 client validation can be found in [specs/core](s ### Phase 1 * [Custody Game](specs/core/1_custody-game.md) * [Shard Data Chains](specs/core/1_shard-data-chains.md) +* [Misc beacon chain updates](specs/core/1_beacon-chain-misc.md) ### Phase 2 From 834edc2dbc6ac31ed658b1b83af24342bc1b9a66 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 3 Sep 2019 13:44:50 +0100 Subject: [PATCH 145/250] Removed shard from genesis shard block --- specs/core/1_shard-data-chains.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index bd4b6d53c..63bfe45e4 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -283,7 +283,6 @@ def get_genesis_shard_state(state: BeaconState, shard: Shard) -> ShardState: ```python def get_genesis_shard_block(state: BeaconState, shard: Shard) -> ShardBlock: return ShardBlock(data=ShardBlockData( - shard=shard, slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), state_root=hash_tree_root(get_genesis_shard_state(state, shard)), )) From 3ef24870d6f30039d2a8dfefca5b8ae23320e5d0 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 3 Sep 2019 18:55:46 +0100 Subject: [PATCH 146/250] Changes as per today's discussion 1) Make `ShardBlock` and `ShardState` flat containers (as opposed to plain containers) 2) Make Gwei deltas `int64` (as opposed `uint64`) 3) Make `older_committee_deltas` a `Vector` (as opposed to `List`) 4) Apply size fee on block body only (as opposed to block header and body) 5) Enshrine minimum "extra" block body fee for proposers (reusing `PROPOSER_REWARD_QUOTIENT`) 6) Fix bugs reported by @terencechain and @hwwhww :+1: --- specs/core/1_shard-data-chains.md | 221 
++++++++++++------------------ 1 file changed, 89 insertions(+), 132 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 63bfe45e4..e052e2bcb 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -18,10 +18,7 @@ - [Rewards and penalties](#rewards-and-penalties) - [Signature domain types](#signature-domain-types) - [Containers](#containers) - - [`ShardBlockSignatures`](#shardblocksignatures) - - [`ShardBlockData`](#shardblockdata) - [`ShardBlock`](#shardblock) - - [`ShardBlockHeaderData`](#shardblockheaderdata) - [`ShardBlockHeader`](#shardblockheader) - [`ShardState`](#shardstate) - [`ShardCheckpoint`](#shardcheckpoint) @@ -34,8 +31,7 @@ - [`get_shard_committee`](#get_shard_committee) - [`get_shard_proposer_index`](#get_shard_proposer_index) - [Shard state mutators](#shard-state-mutators) - - [`add_reward`](#add_reward) - - [`add_fee`](#add_fee) + - [`process_delta`](#process_delta) - [Genesis](#genesis) - [`get_genesis_shard_state`](#get_genesis_shard_state) - [`get_genesis_shard_block`](#get_genesis_shard_block) @@ -44,7 +40,7 @@ - [Block processing](#block-processing) - [Block header](#block-header) - [Attestations](#attestations) - - [Block size fee](#block-size-fee) + - [Block body fee](#block-body-fee) - [Shard fork choice rule](#shard-fork-choice-rule) @@ -58,6 +54,7 @@ This document describes the shard transition function (data layer only) and the | Name | SSZ equivalent | Description | | - | - | - | | `ShardSlot` | `uint64` | a shard slot number | +| `GweiDelta` | `int64` | a signed Gwei delta | ## Configuration @@ -94,7 +91,7 @@ This document describes the shard transition function (data layer only) and the | Name | Value | | - | - | -| `BLOCK_SIZE_PRICE_QUOTIENT` | `2**3` (= 8) | +| `SHARD_BLOCK_SIZE_PRICE_QUOTIENT` | `2**3` (= 8) | ### Signature domain types @@ -105,71 +102,51 @@ This document describes the shard transition function (data layer only) and the ## 
Containers -### `ShardBlockSignatures` - -```python -class ShardBlockSignatures(Container): - attesters: BLSSignature - proposer: BLSSignature -``` - -### `ShardBlockData` - -```python -class ShardBlockData(Container): - slot: ShardSlot - beacon_block_root: Hash - parent_root: Hash - state_root: Hash - aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] - block_size_sum: uint64 - body: List[byte, SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] -``` - ### `ShardBlock` ```python -class ShardBlock(Container): - data: ShardBlockData - signatures: ShardBlockSignatures -``` - -### `ShardBlockHeaderData` - -```python -class ShardBlockHeaderData(Container): +class ShardBlock(FlatContainer): + shard: Shard slot: ShardSlot beacon_block_root: Hash parent_root: Hash state_root: Hash - aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] block_size_sum: uint64 - body_root: Hash + body: List[byte, SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] + attestation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] + attestation_signature: BLSSignature + signature: BLSSignature ``` ### `ShardBlockHeader` ```python -class ShardBlockHeader(Container): - data: ShardBlockHeaderData - signatures: ShardBlockSignatures +class ShardBlockHeader(FlatContainer): + shard: Shard + slot: ShardSlot + beacon_block_root: Hash + parent_root: Hash + state_root: Hash + block_size_sum: uint64 + body_root: List[byte, SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] + attestation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] + attestation_signature: BLSSignature + signature: BLSSignature ``` ### `ShardState` ```python -class ShardState(Container): +class ShardState(FlatContainer): shard: Shard slot: ShardSlot history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_VECTOR] - latest_block_header_data: ShardBlockHeader + latest_block_header: ShardBlockHeader block_size_sum: uint64 # Fees and rewards - block_size_price: Gwei - older_committee_fees: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] - newer_committee_fees: 
List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] - older_committee_rewards: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] - newer_committee_rewards: List[Gwei, MAX_PERIOD_COMMITTEE_SIZE] + block_body_price: Gwei + older_committee_deltas: Vector[GweiDelta, MAX_PERIOD_COMMITTEE_SIZE] + newer_committee_deltas: Vector[GweiDelta, MAX_PERIOD_COMMITTEE_SIZE] ``` ### `ShardCheckpoint` @@ -188,7 +165,7 @@ class ShardCheckpoint(Container): ```python def compute_epoch_of_shard_slot(slot: ShardSlot) -> Epoch: - return compute_epoch_of_slot(slot // SHARD_SLOTS_PER_EPOCH) + return Epoch(slot // SHARD_SLOTS_PER_EPOCH) ``` #### `compute_shard_period_start_epoch` @@ -226,37 +203,25 @@ def get_shard_committee(state: BeaconState, shard: Shard, epoch: Epoch) -> Seque ```python def get_shard_proposer_index(state: BeaconState, shard: Shard, slot: ShardSlot) -> ValidatorIndex: epoch = get_current_epoch(state) - active_indices = [i for i in get_shard_committee(state, shard, epoch) if is_active_validator(state.validators[i], epoch)] + shard_committee = get_shard_committee(state, shard, epoch) + active_indices = [i for i in shard_committee if is_active_validator(state.validators[i], epoch)] seed = hash(get_seed(state, epoch) + int_to_bytes(slot, length=8) + int_to_bytes(shard, length=8)) compute_proposer_index(state, active_indices, seed) ``` ### Shard state mutators -#### `add_reward` +#### `process_delta` ```python -def add_reward(state: BeaconState, shard_state: ShardState, index: ValidatorIndex, delta: Gwei) -> None: +def process_delta(state: BeaconState, shard_state: ShardState, index: ValidatorIndex, delta: GweiDelta) -> None: epoch = compute_epoch_of_shard_slot(state.slot) older_committee = get_period_committee(state, shard_state.shard, compute_shard_period_start_epoch(epoch, 2)) newer_committee = get_period_committee(state, shard_state.shard, compute_shard_period_start_epoch(epoch, 1)) if index in older_committee: - shard_state.older_committee_rewards[older_committee.index(index)] += delta + 
shard_state.older_committee_deltas[older_committee.index(index)] += delta elif index in newer_committee: - shard_state.newer_committee_rewards[newer_committee.index(index)] += delta -``` - -#### `add_fee` - -```python -def add_fee(state: BeaconState, shard_state: ShardState, index: ValidatorIndex, delta: Gwei) -> None: - epoch = compute_epoch_of_shard_slot(state.slot) - older_committee = get_period_committee(state, shard_state.shard, compute_shard_period_start_epoch(epoch, 2)) - newer_committee = get_period_committee(state, shard_state.shard, compute_shard_period_start_epoch(epoch, 1)) - if index in older_committee: - shard_state.older_committee_fees[older_committee.index(index)] += delta - elif index in newer_committee: - shard_state.newer_committee_fees[newer_committee.index(index)] += delta + shard_state.newer_committee_deltas[newer_committee.index(index)] += delta ``` ## Genesis @@ -265,16 +230,10 @@ def add_fee(state: BeaconState, shard_state: ShardState, index: ValidatorIndex, ```python def get_genesis_shard_state(state: BeaconState, shard: Shard) -> ShardState: - older_committee = get_period_committee(state, shard, compute_shard_period_start_epoch(SHARD_GENESIS_EPOCH, 2)) - newer_committee = get_period_committee(state, shard, compute_shard_period_start_epoch(SHARD_GENESIS_EPOCH, 1)) return ShardState( shard=shard, slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), - block_size_price=MIN_BLOCK_SIZE_PRICE, - older_committee_rewards=[Gwei(0) for _ in range(len(older_committee))], - newer_committee_rewards=[Gwei(0) for _ in range(len(newer_committee))], - older_committee_fees=[Gwei(0) for _ in range(len(older_committee))], - newer_committee_fees=[Gwei(0) for _ in range(len(newer_committee))], + block_body_price=MIN_BLOCK_SIZE_PRICE, ) ``` @@ -282,10 +241,11 @@ def get_genesis_shard_state(state: BeaconState, shard: Shard) -> ShardState: ```python def get_genesis_shard_block(state: BeaconState, shard: Shard) -> ShardBlock: - return 
ShardBlock(data=ShardBlockData( + return ShardBlock( + shard=shard, slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), state_root=hash_tree_root(get_genesis_shard_state(state, shard)), - )) + ) ``` ## Shard state transition function @@ -296,12 +256,12 @@ def shard_state_transition(state: BeaconState, block: ShardBlock, validate_state_root: bool=False) -> ShardState: # Process slots (including those with no blocks) since block - process_shard_slots(state, shard_state, block.data.slot) + process_shard_slots(state, shard_state, block.slot) # Process block process_shard_block(state, shard_state, block) # Validate state root (`validate_state_root == True` in production) if validate_state_root: - assert block.data.state_root == hash_tree_root(shard_state) + assert block.state_root == hash_tree_root(shard_state) # Return post-state return shard_state ``` @@ -321,8 +281,8 @@ def process_shard_slots(state: BeaconState, shard_state: ShardState, slot: Shard def process_shard_slot(state: BeaconState, shard_state: ShardState) -> None: # Cache state root previous_state_root = hash_tree_root(state) - if state.latest_block_header_data.state_root == Bytes32(): - state.latest_block_header_data.state_root = previous_state_root + if state.latest_block_header.state_root == Bytes32(): + state.latest_block_header.state_root = previous_state_root # Cache state root in history accumulator depth = 0 while state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_VECTOR: @@ -333,14 +293,10 @@ def process_shard_slot(state: BeaconState, shard_state: ShardState) -> None: ### Period processing ```python -def process_shard_period(shard_state: ShardState, state: BeaconState) -> None: +def process_shard_period(state: BeaconState, shard_state: ShardState) -> None: # Rotate rewards and fees - epoch = compute_epoch_of_shard_slot(state.slot) - newer_committee = get_period_committee(state, state.shard, compute_shard_period_start_epoch(epoch, 1)) - state.older_committee_rewards = 
state.newer_committee_rewards - state.newer_committee_rewards = [Gwei(0) for _ in range(len(newer_committee))] - state.older_committee_fees = state.newer_committee_fees - state.newer_committee_fees = [Gwei(0) for _ in range(len(newer_committee))] + state.older_committee_deltas = state.newer_committee_deltas + state.newer_committee_deltas = [GweiDelta(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)] ``` ### Block processing @@ -349,7 +305,7 @@ def process_shard_period(shard_state: ShardState, state: BeaconState) -> None: def process_shard_block(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: process_shard_block_header(state, shard_state, block) process_shard_attestations(state, shard_state, block) - process_shard_block_size_fee(state, shard_state, block) + process_shard_block_body_fee(state, shard_state, block) ``` #### Block header @@ -357,80 +313,81 @@ def process_shard_block(state: BeaconState, shard_state: ShardState, block: Shar ```python def process_shard_block_header(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: # Verify that the slots match - data = block.data - assert data.slot == state.slot + assert block.slot == state.slot # Verify that the beacon chain root matches - parent_epoch = compute_epoch_of_shard_slot(state.latest_block_header_data.slot) - assert data.beacon_block_root == get_block_root(state, parent_epoch) + parent_epoch = compute_epoch_of_shard_slot(state.latest_block_header.slot) + assert block.beacon_block_root == get_block_root(state, parent_epoch) # Verify that the parent matches - assert data.parent_root == hash_tree_root(state.latest_block_header_data) + assert block.parent_root == hash_tree_root(state.latest_block_header) # Save current block as the new latest block - state.latest_block_header_data = ShardBlockHeaderData( - slot=data.slot, - beacon_block_root=data.beacon_block_root, - parent_root=data.parent_root, + state.latest_block_header = ShardBlockHeader( + shard=block.shard, + 
slot=block.slot, + beacon_block_root=block.beacon_block_root, + parent_root=block.parent_root, # `state_root` is zeroed and overwritten in the next `process_shard_slot` call - aggregation_bits=data.aggregation_bits, - block_size_sum=data.block_size_sum, - body_root=hash_tree_root(data.body), + block_size_sum=block.block_size_sum, + body_root=hash_tree_root(block.body), + attestation_bits=block.attestation_bits, + attestation_signature=block.attestation_signature, + # `signature` is zeroed ) # Verify proposer signature - proposer_index = get_shard_proposer_index(state, state.shard, data.slot) + proposer_index = get_shard_proposer_index(state, state.shard, block.slot) pubkey = state.validators[proposer_index].pubkey - domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(data.slot)) - assert bls_verify(pubkey, hash_tree_root(block.data), block.signatures.proposer, domain) + domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot)) + assert bls_verify(pubkey, hash_tree_root(block.block), block.proposer, domain) # Verify body size is a multiple of the header size - assert len(data.body) % SHARD_HEADER_SIZE == 0 + assert len(block.body) % SHARD_HEADER_SIZE == 0 # Verify the sum of the block sizes since genesis - state.block_size_sum += SHARD_HEADER_SIZE + len(data.body) - assert data.block_size_sum == state.block_size_sum + state.block_size_sum += SHARD_HEADER_SIZE + len(block.body) + assert block.block_size_sum == state.block_size_sum ``` #### Attestations ```python def process_shard_attestations(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: - data = block.data pubkeys = [] attestation_count = 0 - shard_committee = get_shard_committee(state, state.shard, data.slot) + shard_committee = get_shard_committee(state, state.shard, block.slot) for i, validator_index in enumerate(shard_committee): - if data.aggregation_bits[i]: + if block.attestation_bits[i]: 
pubkeys.append(state.validators[validator_index].pubkey) - add_reward(state, shard_state, validator_index, get_base_reward(state, validator_index)) + process_delta(state, shard_state, validator_index, get_base_reward(state, validator_index)) attestation_count += 1 # Verify there are no extraneous bits set beyond the shard committee for i in range(len(shard_committee), 2 * MAX_PERIOD_COMMITTEE_SIZE): - assert data.aggregation_bits[i] == 0b0 + assert block.attestation_bits[i] == 0b0 # Verify attester aggregate signature - domain = get_domain(state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(data.slot)) - message = hash_tree_root(ShardCheckpoint(shard_state.slot, data.parent_root)) - assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.signatures.attesters, domain) + domain = get_domain(state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.slot)) + message = hash_tree_root(ShardCheckpoint(shard_state.slot, block.parent_root)) + assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.attestation_signature, domain) # Proposer micro-reward - proposer_index = get_shard_proposer_index(state, state.shard, data.slot) + proposer_index = get_shard_proposer_index(state, state.shard, block.slot) reward = attestation_count * get_base_reward(state, proposer_index) // PROPOSER_REWARD_QUOTIENT - add_reward(state, shard_state, proposer_index, reward) + process_delta(state, shard_state, proposer_index, reward) ``` -#### Block size fee +#### Block body fee ```python -def process_shard_block_size_fee(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: - # Charge proposer block size fee - proposer_index = get_shard_proposer_index(state, state.shard, block.data.slot) - block_size = SHARD_HEADER_SIZE + len(block.data.body) - add_fee(state, shard_state, proposer_index, state.block_size_price * block_size // SHARD_BLOCK_SIZE_LIMIT) - # Calculate new block size price - if block_size > SHARD_BLOCK_SIZE_TARGET: - size_delta = 
block_size - SHARD_BLOCK_SIZE_TARGET - price_delta = Gwei(state.block_size_price * size_delta // SHARD_BLOCK_SIZE_LIMIT // BLOCK_SIZE_PRICE_QUOTIENT) - # The maximum gas price caps the amount burnt on gas fees within a period to 32 ETH +def process_shard_block_body_fee(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: + # Apply proposer block body fee + proposer_index = get_shard_proposer_index(state, state.shard, block.slot) + block_body_fee = state.block_body_price * len(block.body) // SHARD_BLOCK_SIZE_LIMIT + process_delta(state, shard_state, proposer_index, -block_body_fee) # Burn + process_delta(state, shard_state, proposer_index, block_body_fee // PROPOSER_REWARD_QUOTIENT) # Reward + # Calculate new block body price + block_size = SHARD_HEADER_SIZE + len(block.body) + QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * SHARD_BLOCK_SIZE_PRICE_QUOTIENT + price_delta = GweiDelta(state.block_body_price * (block_size - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) + if price_delta > 0: + # The maximum gas price caps the amount burnt on gas fees within a period MAX_BLOCK_SIZE_PRICE = MAX_EFFECTIVE_BALANCE // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH - state.block_size_price = min(MAX_BLOCK_SIZE_PRICE, state.block_size_price + price_delta) + state.block_body_price = Gwei(min(MAX_BLOCK_SIZE_PRICE, state.block_body_price + price_delta)) else: - size_delta = SHARD_BLOCK_SIZE_TARGET - block_size - price_delta = Gwei(state.block_size_price * size_delta // SHARD_BLOCK_SIZE_LIMIT // BLOCK_SIZE_PRICE_QUOTIENT) - state.block_size_price = max(MIN_BLOCK_SIZE_PRICE, state.block_size_price - price_delta) + state.block_body_price = Gwei(max(MIN_BLOCK_SIZE_PRICE, state.block_body_price + price_delta)) ``` ## Shard fork choice rule From d1fe8f16fd4449cbf766d4ef2484b0891c04be58 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 3 Sep 2019 18:59:18 +0100 Subject: [PATCH 147/250] Deposit contract fixes (#1362) --- .../contracts/validator_registration.json | 2 +- 
.../contracts/validator_registration.v.py | 18 ++- deposit_contract/requirements-testing.txt | 2 +- .../tests/contracts/test_deposit.py | 112 +++++++++++++----- specs/core/0_deposit-contract.md | 4 +- specs/validator/0_beacon-chain-validator.md | 2 +- 6 files changed, 99 insertions(+), 41 deletions(-) diff --git a/deposit_contract/contracts/validator_registration.json b/deposit_contract/contracts/validator_registration.json index 3a6bfb2d8..fbf20e74c 100644 --- a/deposit_contract/contracts/validator_registration.json +++ b/deposit_contract/contracts/validator_registration.json @@ -1 +1 @@ -{"abi": [{"name": "DepositEvent", "inputs": [{"type": "bytes", "name": "pubkey", "indexed": false}, {"type": "bytes", "name": "withdrawal_credentials", "indexed": false}, {"type": "bytes", "name": "amount", "indexed": false}, {"type": "bytes", "name": "signature", "indexed": false}, {"type": "bytes", "name": "index", "indexed": false}], "anonymous": false, "type": "event"}, {"outputs": [], "inputs": [], "constant": false, "payable": false, "type": "constructor"}, {"name": "get_hash_tree_root", "outputs": [{"type": "bytes32", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 91674}, {"name": "get_deposit_count", "outputs": [{"type": "bytes", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 10433}, {"name": "deposit", "outputs": [], "inputs": [{"type": "bytes", "name": "pubkey"}, {"type": "bytes", "name": "withdrawal_credentials"}, {"type": "bytes", "name": "signature"}], "constant": false, "payable": true, "type": "function", "gas": 1334417}], "bytecode": 
"0x740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a052341561009857600080fd5b6101406000601f818352015b600061014051602081106100b757600080fd5b600260c052602060c020015460208261016001015260208101905061014051602081106100e357600080fd5b600260c052602060c020015460208261016001015260208101905080610160526101609050602060c0825160208401600060025af161012157600080fd5b60c0519050606051600161014051018060405190131561014057600080fd5b809190121561014e57600080fd5b6020811061015b57600080fd5b600260c052602060c02001555b81516001018083528114156100a4575b50506112f956600035601c52740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a052600015610277575b6101605261014052600061018052610140516101a0526101c060006008818352015b61018051600860008112156100da578060000360020a82046100e1565b8060020a82025b905090506101805260ff6101a051166101e052610180516101e0516101805101101561010c57600080fd5b6101e0516101805101610180526101a0517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff86000811215610155578060000360020a820461015c565b8060020a82025b905090506101a0525b81516001018083528114156100bd575b50506018600860208206610200016020828401111561019357600080fd5b60208061022082610180600060046015f15050818152809050905090508051602001806102c0828460006004600a8704601201f16101d057600080fd5b50506102c05160206001820306601f82010390506103206102c0516008818352015b826103205111156102025761021e565b6000610320516102e001535b81516001018083528114156101f2575b50505060206102a05260406102c0510160206001820306601f8201039050610280525b6000610280511115156102535761026f565b602061028051036102a00151602061028051036
1028052610241565b610160515650005b63863a311b600051141561050857341561029057600080fd5b6000610140526101405161016052600154610180526101a060006020818352015b60016001610180511614156103325760006101a051602081106102d357600080fd5b600060c052602060c02001546020826102400101526020810190506101605160208261024001015260208101905080610240526102409050602060c0825160208401600060025af161032457600080fd5b60c0519050610160526103a0565b6000610160516020826101c00101526020810190506101a0516020811061035857600080fd5b600260c052602060c02001546020826101c0010152602081019050806101c0526101c09050602060c0825160208401600060025af161039657600080fd5b60c0519050610160525b61018060026103ae57600080fd5b60028151048152505b81516001018083528114156102b1575b505060006101605160208261044001015260208101905061014051610160516101805163806732896102c0526001546102e0526102e0516006580161009b565b506103405260006103a0525b6103405160206001820306601f82010390506103a0511015156104355761044e565b6103a05161036001526103a0516020016103a052610413565b61018052610160526101405261034060088060208461044001018260208501600060046012f150508051820191505060006018602082066103c0016020828401111561049957600080fd5b6020806103e082610140600060046015f150508181528090509050905060188060208461044001018260208501600060046014f150508051820191505080610440526104409050602060c0825160208401600060025af16104f957600080fd5b60c051905060005260206000f3005b63621fd130600051141561061a57341561052157600080fd5b63806732896101405260015461016052610160516006580161009b565b506101c0526000610220525b6101c05160206001820306601f82010390506102205110151561056c57610585565b610220516101e00152610220516020016102205261054a565b6101c0805160200180610280828460006004600a8704601201f16105a857600080fd5b50506102805160206001820306601f82010390506102e0610280516008818352015b826102e05111156105da576105f6565b60006102e0516102a001535b81516001018083528114156105ca575b5050506020610260526040610280510160206001820306601f8201039050610260f3005b63c47e300d600051141561117457606060046101403760506004356004016101a037603060043560040135111561065057600080f
d5b604060243560040161022037602060243560040135111561067057600080fd5b608060443560040161028037606060443560040135111561069057600080fd5b63ffffffff600154106106a257600080fd5b633b9aca0061034052610340516106b857600080fd5b61034051340461032052633b9aca006103205110156106d657600080fd5b60306101a051146106e657600080fd5b602061022051146106f657600080fd5b6060610280511461070657600080fd5b6101a0516101c0516101e05161020051610220516102405161026051610280516102a0516102c0516102e05161030051610320516103405161036051610380516103a05163806732896103c052610320516103e0526103e0516006580161009b565b506104405260006104a0525b6104405160206001820306601f82010390506104a051101515610796576107af565b6104a05161046001526104a0516020016104a052610774565b6103a05261038052610360526103405261032052610300526102e0526102c0526102a05261028052610260526102405261022052610200526101e0526101c0526101a052610440805160200180610360828460006004600a8704601201f161081657600080fd5b50506101a0516101c0516101e05161020051610220516102405161026051610280516102a0516102c0516102e05161030051610320516103405161036051610380516103a0516103c0516103e05161040051610420516104405161046051610480516104a05163806732896104c0526001546104e0526104e0516006580161009b565b506105405260006105a0525b6105405160206001820306601f82010390506105a0511015156108c7576108e0565b6105a05161056001526105a0516020016105a0526108a5565b6104a05261048052610460526104405261042052610400526103e0526103c0526103a05261038052610360526103405261032052610300526102e0526102c0526102a05261028052610260526102405261022052610200526101e0526101c0526101a0526105408051602001806105c0828460006004600a8704601201f161096757600080fd5b505060a06106405261064051610680526101a08051602001806106405161068001828460006004600a8704601201f161099f57600080fd5b505061064051610680015160206001820306601f8201039050610640516106800161062081516040818352015b83610620511015156109dd576109fa565b6000610620516020850101535b81516001018083528114156109cc575b50505050602061064051610680015160206001820306601f820103905061064051010161064052610640516106a052610220805160200180610640516
1068001828460006004600a8704601201f1610a5157600080fd5b505061064051610680015160206001820306601f8201039050610640516106800161062081516020818352015b8361062051101515610a8f57610aac565b6000610620516020850101535b8151600101808352811415610a7e575b50505050602061064051610680015160206001820306601f820103905061064051010161064052610640516106c0526103608051602001806106405161068001828460006004600a8704601201f1610b0357600080fd5b505061064051610680015160206001820306601f8201039050610640516106800161062081516020818352015b8361062051101515610b4157610b5e565b6000610620516020850101535b8151600101808352811415610b30575b50505050602061064051610680015160206001820306601f820103905061064051010161064052610640516106e0526102808051602001806106405161068001828460006004600a8704601201f1610bb557600080fd5b505061064051610680015160206001820306601f8201039050610640516106800161062081516060818352015b8361062051101515610bf357610c10565b6000610620516020850101535b8151600101808352811415610be2575b50505050602061064051610680015160206001820306601f82010390506106405101016106405261064051610700526105c08051602001806106405161068001828460006004600a8704601201f1610c6757600080fd5b505061064051610680015160206001820306601f8201039050610640516106800161062081516020818352015b8361062051101515610ca557610cc2565b6000610620516020850101535b8151600101808352811415610c94575b50505050602061064051610680015160206001820306601f8201039050610640510101610640527f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c561064051610680a160006107205260006101a06030806020846107e001018260208501600060046016f150508051820191505060006010602082066107600160208284011115610d5957600080fd5b60208061078082610720600060046015f15050818152809050905090506010806020846107e001018260208501600060046013f1505080518201915050806107e0526107e09050602060c0825160208401600060025af1610db957600080fd5b60c0519050610740526000600060406020820661088001610280518284011115610de257600080fd5b6060806108a0826020602088068803016102800160006004601bf1505081815280905090509050602060c0825160208401600060025af1610e2257600
080fd5b60c0519050602082610a800101526020810190506000604060206020820661094001610280518284011115610e5657600080fd5b606080610960826020602088068803016102800160006004601bf1505081815280905090509050602080602084610a0001018260208501600060046015f150508051820191505061072051602082610a0001015260208101905080610a0052610a009050602060c0825160208401600060025af1610ed357600080fd5b60c0519050602082610a8001015260208101905080610a8052610a809050602060c0825160208401600060025af1610f0a57600080fd5b60c0519050610860526000600061074051602082610b20010152602081019050610220602080602084610b2001018260208501600060046015f150508051820191505080610b2052610b209050602060c0825160208401600060025af1610f7057600080fd5b60c0519050602082610ca00101526020810190506000610360600880602084610c2001018260208501600060046012f15050805182019150506000601860208206610ba00160208284011115610fc557600080fd5b602080610bc082610720600060046015f1505081815280905090509050601880602084610c2001018260208501600060046014f150508051820191505061086051602082610c2001015260208101905080610c2052610c209050602060c0825160208401600060025af161103857600080fd5b60c0519050602082610ca001015260208101905080610ca052610ca09050602060c0825160208401600060025af161106f57600080fd5b60c0519050610b0052600180546001825401101561108c57600080fd5b6001815401815550600154610d2052610d4060006020818352015b60016001610d20511614156110dc57610b0051610d4051602081106110cb57600080fd5b600060c052602060c0200155611170565b6000610d4051602081106110ef57600080fd5b600060c052602060c0200154602082610d60010152602081019050610b0051602082610d6001015260208101905080610d6052610d609050602060c0825160208401600060025af161114057600080fd5b60c0519050610b0052610d20600261115757600080fd5b60028151048152505b81516001018083528114156110a7575b5050005b60006000fd5b61017f6112f90361017f60003961017f6112f9036000f3"} \ No newline at end of file +{"abi": [{"name": "DepositEvent", "inputs": [{"type": "bytes", "name": "pubkey", "indexed": false}, {"type": "bytes", "name": "withdrawal_credentials", "indexed": false}, {"type": "bytes", "name": 
"amount", "indexed": false}, {"type": "bytes", "name": "signature", "indexed": false}, {"type": "bytes", "name": "index", "indexed": false}], "anonymous": false, "type": "event"}, {"outputs": [], "inputs": [], "constant": false, "payable": false, "type": "constructor"}, {"name": "get_deposit_root", "outputs": [{"type": "bytes32", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 95389}, {"name": "get_deposit_count", "outputs": [{"type": "bytes", "name": "out"}], "inputs": [], "constant": true, "payable": false, "type": "function", "gas": 17683}, {"name": "deposit", "outputs": [], "inputs": [{"type": "bytes", "name": "pubkey"}, {"type": "bytes", "name": "withdrawal_credentials"}, {"type": "bytes", "name": "signature"}, {"type": "bytes32", "name": "deposit_data_root"}], "constant": false, "payable": true, "type": "function", "gas": 1754607}], "bytecode": "0x740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a052341561009857600080fd5b6101406000601f818352015b600061014051602081106100b757600080fd5b600260c052602060c020015460208261016001015260208101905061014051602081106100e357600080fd5b600260c052602060c020015460208261016001015260208101905080610160526101609050602060c0825160208401600060025af161012157600080fd5b60c0519050606051600161014051018060405190131561014057600080fd5b809190121561014e57600080fd5b6020811061015b57600080fd5b600260c052602060c02001555b81516001018083528114156100a4575b50506111d656600035601c52740100000000000000000000000000000000000000006020526f7fffffffffffffffffffffffffffffff6040527fffffffffffffffffffffffffffffffff8000000000000000000000000000000060605274012a05f1fffffffffffffffffffffffffdabf41c006080527ffffffffffffffffffffffffed5fa0e000000000000000000000000000000000060a052600015610265575b6101605261
014052600061018052610140516101a0526101c060006008818352015b61018051600860008112156100da578060000360020a82046100e1565b8060020a82025b905090506101805260ff6101a051166101e052610180516101e0516101805101101561010c57600080fd5b6101e0516101805101610180526101a0517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff86000811215610155578060000360020a820461015c565b8060020a82025b905090506101a0525b81516001018083528114156100bd575b50506018600860208206610200016020828401111561019357600080fd5b60208061022082610180600060046015f15050818152809050905090508051602001806102c0828460006004600a8704601201f16101d057600080fd5b50506103206102c0516020818352015b60206103205111156101f15761020d565b6000610320516102e001535b81516001018083528114156101e0575b505060206102a05260406102c0510160206001820306601f8201039050610280525b6000610280511115156102415761025d565b602061028051036102a00151602061028051036102805261022f565b610160515650005b63c5f2892f60005114156104f757341561027e57600080fd5b6000610140526101405161016052600154610180526101a060006020818352015b60016001610180511614156103205760006101a051602081106102c157600080fd5b600060c052602060c02001546020826102400101526020810190506101605160208261024001015260208101905080610240526102409050602060c0825160208401600060025af161031257600080fd5b60c05190506101605261038e565b6000610160516020826101c00101526020810190506101a0516020811061034657600080fd5b600260c052602060c02001546020826101c0010152602081019050806101c0526101c09050602060c0825160208401600060025af161038457600080fd5b60c0519050610160525b610180600261039c57600080fd5b60028151048152505b815160010180835281141561029f575b505060006101605160208261046001015260208101905061014051610160516101805163806732896102e05260015461030052610300516006580161009b565b506103605260006103c0525b6103605160206001820306601f82010390506103c0511015156104235761043c565b6103c05161038001526103c0516020016103c052610401565b61018052610160526101405261036060088060208461046001018260208501600060046012f150508051820191505060006018602082066103e0016020828401111561048757600080fd5b60
208061040082610140600060046015f150508181528090509050905060188060208461046001018260208501600060046014f150508051820191505080610460526104609050602060c0825160208401600060025af16104e757600080fd5b60c051905060005260206000f350005b63621fd13060005114156105f857341561051057600080fd5b63806732896101405260015461016052610160516006580161009b565b506101c0526000610220525b6101c05160206001820306601f82010390506102205110151561055b57610574565b610220516101e001526102205160200161022052610539565b6101c0805160200180610280828460006004600a8704601201f161059757600080fd5b50506102e0610280516020818352015b60206102e05111156105b8576105d4565b60006102e0516102a001535b81516001018083528114156105a7575b50506020610260526040610280510160206001820306601f8201039050610260f350005b6322895118600051141561105157605060043560040161014037603060043560040135111561062657600080fd5b60406024356004016101c037602060243560040135111561064657600080fd5b608060443560040161022037606060443560040135111561066657600080fd5b63ffffffff6001541061067857600080fd5b633b9aca006102e0526102e05161068e57600080fd5b6102e05134046102c052633b9aca006102c05110156106ac57600080fd5b603061014051146106bc57600080fd5b60206101c051146106cc57600080fd5b606061022051146106dc57600080fd5b610140610360525b61036051516020610360510161036052610360610360511015610706576106e4565b6380673289610380526102c0516103a0526103a0516006580161009b565b50610400526000610460525b6104005160206001820306601f8201039050610460511015156107525761076b565b6104605161042001526104605160200161046052610730565b610340610360525b610360515260206103605103610360526101406103605110151561079657610773565b610400805160200180610300828460006004600a8704601201f16107b957600080fd5b5050610140610480525b610480515160206104805101610480526104806104805110156107e5576107c3565b63806732896104a0526001546104c0526104c0516006580161009b565b50610520526000610580525b6105205160206001820306601f82010390506105805110151561083057610849565b610580516105400152610580516020016105805261080e565b610460610480525b61048051526020610480510361048052610140610480511015156108745761
0851565b6105208051602001806105a0828460006004600a8704601201f161089757600080fd5b505060a06106205261062051610660526101408051602001806106205161066001828460006004600a8704601201f16108cf57600080fd5b5050610600610620516106600151610240818352015b6102406106005111156108f757610918565b600061060051610620516106800101535b81516001018083528114156108e5575b5050602061062051610660015160206001820306601f82010390506106205101016106205261062051610680526101c08051602001806106205161066001828460006004600a8704601201f161096d57600080fd5b5050610600610620516106600151610240818352015b610240610600511115610995576109b6565b600061060051610620516106800101535b8151600101808352811415610983575b5050602061062051610660015160206001820306601f820103905061062051010161062052610620516106a0526103008051602001806106205161066001828460006004600a8704601201f1610a0b57600080fd5b5050610600610620516106600151610240818352015b610240610600511115610a3357610a54565b600061060051610620516106800101535b8151600101808352811415610a21575b5050602061062051610660015160206001820306601f820103905061062051010161062052610620516106c0526102208051602001806106205161066001828460006004600a8704601201f1610aa957600080fd5b5050610600610620516106600151610240818352015b610240610600511115610ad157610af2565b600061060051610620516106800101535b8151600101808352811415610abf575b5050602061062051610660015160206001820306601f820103905061062051010161062052610620516106e0526105a08051602001806106205161066001828460006004600a8704601201f1610b4757600080fd5b5050610600610620516106600151610240818352015b610240610600511115610b6f57610b90565b600061060051610620516106800101535b8151600101808352811415610b5d575b5050602061062051610660015160206001820306601f8201039050610620510101610620527f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c561062051610660a160006107005260006101406030806020846107c001018260208501600060046016f150508051820191505060006010602082066107400160208284011115610c2557600080fd5b60208061076082610700600060046015f15050818152809050905090506010806020846107c001018260208501600060046013
f1505080518201915050806107c0526107c09050602060c0825160208401600060025af1610c8557600080fd5b60c0519050610720526000600060406020820661086001610220518284011115610cae57600080fd5b606080610880826020602088068803016102200160006004601bf1505081815280905090509050602060c0825160208401600060025af1610cee57600080fd5b60c0519050602082610a600101526020810190506000604060206020820661092001610220518284011115610d2257600080fd5b606080610940826020602088068803016102200160006004601bf15050818152809050905090506020806020846109e001018260208501600060046015f1505080518201915050610700516020826109e0010152602081019050806109e0526109e09050602060c0825160208401600060025af1610d9f57600080fd5b60c0519050602082610a6001015260208101905080610a6052610a609050602060c0825160208401600060025af1610dd657600080fd5b60c0519050610840526000600061072051602082610b000101526020810190506101c0602080602084610b0001018260208501600060046015f150508051820191505080610b0052610b009050602060c0825160208401600060025af1610e3c57600080fd5b60c0519050602082610c800101526020810190506000610300600880602084610c0001018260208501600060046012f15050805182019150506000601860208206610b800160208284011115610e9157600080fd5b602080610ba082610700600060046015f1505081815280905090509050601880602084610c0001018260208501600060046014f150508051820191505061084051602082610c0001015260208101905080610c0052610c009050602060c0825160208401600060025af1610f0457600080fd5b60c0519050602082610c8001015260208101905080610c8052610c809050602060c0825160208401600060025af1610f3b57600080fd5b60c0519050610ae052606435610ae05114610f5557600080fd5b6001805460018254011015610f6957600080fd5b6001815401815550600154610d0052610d2060006020818352015b60016001610d0051161415610fb957610ae051610d205160208110610fa857600080fd5b600060c052602060c020015561104d565b6000610d205160208110610fcc57600080fd5b600060c052602060c0200154602082610d40010152602081019050610ae051602082610d4001015260208101905080610d4052610d409050602060c0825160208401600060025af161101d57600080fd5b60c0519050610ae052610d00600261103457600080fd5b60028151048152505b815160
0101808352811415610f84575b5050005b60006000fd5b61017f6111d60361017f60003961017f6111d6036000f3"} \ No newline at end of file diff --git a/deposit_contract/contracts/validator_registration.v.py b/deposit_contract/contracts/validator_registration.v.py index bad619b07..6ee27db7a 100644 --- a/deposit_contract/contracts/validator_registration.v.py +++ b/deposit_contract/contracts/validator_registration.v.py @@ -1,10 +1,11 @@ +# Vyper target 0.1.0b12 MIN_DEPOSIT_AMOUNT: constant(uint256) = 1000000000 # Gwei DEPOSIT_CONTRACT_TREE_DEPTH: constant(uint256) = 32 MAX_DEPOSIT_COUNT: constant(uint256) = 4294967295 # 2**DEPOSIT_CONTRACT_TREE_DEPTH - 1 PUBKEY_LENGTH: constant(uint256) = 48 # bytes WITHDRAWAL_CREDENTIALS_LENGTH: constant(uint256) = 32 # bytes -AMOUNT_LENGTH: constant(uint256) = 8 # bytes SIGNATURE_LENGTH: constant(uint256) = 96 # bytes +AMOUNT_LENGTH: constant(uint256) = 8 # bytes DepositEvent: event({ pubkey: bytes[48], @@ -42,7 +43,7 @@ def to_little_endian_64(value: uint256) -> bytes[8]: @public @constant -def get_hash_tree_root() -> bytes32: +def get_deposit_root() -> bytes32: zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000 node: bytes32 = zero_bytes32 size: uint256 = self.deposit_count @@ -65,13 +66,16 @@ def get_deposit_count() -> bytes[8]: @public def deposit(pubkey: bytes[PUBKEY_LENGTH], withdrawal_credentials: bytes[WITHDRAWAL_CREDENTIALS_LENGTH], - signature: bytes[SIGNATURE_LENGTH]): + signature: bytes[SIGNATURE_LENGTH], + deposit_data_root: bytes32): # Avoid overflowing the Merkle tree (and prevent edge case in computing `self.branch`) assert self.deposit_count < MAX_DEPOSIT_COUNT - # Validate deposit data + # Check deposit amount deposit_amount: uint256 = msg.value / as_wei_value(1, "gwei") assert deposit_amount >= MIN_DEPOSIT_AMOUNT + + # Length checks to facilitate formal verification (see https://github.com/ethereum/eth2.0-specs/pull/1362/files#r320361859) assert len(pubkey) == PUBKEY_LENGTH assert 
len(withdrawal_credentials) == WITHDRAWAL_CREDENTIALS_LENGTH assert len(signature) == SIGNATURE_LENGTH @@ -80,7 +84,7 @@ def deposit(pubkey: bytes[PUBKEY_LENGTH], amount: bytes[8] = self.to_little_endian_64(deposit_amount) log.DepositEvent(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count)) - # Compute `DepositData` hash tree root + # Compute deposit data root (`DepositData` hash tree root) zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000 pubkey_root: bytes32 = sha256(concat(pubkey, slice(zero_bytes32, start=0, len=64 - PUBKEY_LENGTH))) signature_root: bytes32 = sha256(concat( @@ -91,8 +95,10 @@ def deposit(pubkey: bytes[PUBKEY_LENGTH], sha256(concat(pubkey_root, withdrawal_credentials)), sha256(concat(amount, slice(zero_bytes32, start=0, len=32 - AMOUNT_LENGTH), signature_root)), )) + # Verify computed and expected deposit data roots match + assert node == deposit_data_root - # Add `DepositData` hash tree root to Merkle tree (update a single `branch` node) + # Add deposit data root to Merkle tree (update a single `branch` node) self.deposit_count += 1 size: uint256 = self.deposit_count for height in range(DEPOSIT_CONTRACT_TREE_DEPTH): diff --git a/deposit_contract/requirements-testing.txt b/deposit_contract/requirements-testing.txt index 280d7e527..0b3d9d22c 100644 --- a/deposit_contract/requirements-testing.txt +++ b/deposit_contract/requirements-testing.txt @@ -1,5 +1,5 @@ eth-tester[py-evm]==0.1.0b39 -vyper==0.1.0b10 +vyper==0.1.0b12 web3==5.0.0b2 pytest==3.6.1 ../test_libs/pyspec diff --git a/deposit_contract/tests/contracts/test_deposit.py b/deposit_contract/tests/contracts/test_deposit.py index 1c96d074e..01586d070 100644 --- a/deposit_contract/tests/contracts/test_deposit.py +++ b/deposit_contract/tests/contracts/test_deposit.py @@ -6,7 +6,6 @@ import pytest import eth_utils from tests.contracts.conftest import ( - DEPOSIT_CONTRACT_TREE_DEPTH, FULL_DEPOSIT_AMOUNT, 
MIN_DEPOSIT_AMOUNT, ) @@ -14,29 +13,42 @@ from tests.contracts.conftest import ( from eth2spec.phase0.spec import ( DepositData, ) -from eth2spec.utils.hash_function import hash from eth2spec.utils.ssz.ssz_typing import List from eth2spec.utils.ssz.ssz_impl import ( hash_tree_root, ) +SAMPLE_PUBKEY = b'\x11' * 48 +SAMPLE_WITHDRAWAL_CREDENTIALS = b'\x22' * 32 +SAMPLE_VALID_SIGNATURE = b'\x33' * 96 + + @pytest.fixture -def deposit_input(): +def deposit_input(amount): """ pubkey: bytes[48] withdrawal_credentials: bytes[32] signature: bytes[96] + deposit_data_root: bytes[32] """ return ( - b'\x11' * 48, - b'\x22' * 32, - b'\x33' * 96, + SAMPLE_PUBKEY, + SAMPLE_WITHDRAWAL_CREDENTIALS, + SAMPLE_VALID_SIGNATURE, + hash_tree_root( + DepositData( + pubkey=SAMPLE_PUBKEY, + withdrawal_credentials=SAMPLE_WITHDRAWAL_CREDENTIALS, + amount=amount, + signature=SAMPLE_VALID_SIGNATURE, + ), + ) ) @pytest.mark.parametrize( - 'success,deposit_amount', + ('success', 'amount'), [ (True, FULL_DEPOSIT_AMOUNT), (True, MIN_DEPOSIT_AMOUNT), @@ -47,18 +59,24 @@ def deposit_input(): def test_deposit_amount(registration_contract, w3, success, - deposit_amount, + amount, assert_tx_failed, deposit_input): call = registration_contract.functions.deposit(*deposit_input) if success: - assert call.transact({"value": deposit_amount * eth_utils.denoms.gwei}) + assert call.transact({"value": amount * eth_utils.denoms.gwei}) else: assert_tx_failed( - lambda: call.transact({"value": deposit_amount * eth_utils.denoms.gwei}) + lambda: call.transact({"value": amount * eth_utils.denoms.gwei}) ) +@pytest.mark.parametrize( + 'amount', + [ + (FULL_DEPOSIT_AMOUNT) + ] +) @pytest.mark.parametrize( 'invalid_pubkey,invalid_withdrawal_credentials,invalid_signature,success', [ @@ -71,38 +89,62 @@ def test_deposit_amount(registration_contract, def test_deposit_inputs(registration_contract, w3, assert_tx_failed, - deposit_input, + amount, invalid_pubkey, invalid_withdrawal_credentials, invalid_signature, success): - 
pubkey = deposit_input[0][2:] if invalid_pubkey else deposit_input[0] - if invalid_withdrawal_credentials: # this one is different to satisfy linter - withdrawal_credentials = deposit_input[1][2:] - else: - withdrawal_credentials = deposit_input[1] - signature = deposit_input[2][2:] if invalid_signature else deposit_input[2] + pubkey = SAMPLE_PUBKEY[2:] if invalid_pubkey else SAMPLE_PUBKEY + withdrawal_credentials = ( + SAMPLE_WITHDRAWAL_CREDENTIALS[2:] if invalid_withdrawal_credentials + else SAMPLE_WITHDRAWAL_CREDENTIALS + ) + signature = SAMPLE_VALID_SIGNATURE[2:] if invalid_signature else SAMPLE_VALID_SIGNATURE call = registration_contract.functions.deposit( pubkey, withdrawal_credentials, signature, + hash_tree_root( + DepositData( + pubkey=SAMPLE_PUBKEY if invalid_pubkey else pubkey, + withdrawal_credentials=( + SAMPLE_WITHDRAWAL_CREDENTIALS if invalid_withdrawal_credentials + else withdrawal_credentials + ), + amount=amount, + signature=SAMPLE_VALID_SIGNATURE if invalid_signature else signature, + ), + ) ) if success: - assert call.transact({"value": FULL_DEPOSIT_AMOUNT * eth_utils.denoms.gwei}) + assert call.transact({"value": amount * eth_utils.denoms.gwei}) else: assert_tx_failed( - lambda: call.transact({"value": FULL_DEPOSIT_AMOUNT * eth_utils.denoms.gwei}) + lambda: call.transact({"value": amount * eth_utils.denoms.gwei}) ) -def test_deposit_event_log(registration_contract, a0, w3, deposit_input): +def test_deposit_event_log(registration_contract, a0, w3): log_filter = registration_contract.events.DepositEvent.createFilter( fromBlock='latest', ) - deposit_amount_list = [randint(MIN_DEPOSIT_AMOUNT, FULL_DEPOSIT_AMOUNT * 2) for _ in range(3)] + for i in range(3): + deposit_input = ( + SAMPLE_PUBKEY, + SAMPLE_WITHDRAWAL_CREDENTIALS, + SAMPLE_VALID_SIGNATURE, + hash_tree_root( + DepositData( + pubkey=SAMPLE_PUBKEY, + withdrawal_credentials=SAMPLE_WITHDRAWAL_CREDENTIALS, + amount=deposit_amount_list[i], + signature=SAMPLE_VALID_SIGNATURE, + ), + ) + ) 
registration_contract.functions.deposit( *deposit_input, ).transact({"value": deposit_amount_list[i] * eth_utils.denoms.gwei}) @@ -118,7 +160,7 @@ def test_deposit_event_log(registration_contract, a0, w3, deposit_input): assert log['index'] == i.to_bytes(8, 'little') -def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input): +def test_deposit_tree(registration_contract, w3, assert_tx_failed): log_filter = registration_contract.events.DepositEvent.createFilter( fromBlock='latest', ) @@ -126,6 +168,20 @@ def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input deposit_amount_list = [randint(MIN_DEPOSIT_AMOUNT, FULL_DEPOSIT_AMOUNT * 2) for _ in range(10)] deposit_data_list = [] for i in range(0, 10): + deposit_data = DepositData( + pubkey=SAMPLE_PUBKEY, + withdrawal_credentials=SAMPLE_WITHDRAWAL_CREDENTIALS, + amount=deposit_amount_list[i], + signature=SAMPLE_VALID_SIGNATURE, + ) + deposit_input = ( + SAMPLE_PUBKEY, + SAMPLE_WITHDRAWAL_CREDENTIALS, + SAMPLE_VALID_SIGNATURE, + hash_tree_root(deposit_data), + ) + deposit_data_list.append(deposit_data) + tx_hash = registration_contract.functions.deposit( *deposit_input, ).transact({"value": deposit_amount_list[i] * eth_utils.denoms.gwei}) @@ -138,12 +194,8 @@ def test_deposit_tree(registration_contract, w3, assert_tx_failed, deposit_input assert log["index"] == i.to_bytes(8, 'little') - deposit_data_list.append(DepositData( - pubkey=deposit_input[0], - withdrawal_credentials=deposit_input[1], - amount=deposit_amount_list[i], - signature=deposit_input[2], - )) - + # Check deposit count and root + count = len(deposit_data_list).to_bytes(8, 'little') + assert count == registration_contract.functions.get_deposit_count().call() root = hash_tree_root(List[DepositData, 2**32](*deposit_data_list)) - assert root == registration_contract.functions.get_hash_tree_root().call() + assert root == registration_contract.functions.get_deposit_root().call() diff --git 
a/specs/core/0_deposit-contract.md b/specs/core/0_deposit-contract.md index ade1006a0..06962594e 100644 --- a/specs/core/0_deposit-contract.md +++ b/specs/core/0_deposit-contract.md @@ -34,11 +34,11 @@ This document represents the specification for the beacon chain deposit contract ## Ethereum 1.0 deposit contract -The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in Phase 2 (i.e. when the EVM 2.0 is deployed and the shards have state). +The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in Phase 2. ### `deposit` function -The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96]` corresponding to a [`DepositData`](./0_beacon-chain.md#depositdata) object. +The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes32`. The first three arguments populate a [`DepositData`](./0_beacon-chain.md#depositdata) object, and `deposit_data_root` is the expected `DepositData` root as a protection against malformatted calldata. 
#### Deposit amount diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index ef5ad4415..3764e7df1 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -218,7 +218,7 @@ def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> ##### Eth1 Data -The `block.eth1_data` field is for block proposers to vote on recent Eth 1.0 data. This recent data contains an Eth 1.0 block hash as well as the associated deposit root (as calculated by the `get_hash_tree_root()` method of the deposit contract) and deposit count after execution of the corresponding Eth 1.0 block. If over half of the block proposers in the current Eth 1.0 voting period vote for the same `eth1_data` then `state.eth1_data` updates at the end of the voting period. Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`. +The `block.eth1_data` field is for block proposers to vote on recent Eth 1.0 data. This recent data contains an Eth 1.0 block hash as well as the associated deposit root (as calculated by the `get_deposit_root()` method of the deposit contract) and deposit count after execution of the corresponding Eth 1.0 block. If over half of the block proposers in the current Eth 1.0 voting period vote for the same `eth1_data` then `state.eth1_data` updates at the end of the voting period. Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`. Let `get_eth1_data(distance: uint64) -> Eth1Data` be the (subjective) function that returns the Eth 1.0 data at distance `distance` relative to the Eth 1.0 head at the start of the current Eth 1.0 voting period. Let `previous_eth1_distance` be the distance relative to the Eth 1.0 block corresponding to `state.eth1_data.block_hash` at the start of the current Eth 1.0 voting period. 
An honest block proposer sets `block.eth1_data = get_eth1_vote(state, previous_eth1_distance)` where: From c4297ae330231afbe9fdc883118ee7bbc44fcfce Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 3 Sep 2019 19:17:19 +0100 Subject: [PATCH 148/250] Update 1_shard-data-chains.md --- specs/core/1_shard-data-chains.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index e052e2bcb..5601da936 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -62,11 +62,11 @@ This document describes the shard transition function (data layer only) and the | Name | Value | | - | - | -| `MIN_BLOCK_SIZE_PRICE` | `2**0` (= 1) | +| `MIN_BLOCK_BODY_PRICE` | `2**0` (= 1) | | `MAX_PERIOD_COMMITTEE_SIZE` | `2**7` (= 128) | | `SHARD_HEADER_SIZE` | `2**9` (= 512) | | `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) | -| `SHARD_BLOCK_SIZE_LIMIT` | `2**16` (= 65,536) | +| `MAX_SHARD_BLOCK_SIZE` | `2**16` (= 65,536) | ### Initial values @@ -91,7 +91,7 @@ This document describes the shard transition function (data layer only) and the | Name | Value | | - | - | -| `SHARD_BLOCK_SIZE_PRICE_QUOTIENT` | `2**3` (= 8) | +| `BLOCK_BODY_PRICE_QUOTIENT` | `2**3` (= 8) | ### Signature domain types @@ -112,7 +112,7 @@ class ShardBlock(FlatContainer): parent_root: Hash state_root: Hash block_size_sum: uint64 - body: List[byte, SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] + body: List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE] attestation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] attestation_signature: BLSSignature signature: BLSSignature @@ -128,7 +128,7 @@ class ShardBlockHeader(FlatContainer): parent_root: Hash state_root: Hash block_size_sum: uint64 - body_root: List[byte, SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE] + body_root: List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE] attestation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] 
attestation_signature: BLSSignature signature: BLSSignature @@ -233,7 +233,7 @@ def get_genesis_shard_state(state: BeaconState, shard: Shard) -> ShardState: return ShardState( shard=shard, slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), - block_body_price=MIN_BLOCK_SIZE_PRICE, + block_body_price=MIN_BLOCK_BODY_PRICE, ) ``` @@ -375,19 +375,19 @@ def process_shard_attestations(state: BeaconState, shard_state: ShardState, bloc def process_shard_block_body_fee(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: # Apply proposer block body fee proposer_index = get_shard_proposer_index(state, state.shard, block.slot) - block_body_fee = state.block_body_price * len(block.body) // SHARD_BLOCK_SIZE_LIMIT + block_body_fee = state.block_body_price * len(block.body) // MAX_SHARD_BLOCK_SIZE process_delta(state, shard_state, proposer_index, -block_body_fee) # Burn process_delta(state, shard_state, proposer_index, block_body_fee // PROPOSER_REWARD_QUOTIENT) # Reward # Calculate new block body price block_size = SHARD_HEADER_SIZE + len(block.body) - QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * SHARD_BLOCK_SIZE_PRICE_QUOTIENT + QUOTIENT = MAX_SHARD_BLOCK_SIZE * BLOCK_BODY_PRICE_QUOTIENT price_delta = GweiDelta(state.block_body_price * (block_size - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) if price_delta > 0: - # The maximum gas price caps the amount burnt on gas fees within a period - MAX_BLOCK_SIZE_PRICE = MAX_EFFECTIVE_BALANCE // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH - state.block_body_price = Gwei(min(MAX_BLOCK_SIZE_PRICE, state.block_body_price + price_delta)) + # The maximum block body price caps the amount burnt on fees within a period + MAX_BLOCK_BODY_PRICE = MAX_EFFECTIVE_BALANCE // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH + state.block_body_price = Gwei(min(MAX_BLOCK_BODY_PRICE, state.block_body_price + price_delta)) else: - state.block_body_price = Gwei(max(MIN_BLOCK_SIZE_PRICE, state.block_body_price + price_delta)) + 
state.block_body_price = Gwei(max(MIN_BLOCK_BODY_PRICE, state.block_body_price + price_delta)) ``` ## Shard fork choice rule From a0bbc940ceb1f014f6d02143c4f9e4631e0ab760 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 3 Sep 2019 19:27:30 +0100 Subject: [PATCH 149/250] Update 1_shard-data-chains.md --- specs/core/1_shard-data-chains.md | 46 ++++++++++++++++--------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 5601da936..cc1e9b0a2 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -40,7 +40,7 @@ - [Block processing](#block-processing) - [Block header](#block-header) - [Attestations](#attestations) - - [Block body fee](#block-body-fee) + - [Block body](#block-body) - [Shard fork choice rule](#shard-fork-choice-rule) @@ -113,8 +113,8 @@ class ShardBlock(FlatContainer): state_root: Hash block_size_sum: uint64 body: List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE] - attestation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] - attestation_signature: BLSSignature + aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] + attestations: BLSSignature signature: BLSSignature ``` @@ -128,9 +128,9 @@ class ShardBlockHeader(FlatContainer): parent_root: Hash state_root: Hash block_size_sum: uint64 - body_root: List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE] - attestation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] - attestation_signature: BLSSignature + body_root: Hash + aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] + attestations: BLSSignature signature: BLSSignature ``` @@ -305,19 +305,21 @@ def process_shard_period(state: BeaconState, shard_state: ShardState) -> None: def process_shard_block(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: process_shard_block_header(state, shard_state, block) process_shard_attestations(state, shard_state, block) - process_shard_block_body_fee(state, 
shard_state, block) + process_shard_block_body(state, shard_state, block) ``` #### Block header ```python def process_shard_block_header(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: - # Verify that the slots match + # Verify the shard number + assert block.shard == state.shard + # Verify the slot number assert block.slot == state.slot - # Verify that the beacon chain root matches + # Verify the beacon chain root parent_epoch = compute_epoch_of_shard_slot(state.latest_block_header.slot) assert block.beacon_block_root == get_block_root(state, parent_epoch) - # Verify that the parent matches + # Verify the parent root assert block.parent_root == hash_tree_root(state.latest_block_header) # Save current block as the new latest block state.latest_block_header = ShardBlockHeader( @@ -328,20 +330,18 @@ def process_shard_block_header(state: BeaconState, shard_state: ShardState, bloc # `state_root` is zeroed and overwritten in the next `process_shard_slot` call block_size_sum=block.block_size_sum, body_root=hash_tree_root(block.body), - attestation_bits=block.attestation_bits, - attestation_signature=block.attestation_signature, + aggregation_bits=block.aggregation_bits, + attestations=block.attestations, # `signature` is zeroed ) + # Verify the sum of the block sizes since genesis + state.block_size_sum += SHARD_HEADER_SIZE + len(block.body) + assert block.block_size_sum == state.block_size_sum # Verify proposer signature proposer_index = get_shard_proposer_index(state, state.shard, block.slot) pubkey = state.validators[proposer_index].pubkey domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot)) assert bls_verify(pubkey, hash_tree_root(block.block), block.proposer, domain) - # Verify body size is a multiple of the header size - assert len(block.body) % SHARD_HEADER_SIZE == 0 - # Verify the sum of the block sizes since genesis - state.block_size_sum += SHARD_HEADER_SIZE + len(block.body) - assert 
block.block_size_sum == state.block_size_sum ``` #### Attestations @@ -352,27 +352,29 @@ def process_shard_attestations(state: BeaconState, shard_state: ShardState, bloc attestation_count = 0 shard_committee = get_shard_committee(state, state.shard, block.slot) for i, validator_index in enumerate(shard_committee): - if block.attestation_bits[i]: + if block.aggregation_bits[i]: pubkeys.append(state.validators[validator_index].pubkey) process_delta(state, shard_state, validator_index, get_base_reward(state, validator_index)) attestation_count += 1 # Verify there are no extraneous bits set beyond the shard committee for i in range(len(shard_committee), 2 * MAX_PERIOD_COMMITTEE_SIZE): - assert block.attestation_bits[i] == 0b0 + assert block.aggregation_bits[i] == 0b0 # Verify attester aggregate signature domain = get_domain(state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.slot)) message = hash_tree_root(ShardCheckpoint(shard_state.slot, block.parent_root)) - assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.attestation_signature, domain) + assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.attestations, domain) # Proposer micro-reward proposer_index = get_shard_proposer_index(state, state.shard, block.slot) reward = attestation_count * get_base_reward(state, proposer_index) // PROPOSER_REWARD_QUOTIENT process_delta(state, shard_state, proposer_index, reward) ``` -#### Block body fee +#### Block body ```python -def process_shard_block_body_fee(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: +def process_shard_block_body(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: + # Verify block body size is a multiple of the header size + assert len(block.body) % SHARD_HEADER_SIZE == 0 # Apply proposer block body fee proposer_index = get_shard_proposer_index(state, state.shard, block.slot) block_body_fee = state.block_body_price * len(block.body) // MAX_SHARD_BLOCK_SIZE From 
225c740107698fe2b89ec7fb0a6a4d9daeb04284 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 3 Sep 2019 21:56:12 +0100 Subject: [PATCH 150/250] Apply suggestions from code review Co-Authored-By: terence tsao --- specs/core/1_shard-data-chains.md | 32 +++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index cc1e9b0a2..7db452718 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -229,7 +229,7 @@ def process_delta(state: BeaconState, shard_state: ShardState, index: ValidatorI ### `get_genesis_shard_state` ```python -def get_genesis_shard_state(state: BeaconState, shard: Shard) -> ShardState: +def get_genesis_shard_state(shard: Shard) -> ShardState: return ShardState( shard=shard, slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), @@ -295,8 +295,8 @@ def process_shard_slot(state: BeaconState, shard_state: ShardState) -> None: ```python def process_shard_period(state: BeaconState, shard_state: ShardState) -> None: # Rotate rewards and fees - state.older_committee_deltas = state.newer_committee_deltas - state.newer_committee_deltas = [GweiDelta(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)] + shard_state.older_committee_deltas = shard_state.newer_committee_deltas + shard_state.newer_committee_deltas = [GweiDelta(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)] ``` ### Block processing @@ -313,16 +313,16 @@ def process_shard_block(state: BeaconState, shard_state: ShardState, block: Shar ```python def process_shard_block_header(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: # Verify the shard number - assert block.shard == state.shard + assert block.shard == shard_state.shard # Verify the slot number - assert block.slot == state.slot + assert block.slot == shard_state.slot # Verify the beacon chain root - parent_epoch = compute_epoch_of_shard_slot(state.latest_block_header.slot) + parent_epoch 
= compute_epoch_of_shard_slot(shard_state.latest_block_header.slot) assert block.beacon_block_root == get_block_root(state, parent_epoch) # Verify the parent root - assert block.parent_root == hash_tree_root(state.latest_block_header) + assert block.parent_root == hash_tree_root(shard_state.latest_block_header) # Save current block as the new latest block - state.latest_block_header = ShardBlockHeader( + shard_state.latest_block_header = ShardBlockHeader( shard=block.shard, slot=block.slot, beacon_block_root=block.beacon_block_root, @@ -335,10 +335,10 @@ def process_shard_block_header(state: BeaconState, shard_state: ShardState, bloc # `signature` is zeroed ) # Verify the sum of the block sizes since genesis - state.block_size_sum += SHARD_HEADER_SIZE + len(block.body) + shard_state.block_size_sum += SHARD_HEADER_SIZE + len(block.body) assert block.block_size_sum == state.block_size_sum # Verify proposer signature - proposer_index = get_shard_proposer_index(state, state.shard, block.slot) + proposer_index = get_shard_proposer_index(state, shard_state.shard, block.slot) pubkey = state.validators[proposer_index].pubkey domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot)) assert bls_verify(pubkey, hash_tree_root(block.block), block.proposer, domain) @@ -350,7 +350,7 @@ def process_shard_block_header(state: BeaconState, shard_state: ShardState, bloc def process_shard_attestations(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: pubkeys = [] attestation_count = 0 - shard_committee = get_shard_committee(state, state.shard, block.slot) + shard_committee = get_shard_committee(state, shard_state.shard, block.slot) for i, validator_index in enumerate(shard_committee): if block.aggregation_bits[i]: pubkeys.append(state.validators[validator_index].pubkey) @@ -364,7 +364,7 @@ def process_shard_attestations(state: BeaconState, shard_state: ShardState, bloc message = hash_tree_root(ShardCheckpoint(shard_state.slot, 
block.parent_root)) assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.attestations, domain) # Proposer micro-reward - proposer_index = get_shard_proposer_index(state, state.shard, block.slot) + proposer_index = get_shard_proposer_index(state, shard_state.shard, block.slot) reward = attestation_count * get_base_reward(state, proposer_index) // PROPOSER_REWARD_QUOTIENT process_delta(state, shard_state, proposer_index, reward) ``` @@ -376,20 +376,20 @@ def process_shard_block_body(state: BeaconState, shard_state: ShardState, block: # Verify block body size is a multiple of the header size assert len(block.body) % SHARD_HEADER_SIZE == 0 # Apply proposer block body fee - proposer_index = get_shard_proposer_index(state, state.shard, block.slot) + proposer_index = get_shard_proposer_index(state, shard_state.shard, block.slot) block_body_fee = state.block_body_price * len(block.body) // MAX_SHARD_BLOCK_SIZE process_delta(state, shard_state, proposer_index, -block_body_fee) # Burn process_delta(state, shard_state, proposer_index, block_body_fee // PROPOSER_REWARD_QUOTIENT) # Reward # Calculate new block body price block_size = SHARD_HEADER_SIZE + len(block.body) QUOTIENT = MAX_SHARD_BLOCK_SIZE * BLOCK_BODY_PRICE_QUOTIENT - price_delta = GweiDelta(state.block_body_price * (block_size - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) + price_delta = GweiDelta(shard_state.block_body_price * (block_size - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) if price_delta > 0: # The maximum block body price caps the amount burnt on fees within a period MAX_BLOCK_BODY_PRICE = MAX_EFFECTIVE_BALANCE // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH - state.block_body_price = Gwei(min(MAX_BLOCK_BODY_PRICE, state.block_body_price + price_delta)) + shard_state.block_body_price = Gwei(min(MAX_BLOCK_BODY_PRICE, shard_state.block_body_price + price_delta)) else: - state.block_body_price = Gwei(max(MIN_BLOCK_BODY_PRICE, state.block_body_price + price_delta)) + shard_state.block_body_price = 
Gwei(max(MIN_BLOCK_BODY_PRICE, shard_state.block_body_price + price_delta)) ``` ## Shard fork choice rule From 40dc0622793c904021cc9de70fb5ad41b92c1459 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 3 Sep 2019 22:04:59 +0100 Subject: [PATCH 151/250] Update 1_shard-data-chains.md --- specs/core/1_shard-data-chains.md | 110 +++++++++++++++--------------- 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 7db452718..94d941f38 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -180,18 +180,18 @@ def compute_shard_period_start_epoch(epoch: Epoch, lookback: uint64) -> Epoch: #### `get_period_committee` ```python -def get_period_committee(state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]: - active_validator_indices = get_active_validator_indices(state, epoch) - seed = get_seed(state, epoch) +def get_period_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]: + active_validator_indices = get_active_validator_indices(beacon_state, epoch) + seed = get_seed(beacon_state, epoch) return compute_committee(active_validator_indices, seed, shard, SHARD_COUNT)[:MAX_PERIOD_COMMITTEE_SIZE] ``` #### `get_shard_committee` ```python -def get_shard_committee(state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]: - older_committee = get_period_committee(state, shard, compute_shard_period_start_epoch(epoch, 2)) - newer_committee = get_period_committee(state, shard, compute_shard_period_start_epoch(epoch, 1)) +def get_shard_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]: + older_committee = get_period_committee(beacon_state, shard, compute_shard_period_start_epoch(epoch, 2)) + newer_committee = get_period_committee(beacon_state, shard, compute_shard_period_start_epoch(epoch, 1)) # Every epoch cycle out validators from the older 
committee and cycle in validators from the newer committee older_subcommittee = [i for i in older_committee if i % EPOCHS_PER_SHARD_PERIOD > epoch % EPOCHS_PER_SHARD_PERIOD] newer_subcommittee = [i for i in newer_committee if i % EPOCHS_PER_SHARD_PERIOD <= epoch % EPOCHS_PER_SHARD_PERIOD] @@ -201,12 +201,12 @@ def get_shard_committee(state: BeaconState, shard: Shard, epoch: Epoch) -> Seque #### `get_shard_proposer_index` ```python -def get_shard_proposer_index(state: BeaconState, shard: Shard, slot: ShardSlot) -> ValidatorIndex: - epoch = get_current_epoch(state) - shard_committee = get_shard_committee(state, shard, epoch) - active_indices = [i for i in shard_committee if is_active_validator(state.validators[i], epoch)] - seed = hash(get_seed(state, epoch) + int_to_bytes(slot, length=8) + int_to_bytes(shard, length=8)) - compute_proposer_index(state, active_indices, seed) +def get_shard_proposer_index(beacon_state: BeaconState, shard: Shard, slot: ShardSlot) -> ValidatorIndex: + epoch = get_current_epoch(beacon_state) + shard_committee = get_shard_committee(beacon_state, shard, epoch) + active_indices = [i for i in shard_committee if is_active_validator(beacon_state.validators[i], epoch)] + seed = hash(get_seed(beacon_state, epoch) + int_to_bytes(slot, length=8) + int_to_bytes(shard, length=8)) + compute_proposer_index(beacon_state, active_indices, seed) ``` ### Shard state mutators @@ -214,10 +214,10 @@ def get_shard_proposer_index(state: BeaconState, shard: Shard, slot: ShardSlot) #### `process_delta` ```python -def process_delta(state: BeaconState, shard_state: ShardState, index: ValidatorIndex, delta: GweiDelta) -> None: - epoch = compute_epoch_of_shard_slot(state.slot) - older_committee = get_period_committee(state, shard_state.shard, compute_shard_period_start_epoch(epoch, 2)) - newer_committee = get_period_committee(state, shard_state.shard, compute_shard_period_start_epoch(epoch, 1)) +def process_delta(beacon_state: BeaconState, shard_state: ShardState, 
index: ValidatorIndex, delta: GweiDelta) -> None: + epoch = compute_epoch_of_shard_slot(beacon_state.slot) + older_committee = get_period_committee(beacon_state, shard_state.shard, compute_shard_period_start_epoch(epoch, 2)) + newer_committee = get_period_committee(beacon_state, shard_state.shard, compute_shard_period_start_epoch(epoch, 1)) if index in older_committee: shard_state.older_committee_deltas[older_committee.index(index)] += delta elif index in newer_committee: @@ -240,25 +240,25 @@ def get_genesis_shard_state(shard: Shard) -> ShardState: ### `get_genesis_shard_block` ```python -def get_genesis_shard_block(state: BeaconState, shard: Shard) -> ShardBlock: +def get_genesis_shard_block(beacon_state: BeaconState, shard: Shard) -> ShardBlock: return ShardBlock( shard=shard, slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), - state_root=hash_tree_root(get_genesis_shard_state(state, shard)), + state_root=hash_tree_root(get_genesis_shard_state(beacon_state, shard)), ) ``` ## Shard state transition function ```python -def shard_state_transition(state: BeaconState, +def shard_state_transition(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock, validate_state_root: bool=False) -> ShardState: # Process slots (including those with no blocks) since block - process_shard_slots(state, shard_state, block.slot) + process_shard_slots(beacon_state, shard_state, block.slot) # Process block - process_shard_block(state, shard_state, block) + process_shard_block(beacon_state, shard_state, block) # Validate state root (`validate_state_root == True` in production) if validate_state_root: assert block.state_root == hash_tree_root(shard_state) @@ -267,33 +267,33 @@ def shard_state_transition(state: BeaconState, ``` ```python -def process_shard_slots(state: BeaconState, shard_state: ShardState, slot: ShardSlot) -> None: +def process_shard_slots(beacon_state: BeaconState, shard_state: ShardState, slot: ShardSlot) -> None: assert shard_state.slot <= slot 
while shard_state.slot < slot: - process_shard_slot(state, shard_state) + process_shard_slot(shard_state) # Process period on the start slot of the next period if (shard_state.slot + 1) % (SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: - process_shard_period(state, shard_state) + process_shard_period(shard_state) shard_state.slot += ShardSlot(1) ``` ```python -def process_shard_slot(state: BeaconState, shard_state: ShardState) -> None: +def process_shard_slot(shard_state: ShardState) -> None: # Cache state root - previous_state_root = hash_tree_root(state) - if state.latest_block_header.state_root == Bytes32(): - state.latest_block_header.state_root = previous_state_root + previous_state_root = hash_tree_root(shard_state) + if shard_state.latest_block_header.state_root == Bytes32(): + shard_state.latest_block_header.state_root = previous_state_root # Cache state root in history accumulator depth = 0 - while state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_VECTOR: - state.history_accumulator[depth] = previous_state_root + while shard_state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_VECTOR: + shard_state.history_accumulator[depth] = previous_state_root depth += 1 ``` ### Period processing ```python -def process_shard_period(state: BeaconState, shard_state: ShardState) -> None: +def process_shard_period(shard_state: ShardState) -> None: # Rotate rewards and fees shard_state.older_committee_deltas = shard_state.newer_committee_deltas shard_state.newer_committee_deltas = [GweiDelta(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)] @@ -302,23 +302,23 @@ def process_shard_period(state: BeaconState, shard_state: ShardState) -> None: ### Block processing ```python -def process_shard_block(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: - process_shard_block_header(state, shard_state, block) - process_shard_attestations(state, shard_state, block) - process_shard_block_body(state, shard_state, block) +def 
process_shard_block(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: + process_shard_block_header(beacon_state, shard_state, block) + process_shard_attestations(beacon_state, shard_state, block) + process_shard_block_body(beacon_state, shard_state, block) ``` #### Block header ```python -def process_shard_block_header(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: +def process_shard_block_header(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: # Verify the shard number assert block.shard == shard_state.shard # Verify the slot number assert block.slot == shard_state.slot # Verify the beacon chain root parent_epoch = compute_epoch_of_shard_slot(shard_state.latest_block_header.slot) - assert block.beacon_block_root == get_block_root(state, parent_epoch) + assert block.beacon_block_root == get_block_root(beacon_state, parent_epoch) # Verify the parent root assert block.parent_root == hash_tree_root(shard_state.latest_block_header) # Save current block as the new latest block @@ -336,50 +336,50 @@ def process_shard_block_header(state: BeaconState, shard_state: ShardState, bloc ) # Verify the sum of the block sizes since genesis shard_state.block_size_sum += SHARD_HEADER_SIZE + len(block.body) - assert block.block_size_sum == state.block_size_sum + assert block.block_size_sum == shard_state.block_size_sum # Verify proposer signature - proposer_index = get_shard_proposer_index(state, shard_state.shard, block.slot) - pubkey = state.validators[proposer_index].pubkey - domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot)) + proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) + pubkey = beacon_state.validators[proposer_index].pubkey + domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot)) assert bls_verify(pubkey, hash_tree_root(block.block), block.proposer, domain) ``` #### 
Attestations ```python -def process_shard_attestations(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: +def process_shard_attestations(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: pubkeys = [] attestation_count = 0 - shard_committee = get_shard_committee(state, shard_state.shard, block.slot) + shard_committee = get_shard_committee(beacon_state, shard_state.shard, block.slot) for i, validator_index in enumerate(shard_committee): if block.aggregation_bits[i]: - pubkeys.append(state.validators[validator_index].pubkey) - process_delta(state, shard_state, validator_index, get_base_reward(state, validator_index)) + pubkeys.append(beacon_state.validators[validator_index].pubkey) + process_delta(beacon_state, shard_state, validator_index, get_base_reward(beacon_state, validator_index)) attestation_count += 1 # Verify there are no extraneous bits set beyond the shard committee for i in range(len(shard_committee), 2 * MAX_PERIOD_COMMITTEE_SIZE): assert block.aggregation_bits[i] == 0b0 # Verify attester aggregate signature - domain = get_domain(state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.slot)) + domain = get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.slot)) message = hash_tree_root(ShardCheckpoint(shard_state.slot, block.parent_root)) assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.attestations, domain) # Proposer micro-reward - proposer_index = get_shard_proposer_index(state, shard_state.shard, block.slot) - reward = attestation_count * get_base_reward(state, proposer_index) // PROPOSER_REWARD_QUOTIENT - process_delta(state, shard_state, proposer_index, reward) + proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) + reward = attestation_count * get_base_reward(beacon_state, proposer_index) // PROPOSER_REWARD_QUOTIENT + process_delta(beacon_state, shard_state, proposer_index, reward) ``` #### Block body 
```python -def process_shard_block_body(state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: +def process_shard_block_body(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None: # Verify block body size is a multiple of the header size assert len(block.body) % SHARD_HEADER_SIZE == 0 # Apply proposer block body fee - proposer_index = get_shard_proposer_index(state, shard_state.shard, block.slot) - block_body_fee = state.block_body_price * len(block.body) // MAX_SHARD_BLOCK_SIZE - process_delta(state, shard_state, proposer_index, -block_body_fee) # Burn - process_delta(state, shard_state, proposer_index, block_body_fee // PROPOSER_REWARD_QUOTIENT) # Reward + proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) + block_body_fee = shard_state.block_body_price * len(block.body) // MAX_SHARD_BLOCK_SIZE + process_delta(beacon_state, shard_state, proposer_index, -block_body_fee) # Burn + process_delta(beacon_state, shard_state, proposer_index, block_body_fee // PROPOSER_REWARD_QUOTIENT) # Reward # Calculate new block body price block_size = SHARD_HEADER_SIZE + len(block.body) QUOTIENT = MAX_SHARD_BLOCK_SIZE * BLOCK_BODY_PRICE_QUOTIENT @@ -394,4 +394,4 @@ def process_shard_block_body(state: BeaconState, shard_state: ShardState, block: ## Shard fork choice rule -The fork choice rule for any shard is LMD GHOST using the shard attestations of the shard committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_block_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot.) 
+The fork choice rule for any shard is LMD GHOST using the shard attestations of the shard committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `beacon_state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_block_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot.) From 2eda4c5dbc051696ba11e10b08b0717910834b54 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 3 Sep 2019 22:09:47 +0100 Subject: [PATCH 152/250] Update 1_shard-data-chains.md --- specs/core/1_shard-data-chains.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 94d941f38..61c5c43aa 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -256,7 +256,7 @@ def shard_state_transition(beacon_state: BeaconState, block: ShardBlock, validate_state_root: bool=False) -> ShardState: # Process slots (including those with no blocks) since block - process_shard_slots(beacon_state, shard_state, block.slot) + process_shard_slots(shard_state, block.slot) # Process block process_shard_block(beacon_state, shard_state, block) # Validate state root (`validate_state_root == True` in production) @@ -267,7 +267,7 @@ def shard_state_transition(beacon_state: BeaconState, ``` ```python -def process_shard_slots(beacon_state: BeaconState, shard_state: ShardState, slot: ShardSlot) -> None: +def process_shard_slots(shard_state: ShardState, slot: ShardSlot) -> None: assert shard_state.slot <= slot while shard_state.slot < slot: process_shard_slot(shard_state) From 91e73c1f5794082c05665bb29a12cf941a92f909 Mon Sep 17 00:00:00 2001 
From: Justin Date: Tue, 3 Sep 2019 22:15:52 +0100 Subject: [PATCH 153/250] Deduplicate indices --- specs/core/1_shard-data-chains.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 61c5c43aa..28585d02b 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -195,7 +195,8 @@ def get_shard_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) - # Every epoch cycle out validators from the older committee and cycle in validators from the newer committee older_subcommittee = [i for i in older_committee if i % EPOCHS_PER_SHARD_PERIOD > epoch % EPOCHS_PER_SHARD_PERIOD] newer_subcommittee = [i for i in newer_committee if i % EPOCHS_PER_SHARD_PERIOD <= epoch % EPOCHS_PER_SHARD_PERIOD] - return older_subcommittee + newer_subcommittee + # Deduplicate and sort indices + return sorted(set(older_subcommittee + newer_subcommittee)) ``` #### `get_shard_proposer_index` From 2b60c9cf90f989a73973bea8db00b88f532c28af Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 3 Sep 2019 22:45:13 +0100 Subject: [PATCH 154/250] Update 1_shard-data-chains.md --- specs/core/1_shard-data-chains.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 28585d02b..61c5c43aa 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -195,8 +195,7 @@ def get_shard_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) - # Every epoch cycle out validators from the older committee and cycle in validators from the newer committee older_subcommittee = [i for i in older_committee if i % EPOCHS_PER_SHARD_PERIOD > epoch % EPOCHS_PER_SHARD_PERIOD] newer_subcommittee = [i for i in newer_committee if i % EPOCHS_PER_SHARD_PERIOD <= epoch % EPOCHS_PER_SHARD_PERIOD] - # Deduplicate and sort indices - return sorted(set(older_subcommittee + 
newer_subcommittee)) + return older_subcommittee + newer_subcommittee ``` #### `get_shard_proposer_index` From c01d036ed1293afa16edc97bcc02aa58cb571a80 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 3 Sep 2019 22:49:33 +0100 Subject: [PATCH 155/250] Update 1_shard-data-chains.md --- specs/core/1_shard-data-chains.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 61c5c43aa..72228c633 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -240,11 +240,11 @@ def get_genesis_shard_state(shard: Shard) -> ShardState: ### `get_genesis_shard_block` ```python -def get_genesis_shard_block(beacon_state: BeaconState, shard: Shard) -> ShardBlock: +def get_genesis_shard_block(shard: Shard) -> ShardBlock: return ShardBlock( shard=shard, slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), - state_root=hash_tree_root(get_genesis_shard_state(beacon_state, shard)), + state_root=hash_tree_root(get_genesis_shard_state(shard)), ) ``` From dbcce177691c3466bc964254b186bcf87e98b5f7 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Wed, 4 Sep 2019 09:00:59 -0700 Subject: [PATCH 156/250] Update sync_protocol.md --- specs/light_client/sync_protocol.md | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 207e0e63e..3425c0f54 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -13,9 +13,8 @@ - [Constants](#constants) - [Containers](#containers) - [`LightClientUpdate`](#lightclientupdate) - - [Helpers](#helpers) - [`LightClientMemory`](#lightclientmemory) - - [`unpack_compact_validator`](#unpack_compact_validator) + - [Helpers](#helpers) - [`get_persistent_committee_pubkeys_and_balances`](#get_persistent_committee_pubkeys_and_balances) - [Light client state updates](#light-client-state-updates) - 
[Data overhead](#data-overhead) @@ -62,12 +61,9 @@ class LightClientUpdate(container): committee_branch: Vector[Hash, PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)] ``` -## Helpers - ### `LightClientMemory` ```python -@dataclass class LightClientMemory(object): shard: Shard # Randomly initialized and retained forever header: BeaconBlockHeader # Beacon header which is not expected to revert @@ -77,19 +73,7 @@ class LightClientMemory(object): next_committee: CompactCommittee ``` -### `unpack_compact_validator` - -```python -def unpack_compact_validator(compact_validator: CompactValidator) -> Tuple[ValidatorIndex, bool, uint64]: - """ - Return the index, slashed, effective_balance // EFFECTIVE_BALANCE_INCREMENT of ``compact_validator``. - """ - return ( - ValidatorIndex(compact_validator >> 16), - bool((compact_validator >> 15) % 2), - uint64(compact_validator & (2**15 - 1)), - ) -``` +## Helpers ### `get_persistent_committee_pubkeys_and_balances` From f1065faf9cd711fd6b2b03fd58b4b50fc201a4b8 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Thu, 5 Sep 2019 06:54:20 -0700 Subject: [PATCH 157/250] Update sync_protocol.md --- specs/light_client/sync_protocol.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 3425c0f54..18810d48d 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -61,10 +61,13 @@ class LightClientUpdate(container): committee_branch: Vector[Hash, PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)] ``` +## Helpers + ### `LightClientMemory` ```python class LightClientMemory(object): + @dataclass shard: Shard # Randomly initialized and retained forever header: BeaconBlockHeader # Beacon header which is not expected to revert # Persistent committees corresponding to the beacon header @@ -73,8 +76,6 @@ class LightClientMemory(object): next_committee: CompactCommittee ``` -## 
Helpers - ### `get_persistent_committee_pubkeys_and_balances` ```python From 1e74cf5f0d2866d1811b5abb8548452c3db5be26 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Thu, 5 Sep 2019 06:55:40 -0700 Subject: [PATCH 158/250] Update sync_protocol.md --- specs/light_client/sync_protocol.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 18810d48d..c3b035270 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -13,8 +13,8 @@ - [Constants](#constants) - [Containers](#containers) - [`LightClientUpdate`](#lightclientupdate) - - [`LightClientMemory`](#lightclientmemory) - [Helpers](#helpers) + - [`LightClientMemory`](#lightclientmemory) - [`get_persistent_committee_pubkeys_and_balances`](#get_persistent_committee_pubkeys_and_balances) - [Light client state updates](#light-client-state-updates) - [Data overhead](#data-overhead) @@ -66,8 +66,8 @@ class LightClientUpdate(container): ### `LightClientMemory` ```python +@dataclass class LightClientMemory(object): - @dataclass shard: Shard # Randomly initialized and retained forever header: BeaconBlockHeader # Beacon header which is not expected to revert # Persistent committees corresponding to the beacon header From 2a4957c6ccee89769c9413427486a1393c7e3c12 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 5 Sep 2019 20:07:25 +0100 Subject: [PATCH 159/250] Update 1_shard-data-chains.md --- specs/core/1_shard-data-chains.md | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 72228c633..c985294aa 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -83,9 +83,9 @@ This document describes the shard transition function (data layer only) and the ### State list lengths -| Name | Value | Unit | -| - | - | :-: | -| 
`HISTORY_ACCUMULATOR_VECTOR` | `2**6` (= 64) | state tree maximum depth | +| Name | Value | +| - | - | +| `HISTORY_ACCUMULATOR_DEPTH` | `2**6` (= 64) | ### Rewards and penalties @@ -111,8 +111,8 @@ class ShardBlock(FlatContainer): beacon_block_root: Hash parent_root: Hash state_root: Hash - block_size_sum: uint64 body: List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE] + block_size_sum: uint64 aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] attestations: BLSSignature signature: BLSSignature @@ -127,8 +127,8 @@ class ShardBlockHeader(FlatContainer): beacon_block_root: Hash parent_root: Hash state_root: Hash - block_size_sum: uint64 body_root: Hash + block_size_sum: uint64 aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE] attestations: BLSSignature signature: BLSSignature @@ -140,7 +140,7 @@ class ShardBlockHeader(FlatContainer): class ShardState(FlatContainer): shard: Shard slot: ShardSlot - history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_VECTOR] + history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_DEPTH] latest_block_header: ShardBlockHeader block_size_sum: uint64 # Fees and rewards @@ -271,7 +271,7 @@ def process_shard_slots(shard_state: ShardState, slot: ShardSlot) -> None: assert shard_state.slot <= slot while shard_state.slot < slot: process_shard_slot(shard_state) - # Process period on the start slot of the next period + # Process shard period on the start slot of the next shard period if (shard_state.slot + 1) % (SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0: process_shard_period(shard_state) shard_state.slot += ShardSlot(1) @@ -285,7 +285,7 @@ def process_shard_slot(shard_state: ShardState) -> None: shard_state.latest_block_header.state_root = previous_state_root # Cache state root in history accumulator depth = 0 - while shard_state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_VECTOR: + while shard_state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_DEPTH: shard_state.history_accumulator[depth] = 
previous_state_root depth += 1 ``` @@ -294,7 +294,7 @@ def process_shard_slot(shard_state: ShardState) -> None: ```python def process_shard_period(shard_state: ShardState) -> None: - # Rotate rewards and fees + # Rotate committee deltas shard_state.older_committee_deltas = shard_state.newer_committee_deltas shard_state.newer_committee_deltas = [GweiDelta(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)] ``` @@ -328,20 +328,19 @@ def process_shard_block_header(beacon_state: BeaconState, shard_state: ShardStat beacon_block_root=block.beacon_block_root, parent_root=block.parent_root, # `state_root` is zeroed and overwritten in the next `process_shard_slot` call - block_size_sum=block.block_size_sum, body_root=hash_tree_root(block.body), + block_size_sum=block.block_size_sum, aggregation_bits=block.aggregation_bits, attestations=block.attestations, # `signature` is zeroed ) # Verify the sum of the block sizes since genesis - shard_state.block_size_sum += SHARD_HEADER_SIZE + len(block.body) + shard_state.block_size_sum += SHARD_HEADER_SIZE + len(block.body) assert block.block_size_sum == shard_state.block_size_sum # Verify proposer signature proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) - pubkey = beacon_state.validators[proposer_index].pubkey domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot)) - assert bls_verify(pubkey, hash_tree_root(block.block), block.proposer, domain) + assert bls_verify(beacon_state.validators[proposer_index].pubkey, signing_root(block), block.signature, domain) ``` #### Attestations @@ -376,8 +375,8 @@ def process_shard_block_body(beacon_state: BeaconState, shard_state: ShardState, # Verify block body size is a multiple of the header size assert len(block.body) % SHARD_HEADER_SIZE == 0 # Apply proposer block body fee - proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) block_body_fee = shard_state.block_body_price * 
len(block.body) // MAX_SHARD_BLOCK_SIZE + proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) process_delta(beacon_state, shard_state, proposer_index, -block_body_fee) # Burn process_delta(beacon_state, shard_state, proposer_index, block_body_fee // PROPOSER_REWARD_QUOTIENT) # Reward # Calculate new block body price @@ -385,7 +384,7 @@ def process_shard_block_body(beacon_state: BeaconState, shard_state: ShardState, QUOTIENT = MAX_SHARD_BLOCK_SIZE * BLOCK_BODY_PRICE_QUOTIENT price_delta = GweiDelta(shard_state.block_body_price * (block_size - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) if price_delta > 0: - # The maximum block body price caps the amount burnt on fees within a period + # The maximum block body price caps the amount burnt on fees within a shard period MAX_BLOCK_BODY_PRICE = MAX_EFFECTIVE_BALANCE // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH shard_state.block_body_price = Gwei(min(MAX_BLOCK_BODY_PRICE, shard_state.block_body_price + price_delta)) else: From c06ffc4924bbc80d4864c7370e05eacc02003544 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Fri, 6 Sep 2019 17:07:45 -0400 Subject: [PATCH 160/250] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0de6cd9ef..8fb2bc591 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ See the [Eth 2.0 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) fo * [General test format](specs/test_formats/README.md) * [Merkle proof formats](specs/light_client/merkle_proofs.md) * [Light client syncing protocol](specs/light_client/sync_protocol.md) -* [Beacon node API for validator](specs/validator/0_beacon-node-validator-api.md) +* [Beacon node API for validator](https://github.com/ethereum/eth2.0-APIs/blob/master/apis/validator/beacon-node-validator-api.md) ## Additional specifications for client implementers From 1d71ae9af4251b2bf3e6b1200b52104da771a8d8 Mon Sep 17 00:00:00 2001 From: terence tsao 
Date: Sat, 7 Sep 2019 09:28:45 -0400 Subject: [PATCH 161/250] Update README.md --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 8fb2bc591..fdfbd1c5e 100644 --- a/README.md +++ b/README.md @@ -35,8 +35,6 @@ See the [Eth 2.0 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) fo * [General test format](specs/test_formats/README.md) * [Merkle proof formats](specs/light_client/merkle_proofs.md) * [Light client syncing protocol](specs/light_client/sync_protocol.md) -* [Beacon node API for validator](https://github.com/ethereum/eth2.0-APIs/blob/master/apis/validator/beacon-node-validator-api.md) - ## Additional specifications for client implementers From ea6a3b293c1457c5b9873f35813f9aefda3adacc Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 7 Sep 2019 14:03:32 -0400 Subject: [PATCH 162/250] add explicit comments for int_to_bytes and bytes_to_int --- specs/core/0_beacon-chain.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 96e76c9ce..8bee6072c 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -560,6 +560,8 @@ def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32: def int_to_bytes(n: uint64, length: uint64) -> bytes: """ Return the ``length``-byte serialization of ``n``. + ``n.to_bytes(length, endianness)`` returns ``n`` in endianness-endian bytes padded out to length bytes. + """ return n.to_bytes(length, ENDIANNESS) ``` @@ -570,6 +572,7 @@ def int_to_bytes(n: uint64, length: uint64) -> bytes: def bytes_to_int(data: bytes) -> uint64: """ Return the integer deserialization of ``data``. + ``n.to_bytes(data, endianness)`` returns ``data`` as in integer intepretted in endianness-endian. 
""" return int.from_bytes(data, ENDIANNESS) ``` From d66e6ca0b8f055591d42ba3d09918013dbee7448 Mon Sep 17 00:00:00 2001 From: Gregory Markou <16929357+GregTheGreek@users.noreply.github.com> Date: Sun, 15 Sep 2019 17:12:36 -0400 Subject: [PATCH 163/250] Update 1_custody-game.md Fix table --- specs/core/1_custody-game.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index d789a6855..f03ffada6 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -77,7 +77,8 @@ This document details the beacon chain additions and changes in Phase 1 of Ether ## Constants ### Misc - +| Name | Value | +| - | - | | `BLS12_381_Q` | `4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787` | | `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) | From 6d9dd04b5024dd21fe68e8bef40798a9cd68968d Mon Sep 17 00:00:00 2001 From: Gregory Markou <16929357+GregTheGreek@users.noreply.github.com> Date: Sun, 15 Sep 2019 21:03:50 -0400 Subject: [PATCH 164/250] Update 1_custody-game.md --- specs/core/1_custody-game.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index f03ffada6..3a7668caf 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -340,7 +340,7 @@ def legendre_bit(a: int, q: int) -> int: Given one proof of custody chunk, returns the proof of custody subchunks of the correct sizes. 
```python -def custody_subchunkify(bytez: bytes) -> list: +def custody_subchunkify(bytez: bytes) -> list[int]: bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_SUBCHUNK) return [bytez[i:i + BYTES_PER_CUSTODY_SUBCHUNK] for i in range(0, len(bytez), BYTES_PER_CUSTODY_SUBCHUNK)] From 970ae2fad1c82e250e8a1369126e6e4df310ed47 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 16 Sep 2019 09:16:44 -0500 Subject: [PATCH 165/250] update validator doc with modified beacon proposer selection --- specs/validator/0_beacon-chain-validator.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index ef5ad4415..ddf5b5f23 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -160,7 +160,7 @@ def get_committee_assignment(state: BeaconState, return None ``` -A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run with a `state` of the slot in question. Proposer selection is only stable within the context of the current epoch. +A validator can use the following function to see if they are supposed to propose during a slot. This function can only be run with a `state` of the slot in question. Proposer selection is only stable within the context of the current epoch. ```python def is_proposer(state: BeaconState, @@ -170,6 +170,8 @@ def is_proposer(state: BeaconState, *Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot. +*Note*: `BeaconBlock` proposal is distinct from crosslink committee assignment, and in a given epoch each responsbility might occur at different a different slot. 
+ ### Lookahead The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the epoch in question. From 9582814c62412c43261501ad1fc409613d291a9a Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 16 Sep 2019 09:58:09 -0500 Subject: [PATCH 166/250] clarify endianness --- specs/core/0_beacon-chain.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 8bee6072c..dfb7fbdb6 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -559,9 +559,7 @@ def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32: ```python def int_to_bytes(n: uint64, length: uint64) -> bytes: """ - Return the ``length``-byte serialization of ``n``. - ``n.to_bytes(length, endianness)`` returns ``n`` in endianness-endian bytes padded out to length bytes. - + Return the ``length``-byte serialization of ``n`` in ``ENDIANNESS``-endian. """ return n.to_bytes(length, ENDIANNESS) ``` @@ -571,8 +569,7 @@ def int_to_bytes(n: uint64, length: uint64) -> bytes: ```python def bytes_to_int(data: bytes) -> uint64: """ - Return the integer deserialization of ``data``. - ``n.to_bytes(data, endianness)`` returns ``data`` as in integer intepretted in endianness-endian. + Return the integer deserialization of ``data`` intepretted as ``ENDIANNESS``-endian. 
""" return int.from_bytes(data, ENDIANNESS) ``` From 794a2407a197ad98a9ee87158a71ec10a3b52f54 Mon Sep 17 00:00:00 2001 From: Gregory Markou <16929357+GregTheGreek@users.noreply.github.com> Date: Mon, 16 Sep 2019 11:19:51 -0400 Subject: [PATCH 167/250] Update specs/core/1_custody-game.md Co-Authored-By: Danny Ryan --- specs/core/1_custody-game.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 3a7668caf..64807d80d 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -340,7 +340,7 @@ def legendre_bit(a: int, q: int) -> int: Given one proof of custody chunk, returns the proof of custody subchunks of the correct sizes. ```python -def custody_subchunkify(bytez: bytes) -> list[int]: +def custody_subchunkify(bytez: bytes) -> list[bytes]: bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_SUBCHUNK) return [bytez[i:i + BYTES_PER_CUSTODY_SUBCHUNK] for i in range(0, len(bytez), BYTES_PER_CUSTODY_SUBCHUNK)] From 834b81122237afd977cd35a9e9b2e172ebafc733 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 16 Sep 2019 13:26:27 -0600 Subject: [PATCH 168/250] fix custody_subchunkify return type --- specs/core/1_custody-game.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 64807d80d..0eea43dc0 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -340,7 +340,7 @@ def legendre_bit(a: int, q: int) -> int: Given one proof of custody chunk, returns the proof of custody subchunks of the correct sizes. 
```python -def custody_subchunkify(bytez: bytes) -> list[bytes]: +def custody_subchunkify(bytez: bytes) -> Sequence[bytes]: bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_SUBCHUNK) return [bytez[i:i + BYTES_PER_CUSTODY_SUBCHUNK] for i in range(0, len(bytez), BYTES_PER_CUSTODY_SUBCHUNK)] From 51a0d8059ddb17013760b9ef2512317d899b7cc0 Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 20 Sep 2019 14:37:37 -0500 Subject: [PATCH 169/250] sync protocol: clarify committee type Updates at period boundaries occur per period committee, not per persistent committee --- specs/light_client/sync_protocol.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index c3b035270..1f47e818f 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -39,8 +39,8 @@ We define the following Python custom types for type hinting and readability: | - | - | | `BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH` | `4` | | `BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_INDEX` | **TBD** | -| `PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH` | `5` | -| `PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX` | **TBD** | +| `PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH` | `5` | +| `PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX` | **TBD** | ## Containers @@ -56,9 +56,9 @@ class LightClientUpdate(container): # Updated beacon header (and authenticating branch) header: BeaconBlockHeader header_branch: Vector[Hash, BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH] - # Updated persistent committee (and authenticating branch) + # Updated period committee (and authenticating branch) committee: CompactCommittee - committee_branch: Vector[Hash, PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)] + committee_branch: Vector[Hash, PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)] ``` ## Helpers @@ -70,7 +70,7 @@ class LightClientUpdate(container): class 
LightClientMemory(object): shard: Shard # Randomly initialized and retained forever header: BeaconBlockHeader # Beacon header which is not expected to revert - # Persistent committees corresponding to the beacon header + # period committees corresponding to the beacon header previous_committee: CompactCommittee current_committee: CompactCommittee next_committee: CompactCommittee @@ -137,13 +137,13 @@ def update_memory(memory: LightClientMemory, update: LightClientUpdate) -> None: domain = compute_domain(DOMAIN_SHARD_ATTESTER, update.fork_version) assert bls_verify(pubkey, update.shard_block_root, update.signature, domain) - # Update persistent committees if entering a new period + # Update period committees if entering a new period if next_period == current_period + 1: assert is_valid_merkle_branch( leaf=hash_tree_root(update.committee), branch=update.committee_branch, - depth=PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT), - index=PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX << log_2(SHARD_COUNT) + memory.shard, + depth=PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT), + index=PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX << log_2(SHARD_COUNT) + memory.shard, root=hash_tree_root(update.header), ) memory.previous_committee = memory.current_committee From 50209ea806820f83ceff3f21e1578191db0a71e5 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 22 Sep 2019 10:09:10 +0300 Subject: [PATCH 170/250] Refactor Merkle proof verification Goal: support calculating roots of trees modified by setting new leaves --- specs/light_client/merkle_proofs.md | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index b920f50b1..d8b8c2464 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -282,23 +282,27 @@ def get_helper_indices(indices: Sequence[GeneralizedIndex]) -> Sequence[Generali Now 
we provide the Merkle proof verification functions. First, for single item proofs: ```python -def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool: +def calculate_merkle_root(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex) -> Hash: assert len(proof) == get_generalized_index_length(index) for i, h in enumerate(proof): if get_generalized_index_bit(index, i): leaf = hash(h + leaf) else: leaf = hash(leaf + h) - return leaf == root + return leaf +``` + +```python +def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool: + return calculate_merkle_root(leaf, proof, index) == root ``` Now for multi-item proofs: ```python -def verify_merkle_multiproof(leaves: Sequence[Hash], - proof: Sequence[Hash], - indices: Sequence[GeneralizedIndex], - root: Hash) -> bool: +def calculate_multi_merkle_root(leaves: Sequence[Hash], + proof: Sequence[Hash], + indices: Sequence[GeneralizedIndex]) -> Hash: assert len(leaves) == len(indices) helper_indices = get_helper_indices(indices) assert len(proof) == len(helper_indices) @@ -317,7 +321,15 @@ def verify_merkle_multiproof(leaves: Sequence[Hash], ) keys.append(GeneralizedIndex(k // 2)) pos += 1 - return objects[GeneralizedIndex(1)] == root + return objects[GeneralizedIndex(1)] ``` -Note that the single-item proof is a special case of a multi-item proof; a valid single-item proof verifies correctly when put into the multi-item verification function (making the natural trivial changes to input arguments, `index -> [index]` and `leaf -> [leaf]`). 
+```python +def verify_merkle_multiproof(leaves: Sequence[Hash], + proof: Sequence[Hash], + indices: Sequence[GeneralizedIndex], + root: Hash) -> bool: + return calculate_multi_merkle_root(leaf, proof, index) == root +``` + +Note that the single-item proof is a special case of a multi-item proof; a valid single-item proof verifies correctly when put into the multi-item verification function (making the natural trivial changes to input arguments, `index -> [index]` and `leaf -> [leaf]`). Note also that `calculate_merkle_root` and `calculate_multi_merkle_root` can be used independently to compute the new Merkle root of a proof with leaves updated. From 2e5389978402736121819fd6dfe0d9031da47b6a Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 21 Sep 2019 02:59:41 +0300 Subject: [PATCH 171/250] typo --- specs/validator/0_beacon-chain-validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index ddf5b5f23..a9736de70 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -170,7 +170,7 @@ def is_proposer(state: BeaconState, *Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot. -*Note*: `BeaconBlock` proposal is distinct from crosslink committee assignment, and in a given epoch each responsbility might occur at different a different slot. +*Note*: `BeaconBlock` proposal is distinct from crosslink committee assignment, and in a given epoch each responsability might occur at different a different slot. 
### Lookahead From 2ec363d614ea0d597824c45e9bd28cad15663ae3 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 23 Sep 2019 12:42:29 -0600 Subject: [PATCH 172/250] typo --- specs/validator/0_beacon-chain-validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index a9736de70..a25dc969b 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -170,7 +170,7 @@ def is_proposer(state: BeaconState, *Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot. -*Note*: `BeaconBlock` proposal is distinct from crosslink committee assignment, and in a given epoch each responsability might occur at different a different slot. +*Note*: `BeaconBlock` proposal is distinct from crosslink committee assignment, and in a given epoch each responsibility might occur at different a different slot. 
### Lookahead From 47a818c705335d166b5a0d99bdefff9f3d1f042a Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Sun, 22 Sep 2019 20:27:42 +0100 Subject: [PATCH 173/250] Add domain_type to get_seed --- configs/mainnet.yaml | 4 ++-- configs/minimal.yaml | 4 ++-- specs/core/0_beacon-chain.md | 20 +++++++++---------- specs/core/1_shard-data-chains.md | 5 +++-- specs/validator/0_beacon-chain-validator.md | 2 +- .../eth2spec/test/helpers/attestations.py | 2 +- 6 files changed, 19 insertions(+), 18 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index d4e69dab5..86fa8dcc5 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -123,8 +123,8 @@ MAX_TRANSFERS: 0 # Signature domains # --------------------------------------------------------------- DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_RANDAO: 0x01000000 -DOMAIN_ATTESTATION: 0x02000000 +DOMAIN_BEACON_ATTESTER: 0x01000000 +DOMAIN_RANDAO: 0x02000000 DOMAIN_DEPOSIT: 0x03000000 DOMAIN_VOLUNTARY_EXIT: 0x04000000 DOMAIN_TRANSFER: 0x05000000 diff --git a/configs/minimal.yaml b/configs/minimal.yaml index be787ca3c..8c6a9e11f 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -125,8 +125,8 @@ MAX_TRANSFERS: 0 # Signature domains # --------------------------------------------------------------- DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_RANDAO: 0x01000000 -DOMAIN_ATTESTATION: 0x02000000 +DOMAIN_BEACON_ATTESTER: 0x01000000 +DOMAIN_RANDAO: 0x02000000 DOMAIN_DEPOSIT: 0x03000000 DOMAIN_VOLUNTARY_EXIT: 0x04000000 DOMAIN_TRANSFER: 0x05000000 diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 5bdfc7a5b..cfdf24e71 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -19,7 +19,7 @@ - [State list lengths](#state-list-lengths) - [Rewards and penalties](#rewards-and-penalties) - [Max operations per block](#max-operations-per-block) - - [Signature domain types](#signature-domain-types) + - [Domain types](#domain-types) - [Containers](#containers) - 
[Misc dependencies](#misc-dependencies) - [`Fork`](#fork) @@ -148,7 +148,7 @@ We define the following Python custom types for type hinting and readability: | `Gwei` | `uint64` | an amount in Gwei | | `Hash` | `Bytes32` | a hash | | `Version` | `Bytes4` | a fork version number | -| `DomainType` | `Bytes4` | a signature domain type | +| `DomainType` | `Bytes4` | a domain type | | `Domain` | `Bytes8` | a signature domain | | `BLSPubkey` | `Bytes48` | a BLS12-381 public key | | `BLSSignature` | `Bytes96` | a BLS12-381 signature | @@ -250,15 +250,15 @@ The following values are (non-configurable) constants used throughout the specif | `MAX_VOLUNTARY_EXITS` | `2**4` (= 16) | | `MAX_TRANSFERS` | `0` | -### Signature domain types +### Domain types The following types are defined, mapping into `DomainType` (little endian): | Name | Value | | - | - | | `DOMAIN_BEACON_PROPOSER` | `0` | -| `DOMAIN_RANDAO` | `1` | -| `DOMAIN_ATTESTATION` | `2` | +| `DOMAIN_BEACON_ATTESTER` | `1` | +| `DOMAIN_RANDAO` | `2` | | `DOMAIN_DEPOSIT` | `3` | | `DOMAIN_VOLUNTARY_EXIT` | `4` | | `DOMAIN_TRANSFER` | `5` | @@ -671,7 +671,7 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b1)), ], signature=indexed_attestation.signature, - domain=get_domain(state, DOMAIN_ATTESTATION, indexed_attestation.data.target.epoch), + domain=get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch), ): return False return True @@ -870,12 +870,12 @@ def get_validator_churn_limit(state: BeaconState) -> uint64: #### `get_seed` ```python -def get_seed(state: BeaconState, epoch: Epoch) -> Hash: +def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Hash: """ Return the seed at ``epoch``. 
""" mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)) # Avoid underflow - return hash(mix + int_to_bytes(epoch, length=32)) + return hash(domain_type + mix + int_to_bytes(epoch, length=32)) ``` #### `get_committee_count` @@ -901,7 +901,7 @@ def get_crosslink_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> S """ return compute_committee( indices=get_active_validator_indices(state, epoch), - seed=get_seed(state, epoch), + seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), index=(shard + SHARD_COUNT - get_start_shard(state, epoch)) % SHARD_COUNT, count=get_committee_count(state, epoch), ) @@ -941,7 +941,7 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: Return the beacon proposer index at the current slot. """ epoch = get_current_epoch(state) - seed = hash(get_seed(state, epoch) + int_to_bytes(state.slot, length=8)) + seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + int_to_bytes(state.slot, length=8)) indices = get_active_validator_indices(state, epoch) return compute_proposer_index(state, indices, seed) ``` diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 3dc549816..a6e9c997f 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -225,7 +225,7 @@ def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> Sequ """ full_committee = compute_committee( indices=get_active_validator_indices(state, epoch), - seed=get_seed(state, epoch), + seed=get_seed(state, epoch, DOMAIN_SHARD_ATTESTER), index=shard, count=SHARD_COUNT, ) @@ -270,7 +270,8 @@ def get_shard_block_proposer_index(state: BeaconState, return None MAX_RANDOM_BYTE = 2**8 - 1 - seed = hash(get_seed(state, current_epoch) + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8)) + seed = get_seed(state, current_epoch, DOMAIN_SHARD_PROPOSER) + seed = hash(seed + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8)) i 
= 0 while True: candidate_index = active_indices[(slot + i) % len(active_indices)] diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index fc91fd2e7..29723f391 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -345,7 +345,7 @@ def get_signed_attestation_data(state: BeaconState, attestation: IndexedAttestat custody_bit=0b0, ) - domain = get_domain(state, DOMAIN_ATTESTATION, attestation.data.target.epoch) + domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch) return bls_sign(privkey, hash_tree_root(attestation_data_and_custody_bit), domain) ``` diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index 868517018..23d1a8f8f 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -122,7 +122,7 @@ def get_attestation_signature(spec, state, attestation_data, privkey, custody_bi privkey=privkey, domain=spec.get_domain( state=state, - domain_type=spec.DOMAIN_ATTESTATION, + domain_type=spec.DOMAIN_BEACON_ATTESTER, message_epoch=attestation_data.target.epoch, ) ) From a18312559507f27cc0092fc55bfee07bfa2d5334 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 22 Sep 2019 21:04:48 +0100 Subject: [PATCH 174/250] cleanups to get_seed 1) Put `domain_type` and `epoch` upfront. This pattern can be reused for signature domains. 2) Change `int_to_bytes(epoch, length=32)` to `int_to_bytes(epoch, length=8)` to match `uint64` length. 
--- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index cfdf24e71..4f20898a3 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -875,7 +875,7 @@ def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Hash: Return the seed at ``epoch``. """ mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)) # Avoid underflow - return hash(domain_type + mix + int_to_bytes(epoch, length=32)) + return hash(domain_type + int_to_bytes(epoch, length=8) + mix) ``` #### `get_committee_count` From e7db58cb7e402a943c698f5d0a235ed21abea558 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Mon, 23 Sep 2019 19:07:10 +0100 Subject: [PATCH 175/250] Rename ACTIVATION_EXIT_DELAY to MAX_SEED_LOOKAHEAD for phase 1 --- configs/mainnet.yaml | 2 +- configs/minimal.yaml | 2 +- specs/core/0_beacon-chain.md | 4 ++-- specs/core/1_custody-game.md | 8 ++++---- specs/validator/0_beacon-chain-validator.md | 2 +- .../epoch_processing/test_process_registry_updates.py | 4 ++-- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 86fa8dcc5..c11f1e54c 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -60,7 +60,7 @@ SLOTS_PER_EPOCH: 64 # 2**0 (= 1) epochs 6.4 minutes MIN_SEED_LOOKAHEAD: 1 # 2**2 (= 4) epochs 25.6 minutes -ACTIVATION_EXIT_DELAY: 4 +MAX_SEED_LOOKAHEAD: 4 # 2**10 (= 1,024) slots ~1.7 hours SLOTS_PER_ETH1_VOTING_PERIOD: 1024 # 2**13 (= 8,192) slots ~13 hours diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 8c6a9e11f..4c32eae4d 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -59,7 +59,7 @@ SLOTS_PER_EPOCH: 8 # 2**0 (= 1) epochs MIN_SEED_LOOKAHEAD: 1 # 2**2 (= 4) epochs -ACTIVATION_EXIT_DELAY: 4 +MAX_SEED_LOOKAHEAD: 4 # [customized] higher frequency new deposits from eth1 for testing 
SLOTS_PER_ETH1_VOTING_PERIOD: 16 # [customized] smaller state diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4f20898a3..681d82457 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -210,7 +210,7 @@ The following values are (non-configurable) constants used throughout the specif | `MIN_ATTESTATION_INCLUSION_DELAY` | `2**0` (= 1) | slots | 6 seconds | | `SLOTS_PER_EPOCH` | `2**6` (= 64) | slots | 6.4 minutes | | `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes | -| `ACTIVATION_EXIT_DELAY` | `2**2` (= 4) | epochs | 25.6 minutes | +| `MAX_SEED_LOOKAHEAD` | `2**2` (= 4) | epochs | 25.6 minutes | | `SLOTS_PER_ETH1_VOTING_PERIOD` | `2**10` (= 1,024) | slots | ~1.7 hours | | `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours | | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | @@ -779,7 +779,7 @@ def compute_activation_exit_epoch(epoch: Epoch) -> Epoch: """ Return the epoch during which validator activations and exits initiated in ``epoch`` take effect. 
""" - return Epoch(epoch + 1 + ACTIVATION_EXIT_DELAY) + return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD) ``` #### `compute_domain` diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 0eea43dc0..158d575e2 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -282,7 +282,7 @@ def ceillog2(x: uint64) -> int: ### `is_valid_merkle_branch_with_mixin` ```python -def is_valid_merkle_branch_with_mixin(leaf: Hash, +def is_valid_merkle_branch_with_mixin(leaf: Hash, branch: Sequence[Hash], depth: uint64, index: uint64, @@ -315,7 +315,7 @@ def legendre_bit(a: int, q: int) -> int: if a >= q: return legendre_bit(a % q, q) if a == 0: - return 0 + return 0 assert(q > a > 0 and q % 2 == 1) t = 1 n = q @@ -602,7 +602,7 @@ def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> # Verify attestation is eligible for challenging responder = state.validators[challenge.responder_index] assert get_current_epoch(state) <= get_randao_epoch_for_custody_period( - get_custody_period_for_validator(state, challenge.responder_index, epoch), + get_custody_period_for_validator(state, challenge.responder_index, epoch), challenge.responder_index ) + 2 * EPOCHS_PER_CUSTODY_PERIOD + responder.max_reveal_lateness @@ -673,7 +673,7 @@ def process_chunk_challenge_response(state: BeaconState, # Verify bit challenge data is null assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == Hash() # Verify minimum delay - assert get_current_epoch(state) >= challenge.inclusion_epoch + ACTIVATION_EXIT_DELAY + assert get_current_epoch(state) >= challenge.inclusion_epoch + MAX_SEED_LOOKAHEAD # Verify the chunk matches the crosslink data root assert is_valid_merkle_branch( leaf=hash_tree_root(response.chunk), diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 29723f391..8a9cf1b5d 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ 
b/specs/validator/0_beacon-chain-validator.md @@ -114,7 +114,7 @@ Once a validator has been processed and added to the beacon state's `validators` ### Activation -In normal operation, the validator is quickly activated, at which point the validator is added to the shuffling and begins validation after an additional `ACTIVATION_EXIT_DELAY` epochs (25.6 minutes). +In normal operation, the validator is quickly activated, at which point the validator is added to the shuffling and begins validation after an additional `MAX_SEED_LOOKAHEAD` epochs (25.6 minutes). The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given epoch. Usage is as follows: diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py index ab6a74a70..bfd992ffa 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py @@ -21,7 +21,7 @@ def test_activation(spec, state): index = 0 mock_deposit(spec, state, index) - for _ in range(spec.ACTIVATION_EXIT_DELAY + 1): + for _ in range(spec.MAX_SEED_LOOKAHEAD + 1): next_epoch(spec, state) yield from run_process_registry_updates(spec, state) @@ -73,7 +73,7 @@ def test_ejection(spec, state): # Mock an ejection state.validators[index].effective_balance = spec.EJECTION_BALANCE - for _ in range(spec.ACTIVATION_EXIT_DELAY + 1): + for _ in range(spec.MAX_SEED_LOOKAHEAD + 1): next_epoch(spec, state) yield from run_process_registry_updates(spec, state) From 25efbe74589895bcd63d908e5686520b5bbaf699 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 23 Sep 2019 20:58:29 +0100 Subject: [PATCH 176/250] Rename seed to epoch_seed as per Danny --- specs/core/1_shard-data-chains.md | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index a6e9c997f..f24c6f9c3 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -270,8 +270,8 @@ def get_shard_block_proposer_index(state: BeaconState, return None MAX_RANDOM_BYTE = 2**8 - 1 - seed = get_seed(state, current_epoch, DOMAIN_SHARD_PROPOSER) - seed = hash(seed + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8)) + epoch_seed = get_seed(state, current_epoch, DOMAIN_SHARD_PROPOSER) + seed = hash(epoch_seed + int_to_bytes(shard, length=8) + int_to_bytes(slot, length=8)) i = 0 while True: candidate_index = active_indices[(slot + i) % len(active_indices)] From fd759a2cca9ba993ff0ce590845dc9479f33fa81 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Wed, 25 Sep 2019 21:51:35 +0100 Subject: [PATCH 177/250] Remove flat containers and revert back to uint64 --- specs/core/1_shard-data-chains.md | 42 ++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index fceac061c..4596d250b 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -54,7 +54,6 @@ This document describes the shard transition function (data layer only) and the | Name | SSZ equivalent | Description | | - | - | - | | `ShardSlot` | `uint64` | a shard slot number | -| `GweiDelta` | `int64` | a signed Gwei delta | ## Configuration @@ -105,7 +104,7 @@ This document describes the shard transition function (data layer only) and the ### `ShardBlock` ```python -class ShardBlock(FlatContainer): +class ShardBlock(Container): shard: Shard slot: ShardSlot beacon_block_root: Hash @@ -121,7 +120,7 @@ class ShardBlock(FlatContainer): ### `ShardBlockHeader` ```python -class ShardBlockHeader(FlatContainer): +class ShardBlockHeader(Container): shard: Shard slot: ShardSlot beacon_block_root: Hash @@ -137,7 +136,7 @@ class 
ShardBlockHeader(FlatContainer): ### `ShardState` ```python -class ShardState(FlatContainer): +class ShardState(Container): shard: Shard slot: ShardSlot history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_DEPTH] @@ -145,8 +144,10 @@ class ShardState(FlatContainer): block_size_sum: uint64 # Fees and rewards block_body_price: Gwei - older_committee_deltas: Vector[GweiDelta, MAX_PERIOD_COMMITTEE_SIZE] - newer_committee_deltas: Vector[GweiDelta, MAX_PERIOD_COMMITTEE_SIZE] + older_committee_positive_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE] + older_committee_negative_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE] + newer_committee_positive_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE] + newer_committee_negative_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE] ``` ### `ShardCheckpoint` @@ -218,14 +219,24 @@ def get_shard_proposer_index(beacon_state: BeaconState, shard: Shard, slot: Shar #### `process_delta` ```python -def process_delta(beacon_state: BeaconState, shard_state: ShardState, index: ValidatorIndex, delta: GweiDelta) -> None: +def process_delta(beacon_state: BeaconState, + shard_state: ShardState, + index: ValidatorIndex, + delta: Gwei, + positive: bool=True) -> None: epoch = compute_epoch_of_shard_slot(beacon_state.slot) older_committee = get_period_committee(beacon_state, shard_state.shard, compute_shard_period_start_epoch(epoch, 2)) newer_committee = get_period_committee(beacon_state, shard_state.shard, compute_shard_period_start_epoch(epoch, 1)) if index in older_committee: - shard_state.older_committee_deltas[older_committee.index(index)] += delta + if positive: + shard_state.older_committee_positive_deltas[older_committee.index(index)] += delta + else: + shard_state.older_committee_negative_deltas[older_committee.index(index)] += delta elif index in newer_committee: - shard_state.newer_committee_deltas[newer_committee.index(index)] += delta + if positive: + shard_state.newer_committee_positive_deltas[newer_committee.index(index)] += delta + 
else: + shard_state.newer_committee_negative_deltas[newer_committee.index(index)] += delta ``` ## Genesis @@ -299,8 +310,10 @@ def process_shard_slot(shard_state: ShardState) -> None: ```python def process_shard_period(shard_state: ShardState) -> None: # Rotate committee deltas - shard_state.older_committee_deltas = shard_state.newer_committee_deltas - shard_state.newer_committee_deltas = [GweiDelta(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)] + shard_state.older_committee_positive_deltas = shard_state.newer_committee_positive_deltas + shard_state.older_committee_negative_deltas = shard_state.newer_committee_negative_deltas + shard_state.newer_committee_positive_deltas = [Gwei(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)] + shard_state.newer_committee_negative_deltas = [Gwei(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)] ``` ### Block processing @@ -381,17 +394,18 @@ def process_shard_block_body(beacon_state: BeaconState, shard_state: ShardState, # Apply proposer block body fee block_body_fee = shard_state.block_body_price * len(block.body) // MAX_SHARD_BLOCK_SIZE proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) - process_delta(beacon_state, shard_state, proposer_index, -block_body_fee) # Burn + process_delta(beacon_state, shard_state, proposer_index, block_body_fee, positive=False) # Burn process_delta(beacon_state, shard_state, proposer_index, block_body_fee // PROPOSER_REWARD_QUOTIENT) # Reward # Calculate new block body price block_size = SHARD_HEADER_SIZE + len(block.body) QUOTIENT = MAX_SHARD_BLOCK_SIZE * BLOCK_BODY_PRICE_QUOTIENT - price_delta = GweiDelta(shard_state.block_body_price * (block_size - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) - if price_delta > 0: + if block_size > SHARD_BLOCK_SIZE_TARGET: + price_delta = Gwei(shard_state.block_body_price * (block_size - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT) # The maximum block body price caps the amount burnt on fees within a shard period MAX_BLOCK_BODY_PRICE = 
MAX_EFFECTIVE_BALANCE // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH shard_state.block_body_price = Gwei(min(MAX_BLOCK_BODY_PRICE, shard_state.block_body_price + price_delta)) else: + price_delta = Gwei(shard_state.block_body_price * (SHARD_BLOCK_SIZE_TARGET - block_size) // QUOTIENT) shard_state.block_body_price = Gwei(max(MIN_BLOCK_BODY_PRICE, shard_state.block_body_price + price_delta)) ``` From d4f48117d3463cddb18f0f65d66798933d679beb Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Thu, 26 Sep 2019 19:32:50 +0100 Subject: [PATCH 178/250] Rename ShardCheckpoint, double header size, verify proposer not slashed --- specs/core/1_shard-data-chains.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 4596d250b..595909745 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -21,7 +21,7 @@ - [`ShardBlock`](#shardblock) - [`ShardBlockHeader`](#shardblockheader) - [`ShardState`](#shardstate) - - [`ShardCheckpoint`](#shardcheckpoint) + - [`ShardAttestationData`](#ShardAttestationData) - [Helper functions](#helper-functions) - [Misc](#misc-1) - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) @@ -63,7 +63,7 @@ This document describes the shard transition function (data layer only) and the | - | - | | `MIN_BLOCK_BODY_PRICE` | `2**0` (= 1) | | `MAX_PERIOD_COMMITTEE_SIZE` | `2**7` (= 128) | -| `SHARD_HEADER_SIZE` | `2**9` (= 512) | +| `SHARD_HEADER_SIZE` | `2**10` (= 1024) | | `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) | | `MAX_SHARD_BLOCK_SIZE` | `2**16` (= 65,536) | @@ -150,10 +150,10 @@ class ShardState(Container): newer_committee_negative_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE] ``` -### `ShardCheckpoint` +### `ShardAttestationData` ```python -class ShardCheckpoint(Container): +class ShardAttestationData(Container): slot: ShardSlot parent_root: Hash ``` @@ -354,10 +354,12 @@ def 
process_shard_block_header(beacon_state: BeaconState, shard_state: ShardStat # Verify the sum of the block sizes since genesis shard_state.block_size_sum += SHARD_HEADER_SIZE + len(block.body) assert block.block_size_sum == shard_state.block_size_sum + # Verify proposer is not slashed + proposer = beacon_state.validators[get_shard_proposer_index(beacon_state, shard_state.shard, block.slot)] + assert not proposer.slashed # Verify proposer signature - proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot)) - assert bls_verify(beacon_state.validators[proposer_index].pubkey, signing_root(block), block.signature, domain) + assert bls_verify(proposer.pubkey, signing_root(block), block.signature, domain) ``` #### Attestations @@ -377,7 +379,7 @@ def process_shard_attestations(beacon_state: BeaconState, shard_state: ShardStat assert block.aggregation_bits[i] == 0b0 # Verify attester aggregate signature domain = get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.slot)) - message = hash_tree_root(ShardCheckpoint(shard_state.slot, block.parent_root)) + message = hash_tree_root(ShardAttestationData(shard_state.slot, block.parent_root)) assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.attestations, domain) # Proposer micro-reward proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) From 9ce7a8e5919dbafba7d7b9436759dd931647b114 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 27 Sep 2019 09:41:12 +0900 Subject: [PATCH 179/250] working through lint and format on shard chains --- specs/core/1_beacon-chain-misc.md | 29 ++++++++++++++++++++--------- specs/core/1_shard-data-chains.md | 10 +++++++--- 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 5bb0f6da0..0b5ed5234 100644 --- 
a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -34,6 +34,7 @@ | `MAX_SHARD_RECEIPT_PROOFS` | `2**0` (= 1) | - | - | | `PERIOD_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months | | `MINOR_REWARD_QUOTIENT` | `2**8` (=256) | - | - | +| `REWARD_COEFFICIENT_BASE` | **TBD** | - | - | ## Containers @@ -45,6 +46,16 @@ class CompactCommittee(Container): compact_validators: List[uint64, MAX_VALIDATORS_PER_COMMITTEE] ``` +#### `ShardReceiptDelta` + +```python +class ShardReceiptDelta(Container): + index: ValidatorIndex + reward_coefficient: uint64 + block_fee: Gwei +``` + + #### `ShardReceiptProof` ```python @@ -112,16 +123,17 @@ def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedInd ```python def compute_historical_state_generalized_index(earlier: ShardSlot, later: ShardSlot) -> GeneralizedIndex: """ - Computes the generalized index of the state root of slot `frm` based on the state root of slot `to`. - Relies on the `history_acc` in the `ShardState`, where `history_acc[i]` maintains the most recent 2**i'th - slot state. Works by tracing a `log(later-earlier)` step path from `later` to `earlier` through intermediate - blocks at the next available multiples of descending powers of two. + Computes the generalized index of the state root of slot `earlier` based on the state root of slot `later`. + Relies on the `history_accumulator` in the `ShardState`, where `history_accumulator[i]` maintains the most + recent 2**i'th slot state. Works by tracing a `log(later-earlier)` step path from `later` to `earlier` + through intermediate blocks at the next available multiples of descending powers of two. 
""" o = GeneralizedIndex(1) - for i in range(HISTORY_ACCUMULATOR_VECTOR - 1, -1, -1): + for i in range(HISTORY_ACCUMULATOR_DEPTH - 1, -1, -1): if (later - 1) & 2**i > (earlier - 1) & 2**i: later = later - ((later - 1) % 2**i) - 1 - o = concat_generalized_indices(o, GeneralizedIndex(get_generalized_index(ShardState, ['history_acc', i]))) + gindex = GeneralizedIndex(get_generalized_index(ShardState, ['history_accumulator', i])) + o = concat_generalized_indices(o, gindex) return o ``` @@ -133,7 +145,7 @@ def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex: Gets the generalized index for the root of the index'th header in a crosslink. """ MAX_CROSSLINK_SIZE = ( - SHARD_BLOCK_SIZE_LIMIT * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK + MAX_SHARD_BLOCK_SIZE * SHARD_SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK ) assert MAX_CROSSLINK_SIZE == get_previous_power_of_two(MAX_CROSSLINK_SIZE) return GeneralizedIndex(MAX_CROSSLINK_SIZE // SHARD_HEADER_SIZE + index) @@ -146,10 +158,9 @@ def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptP """ Processes a ShardReceipt object. 
""" - SHARD_SLOTS_PER_EPOCH = SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH receipt_slot = ( state.next_shard_receipt_period[receipt_proof.shard] * - SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD + SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD ) first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SHARD_SLOTS_PER_EPOCH gindex = concat_generalized_indices( diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 4596d250b..4e26c7c72 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -356,6 +356,8 @@ def process_shard_block_header(beacon_state: BeaconState, shard_state: ShardStat assert block.block_size_sum == shard_state.block_size_sum # Verify proposer signature proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) + assert proposer_index is not None + domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot)) assert bls_verify(beacon_state.validators[proposer_index].pubkey, signing_root(block), block.signature, domain) ``` @@ -381,8 +383,9 @@ def process_shard_attestations(beacon_state: BeaconState, shard_state: ShardStat assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.attestations, domain) # Proposer micro-reward proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) + assert proposer_index is not None reward = attestation_count * get_base_reward(beacon_state, proposer_index) // PROPOSER_REWARD_QUOTIENT - process_delta(beacon_state, shard_state, proposer_index, reward) + process_delta(beacon_state, shard_state, proposer_index, Gwei(reward)) ``` #### Block body @@ -394,8 +397,9 @@ def process_shard_block_body(beacon_state: BeaconState, shard_state: ShardState, # Apply proposer block body fee block_body_fee = shard_state.block_body_price * len(block.body) // MAX_SHARD_BLOCK_SIZE proposer_index = 
get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) - process_delta(beacon_state, shard_state, proposer_index, block_body_fee, positive=False) # Burn - process_delta(beacon_state, shard_state, proposer_index, block_body_fee // PROPOSER_REWARD_QUOTIENT) # Reward + assert proposer_index is not None + process_delta(beacon_state, shard_state, proposer_index, Gwei(block_body_fee), positive=False) # Burn + process_delta(beacon_state, shard_state, proposer_index, Gwei(block_body_fee // PROPOSER_REWARD_QUOTIENT)) # Reward # Calculate new block body price block_size = SHARD_HEADER_SIZE + len(block.body) QUOTIENT = MAX_SHARD_BLOCK_SIZE * BLOCK_BODY_PRICE_QUOTIENT From 86ed3937dc6f5e52ae7b869e4696a132ec054171 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 27 Sep 2019 10:34:19 +0900 Subject: [PATCH 180/250] fix a couple of minor shard chain bugs --- specs/core/1_beacon-chain-misc.md | 22 ++++++++++++---------- specs/core/1_shard-data-chains.md | 2 +- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 0b5ed5234..6dd6e19c3 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -226,16 +226,18 @@ def update_period_committee(state: BeaconState) -> None: """ Updates period committee roots at boundary blocks. 
""" - if (get_current_epoch(state) + 1) % EPOCHS_PER_SHARD_PERIOD == 0: - period = (get_current_epoch(state) + 1) // EPOCHS_PER_SHARD_PERIOD - committees = Vector[CompactCommittee, SHARD_COUNT]([ - committee_to_compact_committee( - state, - get_period_committee(state, Epoch(get_current_epoch(state) + 1), Shard(shard)), - ) - for shard in range(SHARD_COUNT) - ]) - state.period_committee_roots[period % PERIOD_COMMITTEE_ROOT_LENGTH] = hash_tree_root(committees) + if (get_current_epoch(state) + 1) % EPOCHS_PER_SHARD_PERIOD != 0: + return + + period = (get_current_epoch(state) + 1) // EPOCHS_PER_SHARD_PERIOD + committees = Vector[CompactCommittee, SHARD_COUNT]([ + committee_to_compact_committee( + state, + get_period_committee(state, Shard(shard), Epoch(get_current_epoch(state) + 1)), + ) + for shard in range(SHARD_COUNT) + ]) + state.period_committee_roots[period % PERIOD_COMMITTEE_ROOT_LENGTH] = hash_tree_root(committees) ``` ### Shard receipt processing diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index ebb1b6b68..96b2de41a 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -224,7 +224,7 @@ def process_delta(beacon_state: BeaconState, index: ValidatorIndex, delta: Gwei, positive: bool=True) -> None: - epoch = compute_epoch_of_shard_slot(beacon_state.slot) + epoch = compute_epoch_of_shard_slot(shard_state.slot) older_committee = get_period_committee(beacon_state, shard_state.shard, compute_shard_period_start_epoch(epoch, 2)) newer_committee = get_period_committee(beacon_state, shard_state.shard, compute_shard_period_start_epoch(epoch, 1)) if index in older_committee: From b892d46f26da482af948c7da53586299d335c4f6 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 27 Sep 2019 13:02:16 +0900 Subject: [PATCH 181/250] working through shard chain tests --- scripts/function_puller.py | 2 +- specs/core/1_shard-data-chains.md | 21 ++++++- .../test/helpers/phase1/attestations.py | 13 ++-- 
.../test/helpers/phase1/shard_block.py | 61 +++++++++---------- .../shard_data_chain/test_shard_block.py | 34 +++++------ 5 files changed, 71 insertions(+), 60 deletions(-) diff --git a/scripts/function_puller.py b/scripts/function_puller.py index 26671bafc..b30e5b75c 100644 --- a/scripts/function_puller.py +++ b/scripts/function_puller.py @@ -81,7 +81,7 @@ def get_spec(file_name: str) -> SpecObject: if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789': is_constant_def = False if is_constant_def: - constants[row[0]] = row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890') + constants[row[0]] = row[1].replace('**TBD**', '2**32') elif row[1].startswith('uint') or row[1].startswith('Bytes'): custom_types[row[0]] = row[1] return functions, custom_types, constants, ssz_objects, inserts diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 96b2de41a..866140831 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -248,6 +248,11 @@ def get_genesis_shard_state(shard: Shard) -> ShardState: return ShardState( shard=shard, slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), + latest_block_header=ShardBlockHeader( + shard=shard, + slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH), + body_root=hash_tree_root(List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE]()), + ), block_body_price=MIN_BLOCK_BODY_PRICE, ) ``` @@ -335,9 +340,14 @@ def process_shard_block_header(beacon_state: BeaconState, shard_state: ShardStat assert block.slot == shard_state.slot # Verify the beacon chain root parent_epoch = compute_epoch_of_shard_slot(shard_state.latest_block_header.slot) - assert block.beacon_block_root == get_block_root(beacon_state, parent_epoch) + # --super dirty. 
need to think-- # + if parent_epoch * SLOTS_PER_EPOCH == beacon_state.slot: + beacon_block_root = signing_root(beacon_state.latest_block_header) + else: + beacon_block_root = get_block_root(beacon_state, parent_epoch) + assert block.beacon_block_root == beacon_block_root # Verify the parent root - assert block.parent_root == hash_tree_root(shard_state.latest_block_header) + assert block.parent_root == signing_root(shard_state.latest_block_header) # Save current block as the new latest block shard_state.latest_block_header = ShardBlockHeader( shard=block.shard, @@ -376,12 +386,17 @@ def process_shard_attestations(beacon_state: BeaconState, shard_state: ShardStat pubkeys.append(beacon_state.validators[validator_index].pubkey) process_delta(beacon_state, shard_state, validator_index, get_base_reward(beacon_state, validator_index)) attestation_count += 1 + # Exit early if no participants + if not any(pubkeys): + assert block.attestations == BLSSignature() + return + # Verify there are no extraneous bits set beyond the shard committee for i in range(len(shard_committee), 2 * MAX_PERIOD_COMMITTEE_SIZE): assert block.aggregation_bits[i] == 0b0 # Verify attester aggregate signature domain = get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.slot)) - message = hash_tree_root(ShardAttestationData(shard_state.slot, block.parent_root)) + message = hash_tree_root(ShardAttestationData(slot=shard_state.slot, parent_root=block.parent_root)) assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.attestations, domain) # Proposer micro-reward proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) diff --git a/test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py index 750ab5048..4f0a9fb0a 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py @@ -5,17 
+5,20 @@ from eth2spec.utils.bls import ( ) -def sign_shard_attestation(spec, shard_state, beacon_state, block, participants): +def sign_shard_attestation(spec, beacon_state, shard_state, block, participants): signatures = [] - message_hash = block.core.parent_root - block_epoch = spec.compute_epoch_of_shard_slot(block.core.slot) + message_hash = spec.ShardAttestationData( + slot=block.slot, + parent_root=block.parent_root, + ).hash_tree_root() + block_epoch = spec.compute_epoch_of_shard_slot(block.slot) for validator_index in participants: privkey = privkeys[validator_index] signatures.append( get_attestation_signature( spec, - shard_state, beacon_state, + shard_state, message_hash, block_epoch, privkey, @@ -25,7 +28,7 @@ def sign_shard_attestation(spec, shard_state, beacon_state, block, participants) return bls_aggregate_signatures(signatures) -def get_attestation_signature(spec, shard_state, beacon_state, message_hash, block_epoch, privkey): +def get_attestation_signature(spec, beacon_state, shard_state, message_hash, block_epoch, privkey): return bls_sign( message_hash=message_hash, privkey=privkey, diff --git a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py index b9c388a3f..3ceb3e0aa 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py @@ -1,3 +1,5 @@ +from copy import deepcopy + from eth2spec.test.helpers.keys import privkeys from eth2spec.utils.bls import ( bls_sign, @@ -13,66 +15,63 @@ from .attestations import ( @only_with_bls() -def sign_shard_block(spec, state, block, shard, proposer_index=None): +def sign_shard_block(spec, beacon_state, block, shard, proposer_index=None): if proposer_index is None: - proposer_index = spec.get_shard_block_proposer_index(state, shard, block.core.slot) + proposer_index = spec.get_shard_proposer_index(beacon_state, shard, block.slot) privkey = 
privkeys[proposer_index] - block.signatures.proposer_signature = bls_sign( + block.signature = bls_sign( message_hash=signing_root(block), privkey=privkey, domain=spec.get_domain( - state, + beacon_state, spec.DOMAIN_SHARD_PROPOSER, - spec.compute_epoch_of_shard_slot(block.core.slot), + spec.compute_epoch_of_shard_slot(block.slot), ) ) def build_empty_shard_block(spec, - shard_state, beacon_state, + shard_state, slot, - parent_root, signed=False, full_attestation=False): if slot is None: slot = shard_state.slot + parent_epoch = spec.compute_epoch_of_shard_slot(shard_state.latest_block_header.slot) + if parent_epoch * spec.SLOTS_PER_EPOCH == beacon_state.slot: + beacon_block_root = spec.signing_root(beacon_state.latest_block_header) + else: + beacon_block_root = spec.get_block_root(beacon_state, parent_epoch) + + previous_block_header = deepcopy(shard_state.latest_block_header) + if previous_block_header.state_root == spec.Hash(): + previous_block_header.state_root = shard_state.hash_tree_root() + parent_root = signing_root(previous_block_header) + block = spec.ShardBlock( - core=spec.ExtendedShardBlockCore( - slot=slot, - beacon_chain_root=beacon_state.block_roots[beacon_state.slot % spec.SLOTS_PER_HISTORICAL_ROOT], - parent_root=parent_root, - ), - signatures=spec.ShardBlockSignatures( - attestation_signature=b'\x00' * 96, - proposer_signature=b'\x25' * 96, - ) + shard=shard_state.shard, + slot=slot, + beacon_block_root=beacon_block_root, + parent_root=parent_root, + block_size_sum=shard_state.block_size_sum + spec.SHARD_HEADER_SIZE, ) - # attestation if full_attestation: - attester_committee = spec.get_persistent_committee(beacon_state, shard_state.shard, block.core.slot) - block.core.attester_bitfield = list( - (True,) * len(attester_committee) + - (False,) * (spec.TARGET_PERSISTENT_COMMITTEE_SIZE * 2 - len(attester_committee)) + shard_committee = spec.get_shard_committee(beacon_state, shard_state.shard, block.slot) + block.aggregation_bits = list( + (True,) * 
len(shard_committee) + + (False,) * (spec.MAX_PERIOD_COMMITTEE_SIZE * 2 - len(shard_committee)) ) - block.signatures.attestation_signature = sign_shard_attestation( + block.attestations = sign_shard_attestation( spec, - shard_state, beacon_state, - block, - participants=attester_committee, - ) - else: - block.signatures.attestation_signature = sign_shard_attestation( - spec, shard_state, - beacon_state, block, - participants=(), + participants=shard_committee, ) if signed: diff --git a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py index 2bb0232f0..b0d8ad5e6 100644 --- a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py +++ b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py @@ -9,22 +9,19 @@ from eth2spec.test.context import ( @with_all_phases_except(['phase0']) -@always_bls @spec_state_test +@always_bls def test_process_empty_shard_block(spec, state): beacon_state = state - - shard_slot = spec.PHASE_1_FORK_SLOT - beacon_state.slot = spec.Slot(spec.PHASE_1_FORK_EPOCH * spec.SLOTS_PER_EPOCH) - shard_state = spec.get_default_shard_state(beacon_state, shard=spec.Shard(0)) - shard_state.slot = shard_slot + beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH) + shard_state = spec.get_genesis_shard_state(spec.Shard(0)) + shard_state.slot = spec.ShardSlot(spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH) block = build_empty_shard_block( spec, - shard_state, beacon_state, - slot=shard_slot + 1, - parent_root=spec.Hash(), + shard_state, + slot=shard_state.slot + 1, signed=True, full_attestation=False, ) @@ -33,28 +30,25 @@ def test_process_empty_shard_block(spec, state): yield 'beacon_state', beacon_state yield 'block', block - spec.shard_state_transition(shard_state, beacon_state, block) + spec.shard_state_transition(beacon_state, shard_state, block) yield 'post', shard_state 
@with_all_phases_except(['phase0']) -@always_bls @spec_state_test +@always_bls def test_process_full_attestation_shard_block(spec, state): beacon_state = state - - shard_slot = spec.PHASE_1_FORK_SLOT - beacon_state.slot = spec.Slot(spec.PHASE_1_FORK_EPOCH * spec.SLOTS_PER_EPOCH) - shard_state = spec.get_default_shard_state(beacon_state, shard=spec.Shard(0)) - shard_state.slot = shard_slot + beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH) + shard_state = spec.get_genesis_shard_state(spec.Shard(0)) + shard_state.slot = spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH block = build_empty_shard_block( spec, - shard_state, beacon_state, - slot=shard_slot + 1, - parent_root=spec.Hash(), + shard_state, + slot=shard_state.slot + 1, signed=True, full_attestation=True, ) @@ -63,6 +57,6 @@ def test_process_full_attestation_shard_block(spec, state): yield 'beacon_state', beacon_state yield 'block', block - spec.shard_state_transition(shard_state, beacon_state, block) + spec.shard_state_transition(beacon_state, shard_state, block) yield 'post', shard_state From 3bc05dfff7cf0833a381d54b679f898a7d9753bf Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 27 Sep 2019 13:20:23 +0900 Subject: [PATCH 182/250] remove outdated beacon attestation test --- .../test_beacon_attestation.py | 48 ------------------- .../shard_data_chain/test_shard_block.py | 6 +-- 2 files changed, 2 insertions(+), 52 deletions(-) delete mode 100644 test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_beacon_attestation.py diff --git a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_beacon_attestation.py b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_beacon_attestation.py deleted file mode 100644 index aface905b..000000000 --- a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_beacon_attestation.py +++ /dev/null @@ -1,48 +0,0 @@ -from eth2spec.test.context import ( - with_all_phases_except, - spec_state_test, - always_bls, -) 
-from eth2spec.test.helpers.phase1.shard_block import ( - build_empty_shard_block, -) -from eth2spec.test.helpers.attestations import get_valid_attestation - - -@with_all_phases_except(['phase0']) -@always_bls -@spec_state_test -def test_process_empty_shard_block(spec, state): - beacon_state = state - - shard_slot = spec.PHASE_1_FORK_SLOT - beacon_state.slot = spec.Slot(spec.PHASE_1_FORK_EPOCH * spec.SLOTS_PER_EPOCH) - shard_state = spec.get_default_shard_state(beacon_state, shard=spec.Shard(0)) - shard_state.slot = shard_slot - - block = build_empty_shard_block( - spec, - shard_state, - beacon_state, - slot=shard_slot + 1, - parent_root=spec.Hash(), - signed=True, - full_attestation=True, - ) - - yield 'pre', shard_state - yield 'beacon_state', beacon_state - yield 'block', block - - beacon_attestation = get_valid_attestation(spec, beacon_state, signed=True) - yield 'beacon_attestation', beacon_attestation - - is_valid_beacon_attestation = spec.is_valid_beacon_attestation( - pre_state=shard_state, - shard_blocks_or_state_roots=(block,), - beacon_state=beacon_state, - valid_attestations=set([beacon_attestation]), - candidate=beacon_attestation, - ) - assert is_valid_beacon_attestation - yield 'is_valid_beacon_attestation', is_valid_beacon_attestation diff --git a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py index b0d8ad5e6..6622c1940 100644 --- a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py +++ b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py @@ -11,8 +11,7 @@ from eth2spec.test.context import ( @with_all_phases_except(['phase0']) @spec_state_test @always_bls -def test_process_empty_shard_block(spec, state): - beacon_state = state +def test_process_empty_shard_block(spec, beacon_state): beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH) shard_state = 
spec.get_genesis_shard_state(spec.Shard(0)) shard_state.slot = spec.ShardSlot(spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH) @@ -38,8 +37,7 @@ def test_process_empty_shard_block(spec, state): @with_all_phases_except(['phase0']) @spec_state_test @always_bls -def test_process_full_attestation_shard_block(spec, state): - beacon_state = state +def test_process_full_attestation_shard_block(spec, beacon_state): beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH) shard_state = spec.get_genesis_shard_state(spec.Shard(0)) shard_state.slot = spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH From 77faa026a03834b33a01852175f8919f0089ab72 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 27 Sep 2019 13:30:31 +0900 Subject: [PATCH 183/250] minor fix to tests --- .../test/phase_1/shard_data_chain/test_shard_block.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py index 6622c1940..b0d8ad5e6 100644 --- a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py +++ b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py @@ -11,7 +11,8 @@ from eth2spec.test.context import ( @with_all_phases_except(['phase0']) @spec_state_test @always_bls -def test_process_empty_shard_block(spec, beacon_state): +def test_process_empty_shard_block(spec, state): + beacon_state = state beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH) shard_state = spec.get_genesis_shard_state(spec.Shard(0)) shard_state.slot = spec.ShardSlot(spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH) @@ -37,7 +38,8 @@ def test_process_empty_shard_block(spec, beacon_state): @with_all_phases_except(['phase0']) @spec_state_test @always_bls -def test_process_full_attestation_shard_block(spec, beacon_state): +def 
test_process_full_attestation_shard_block(spec, state): + beacon_state = state beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH) shard_state = spec.get_genesis_shard_state(spec.Shard(0)) shard_state.slot = spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH From e2230d106fad768094b4b4c1fa40a77e9c110b1b Mon Sep 17 00:00:00 2001 From: Jim McDonald Date: Sat, 28 Sep 2019 13:17:22 +0100 Subject: [PATCH 184/250] Fix array formatting --- specs/core/0_deposit-contract.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_deposit-contract.md b/specs/core/0_deposit-contract.md index 06962594e..d3d811c6f 100644 --- a/specs/core/0_deposit-contract.md +++ b/specs/core/0_deposit-contract.md @@ -38,7 +38,7 @@ The initial deployment phases of Ethereum 2.0 are implemented without consensus ### `deposit` function -The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes32`. The first three arguments populate a [`DepositData`](./0_beacon-chain.md#depositdata) object, and `deposit_data_root` is the expected `DepositData` root as a protection against malformatted calldata. +The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes[32]`. The first three arguments populate a [`DepositData`](./0_beacon-chain.md#depositdata) object, and `deposit_data_root` is the expected `DepositData` root as a protection against malformatted calldata. 
#### Deposit amount From 81b2566cd8213b54247a218d02ac7b8c8d67de83 Mon Sep 17 00:00:00 2001 From: Jim McDonald Date: Sat, 28 Sep 2019 13:18:06 +0100 Subject: [PATCH 185/250] Update validator doc to match recent changes --- specs/validator/0_beacon-chain-validator.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 8a9cf1b5d..f3ca3793d 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -97,10 +97,11 @@ In Phase 0, all incoming validator deposits originate from the Ethereum 1.0 proo To submit a deposit: - Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object. -- Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_EFFECTIVE_BALANCE`. +- Let `amount` be the amount in Gwei to be deposited by the validator where `amount >= MIN_DEPOSIT_AMOUNT`. - Set `deposit_data.amount = amount`. - Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=compute_domain(DOMAIN_DEPOSIT)`. (Deposits are valid regardless of fork version, `compute_domain` will default to zeroes there). -- Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])` along with a deposit of `amount` Gwei. +- Let `deposit_data_root` be `hash_tree_root(deposit_data)`. +- Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes[32])` along with a deposit of `amount` Gwei. *Note*: Deposits made for the same `pubkey` are treated as for the same validator. 
A singular `Validator` will be added to `state.validators` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_EFFECTIVE_BALANCE`. From b05053146859e9bb84aec688244f861cf6aacf3d Mon Sep 17 00:00:00 2001 From: Jim McDonald Date: Sat, 28 Sep 2019 17:01:29 +0100 Subject: [PATCH 186/250] Revert to bytes32 --- specs/core/0_deposit-contract.md | 2 +- specs/validator/0_beacon-chain-validator.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_deposit-contract.md b/specs/core/0_deposit-contract.md index d3d811c6f..06962594e 100644 --- a/specs/core/0_deposit-contract.md +++ b/specs/core/0_deposit-contract.md @@ -38,7 +38,7 @@ The initial deployment phases of Ethereum 2.0 are implemented without consensus ### `deposit` function -The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes[32]`. The first three arguments populate a [`DepositData`](./0_beacon-chain.md#depositdata) object, and `deposit_data_root` is the expected `DepositData` root as a protection against malformatted calldata. +The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes32`. The first three arguments populate a [`DepositData`](./0_beacon-chain.md#depositdata) object, and `deposit_data_root` is the expected `DepositData` root as a protection against malformatted calldata. 
#### Deposit amount diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index f3ca3793d..8fc3fb7fa 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -101,7 +101,7 @@ To submit a deposit: - Set `deposit_data.amount = amount`. - Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=compute_domain(DOMAIN_DEPOSIT)`. (Deposits are valid regardless of fork version, `compute_domain` will default to zeroes there). - Let `deposit_data_root` be `hash_tree_root(deposit_data)`. -- Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes[32])` along with a deposit of `amount` Gwei. +- Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes32)` along with a deposit of `amount` Gwei. *Note*: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validators` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_EFFECTIVE_BALANCE`. 
From b259d3518b28622b2627a4b23932d790ff8d652c Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 30 Sep 2019 11:34:28 +0900 Subject: [PATCH 187/250] move assert to get_beacon_proposer_index --- scripts/build_spec.py | 2 +- specs/core/1_shard-data-chains.md | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index ffdbda505..f35332e64 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -37,7 +37,7 @@ from eth2spec.utils.bls import ( from eth2spec.utils.hash_function import hash ''' PHASE1_IMPORTS = '''from typing import ( - Any, Dict, Optional, Set, Sequence, MutableSequence, NewType, Tuple, Union, + Any, Dict, Set, Sequence, MutableSequence, NewType, Tuple, Union, ) from math import ( log2, diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 96b2de41a..9293a50da 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -202,12 +202,11 @@ def get_shard_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) - #### `get_shard_proposer_index` ```python -def get_shard_proposer_index(beacon_state: BeaconState, shard: Shard, slot: ShardSlot) -> Optional[ValidatorIndex]: +def get_shard_proposer_index(beacon_state: BeaconState, shard: Shard, slot: ShardSlot) -> ValidatorIndex: epoch = get_current_epoch(beacon_state) shard_committee = get_shard_committee(beacon_state, shard, epoch) active_indices = [i for i in shard_committee if is_active_validator(beacon_state.validators[i], epoch)] - if not any(active_indices): - return None + assert any(active_indices) epoch_seed = get_seed(beacon_state, epoch, DOMAIN_SHARD_PROPOSER) seed = hash(epoch_seed + int_to_bytes(slot, length=8) + int_to_bytes(shard, length=8)) @@ -356,7 +355,6 @@ def process_shard_block_header(beacon_state: BeaconState, shard_state: ShardStat assert block.block_size_sum == shard_state.block_size_sum # Verify proposer is not slashed proposer_index = 
get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) - assert proposer_index is not None proposer = beacon_state.validators[proposer_index] assert not proposer.slashed # Verify proposer signature @@ -385,7 +383,6 @@ def process_shard_attestations(beacon_state: BeaconState, shard_state: ShardStat assert bls_verify(bls_aggregate_pubkeys(pubkeys), message, block.attestations, domain) # Proposer micro-reward proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) - assert proposer_index is not None reward = attestation_count * get_base_reward(beacon_state, proposer_index) // PROPOSER_REWARD_QUOTIENT process_delta(beacon_state, shard_state, proposer_index, Gwei(reward)) ``` @@ -399,7 +396,6 @@ def process_shard_block_body(beacon_state: BeaconState, shard_state: ShardState, # Apply proposer block body fee block_body_fee = shard_state.block_body_price * len(block.body) // MAX_SHARD_BLOCK_SIZE proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) - assert proposer_index is not None process_delta(beacon_state, shard_state, proposer_index, Gwei(block_body_fee), positive=False) # Burn process_delta(beacon_state, shard_state, proposer_index, Gwei(block_body_fee // PROPOSER_REWARD_QUOTIENT)) # Reward # Calculate new block body price From 49a291909936cec4653db407f8e53ca09631f136 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 30 Sep 2019 12:58:05 +0900 Subject: [PATCH 188/250] add more shard block sanity tests --- .../test/helpers/phase1/shard_block.py | 6 +- .../test/helpers/phase1/shard_state.py | 18 ++ .../test/phase_1/sanity/test_shard_blocks.py | 170 ++++++++++++++++++ .../shard_data_chain/test_shard_block.py | 62 ------- 4 files changed, 191 insertions(+), 65 deletions(-) create mode 100644 test_libs/pyspec/eth2spec/test/helpers/phase1/shard_state.py create mode 100644 test_libs/pyspec/eth2spec/test/phase_1/sanity/test_shard_blocks.py delete mode 100644 
test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py diff --git a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py index 3ceb3e0aa..ea5783655 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py @@ -15,9 +15,9 @@ from .attestations import ( @only_with_bls() -def sign_shard_block(spec, beacon_state, block, shard, proposer_index=None): +def sign_shard_block(spec, beacon_state, shard_state, block, proposer_index=None): if proposer_index is None: - proposer_index = spec.get_shard_proposer_index(beacon_state, shard, block.slot) + proposer_index = spec.get_shard_proposer_index(beacon_state, shard_state.shard, block.slot) privkey = privkeys[proposer_index] @@ -75,6 +75,6 @@ def build_empty_shard_block(spec, ) if signed: - sign_shard_block(spec, beacon_state, block, shard_state.shard) + sign_shard_block(spec, beacon_state, shard_state, block) return block diff --git a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_state.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_state.py new file mode 100644 index 000000000..24240b5fa --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_state.py @@ -0,0 +1,18 @@ +from eth2spec.test.helpers.phase1.shard_block import sign_shard_block + + +def configure_shard_state(spec, beacon_state, shard=0): + beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH) + shard_state = spec.get_genesis_shard_state(spec.Shard(shard)) + shard_state.slot = spec.ShardSlot(spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH) + return beacon_state, shard_state + + +def shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block): + """ + Shard state transition via the provided ``block`` + then package the block with the state root and signature. 
+ """ + spec.shard_state_transition(beacon_state, shard_state, block) + block.state_root = shard_state.hash_tree_root() + sign_shard_block(spec, beacon_state, shard_state, block) diff --git a/test_libs/pyspec/eth2spec/test/phase_1/sanity/test_shard_blocks.py b/test_libs/pyspec/eth2spec/test/phase_1/sanity/test_shard_blocks.py new file mode 100644 index 000000000..2669ffb53 --- /dev/null +++ b/test_libs/pyspec/eth2spec/test/phase_1/sanity/test_shard_blocks.py @@ -0,0 +1,170 @@ +from copy import deepcopy + +from eth2spec.test.helpers.phase1.shard_block import ( + build_empty_shard_block, + sign_shard_block, +) +from eth2spec.test.helpers.phase1.shard_state import ( + configure_shard_state, + shard_state_transition_and_sign_block, +) +from eth2spec.test.context import ( + always_bls, + expect_assertion_error, + spec_state_test, + with_all_phases_except, +) + + +@with_all_phases_except(['phase0']) +@spec_state_test +@always_bls +def test_process_empty_shard_block(spec, state): + beacon_state, shard_state = configure_shard_state(spec, state) + + block = build_empty_shard_block( + spec, + beacon_state, + shard_state, + slot=shard_state.slot + 1, + signed=True, + full_attestation=False, + ) + + yield 'pre', shard_state + yield 'beacon_state', beacon_state + + shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block) + + yield 'blocks', [block] + yield 'post', shard_state + + +@with_all_phases_except(['phase0']) +@spec_state_test +@always_bls +def test_process_full_attestation_shard_block(spec, state): + beacon_state, shard_state = configure_shard_state(spec, state) + + block = build_empty_shard_block( + spec, + beacon_state, + shard_state, + slot=shard_state.slot + 1, + signed=True, + full_attestation=True, + ) + + yield 'pre', shard_state + yield 'beacon_state', beacon_state + + shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block) + + yield 'blocks', [block] + yield 'post', shard_state + + +@with_all_phases_except(['phase0']) 
+@spec_state_test +def test_prev_slot_block_transition(spec, state): + beacon_state, shard_state = configure_shard_state(spec, state) + + # Go to clean slot + spec.process_shard_slots(shard_state, shard_state.slot + 1) + # Make a block for it + block = build_empty_shard_block(spec, beacon_state, shard_state, slot=shard_state.slot, signed=True) + # Transition to next slot, above block will not be invalid on top of new state. + spec.process_shard_slots(shard_state, shard_state.slot + 1) + + yield 'pre', shard_state + yield 'beacon_state', beacon_state + expect_assertion_error( + lambda: spec.shard_state_transition(beacon_state, shard_state, block) + ) + yield 'blocks', [block] + yield 'post', None + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_same_slot_block_transition(spec, state): + beacon_state, shard_state = configure_shard_state(spec, state) + + # Same slot on top of pre-state, but move out of slot 0 first. + spec.process_shard_slots(shard_state, shard_state.slot + 1) + block = build_empty_shard_block(spec, beacon_state, shard_state, slot=shard_state.slot, signed=True) + + yield 'pre', shard_state + yield 'beacon_state', beacon_state + + shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block) + + yield 'blocks', [block] + yield 'post', shard_state + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_invalid_state_root(spec, state): + beacon_state, shard_state = configure_shard_state(spec, state) + + spec.process_shard_slots(shard_state, shard_state.slot + 1) + block = build_empty_shard_block(spec, beacon_state, shard_state, slot=shard_state.slot) + block.state_root = b'\x36' * 32 + sign_shard_block(spec, beacon_state, shard_state, block) + + yield 'pre', shard_state + yield 'beacon_state', beacon_state + expect_assertion_error( + lambda: spec.shard_state_transition(beacon_state, shard_state, block, validate_state_root=True) + ) + yield 'blocks', [block] + yield 'post', None + + 
+@with_all_phases_except(['phase0']) +@spec_state_test +def test_skipped_slots(spec, state): + beacon_state, shard_state = configure_shard_state(spec, state) + + block = build_empty_shard_block(spec, beacon_state, shard_state, slot=shard_state.slot + 3, signed=True) + + yield 'pre', shard_state + yield 'beacon_state', beacon_state + + shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block) + + yield 'blocks', [block] + yield 'post', shard_state + + assert shard_state.slot == block.slot + latest_block_header = deepcopy(shard_state.latest_block_header) + latest_block_header.state_root = shard_state.hash_tree_root() + assert latest_block_header.signing_root() == block.signing_root() + + +@with_all_phases_except(['phase0']) +@spec_state_test +def test_empty_shard_period_transition(spec, state): + beacon_state, shard_state = configure_shard_state(spec, state) + + # modify some of the deltas to ensure the period transition works properly + stub_delta = 10 + shard_state.newer_committee_positive_deltas[0] = stub_delta + shard_state.newer_committee_negative_deltas[0] = stub_delta + + slot = shard_state.slot + spec.SHARD_SLOTS_PER_EPOCH * spec.EPOCHS_PER_SHARD_PERIOD + block = build_empty_shard_block(spec, beacon_state, shard_state, slot=slot, signed=True) + + yield 'pre', shard_state + yield 'beacon_state', beacon_state + + shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block) + + yield 'blocks', [block] + yield 'post', shard_state + + shard_state.older_committee_positive_deltas[0] == stub_delta + shard_state.older_committee_negative_deltas[0] == stub_delta + shard_state.newer_committee_positive_deltas[0] == 0 + shard_state.newer_committee_negative_deltas[0] == 0 diff --git a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py b/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py deleted file mode 100644 index b0d8ad5e6..000000000 --- 
a/test_libs/pyspec/eth2spec/test/phase_1/shard_data_chain/test_shard_block.py +++ /dev/null @@ -1,62 +0,0 @@ -from eth2spec.test.helpers.phase1.shard_block import ( - build_empty_shard_block, -) -from eth2spec.test.context import ( - with_all_phases_except, - spec_state_test, - always_bls, -) - - -@with_all_phases_except(['phase0']) -@spec_state_test -@always_bls -def test_process_empty_shard_block(spec, state): - beacon_state = state - beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH) - shard_state = spec.get_genesis_shard_state(spec.Shard(0)) - shard_state.slot = spec.ShardSlot(spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH) - - block = build_empty_shard_block( - spec, - beacon_state, - shard_state, - slot=shard_state.slot + 1, - signed=True, - full_attestation=False, - ) - - yield 'pre', shard_state - yield 'beacon_state', beacon_state - yield 'block', block - - spec.shard_state_transition(beacon_state, shard_state, block) - - yield 'post', shard_state - - -@with_all_phases_except(['phase0']) -@spec_state_test -@always_bls -def test_process_full_attestation_shard_block(spec, state): - beacon_state = state - beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH) - shard_state = spec.get_genesis_shard_state(spec.Shard(0)) - shard_state.slot = spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH - - block = build_empty_shard_block( - spec, - beacon_state, - shard_state, - slot=shard_state.slot + 1, - signed=True, - full_attestation=True, - ) - - yield 'pre', shard_state - yield 'beacon_state', beacon_state - yield 'block', block - - spec.shard_state_transition(beacon_state, shard_state, block) - - yield 'post', shard_state From a5fb9408aee6f6bdfc9b9ff57c3989d8b25dd080 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 30 Sep 2019 13:11:23 +0900 Subject: [PATCH 189/250] make beacon state for shard state transition from current shard epoch --- specs/core/1_shard-data-chains.md | 8 ++++---- 1 file changed, 
4 insertions(+), 4 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 3fe6f6a65..2045c7a7e 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -338,12 +338,12 @@ def process_shard_block_header(beacon_state: BeaconState, shard_state: ShardStat # Verify the slot number assert block.slot == shard_state.slot # Verify the beacon chain root - parent_epoch = compute_epoch_of_shard_slot(shard_state.latest_block_header.slot) - # --super dirty. need to think-- # - if parent_epoch * SLOTS_PER_EPOCH == beacon_state.slot: + epoch = compute_epoch_of_shard_slot(shard_state.slot) + assert epoch == compute_epoch_of_slot(beacon_state.slot) + if epoch * SLOTS_PER_EPOCH == beacon_state.slot: beacon_block_root = signing_root(beacon_state.latest_block_header) else: - beacon_block_root = get_block_root(beacon_state, parent_epoch) + beacon_block_root = get_block_root(beacon_state, epoch) assert block.beacon_block_root == beacon_block_root # Verify the parent root assert block.parent_root == signing_root(shard_state.latest_block_header) From 2af39ad4694a1ad7eb91856e3820d2d50c21953f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 30 Sep 2019 13:58:48 +0900 Subject: [PATCH 190/250] fix shard period sanity test --- .../pyspec/eth2spec/test/helpers/phase1/shard_block.py | 6 +++--- .../eth2spec/test/phase_1/sanity/test_shard_blocks.py | 7 +++++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py index ea5783655..a339a0e70 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py @@ -41,11 +41,11 @@ def build_empty_shard_block(spec, if slot is None: slot = shard_state.slot - parent_epoch = spec.compute_epoch_of_shard_slot(shard_state.latest_block_header.slot) - if parent_epoch * 
spec.SLOTS_PER_EPOCH == beacon_state.slot: + epoch = spec.compute_epoch_of_shard_slot(slot) + if epoch * spec.SLOTS_PER_EPOCH == beacon_state.slot: beacon_block_root = spec.signing_root(beacon_state.latest_block_header) else: - beacon_block_root = spec.get_block_root(beacon_state, parent_epoch) + beacon_block_root = spec.get_block_root(beacon_state, epoch) previous_block_header = deepcopy(shard_state.latest_block_header) if previous_block_header.state_root == spec.Hash(): diff --git a/test_libs/pyspec/eth2spec/test/phase_1/sanity/test_shard_blocks.py b/test_libs/pyspec/eth2spec/test/phase_1/sanity/test_shard_blocks.py index 2669ffb53..51575f2d5 100644 --- a/test_libs/pyspec/eth2spec/test/phase_1/sanity/test_shard_blocks.py +++ b/test_libs/pyspec/eth2spec/test/phase_1/sanity/test_shard_blocks.py @@ -154,6 +154,13 @@ def test_empty_shard_period_transition(spec, state): shard_state.newer_committee_negative_deltas[0] = stub_delta slot = shard_state.slot + spec.SHARD_SLOTS_PER_EPOCH * spec.EPOCHS_PER_SHARD_PERIOD + beacon_state.slot = spec.compute_epoch_of_shard_slot(slot) * spec.SLOTS_PER_EPOCH - 4 + spec.process_slots(beacon_state, spec.compute_epoch_of_shard_slot(slot) * spec.SLOTS_PER_EPOCH) + + # all validators get slashed for not revealing keys + # undo this to allow for a block proposal + for index in range(len(beacon_state.validators)): + beacon_state.validators[index].slashed = False block = build_empty_shard_block(spec, beacon_state, shard_state, slot=slot, signed=True) yield 'pre', shard_state From 56fd91b9f962d01092e4b9c1bf1357b990d68f50 Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 1 Oct 2019 11:23:03 -0500 Subject: [PATCH 191/250] merkle proofs: Fix get_helper_indices --- specs/light_client/merkle_proofs.md | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index b920f50b1..b021f1ac0 100644 --- a/specs/light_client/merkle_proofs.md +++ 
b/specs/light_client/merkle_proofs.md @@ -258,6 +258,18 @@ def get_branch_indices(tree_index: GeneralizedIndex) -> Sequence[GeneralizedInde return o[:-1] ``` +```python +def get_path_indices(tree_index: GeneralizedIndex) -> Sequence[GeneralizedIndex]: + """ + Get the generalized indices of the chunks along the path from the chunk with the + given tree index to the root. + """ + o = [tree_index] + while o[-1] > 1: + o.append(generalized_index_parent(o[-1])) + return o[:-1] +``` + ```python def get_helper_indices(indices: Sequence[GeneralizedIndex]) -> Sequence[GeneralizedIndex]: """ @@ -265,17 +277,14 @@ def get_helper_indices(indices: Sequence[GeneralizedIndex]) -> Sequence[Generali generalized indices. Note that the decreasing order is chosen deliberately to ensure equivalence to the order of hashes in a regular single-item Merkle proof in the single-item case. """ - all_indices: Set[GeneralizedIndex] = set() + all_helper_indices: Set[GeneralizedIndex] = set() + all_path_indices: Set[GeneralizedIndex] = set() for index in indices: - all_indices = all_indices.union(set(list(get_branch_indices(index)) + [index])) + all_helper_indices = all_helper_indices.union(set(get_branch_indices(index))) + all_path_indices = all_path_indices.union(set(get_path_indices(index))) return sorted([ - x for x in all_indices if ( - not ( - generalized_index_child(x, False) in all_indices and - generalized_index_child(x, True) in all_indices - ) and not (x in indices) - ) + x for x in all_helper_indices if x not in all_path_indices ], reverse=True) ``` From dc4869349c4d0a14a07476863959add418c22ab2 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 5 Oct 2019 06:56:54 +0900 Subject: [PATCH 192/250] remove explicitly handling empty attestation --- specs/core/1_shard-data-chains.md | 5 ----- .../eth2spec/test/helpers/phase1/shard_block.py | 17 ++++++++++------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/specs/core/1_shard-data-chains.md 
b/specs/core/1_shard-data-chains.md index 2045c7a7e..a31f16880 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -384,11 +384,6 @@ def process_shard_attestations(beacon_state: BeaconState, shard_state: ShardStat pubkeys.append(beacon_state.validators[validator_index].pubkey) process_delta(beacon_state, shard_state, validator_index, get_base_reward(beacon_state, validator_index)) attestation_count += 1 - # Exit early if no participants - if not any(pubkeys): - assert block.attestations == BLSSignature() - return - # Verify there are no extraneous bits set beyond the shard committee for i in range(len(shard_committee), 2 * MAX_PERIOD_COMMITTEE_SIZE): assert block.aggregation_bits[i] == 0b0 diff --git a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py index a339a0e70..834adc93f 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py @@ -66,13 +66,16 @@ def build_empty_shard_block(spec, (True,) * len(shard_committee) + (False,) * (spec.MAX_PERIOD_COMMITTEE_SIZE * 2 - len(shard_committee)) ) - block.attestations = sign_shard_attestation( - spec, - beacon_state, - shard_state, - block, - participants=shard_committee, - ) + else: + shard_committee = [] + + block.attestations = sign_shard_attestation( + spec, + beacon_state, + shard_state, + block, + participants=shard_committee, + ) if signed: sign_shard_block(spec, beacon_state, shard_state, block) From 3c3ae9ac02ed896b59b03c95f60778c6f4f22f8b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 5 Oct 2019 07:22:20 +0900 Subject: [PATCH 193/250] enforce beacon state at epoch boundary slot --- specs/core/1_shard-data-chains.md | 16 ++++++++++------ .../eth2spec/test/helpers/phase1/shard_block.py | 11 +++++------ 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/specs/core/1_shard-data-chains.md 
b/specs/core/1_shard-data-chains.md index a31f16880..69962b6fe 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -339,12 +339,16 @@ def process_shard_block_header(beacon_state: BeaconState, shard_state: ShardStat assert block.slot == shard_state.slot # Verify the beacon chain root epoch = compute_epoch_of_shard_slot(shard_state.slot) - assert epoch == compute_epoch_of_slot(beacon_state.slot) - if epoch * SLOTS_PER_EPOCH == beacon_state.slot: - beacon_block_root = signing_root(beacon_state.latest_block_header) - else: - beacon_block_root = get_block_root(beacon_state, epoch) - assert block.beacon_block_root == beacon_block_root + assert epoch * SLOTS_PER_EPOCH == beacon_state.slot + beacon_block_header = BeaconBlockHeader( + slot=beacon_state.latest_block_header.slot, + parent_root=beacon_state.latest_block_header.parent_root, + state_root=beacon_state.latest_block_header.state_root, + body_root=beacon_state.latest_block_header.body_root, + ) + if beacon_block_header.state_root == Bytes32(): + beacon_block_header.state_root = hash_tree_root(beacon_state) + assert block.beacon_block_root == signing_root(beacon_block_header) # Verify the parent root assert block.parent_root == signing_root(shard_state.latest_block_header) # Save current block as the new latest block diff --git a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py index 834adc93f..7955c613e 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py +++ b/test_libs/pyspec/eth2spec/test/helpers/phase1/shard_block.py @@ -41,14 +41,13 @@ def build_empty_shard_block(spec, if slot is None: slot = shard_state.slot - epoch = spec.compute_epoch_of_shard_slot(slot) - if epoch * spec.SLOTS_PER_EPOCH == beacon_state.slot: - beacon_block_root = spec.signing_root(beacon_state.latest_block_header) - else: - beacon_block_root = spec.get_block_root(beacon_state, epoch) + previous_beacon_header 
= deepcopy(beacon_state.latest_block_header) + if previous_beacon_header.state_root == spec.Bytes32(): + previous_beacon_header.state_root = beacon_state.hash_tree_root() + beacon_block_root = spec.signing_root(previous_beacon_header) previous_block_header = deepcopy(shard_state.latest_block_header) - if previous_block_header.state_root == spec.Hash(): + if previous_block_header.state_root == spec.Bytes32(): previous_block_header.state_root = shard_state.hash_tree_root() parent_root = signing_root(previous_block_header) From dfdf3ab5cf256cb2ab4933493844fc5c7174004f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 12 Oct 2019 11:48:34 +0900 Subject: [PATCH 194/250] initial removal and cleanup of shard/crosslink from phase 0 --- specs/core/0_beacon-chain.md | 154 +++----------------- specs/core/0_fork-choice.md | 2 +- specs/validator/0_beacon-chain-validator.md | 31 ++-- 3 files changed, 30 insertions(+), 157 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 681d82457..838d9d681 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -25,7 +25,6 @@ - [`Fork`](#fork) - [`Checkpoint`](#checkpoint) - [`Validator`](#validator) - - [`Crosslink`](#crosslink) - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - [`IndexedAttestation`](#indexedattestation) @@ -84,8 +83,6 @@ - [`get_seed`](#get_seed) - [`get_committee_count`](#get_committee_count) - [`get_crosslink_committee`](#get_crosslink_committee) - - [`get_start_shard`](#get_start_shard) - - [`get_shard_delta`](#get_shard_delta) - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`get_attestation_data_slot`](#get_attestation_data_slot) - [`get_total_balance`](#get_total_balance) @@ -105,7 +102,6 @@ - [Epoch processing](#epoch-processing) - [Helper functions](#helper-functions-1) - [Justification and finalization](#justification-and-finalization) - - [Crosslinks](#crosslinks) - [Rewards 
and penalties](#rewards-and-penalties-1) - [Registry updates](#registry-updates) - [Slashings](#slashings) @@ -174,6 +170,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | +| `COMMITTEES_PER_SLOT` | `2**5` (= 32) | | `SHARD_COUNT` | `2**10` (= 1,024) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | | `MAX_VALIDATORS_PER_COMMITTEE` | `2**12` (= 4,096) | @@ -305,29 +302,18 @@ class Validator(Container): withdrawable_epoch: Epoch # When validator can withdraw or transfer funds ``` -#### `Crosslink` - -```python -class Crosslink(Container): - shard: Shard - parent_root: Hash - # Crosslinking data - start_epoch: Epoch - end_epoch: Epoch - data_root: Hash -``` - #### `AttestationData` ```python class AttestationData(Container): + slot: Slot # LMD GHOST vote beacon_block_root: Hash # FFG vote source: Checkpoint target: Checkpoint - # Crosslink vote - crosslink: Crosslink + # Index -- Maybe remove + index: uint64 ``` #### `AttestationDataAndCustodyBit` @@ -507,16 +493,12 @@ class BeaconState(Container): validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] # Shuffling - start_shard: Shard randao_mixes: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR] # Slashings slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances # Attestations previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] - # Crosslinks - previous_crosslinks: Vector[Crosslink, SHARD_COUNT] # Previous epoch snapshot - current_crosslinks: Vector[Crosslink, SHARD_COUNT] # Finality justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch previous_justified_checkpoint: Checkpoint # Previous epoch snapshot @@ -885,54 +867,32 @@ def get_committee_count(state: BeaconState, epoch: Epoch) -> uint64: """ Return 
the number of committees at ``epoch``. """ + # Consider not hard coding but just return committees per slot for now + """ committees_per_slot = max(1, min( SHARD_COUNT // SLOTS_PER_EPOCH, len(get_active_validator_indices(state, epoch)) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, )) return committees_per_slot * SLOTS_PER_EPOCH + """ + return COMMITTEES_PER_SLOT ``` #### `get_crosslink_committee` ```python -def get_crosslink_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]: +def get_crosslink_committee(state: BeaconState, epoch: Epoch, index: uint64) -> Sequence[ValidatorIndex]: """ - Return the crosslink committee at ``epoch`` for ``shard``. + Return the crosslink committee at ``epoch`` for ``index``. """ return compute_committee( indices=get_active_validator_indices(state, epoch), seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), - index=(shard + SHARD_COUNT - get_start_shard(state, epoch)) % SHARD_COUNT, + index=index, count=get_committee_count(state, epoch), ) ``` -#### `get_start_shard` - -```python -def get_start_shard(state: BeaconState, epoch: Epoch) -> Shard: - """ - Return the start shard of the 0th committee at ``epoch``. - """ - assert epoch <= get_current_epoch(state) + 1 - check_epoch = Epoch(get_current_epoch(state) + 1) - shard = Shard((state.start_shard + get_shard_delta(state, get_current_epoch(state))) % SHARD_COUNT) - while check_epoch > epoch: - check_epoch -= Epoch(1) - shard = Shard((shard + SHARD_COUNT - get_shard_delta(state, check_epoch)) % SHARD_COUNT) - return shard -``` - -#### `get_shard_delta` - -```python -def get_shard_delta(state: BeaconState, epoch: Epoch) -> uint64: - """ - Return the number of shards to increment ``state.start_shard`` at ``epoch``. 
- """ - return min(get_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH) -``` - #### `get_beacon_proposer_index` ```python @@ -946,18 +906,6 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: return compute_proposer_index(state, indices, seed) ``` -#### `get_attestation_data_slot` - -```python -def get_attestation_data_slot(state: BeaconState, data: AttestationData) -> Slot: - """ - Return the slot corresponding to the attestation ``data``. - """ - committee_count = get_committee_count(state, data.target.epoch) - offset = (data.crosslink.shard + SHARD_COUNT - get_start_shard(state, data.target.epoch)) % SHARD_COUNT - return Slot(compute_start_slot_of_epoch(data.target.epoch) + offset // (committee_count // SLOTS_PER_EPOCH)) -``` - #### `get_total_balance` ```python @@ -1019,7 +967,7 @@ def get_attesting_indices(state: BeaconState, """ Return the set of attesting indices corresponding to ``data`` and ``bits``. """ - committee = get_crosslink_committee(state, data.target.epoch, data.crosslink.shard) + committee = get_crosslink_committee(state, data.target.epoch, data.index) return set(index for i, index in enumerate(committee) if bits[i]) ``` @@ -1199,7 +1147,6 @@ def process_slot(state: BeaconState) -> None: ```python def process_epoch(state: BeaconState) -> None: process_justification_and_finalization(state) - process_crosslinks(state) process_rewards_and_penalties(state) process_registry_updates(state) # @process_reveal_deadlines @@ -1230,7 +1177,7 @@ def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> Sequen def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]: return [ a for a in get_matching_source_attestations(state, epoch) - if a.data.beacon_block_root == get_block_root_at_slot(state, get_attestation_data_slot(state, a.data)) + if a.data.beacon_block_root == get_block_root_at_slot(state, a.data.slot) ] ``` @@ -1248,23 +1195,6 @@ def 
get_attesting_balance(state: BeaconState, attestations: Sequence[PendingAtte return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) ``` -```python -def get_winning_crosslink_and_attesting_indices(state: BeaconState, - epoch: Epoch, - shard: Shard) -> Tuple[Crosslink, Set[ValidatorIndex]]: - attestations = [a for a in get_matching_source_attestations(state, epoch) if a.data.crosslink.shard == shard] - crosslinks = filter( - lambda c: hash_tree_root(state.current_crosslinks[shard]) in (c.parent_root, hash_tree_root(c)), - [a.data.crosslink for a in attestations] - ) - # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically) - winning_crosslink = max(crosslinks, key=lambda c: ( - get_attesting_balance(state, [a for a in attestations if a.data.crosslink == c]), c.data_root - ), default=Crosslink()) - winning_attestations = [a for a in attestations if a.data.crosslink == winning_crosslink] - return winning_crosslink, get_unslashed_attesting_indices(state, winning_attestations) -``` - #### Justification and finalization ```python @@ -1308,20 +1238,6 @@ def process_justification_and_finalization(state: BeaconState) -> None: state.finalized_checkpoint = old_current_justified_checkpoint ``` -#### Crosslinks - -```python -def process_crosslinks(state: BeaconState) -> None: - state.previous_crosslinks = [c for c in state.current_crosslinks] - for epoch in (get_previous_epoch(state), get_current_epoch(state)): - for offset in range(get_committee_count(state, epoch)): - shard = Shard((get_start_shard(state, epoch) + offset) % SHARD_COUNT) - crosslink_committee = set(get_crosslink_committee(state, epoch, shard)) - winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard) - if 3 * get_total_balance(state, attesting_indices) >= 2 * get_total_balance(state, crosslink_committee): - state.current_crosslinks[shard] = winning_crosslink -``` - #### 
Rewards and penalties ```python @@ -1384,36 +1300,15 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence return rewards, penalties ``` -```python -def get_crosslink_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: - rewards = [Gwei(0) for _ in range(len(state.validators))] - penalties = [Gwei(0) for _ in range(len(state.validators))] - epoch = get_previous_epoch(state) - for offset in range(get_committee_count(state, epoch)): - shard = Shard((get_start_shard(state, epoch) + offset) % SHARD_COUNT) - crosslink_committee = set(get_crosslink_committee(state, epoch, shard)) - winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard) - attesting_balance = get_total_balance(state, attesting_indices) - committee_balance = get_total_balance(state, crosslink_committee) - for index in crosslink_committee: - base_reward = get_base_reward(state, index) - if index in attesting_indices: - rewards[index] += base_reward * attesting_balance // committee_balance - else: - penalties[index] += base_reward - return rewards, penalties -``` - ```python def process_rewards_and_penalties(state: BeaconState) -> None: if get_current_epoch(state) == GENESIS_EPOCH: return rewards1, penalties1 = get_attestation_deltas(state) - rewards2, penalties2 = get_crosslink_deltas(state) for index in range(len(state.validators)): - increase_balance(state, ValidatorIndex(index), rewards1[index] + rewards2[index]) - decrease_balance(state, ValidatorIndex(index), penalties1[index] + penalties2[index]) + increase_balance(state, ValidatorIndex(index), rewards1[index]) + decrease_balance(state, ValidatorIndex(index), penalties1[index]) ``` #### Registry updates @@ -1481,8 +1376,6 @@ def process_final_updates(state: BeaconState) -> None: if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots) 
state.historical_roots.append(hash_tree_root(historical_batch)) - # Update start shard - state.start_shard = Shard((state.start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT) # Rotate current/previous epoch attestations state.previous_epoch_attestations = state.current_epoch_attestations state.current_epoch_attestations = [] @@ -1609,37 +1502,28 @@ def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSla ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data - assert data.crosslink.shard < SHARD_COUNT + assert data.index < COMMITTEES_PER_SLOT assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) - attestation_slot = get_attestation_data_slot(state, data) - assert attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= attestation_slot + SLOTS_PER_EPOCH + assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH - committee = get_crosslink_committee(state, data.target.epoch, data.crosslink.shard) + committee = get_crosslink_committee(state, data.target.epoch, data.index) assert len(attestation.aggregation_bits) == len(attestation.custody_bits) == len(committee) pending_attestation = PendingAttestation( data=data, aggregation_bits=attestation.aggregation_bits, - inclusion_delay=state.slot - attestation_slot, + inclusion_delay=state.slot - data.slot, proposer_index=get_beacon_proposer_index(state), ) if data.target.epoch == get_current_epoch(state): assert data.source == state.current_justified_checkpoint - parent_crosslink = state.current_crosslinks[data.crosslink.shard] state.current_epoch_attestations.append(pending_attestation) else: assert data.source == state.previous_justified_checkpoint - parent_crosslink = state.previous_crosslinks[data.crosslink.shard] state.previous_epoch_attestations.append(pending_attestation) - # Check crosslink against expected parent crosslink - assert 
data.crosslink.parent_root == hash_tree_root(parent_crosslink) - assert data.crosslink.start_epoch == parent_crosslink.end_epoch - assert data.crosslink.end_epoch == min(data.target.epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK) - assert data.crosslink.data_root == Bytes32() # [to be removed in phase 1] - # Check signature assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) ``` diff --git a/specs/core/0_fork-choice.md b/specs/core/0_fork-choice.md index ad0590685..8e25fe8f3 100644 --- a/specs/core/0_fork-choice.md +++ b/specs/core/0_fork-choice.md @@ -192,7 +192,7 @@ def on_attestation(store: Store, attestation: Attestation) -> None: # Attestations can only affect the fork choice of subsequent slots. # Delay consideration in the fork choice until their slot is in the past. - attestation_slot = get_attestation_data_slot(target_state, attestation.data) + attestation_slot = attestation.data.slot assert store.time >= (attestation_slot + 1) * SECONDS_PER_SLOT # Get state at the `target` to validate attestation and calculate the committees diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 8a9cf1b5d..d43200463 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -41,7 +41,6 @@ - [Attestation data](#attestation-data) - [LMD GHOST vote](#lmd-ghost-vote) - [FFG vote](#ffg-vote) - - [Crosslink vote](#crosslink-vote) - [Construct attestation](#construct-attestation) - [Data](#data) - [Aggregation bits](#aggregation-bits) @@ -135,28 +134,25 @@ A validator can get committee assignments for a given epoch using the following ```python def get_committee_assignment(state: BeaconState, epoch: Epoch, - validator_index: ValidatorIndex) -> Optional[Tuple[Sequence[ValidatorIndex], Shard, Slot]]: + validator_index: ValidatorIndex + ) -> Optional[Tuple[Sequence[ValidatorIndex], uint64, Slot]]: """ Return the committee 
assignment in the ``epoch`` for ``validator_index``. ``assignment`` returned is a tuple of the following form: * ``assignment[0]`` is the list of validators in the committee - * ``assignment[1]`` is the shard to which the committee is assigned + * ``assignment[1]`` is the index to which the committee is assigned * ``assignment[2]`` is the slot at which the committee is assigned Return None if no assignment. """ next_epoch = get_current_epoch(state) + 1 assert epoch <= next_epoch - committees_per_slot = get_committee_count(state, epoch) // SLOTS_PER_EPOCH start_slot = compute_start_slot_of_epoch(epoch) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): - offset = committees_per_slot * (slot % SLOTS_PER_EPOCH) - slot_start_shard = (get_start_shard(state, epoch) + offset) % SHARD_COUNT - for i in range(committees_per_slot): - shard = Shard((slot_start_shard + i) % SHARD_COUNT) - committee = get_crosslink_committee(state, epoch, shard) + for index in range(COMMITTEES_PER_SLOT): + committee = get_crosslink_committee(state, epoch, index) if validator_index in committee: - return committee, shard, Slot(slot) + return committee, index, Slot(slot) return None ``` @@ -176,7 +172,7 @@ def is_proposer(state: BeaconState, The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the epoch in question. -`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting at which future slot they will have to attest and also which shard they should begin syncing (in Phase 1+). +`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). 
A validator should plan for future assignments by noting at which future slot they will have to attest. Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments. @@ -278,7 +274,7 @@ Up to `MAX_VOLUNTARY_EXITS`, [`VoluntaryExit`](../core/0_beacon-chain.md#volunta ### Attestations -A validator is expected to create, sign, and broadcast an attestation during each epoch. The `committee`, assigned `shard`, and assigned `slot` for which the validator performs this role during an epoch are defined by `get_committee_assignment(state, epoch, validator_index)`. +A validator is expected to create, sign, and broadcast an attestation during each epoch. The `committee`, assigned `index`, and assigned `slot` for which the validator performs this role during an epoch are defined by `get_committee_assignment(state, epoch, validator_index)`. A validator should create and broadcast the attestation halfway through the `slot` during which the validator is assigned―that is, `SECONDS_PER_SLOT * 0.5` seconds after the start of `slot`. @@ -303,16 +299,9 @@ Set `attestation_data.beacon_block_root = signing_root(head_block)`. - Let `start_slot = compute_start_slot_of_epoch(get_current_epoch(head_state))`. - Let `epoch_boundary_block_root = signing_root(head_block) if start_slot == head_state.slot else get_block_root(state, start_slot)`. -##### Crosslink vote +##### Index -Construct `attestation_data.crosslink` via the following. - -- Set `attestation_data.crosslink.shard = shard` where `shard` is the shard associated with the validator's committee. -- Let `parent_crosslink = head_state.current_crosslinks[shard]`. -- Set `attestation_data.crosslink.start_epoch = parent_crosslink.end_epoch`. -- Set `attestation_data.crosslink.end_epoch = min(attestation_data.target.epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)`. 
-- Set `attestation_data.crosslink.parent_root = hash_tree_root(head_state.current_crosslinks[shard])`. -- Set `attestation_data.crosslink.data_root = ZERO_HASH`. *Note*: This is a stub for Phase 0. +Set `attestation_data.index = index` where `index` is the index associated with the validator's committee. #### Construct attestation From b3b9b434b49e3f7e5264ef310a0c9f7485d47513 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 12 Oct 2019 12:16:13 +0900 Subject: [PATCH 195/250] working through phase 0 tests after crosslink/shard removal --- specs/core/0_beacon-chain.md | 1 - specs/core/1_shard-data-chains.md | 9 ++ .../eth2spec/test/helpers/attestations.py | 32 ++-- .../test_process_attestation.py | 143 ++---------------- .../test_process_crosslinks.py | 132 ---------------- 5 files changed, 27 insertions(+), 290 deletions(-) delete mode 100644 test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 838d9d681..b57aa7ff4 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -84,7 +84,6 @@ - [`get_committee_count`](#get_committee_count) - [`get_crosslink_committee`](#get_crosslink_committee) - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - - [`get_attestation_data_slot`](#get_attestation_data_slot) - [`get_total_balance`](#get_total_balance) - [`get_total_active_balance`](#get_total_active_balance) - [`get_domain`](#get_domain) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 69962b6fe..9bd9c70ee 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -53,6 +53,7 @@ This document describes the shard transition function (data layer only) and the | Name | SSZ equivalent | Description | | - | - | - | +| `Shard` | `uint64` | a shard number | | `ShardSlot` | `uint64` | a shard slot number | ## Configuration @@ -101,6 +102,14 @@ This document describes the 
shard transition function (data layer only) and the ## Containers +### `Crosslink` + +```python +class Crosslink(Container): + # STUB: placeholder data structure while reworking phase 0 + shard: Shard +``` + ### `ShardBlock` ```python diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index 23d1a8f8f..00c25b340 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -3,11 +3,10 @@ from typing import List from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block from eth2spec.test.helpers.keys import privkeys from eth2spec.utils.bls import bls_sign, bls_aggregate_signatures -from eth2spec.utils.ssz.ssz_impl import hash_tree_root from eth2spec.utils.ssz.ssz_typing import Bitlist -def build_attestation_data(spec, state, slot, shard): +def build_attestation_data(spec, state, slot, index): assert state.slot >= slot if slot == state.slot: @@ -30,40 +29,27 @@ def build_attestation_data(spec, state, slot, shard): source_epoch = state.current_justified_checkpoint.epoch source_root = state.current_justified_checkpoint.root - if spec.compute_epoch_of_slot(slot) == spec.get_current_epoch(state): - parent_crosslink = state.current_crosslinks[shard] - else: - parent_crosslink = state.previous_crosslinks[shard] - return spec.AttestationData( + slot=slot, beacon_block_root=block_root, source=spec.Checkpoint(epoch=source_epoch, root=source_root), target=spec.Checkpoint(epoch=spec.compute_epoch_of_slot(slot), root=epoch_boundary_root), - crosslink=spec.Crosslink( - shard=shard, - start_epoch=parent_crosslink.end_epoch, - end_epoch=min(spec.compute_epoch_of_slot(slot), parent_crosslink.end_epoch + spec.MAX_EPOCHS_PER_CROSSLINK), - data_root=spec.Hash(), - parent_root=hash_tree_root(parent_crosslink), - ), + index=index, ) -def get_valid_attestation(spec, state, slot=None, signed=False): +def 
get_valid_attestation(spec, state, slot=None, index=None, signed=False): if slot is None: slot = state.slot + if index is None: + index = 0 - epoch = spec.compute_epoch_of_slot(slot) - epoch_start_shard = spec.get_start_shard(state, epoch) - committees_per_slot = spec.get_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH - shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT - - attestation_data = build_attestation_data(spec, state, slot, shard) + attestation_data = build_attestation_data(spec, state, slot, index) crosslink_committee = spec.get_crosslink_committee( state, attestation_data.target.epoch, - attestation_data.crosslink.shard, + attestation_data.index, ) committee_size = len(crosslink_committee) @@ -132,7 +118,7 @@ def fill_aggregate_attestation(spec, state, attestation): crosslink_committee = spec.get_crosslink_committee( state, attestation.data.target.epoch, - attestation.data.crosslink.shard, + attestation.data.index, ) for i in range(len(crosslink_committee)): attestation.aggregation_bits[i] = True diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py index 8ae45788f..b3952b7ea 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py @@ -1,4 +1,9 @@ -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases +from eth2spec.test.context import ( + spec_state_test, + expect_assertion_error, + always_bls, never_bls, + with_all_phases, with_phases, +) from eth2spec.test.helpers.attestations import ( get_valid_attestation, sign_aggregate_attestation, @@ -6,7 +11,6 @@ from eth2spec.test.helpers.attestations import ( ) from eth2spec.test.helpers.state import ( next_epoch, - next_slot, ) from 
eth2spec.test.helpers.block import apply_empty_block from eth2spec.utils.ssz.ssz_typing import Bitlist @@ -67,54 +71,6 @@ def test_success_previous_epoch(spec, state): yield from run_attestation_processing(spec, state, attestation) -@with_all_phases -@spec_state_test -def test_success_since_max_epochs_per_crosslink(spec, state): - # Do not run mainnet (64 epochs), that would mean the equivalent of ~7 hours chain simulation. - if spec.MAX_EPOCHS_PER_CROSSLINK > 4: - return - for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2): - next_epoch(spec, state) - apply_empty_block(spec, state) - - attestation = get_valid_attestation(spec, state, signed=True) - data = attestation.data - # test logic sanity check: make sure the attestation only includes MAX_EPOCHS_PER_CROSSLINK epochs - assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK - - for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(spec, state) - apply_empty_block(spec, state) - - yield from run_attestation_processing(spec, state, attestation) - - -@with_all_phases -@spec_state_test -def test_wrong_end_epoch_with_max_epochs_per_crosslink(spec, state): - # Do not run mainnet (64 epochs), that would mean the equivalent of ~7 hours chain simulation. 
- if spec.MAX_EPOCHS_PER_CROSSLINK > 4: - return - for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2): - next_epoch(spec, state) - apply_empty_block(spec, state) - - attestation = get_valid_attestation(spec, state) - data = attestation.data - # test logic sanity check: make sure the attestation only includes MAX_EPOCHS_PER_CROSSLINK epochs - assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK - # Now change it to be different - data.crosslink.end_epoch += 1 - - sign_attestation(spec, state, attestation) - - for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(spec, state) - apply_empty_block(spec, state) - - yield from run_attestation_processing(spec, state, attestation, False) - - @with_all_phases @spec_state_test @always_bls @@ -168,27 +124,13 @@ def test_old_source_epoch(spec, state): @with_all_phases @spec_state_test -def test_wrong_shard(spec, state): - attestation = get_valid_attestation(spec, state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.crosslink.shard += 1 - - sign_attestation(spec, state, attestation) - - yield from run_attestation_processing(spec, state, attestation, False) - - -@with_all_phases -@spec_state_test -def test_invalid_shard(spec, state): +@never_bls +def test_invalid_index(spec, state): attestation = get_valid_attestation(spec, state) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY # off by one (with respect to valid range) on purpose - attestation.data.crosslink.shard = spec.SHARD_COUNT - - sign_attestation(spec, state, attestation) + attestation.data.index = spec.COMMITTEES_PER_SLOT yield from run_attestation_processing(spec, state, attestation, False) @@ -290,73 +232,6 @@ def test_bad_source_root(spec, state): yield from run_attestation_processing(spec, state, attestation, False) -@with_phases(['phase0']) -@spec_state_test -def test_non_zero_crosslink_data_root(spec, state): - attestation = get_valid_attestation(spec, state) - state.slot += 
spec.MIN_ATTESTATION_INCLUSION_DELAY - - attestation.data.crosslink.data_root = b'\x42' * 32 - - sign_attestation(spec, state, attestation) - - yield from run_attestation_processing(spec, state, attestation, False) - - -@with_all_phases -@spec_state_test -def test_bad_parent_crosslink(spec, state): - state.slot = spec.SLOTS_PER_EPOCH - 1 - next_epoch(spec, state) - apply_empty_block(spec, state) - - attestation = get_valid_attestation(spec, state, signed=False) - for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(spec, state) - apply_empty_block(spec, state) - - attestation.data.crosslink.parent_root = b'\x27' * 32 - sign_attestation(spec, state, attestation) - - yield from run_attestation_processing(spec, state, attestation, False) - - -@with_all_phases -@spec_state_test -def test_bad_crosslink_start_epoch(spec, state): - state.slot = spec.SLOTS_PER_EPOCH - 1 - next_epoch(spec, state) - apply_empty_block(spec, state) - - attestation = get_valid_attestation(spec, state, signed=False) - for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(spec, state) - apply_empty_block(spec, state) - - attestation.data.crosslink.start_epoch += 1 - sign_attestation(spec, state, attestation) - - yield from run_attestation_processing(spec, state, attestation, False) - - -@with_all_phases -@spec_state_test -def test_bad_crosslink_end_epoch(spec, state): - state.slot = spec.SLOTS_PER_EPOCH - 1 - next_epoch(spec, state) - apply_empty_block(spec, state) - - attestation = get_valid_attestation(spec, state, signed=False) - for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(spec, state) - apply_empty_block(spec, state) - - attestation.data.crosslink.end_epoch += 1 - sign_attestation(spec, state, attestation) - - yield from run_attestation_processing(spec, state, attestation, False) - - @with_all_phases @spec_state_test def test_inconsistent_bits(spec, state): diff --git 
a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py deleted file mode 100644 index 41d784c50..000000000 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py +++ /dev/null @@ -1,132 +0,0 @@ -from copy import deepcopy - -from eth2spec.test.context import spec_state_test, with_all_phases -from eth2spec.test.helpers.state import ( - next_epoch, - next_slot -) -from eth2spec.test.helpers.block import apply_empty_block -from eth2spec.test.helpers.attestations import ( - add_attestation_to_state, - fill_aggregate_attestation, - get_valid_attestation, - sign_attestation, -) -from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with - - -def run_process_crosslinks(spec, state): - yield from run_epoch_processing_with(spec, state, 'process_crosslinks') - - -@with_all_phases -@spec_state_test -def test_no_attestations(spec, state): - yield from run_process_crosslinks(spec, state) - - for shard in range(spec.SHARD_COUNT): - assert state.previous_crosslinks[shard] == state.current_crosslinks[shard] - - -@with_all_phases -@spec_state_test -def test_single_crosslink_update_from_current_epoch(spec, state): - next_epoch(spec, state) - - attestation = get_valid_attestation(spec, state, signed=True) - - fill_aggregate_attestation(spec, state, attestation) - add_attestation_to_state(spec, state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) - - assert len(state.current_epoch_attestations) == 1 - - shard = attestation.data.crosslink.shard - pre_crosslink = deepcopy(state.current_crosslinks[shard]) - - yield from run_process_crosslinks(spec, state) - - assert state.previous_crosslinks[shard] != state.current_crosslinks[shard] - assert pre_crosslink != state.current_crosslinks[shard] - - -@with_all_phases -@spec_state_test -def 
test_single_crosslink_update_from_previous_epoch(spec, state): - next_epoch(spec, state) - - attestation = get_valid_attestation(spec, state, signed=True) - - fill_aggregate_attestation(spec, state, attestation) - add_attestation_to_state(spec, state, attestation, state.slot + spec.SLOTS_PER_EPOCH) - - assert len(state.previous_epoch_attestations) == 1 - - shard = attestation.data.crosslink.shard - pre_crosslink = deepcopy(state.current_crosslinks[shard]) - - crosslink_deltas = spec.get_crosslink_deltas(state) - - yield from run_process_crosslinks(spec, state) - - assert state.previous_crosslinks[shard] != state.current_crosslinks[shard] - assert pre_crosslink != state.current_crosslinks[shard] - - # ensure rewarded - for index in spec.get_crosslink_committee( - state, - attestation.data.target.epoch, - attestation.data.crosslink.shard): - assert crosslink_deltas[0][index] > 0 - assert crosslink_deltas[1][index] == 0 - - -@with_all_phases -@spec_state_test -def test_double_late_crosslink(spec, state): - if spec.get_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT: - print("warning: ignoring test, test-assumptions are incompatible with configuration") - return - - next_epoch(spec, state) - state.slot += 4 - - attestation_1 = get_valid_attestation(spec, state, signed=True) - fill_aggregate_attestation(spec, state, attestation_1) - - # add attestation_1 to next epoch - next_epoch(spec, state) - add_attestation_to_state(spec, state, attestation_1, state.slot + 1) - - for _ in range(spec.SLOTS_PER_EPOCH): - attestation_2 = get_valid_attestation(spec, state) - if attestation_2.data.crosslink.shard == attestation_1.data.crosslink.shard: - sign_attestation(spec, state, attestation_2) - break - next_slot(spec, state) - apply_empty_block(spec, state) - - fill_aggregate_attestation(spec, state, attestation_2) - - # add attestation_2 in the next epoch after attestation_1 has - # already updated the relevant crosslink - next_epoch(spec, state) - 
add_attestation_to_state(spec, state, attestation_2, state.slot + 1) - - assert len(state.previous_epoch_attestations) == 1 - assert len(state.current_epoch_attestations) == 0 - - crosslink_deltas = spec.get_crosslink_deltas(state) - - yield from run_process_crosslinks(spec, state) - - shard = attestation_2.data.crosslink.shard - - # ensure that the current crosslinks were not updated by the second attestation - assert state.previous_crosslinks[shard] == state.current_crosslinks[shard] - # ensure no reward, only penalties for the failed crosslink - for index in spec.get_crosslink_committee( - state, - attestation_2.data.target.epoch, - attestation_2.data.crosslink.shard): - assert crosslink_deltas[0][index] == 0 - assert crosslink_deltas[1][index] > 0 From d98cabf7e7c2a1e79a39ba50dd798a29f61236ec Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 12 Oct 2019 13:06:52 +0900 Subject: [PATCH 196/250] fix get crosslink committee and finalitytests --- configs/minimal.yaml | 4 ++-- specs/core/0_beacon-chain.md | 11 ++++++----- specs/validator/0_beacon-chain-validator.md | 2 +- .../pyspec/eth2spec/test/helpers/attestations.py | 4 ++-- test_libs/pyspec/eth2spec/test/helpers/custody.py | 2 +- test_libs/pyspec/eth2spec/test/helpers/state.py | 10 ++++++---- .../test_process_justification_and_finalization.py | 2 +- 7 files changed, 19 insertions(+), 16 deletions(-) diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 4c32eae4d..dfed19426 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -4,8 +4,8 @@ # Misc # --------------------------------------------------------------- -# [customized] Just 8 shards for testing purposes -SHARD_COUNT: 8 +# [customized] Just 2 committees for slot for testing purposes +COMMITTEES_PER_SLOT: 2 # [customized] unsecure, but fast TARGET_COMMITTEE_SIZE: 4 # 2**12 (= 4,096) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b57aa7ff4..a371df628 100644 --- a/specs/core/0_beacon-chain.md +++ 
b/specs/core/0_beacon-chain.md @@ -880,15 +880,16 @@ def get_committee_count(state: BeaconState, epoch: Epoch) -> uint64: #### `get_crosslink_committee` ```python -def get_crosslink_committee(state: BeaconState, epoch: Epoch, index: uint64) -> Sequence[ValidatorIndex]: +def get_crosslink_committee(state: BeaconState, slot: Slot, index: uint64) -> Sequence[ValidatorIndex]: """ Return the crosslink committee at ``epoch`` for ``index``. """ + epoch = compute_epoch_of_slot(slot) return compute_committee( indices=get_active_validator_indices(state, epoch), seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), - index=index, - count=get_committee_count(state, epoch), + index=(slot % SLOTS_PER_EPOCH) * COMMITTEES_PER_SLOT + index, + count=COMMITTEES_PER_SLOT * SLOTS_PER_EPOCH, ) ``` @@ -966,7 +967,7 @@ def get_attesting_indices(state: BeaconState, """ Return the set of attesting indices corresponding to ``data`` and ``bits``. """ - committee = get_crosslink_committee(state, data.target.epoch, data.index) + committee = get_crosslink_committee(state, data.slot, data.index) return set(index for i, index in enumerate(committee) if bits[i]) ``` @@ -1506,7 +1507,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH - committee = get_crosslink_committee(state, data.target.epoch, data.index) + committee = get_crosslink_committee(state, data.slot, data.index) assert len(attestation.aggregation_bits) == len(attestation.custody_bits) == len(committee) pending_attestation = PendingAttestation( diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index d43200463..1efdadb35 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -150,7 +150,7 @@ def get_committee_assignment(state: BeaconState, start_slot = compute_start_slot_of_epoch(epoch) for slot in 
range(start_slot, start_slot + SLOTS_PER_EPOCH): for index in range(COMMITTEES_PER_SLOT): - committee = get_crosslink_committee(state, epoch, index) + committee = get_crosslink_committee(state, Slot(slot), index) if validator_index in committee: return committee, index, Slot(slot) return None diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index 00c25b340..afd51b0bd 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -48,7 +48,7 @@ def get_valid_attestation(spec, state, slot=None, index=None, signed=False): crosslink_committee = spec.get_crosslink_committee( state, - attestation_data.target.epoch, + attestation_data.slot, attestation_data.index, ) @@ -117,7 +117,7 @@ def get_attestation_signature(spec, state, attestation_data, privkey, custody_bi def fill_aggregate_attestation(spec, state, attestation): crosslink_committee = spec.get_crosslink_committee( state, - attestation.data.target.epoch, + attestation.data.slot, attestation.data.index, ) for i in range(len(crosslink_committee)): diff --git a/test_libs/pyspec/eth2spec/test/helpers/custody.py b/test_libs/pyspec/eth2spec/test/helpers/custody.py index 4b7c8c97b..98659ee8e 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/custody.py +++ b/test_libs/pyspec/eth2spec/test/helpers/custody.py @@ -82,7 +82,7 @@ def bitlist_from_int(max_len, num_bits, n): def get_valid_bit_challenge(spec, state, attestation, invalid_custody_bit=False): crosslink_committee = spec.get_crosslink_committee( state, - attestation.data.target.epoch, + attestation.data.slot, attestation.data.crosslink.shard, ) responder_index = crosslink_committee[0] diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py index e88fc6ade..5a2f72ff3 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/state.py +++ 
b/test_libs/pyspec/eth2spec/test/helpers/state.py @@ -53,13 +53,15 @@ def next_epoch_with_attestations(spec, if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 if slot_to_attest >= spec.compute_start_slot_of_epoch(spec.get_current_epoch(post_state)): - cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest) - block.body.attestations.append(cur_attestation) + for index in range(spec.COMMITTEES_PER_SLOT): + cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index) + block.body.attestations.append(cur_attestation) if fill_prev_epoch: slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 - prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest) - block.body.attestations.append(prev_attestation) + for index in range(spec.COMMITTEES_PER_SLOT): + prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index) + block.body.attestations.append(prev_attestation) state_transition_and_sign_block(spec, post_state, block) blocks.append(block) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py index 7dcdb42a4..6d1762ecb 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py @@ -41,7 +41,7 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support if remaining_balance < 0: return - committee = spec.get_crosslink_committee(state, spec.compute_epoch_of_slot(slot), shard) + committee = spec.get_crosslink_committee(state, slot, shard) # Create a bitfield filled with the given count per attestation, # exactly on the right-most part 
of the committee field. From 667bf67d7178b5b7f30ebd1b4146df8cf2b09390 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 12 Oct 2019 13:24:17 +0900 Subject: [PATCH 197/250] temporarily disable phase 1 tests --- test_libs/pyspec/eth2spec/test/context.py | 6 ++++-- .../epoch_processing/run_epoch_process_base.py | 1 - ...test_process_justification_and_finalization.py | 15 ++++----------- 3 files changed, 8 insertions(+), 14 deletions(-) diff --git a/test_libs/pyspec/eth2spec/test/context.py b/test_libs/pyspec/eth2spec/test/context.py index 80edaba9b..3bb8f1153 100644 --- a/test_libs/pyspec/eth2spec/test/context.py +++ b/test_libs/pyspec/eth2spec/test/context.py @@ -1,5 +1,5 @@ from eth2spec.phase0 import spec as spec_phase0 -from eth2spec.phase1 import spec as spec_phase1 +# from eth2spec.phase1 import spec as spec_phase1 from eth2spec.utils import bls from .helpers.genesis import create_genesis_state @@ -137,7 +137,9 @@ def with_phases(phases): if 'phase0' in run_phases: ret = run_with_spec_version(spec_phase0, *args, **kw) if 'phase1' in run_phases: - ret = run_with_spec_version(spec_phase1, *args, **kw) + # temporarily disable phase 1 tests + return + # ret = run_with_spec_version(spec_phase1, *args, **kw) return ret return wrapper return decorator diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/run_epoch_process_base.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/run_epoch_process_base.py index 5b2a2ece4..af4587a2a 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/run_epoch_process_base.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/run_epoch_process_base.py @@ -1,7 +1,6 @@ process_calls = [ 'process_justification_and_finalization', - 'process_crosslinks', 'process_rewards_and_penalties', 'process_registry_updates', 'process_reveal_deadlines', diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py 
b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py index 6d1762ecb..4e0085076 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py @@ -8,14 +8,6 @@ def run_process_just_and_fin(spec, state): yield from run_epoch_processing_with(spec, state, 'process_justification_and_finalization') -def get_shards_for_slot(spec, state, slot): - epoch = spec.compute_epoch_of_slot(slot) - epoch_start_shard = spec.get_start_shard(state, epoch) - committees_per_slot = spec.get_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH - shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT - return [shard + i for i in range(committees_per_slot)] - - def add_mock_attestations(spec, state, epoch, source, target, sufficient_support=False): # we must be at the end of the epoch assert (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0 @@ -35,13 +27,13 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support start_slot = spec.compute_start_slot_of_epoch(epoch) for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH): - for shard in get_shards_for_slot(spec, state, slot): + for index in range(spec.COMMITTEES_PER_SLOT): # Check if we already have had sufficient balance. (and undone if we don't want it). # If so, do not create more attestations. (we do not have empty pending attestations normally anyway) if remaining_balance < 0: return - committee = spec.get_crosslink_committee(state, slot, shard) + committee = spec.get_crosslink_committee(state, slot, index) # Create a bitfield filled with the given count per attestation, # exactly on the right-most part of the committee field. 
@@ -60,10 +52,11 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support attestations.append(spec.PendingAttestation( aggregation_bits=aggregation_bits, data=spec.AttestationData( + slot=slot, beacon_block_root=b'\xff' * 32, # irrelevant to testing source=source, target=target, - crosslink=spec.Crosslink(shard=shard) + index=index, ), inclusion_delay=1, )) From b3a0a03f85c20a66df6984b64fbef3a17f2a9c55 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 12 Oct 2019 13:32:46 +0900 Subject: [PATCH 198/250] remove some legacy code --- specs/core/0_beacon-chain.md | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a371df628..4bd839b88 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -311,7 +311,7 @@ class AttestationData(Container): # FFG vote source: Checkpoint target: Checkpoint - # Index -- Maybe remove + # Committee Index index: uint64 ``` @@ -866,14 +866,6 @@ def get_committee_count(state: BeaconState, epoch: Epoch) -> uint64: """ Return the number of committees at ``epoch``. 
""" - # Consider not hard coding but just return committees per slot for now - """ - committees_per_slot = max(1, min( - SHARD_COUNT // SLOTS_PER_EPOCH, - len(get_active_validator_indices(state, epoch)) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, - )) - return committees_per_slot * SLOTS_PER_EPOCH - """ return COMMITTEES_PER_SLOT ``` From 5ccac7c206237e0fe89b00e6984e0eec5346f41a Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 12 Oct 2019 13:36:05 +0900 Subject: [PATCH 199/250] remove get_committee_count --- specs/core/0_beacon-chain.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4bd839b88..efad5fff2 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -81,7 +81,6 @@ - [`get_active_validator_indices`](#get_active_validator_indices) - [`get_validator_churn_limit`](#get_validator_churn_limit) - [`get_seed`](#get_seed) - - [`get_committee_count`](#get_committee_count) - [`get_crosslink_committee`](#get_crosslink_committee) - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`get_total_balance`](#get_total_balance) @@ -859,16 +858,6 @@ def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Hash: return hash(domain_type + int_to_bytes(epoch, length=8) + mix) ``` -#### `get_committee_count` - -```python -def get_committee_count(state: BeaconState, epoch: Epoch) -> uint64: - """ - Return the number of committees at ``epoch``. 
- """ - return COMMITTEES_PER_SLOT -``` - #### `get_crosslink_committee` ```python From 1fbf7f8e0da8f93f5b3e486dc69292cad56b1ba5 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 12 Oct 2019 13:54:51 +0900 Subject: [PATCH 200/250] fix config files --- configs/mainnet.yaml | 6 ++---- configs/minimal.yaml | 2 -- specs/core/0_beacon-chain.md | 1 - 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index c11f1e54c..68c5052c2 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -5,8 +5,8 @@ # Misc # --------------------------------------------------------------- -# 2**10 (= 1,024) -SHARD_COUNT: 1024 +# 2**5 (= 32) +COMMITTEES_PER_SLOT: 32 # 2**7 (= 128) TARGET_COMMITTEE_SIZE: 128 # 2**12 (= 4,096) @@ -69,8 +69,6 @@ SLOTS_PER_HISTORICAL_ROOT: 8192 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**11 (= 2,048) epochs 9 days PERSISTENT_COMMITTEE_PERIOD: 2048 -# 2**6 (= 64) epochs ~7 hours -MAX_EPOCHS_PER_CROSSLINK: 64 # 2**2 (= 4) epochs 25.6 minutes MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 # 2**14 (= 16,384) epochs ~73 days diff --git a/configs/minimal.yaml b/configs/minimal.yaml index dfed19426..8f7beebc7 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -68,8 +68,6 @@ SLOTS_PER_HISTORICAL_ROOT: 64 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**11 (= 2,048) epochs PERSISTENT_COMMITTEE_PERIOD: 2048 -# [customized] fast catchup crosslinks -MAX_EPOCHS_PER_CROSSLINK: 4 # 2**2 (= 4) epochs MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 # [customized] 2**12 (= 4,096) epochs diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index efad5fff2..2ceae488e 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -210,7 +210,6 @@ The following values are (non-configurable) constants used throughout the specif | `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours | | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | 
`PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | -| `MAX_EPOCHS_PER_CROSSLINK` | `2**6` (= 64) | epochs | ~7 hours | | `MIN_EPOCHS_TO_INACTIVITY_PENALTY` | `2**2` (= 4) | epochs | 25.6 minutes | ### State list lengths From c392db3b9b61065cc1c8af5d65f1230bc5862da5 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 12 Oct 2019 14:44:53 +0900 Subject: [PATCH 201/250] fix max epochs var --- configs/mainnet.yaml | 2 ++ configs/minimal.yaml | 2 ++ specs/core/1_beacon-chain-misc.md | 1 + specs/core/1_custody-game.md | 1 + 4 files changed, 6 insertions(+) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 68c5052c2..a45e965d2 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -69,6 +69,8 @@ SLOTS_PER_HISTORICAL_ROOT: 8192 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**11 (= 2,048) epochs 9 days PERSISTENT_COMMITTEE_PERIOD: 2048 +# 2**6 (= 64) epochs ~7 hours +MAX_EPOCHS_PER_CROSSLINK: 64 # 2**2 (= 4) epochs 25.6 minutes MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 # 2**14 (= 16,384) epochs ~73 days diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 8f7beebc7..dfed19426 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -68,6 +68,8 @@ SLOTS_PER_HISTORICAL_ROOT: 64 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**11 (= 2,048) epochs PERSISTENT_COMMITTEE_PERIOD: 2048 +# [customized] fast catchup crosslinks +MAX_EPOCHS_PER_CROSSLINK: 4 # 2**2 (= 4) epochs MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 # [customized] 2**12 (= 4,096) epochs diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 6dd6e19c3..9f7a4d2d7 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -31,6 +31,7 @@ | Name | Value | Unit | Duration | - | - | - | - | +| `MAX_EPOCHS_PER_CROSSLINK` | `2**6` (= 64) | epochs | ~7 hours | | `MAX_SHARD_RECEIPT_PROOFS` | `2**0` (= 1) | - | - | | `PERIOD_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months | | `MINOR_REWARD_QUOTIENT` | `2**8` 
(=256) | - | - | diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 158d575e2..2fc4904ce 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -81,6 +81,7 @@ This document details the beacon chain additions and changes in Phase 1 of Ether | - | - | | `BLS12_381_Q` | `4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787` | | `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) | +| `MAX_EPOCHS_PER_CROSSLINK` | `2**6` (= 64) | epochs | ~7 hours | ### Custody game parameters From d8431f847648041dd3efd4d55abc92ce2a872f44 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 13 Oct 2019 13:53:43 +0900 Subject: [PATCH 202/250] add start index back in --- configs/mainnet.yaml | 2 +- configs/minimal.yaml | 2 +- specs/core/0_beacon-chain.md | 72 +++++++++++++++++-- specs/validator/0_beacon-chain-validator.md | 4 +- .../eth2spec/test/helpers/attestations.py | 5 +- .../pyspec/eth2spec/test/helpers/state.py | 10 ++- .../test_process_attestation.py | 8 ++- ..._process_justification_and_finalization.py | 5 +- 8 files changed, 96 insertions(+), 12 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index a45e965d2..1210d8076 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -6,7 +6,7 @@ # Misc # --------------------------------------------------------------- # 2**5 (= 32) -COMMITTEES_PER_SLOT: 32 +MAX_COMMITTEES_PER_SLOT: 32 # 2**7 (= 128) TARGET_COMMITTEE_SIZE: 128 # 2**12 (= 4,096) diff --git a/configs/minimal.yaml b/configs/minimal.yaml index dfed19426..a6040b3a3 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -5,7 +5,7 @@ # --------------------------------------------------------------- # [customized] Just 2 committees for slot for testing purposes -COMMITTEES_PER_SLOT: 2 +MAX_COMMITTEES_PER_SLOT: 2 # [customized] unsecure, but fast TARGET_COMMITTEE_SIZE: 4 # 2**12 (= 4,096) diff --git a/specs/core/0_beacon-chain.md 
b/specs/core/0_beacon-chain.md index 2ceae488e..7b6b876de 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -168,8 +168,8 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `COMMITTEES_PER_SLOT` | `2**5` (= 32) | | `SHARD_COUNT` | `2**10` (= 1,024) | +| `MAX_COMMITTEES_PER_SLOT` | `2**5` (= 32) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | | `MAX_VALIDATORS_PER_COMMITTEE` | `2**12` (= 4,096) | | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | @@ -490,6 +490,7 @@ class BeaconState(Container): validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] # Shuffling + start_index: uint64 randao_mixes: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR] # Slashings slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances @@ -857,6 +858,21 @@ def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Hash: return hash(domain_type + int_to_bytes(epoch, length=8) + mix) ``` + +#### `get_committee_count` + +```python +def get_committee_count(state: BeaconState, epoch: Epoch) -> uint64: + """ + Return the number of committees at ``epoch``. + """ + committees_per_slot = max(1, min( + MAX_COMMITTEES_PER_SLOT, + len(get_active_validator_indices(state, epoch)) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, + )) + return committees_per_slot * SLOTS_PER_EPOCH +``` + #### `get_crosslink_committee` ```python @@ -865,14 +881,60 @@ def get_crosslink_committee(state: BeaconState, slot: Slot, index: uint64) -> Se Return the crosslink committee at ``epoch`` for ``index``. 
""" epoch = compute_epoch_of_slot(slot) + committees_per_slot = get_committee_count(state, epoch) // SLOTS_PER_EPOCH + slot_start_index = get_slot_start_index(state, slot) + slot_offset = (index + MAX_COMMITTEES_PER_SLOT - slot_start_index) % MAX_COMMITTEES_PER_SLOT + epoch_offset = slot_offset + (slot % SLOTS_PER_EPOCH) * committees_per_slot + print(epoch_offset) + return compute_committee( indices=get_active_validator_indices(state, epoch), seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), - index=(slot % SLOTS_PER_EPOCH) * COMMITTEES_PER_SLOT + index, - count=COMMITTEES_PER_SLOT * SLOTS_PER_EPOCH, + index=epoch_offset, + count=get_committee_count(state, epoch), ) ``` +#### `get_slot_start_index` + +```python +def get_slot_start_index(state: BeaconState, slot: Slot) -> uint64: + """ + Return the start index of the 0th committee at ``slot``. + """ + epoch = compute_epoch_of_slot(slot) + committees_per_slot = get_committee_count(state, epoch) // SLOTS_PER_EPOCH + slot_start_index = ((slot % SLOTS_PER_EPOCH) * committees_per_slot + get_start_index(state, epoch)) % MAX_COMMITTEES_PER_SLOT + return slot_start_index +``` + +#### `get_start_index` + +```python +def get_start_index(state: BeaconState, epoch: Epoch) -> uint64: + """ + Return the start index of the 0th committee at ``epoch``. + """ + assert epoch <= get_current_epoch(state) + 1 + check_epoch = Epoch(get_current_epoch(state) + 1) + index = (state.start_index + get_index_delta(state, get_current_epoch(state))) % MAX_COMMITTEES_PER_SLOT + MAX_COMMITTEES_PER_EPOCH = MAX_COMMITTEES_PER_SLOT * SLOTS_PER_EPOCH + while check_epoch > epoch: + check_epoch -= Epoch(1) + index = (index + MAX_COMMITTEES_PER_EPOCH - get_index_delta(state, check_epoch)) % MAX_COMMITTEES_PER_SLOT + return index +``` + +#### `get_index_delta` + +```python +def get_index_delta(state: BeaconState, epoch: Epoch) -> uint64: + """ + Return the amount to increase ``state.start_index`` at ``epoch``. 
+ """ + return get_committee_count(state, epoch) +``` + #### `get_beacon_proposer_index` ```python @@ -1356,6 +1418,8 @@ def process_final_updates(state: BeaconState) -> None: if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots) state.historical_roots.append(hash_tree_root(historical_batch)) + # Update start shard + state.start_index = (state.start_index + get_index_delta(state, current_epoch)) % MAX_COMMITTEES_PER_SLOT # Rotate current/previous epoch attestations state.previous_epoch_attestations = state.current_epoch_attestations state.current_epoch_attestations = [] @@ -1482,7 +1546,7 @@ def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSla ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data - assert data.index < COMMITTEES_PER_SLOT + assert data.index < MAX_COMMITTEES_PER_SLOT assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 1efdadb35..774837ced 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -149,7 +149,9 @@ def get_committee_assignment(state: BeaconState, start_slot = compute_start_slot_of_epoch(epoch) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): - for index in range(COMMITTEES_PER_SLOT): + slot_start_index = get_slot_start_index(state, Slot(slot)) + for i in range(get_committee_count(state, epoch) // SLOTS_PER_EPOCH): + index = (slot_start_index + i) % MAX_COMMITTEES_PER_SLOT committee = get_crosslink_committee(state, Slot(slot), index) if validator_index in committee: return committee, index, Slot(slot) diff --git 
a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index afd51b0bd..49d6b6ad7 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -42,7 +42,10 @@ def get_valid_attestation(spec, state, slot=None, index=None, signed=False): if slot is None: slot = state.slot if index is None: - index = 0 + index = spec.get_slot_start_index(state, slot) + print(slot) + print(index) + print(spec.get_committee_count(state, spec.compute_epoch_of_slot(slot))) attestation_data = build_attestation_data(spec, state, slot, index) diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py index 5a2f72ff3..6ae845f69 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/state.py +++ b/test_libs/pyspec/eth2spec/test/helpers/state.py @@ -51,15 +51,21 @@ def next_epoch_with_attestations(spec, for _ in range(spec.SLOTS_PER_EPOCH): block = build_empty_block_for_next_slot(spec, post_state) if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: + committees_per_slot = spec.get_committee_count(state, spec.get_current_epoch(state)) // spec.SLOTS_PER_EPOCH slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 if slot_to_attest >= spec.compute_start_slot_of_epoch(spec.get_current_epoch(post_state)): - for index in range(spec.COMMITTEES_PER_SLOT): + slot_start_index = spec.get_slot_start_index(state, slot_to_attest) + for i in range(committees_per_slot): + index = (slot_start_index + i) % spec.MAX_COMMITTEES_PER_SLOT cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index) block.body.attestations.append(cur_attestation) if fill_prev_epoch: + committees_per_slot = spec.get_committee_count(state, spec.get_previous_epoch(state)) // spec.SLOTS_PER_EPOCH slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 - for index in 
range(spec.COMMITTEES_PER_SLOT): + slot_start_index = spec.get_slot_start_index(state, slot_to_attest) + for i in range(committees_per_slot): + index = (slot_start_index + i) % spec.MAX_COMMITTEES_PER_SLOT prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index) block.body.attestations.append(prev_attestation) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py index b3952b7ea..87024a21d 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py @@ -122,6 +122,12 @@ def test_old_source_epoch(spec, state): yield from run_attestation_processing(spec, state, attestation, False) +@with_all_phases +@spec_state_test +def test_wrong_index(spec, state): + pass + + @with_all_phases @spec_state_test @never_bls @@ -130,7 +136,7 @@ def test_invalid_index(spec, state): state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY # off by one (with respect to valid range) on purpose - attestation.data.index = spec.COMMITTEES_PER_SLOT + attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT yield from run_attestation_processing(spec, state, attestation, False) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py index 4e0085076..abd8f2c17 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py @@ -26,8 +26,11 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support remaining_balance = total_balance * 2 // 3 start_slot = 
spec.compute_start_slot_of_epoch(epoch) + committees_per_slot = spec.get_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH): - for index in range(spec.COMMITTEES_PER_SLOT): + slot_start_index = spec.get_slot_start_index(state, slot) + for i in range(committees_per_slot): + index = (slot_start_index + i) % spec.MAX_COMMITTEES_PER_SLOT # Check if we already have had sufficient balance. (and undone if we don't want it). # If so, do not create more attestations. (we do not have empty pending attestations normally anyway) if remaining_balance < 0: From 6208e74d3f9ff9b975fd625aaf89c31ea88d0838 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 13 Oct 2019 21:52:58 +0900 Subject: [PATCH 203/250] get crosslink committees by slot --- configs/minimal.yaml | 2 +- specs/core/0_beacon-chain.md | 31 +++++++++++-------- specs/validator/0_beacon-chain-validator.md | 2 +- .../eth2spec/test/helpers/attestations.py | 3 -- .../pyspec/eth2spec/test/helpers/state.py | 4 +-- .../test_process_attestation.py | 27 ++++++++++++++-- ..._process_justification_and_finalization.py | 2 +- 7 files changed, 48 insertions(+), 23 deletions(-) diff --git a/configs/minimal.yaml b/configs/minimal.yaml index a6040b3a3..1cf35ca90 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -5,7 +5,7 @@ # --------------------------------------------------------------- # [customized] Just 2 committees for slot for testing purposes -MAX_COMMITTEES_PER_SLOT: 2 +MAX_COMMITTEES_PER_SLOT: 4 # [customized] unsecure, but fast TARGET_COMMITTEE_SIZE: 4 # 2**12 (= 4,096) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 7b6b876de..72cc4edb8 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -858,19 +858,18 @@ def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Hash: return hash(domain_type + int_to_bytes(epoch, length=8) + mix) ``` - -#### `get_committee_count` 
+#### `get_committees_per_slot` ```python -def get_committee_count(state: BeaconState, epoch: Epoch) -> uint64: +def get_committees_per_slot(state: BeaconState, slot: Slot) -> uint64: """ - Return the number of committees at ``epoch``. + Return the number of committees at ``slot``. """ - committees_per_slot = max(1, min( + epoch = compute_epoch_of_slot(slot) + return max(1, min( MAX_COMMITTEES_PER_SLOT, len(get_active_validator_indices(state, epoch)) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, )) - return committees_per_slot * SLOTS_PER_EPOCH ``` #### `get_crosslink_committee` @@ -878,20 +877,19 @@ def get_committee_count(state: BeaconState, epoch: Epoch) -> uint64: ```python def get_crosslink_committee(state: BeaconState, slot: Slot, index: uint64) -> Sequence[ValidatorIndex]: """ - Return the crosslink committee at ``epoch`` for ``index``. + Return the crosslink committee at ``slot`` for ``index``. """ epoch = compute_epoch_of_slot(slot) - committees_per_slot = get_committee_count(state, epoch) // SLOTS_PER_EPOCH + committees_per_slot = get_committees_per_slot(state, slot) slot_start_index = get_slot_start_index(state, slot) slot_offset = (index + MAX_COMMITTEES_PER_SLOT - slot_start_index) % MAX_COMMITTEES_PER_SLOT epoch_offset = slot_offset + (slot % SLOTS_PER_EPOCH) * committees_per_slot - print(epoch_offset) return compute_committee( indices=get_active_validator_indices(state, epoch), seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), index=epoch_offset, - count=get_committee_count(state, epoch), + count=committees_per_slot * SLOTS_PER_EPOCH, ) ``` @@ -903,8 +901,9 @@ def get_slot_start_index(state: BeaconState, slot: Slot) -> uint64: Return the start index of the 0th committee at ``slot``. 
""" epoch = compute_epoch_of_slot(slot) - committees_per_slot = get_committee_count(state, epoch) // SLOTS_PER_EPOCH - slot_start_index = ((slot % SLOTS_PER_EPOCH) * committees_per_slot + get_start_index(state, epoch)) % MAX_COMMITTEES_PER_SLOT + committees_per_slot = get_committees_per_slot(state, slot) + start_index = get_start_index(state, epoch) + slot_start_index = ((slot % SLOTS_PER_EPOCH) * committees_per_slot + start_index) % MAX_COMMITTEES_PER_SLOT return slot_start_index ``` @@ -932,7 +931,7 @@ def get_index_delta(state: BeaconState, epoch: Epoch) -> uint64: """ Return the amount to increase ``state.start_index`` at ``epoch``. """ - return get_committee_count(state, epoch) + return get_committees_per_slot(state, compute_start_slot_of_epoch(epoch)) * SLOTS_PER_EPOCH ``` #### `get_beacon_proposer_index` @@ -1547,6 +1546,12 @@ def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSla def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data assert data.index < MAX_COMMITTEES_PER_SLOT + slot_start_index = get_slot_start_index(state, data.slot) + if data.index < slot_start_index: + test_index = data.index + MAX_COMMITTEES_PER_SLOT + else: + test_index = data.index + assert slot_start_index <= test_index < slot_start_index + get_committees_per_slot(state, data.slot) assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 774837ced..b0e64d6c9 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -150,7 +150,7 @@ def get_committee_assignment(state: BeaconState, start_slot = compute_start_slot_of_epoch(epoch) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): slot_start_index = get_slot_start_index(state, 
Slot(slot)) - for i in range(get_committee_count(state, epoch) // SLOTS_PER_EPOCH): + for i in range(get_committees_per_slot(state, Slot(slot))): index = (slot_start_index + i) % MAX_COMMITTEES_PER_SLOT committee = get_crosslink_committee(state, Slot(slot), index) if validator_index in committee: diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index 49d6b6ad7..879d878ac 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -43,9 +43,6 @@ def get_valid_attestation(spec, state, slot=None, index=None, signed=False): slot = state.slot if index is None: index = spec.get_slot_start_index(state, slot) - print(slot) - print(index) - print(spec.get_committee_count(state, spec.compute_epoch_of_slot(slot))) attestation_data = build_attestation_data(spec, state, slot, index) diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py index 6ae845f69..ffa59fafd 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/state.py +++ b/test_libs/pyspec/eth2spec/test/helpers/state.py @@ -51,8 +51,8 @@ def next_epoch_with_attestations(spec, for _ in range(spec.SLOTS_PER_EPOCH): block = build_empty_block_for_next_slot(spec, post_state) if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: - committees_per_slot = spec.get_committee_count(state, spec.get_current_epoch(state)) // spec.SLOTS_PER_EPOCH slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 + committees_per_slot = spec.get_committees_per_slot(state, slot_to_attest) if slot_to_attest >= spec.compute_start_slot_of_epoch(spec.get_current_epoch(post_state)): slot_start_index = spec.get_slot_start_index(state, slot_to_attest) for i in range(committees_per_slot): @@ -61,8 +61,8 @@ def next_epoch_with_attestations(spec, block.body.attestations.append(cur_attestation) if fill_prev_epoch: - 
committees_per_slot = spec.get_committee_count(state, spec.get_previous_epoch(state)) // spec.SLOTS_PER_EPOCH slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 + committees_per_slot = spec.get_committees_per_slot(state, slot_to_attest) slot_start_index = spec.get_slot_start_index(state, slot_to_attest) for i in range(committees_per_slot): index = (slot_start_index + i) % spec.MAX_COMMITTEES_PER_SLOT diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py index 87024a21d..229eb85b3 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py @@ -124,8 +124,31 @@ def test_old_source_epoch(spec, state): @with_all_phases @spec_state_test -def test_wrong_index(spec, state): - pass +@always_bls +def test_wrong_index_for_committee_signature(spec, state): + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.index += 1 + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases +@spec_state_test +@never_bls +def test_wrong_index_for_slot(spec, state): + committees_per_slot = spec.get_committees_per_slot(state, state.slot) + assert committees_per_slot < spec.MAX_COMMITTEES_PER_SLOT + slot_start_index = spec.get_slot_start_index(state, state.slot) + index = slot_start_index + committees_per_slot + + attestation = get_valid_attestation(spec, state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.index = index + + yield from run_attestation_processing(spec, state, attestation, False) @with_all_phases diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py 
b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py index abd8f2c17..25d8c083f 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py @@ -26,8 +26,8 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support remaining_balance = total_balance * 2 // 3 start_slot = spec.compute_start_slot_of_epoch(epoch) - committees_per_slot = spec.get_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH): + committees_per_slot = spec.get_committees_per_slot(state, slot) slot_start_index = spec.get_slot_start_index(state, slot) for i in range(committees_per_slot): index = (slot_start_index + i) % spec.MAX_COMMITTEES_PER_SLOT From bd1c71b82ed905321781f1be8452535c60d56d7c Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 16 Oct 2019 18:47:19 +0900 Subject: [PATCH 204/250] simplify index --- specs/core/0_beacon-chain.md | 57 +------------------ specs/validator/0_beacon-chain-validator.md | 4 +- .../eth2spec/test/helpers/attestations.py | 2 +- .../pyspec/eth2spec/test/helpers/state.py | 8 +-- .../test_process_attestation.py | 3 +- ..._process_justification_and_finalization.py | 4 +- 6 files changed, 8 insertions(+), 70 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 72cc4edb8..e0d6c108b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -490,7 +490,6 @@ class BeaconState(Container): validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] # Shuffling - start_index: uint64 randao_mixes: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR] # Slashings slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances @@ -881,9 +880,7 @@ def 
get_crosslink_committee(state: BeaconState, slot: Slot, index: uint64) -> Se """ epoch = compute_epoch_of_slot(slot) committees_per_slot = get_committees_per_slot(state, slot) - slot_start_index = get_slot_start_index(state, slot) - slot_offset = (index + MAX_COMMITTEES_PER_SLOT - slot_start_index) % MAX_COMMITTEES_PER_SLOT - epoch_offset = slot_offset + (slot % SLOTS_PER_EPOCH) * committees_per_slot + epoch_offset = index + (slot % SLOTS_PER_EPOCH) * committees_per_slot return compute_committee( indices=get_active_validator_indices(state, epoch), @@ -893,47 +890,6 @@ def get_crosslink_committee(state: BeaconState, slot: Slot, index: uint64) -> Se ) ``` -#### `get_slot_start_index` - -```python -def get_slot_start_index(state: BeaconState, slot: Slot) -> uint64: - """ - Return the start index of the 0th committee at ``slot``. - """ - epoch = compute_epoch_of_slot(slot) - committees_per_slot = get_committees_per_slot(state, slot) - start_index = get_start_index(state, epoch) - slot_start_index = ((slot % SLOTS_PER_EPOCH) * committees_per_slot + start_index) % MAX_COMMITTEES_PER_SLOT - return slot_start_index -``` - -#### `get_start_index` - -```python -def get_start_index(state: BeaconState, epoch: Epoch) -> uint64: - """ - Return the start index of the 0th committee at ``epoch``. - """ - assert epoch <= get_current_epoch(state) + 1 - check_epoch = Epoch(get_current_epoch(state) + 1) - index = (state.start_index + get_index_delta(state, get_current_epoch(state))) % MAX_COMMITTEES_PER_SLOT - MAX_COMMITTEES_PER_EPOCH = MAX_COMMITTEES_PER_SLOT * SLOTS_PER_EPOCH - while check_epoch > epoch: - check_epoch -= Epoch(1) - index = (index + MAX_COMMITTEES_PER_EPOCH - get_index_delta(state, check_epoch)) % MAX_COMMITTEES_PER_SLOT - return index -``` - -#### `get_index_delta` - -```python -def get_index_delta(state: BeaconState, epoch: Epoch) -> uint64: - """ - Return the amount to increase ``state.start_index`` at ``epoch``. 
- """ - return get_committees_per_slot(state, compute_start_slot_of_epoch(epoch)) * SLOTS_PER_EPOCH -``` - #### `get_beacon_proposer_index` ```python @@ -1417,8 +1373,6 @@ def process_final_updates(state: BeaconState) -> None: if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots) state.historical_roots.append(hash_tree_root(historical_batch)) - # Update start shard - state.start_index = (state.start_index + get_index_delta(state, current_epoch)) % MAX_COMMITTEES_PER_SLOT # Rotate current/previous epoch attestations state.previous_epoch_attestations = state.current_epoch_attestations state.current_epoch_attestations = [] @@ -1545,15 +1499,8 @@ def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSla ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data - assert data.index < MAX_COMMITTEES_PER_SLOT - slot_start_index = get_slot_start_index(state, data.slot) - if data.index < slot_start_index: - test_index = data.index + MAX_COMMITTEES_PER_SLOT - else: - test_index = data.index - assert slot_start_index <= test_index < slot_start_index + get_committees_per_slot(state, data.slot) + assert data.index < get_committees_per_slot(state, data.slot) assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) - assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH committee = get_crosslink_committee(state, data.slot, data.index) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index b0e64d6c9..51f560243 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -149,9 +149,7 @@ def get_committee_assignment(state: BeaconState, start_slot = compute_start_slot_of_epoch(epoch) for slot in range(start_slot, start_slot + 
SLOTS_PER_EPOCH): - slot_start_index = get_slot_start_index(state, Slot(slot)) - for i in range(get_committees_per_slot(state, Slot(slot))): - index = (slot_start_index + i) % MAX_COMMITTEES_PER_SLOT + for index in range(get_committees_per_slot(state, Slot(slot))): committee = get_crosslink_committee(state, Slot(slot), index) if validator_index in committee: return committee, index, Slot(slot) diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index 879d878ac..afd51b0bd 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -42,7 +42,7 @@ def get_valid_attestation(spec, state, slot=None, index=None, signed=False): if slot is None: slot = state.slot if index is None: - index = spec.get_slot_start_index(state, slot) + index = 0 attestation_data = build_attestation_data(spec, state, slot, index) diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py index ffa59fafd..68b8283bc 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/state.py +++ b/test_libs/pyspec/eth2spec/test/helpers/state.py @@ -54,18 +54,14 @@ def next_epoch_with_attestations(spec, slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 committees_per_slot = spec.get_committees_per_slot(state, slot_to_attest) if slot_to_attest >= spec.compute_start_slot_of_epoch(spec.get_current_epoch(post_state)): - slot_start_index = spec.get_slot_start_index(state, slot_to_attest) - for i in range(committees_per_slot): - index = (slot_start_index + i) % spec.MAX_COMMITTEES_PER_SLOT + for index in range(committees_per_slot): cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index) block.body.attestations.append(cur_attestation) if fill_prev_epoch: slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 committees_per_slot = spec.get_committees_per_slot(state, 
slot_to_attest) - slot_start_index = spec.get_slot_start_index(state, slot_to_attest) - for i in range(committees_per_slot): - index = (slot_start_index + i) % spec.MAX_COMMITTEES_PER_SLOT + for index in range(committees_per_slot): prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index) block.body.attestations.append(prev_attestation) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py index 229eb85b3..e3ccb5d9f 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py @@ -140,8 +140,7 @@ def test_wrong_index_for_committee_signature(spec, state): def test_wrong_index_for_slot(spec, state): committees_per_slot = spec.get_committees_per_slot(state, state.slot) assert committees_per_slot < spec.MAX_COMMITTEES_PER_SLOT - slot_start_index = spec.get_slot_start_index(state, state.slot) - index = slot_start_index + committees_per_slot + index = committees_per_slot attestation = get_valid_attestation(spec, state) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py index 25d8c083f..8fc3f9866 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py @@ -28,9 +28,7 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support start_slot = spec.compute_start_slot_of_epoch(epoch) for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH): committees_per_slot = 
spec.get_committees_per_slot(state, slot) - slot_start_index = spec.get_slot_start_index(state, slot) - for i in range(committees_per_slot): - index = (slot_start_index + i) % spec.MAX_COMMITTEES_PER_SLOT + for index in range(committees_per_slot): # Check if we already have had sufficient balance. (and undone if we don't want it). # If so, do not create more attestations. (we do not have empty pending attestations normally anyway) if remaining_balance < 0: From 219084a08a679997425522336b84476c2724e73f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 16 Oct 2019 18:53:36 +0900 Subject: [PATCH 205/250] add CommitteeIndex type --- specs/core/0_beacon-chain.md | 5 +++-- specs/validator/0_beacon-chain-validator.md | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e0d6c108b..19712a7d9 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -137,6 +137,7 @@ We define the following Python custom types for type hinting and readability: | - | - | - | | `Slot` | `uint64` | a slot number | | `Epoch` | `uint64` | an epoch number | +| `CommitteeIndex` | `uint64` | an index for a committee within a slot | | `Shard` | `uint64` | a shard number | | `ValidatorIndex` | `uint64` | a validator registry index | | `Gwei` | `uint64` | an amount in Gwei | @@ -310,7 +311,7 @@ class AttestationData(Container): source: Checkpoint target: Checkpoint # Committee Index - index: uint64 + index: CommitteeIndex ``` #### `AttestationDataAndCustodyBit` @@ -874,7 +875,7 @@ def get_committees_per_slot(state: BeaconState, slot: Slot) -> uint64: #### `get_crosslink_committee` ```python -def get_crosslink_committee(state: BeaconState, slot: Slot, index: uint64) -> Sequence[ValidatorIndex]: +def get_crosslink_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]: """ Return the crosslink committee at ``slot`` for ``index``. 
""" diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 51f560243..15edc1e28 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -135,7 +135,7 @@ A validator can get committee assignments for a given epoch using the following def get_committee_assignment(state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex - ) -> Optional[Tuple[Sequence[ValidatorIndex], uint64, Slot]]: + ) -> Optional[Tuple[Sequence[ValidatorIndex], CommitteeIndex, Slot]]: """ Return the committee assignment in the ``epoch`` for ``validator_index``. ``assignment`` returned is a tuple of the following form: @@ -150,9 +150,9 @@ def get_committee_assignment(state: BeaconState, start_slot = compute_start_slot_of_epoch(epoch) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): for index in range(get_committees_per_slot(state, Slot(slot))): - committee = get_crosslink_committee(state, Slot(slot), index) + committee = get_crosslink_committee(state, Slot(slot), CommitteeIndex(index)) if validator_index in committee: - return committee, index, Slot(slot) + return committee, CommitteeIndex(index), Slot(slot) return None ``` From c239ce0b5eb1b350c80fde7617ecc194a26dbad1 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 17 Oct 2019 10:45:07 +0900 Subject: [PATCH 206/250] crosslink committee -> beacon committee --- specs/core/0_beacon-chain.md | 10 +++++----- specs/core/1_custody-game.md | 2 +- specs/core/1_shard-data-chains.md | 1 + specs/validator/0_beacon-chain-validator.md | 4 ++-- test_libs/pyspec/eth2spec/test/helpers/attestations.py | 8 ++++---- test_libs/pyspec/eth2spec/test/helpers/custody.py | 6 +++--- .../test_process_justification_and_finalization.py | 2 +- 7 files changed, 17 insertions(+), 16 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 19712a7d9..bf941eabd 100644 --- a/specs/core/0_beacon-chain.md +++ 
b/specs/core/0_beacon-chain.md @@ -81,7 +81,7 @@ - [`get_active_validator_indices`](#get_active_validator_indices) - [`get_validator_churn_limit`](#get_validator_churn_limit) - [`get_seed`](#get_seed) - - [`get_crosslink_committee`](#get_crosslink_committee) + - [`get_beacon_committee`](#get_beacon_committee) - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`get_total_balance`](#get_total_balance) - [`get_total_active_balance`](#get_total_active_balance) @@ -872,10 +872,10 @@ def get_committees_per_slot(state: BeaconState, slot: Slot) -> uint64: )) ``` -#### `get_crosslink_committee` +#### `get_beacon_committee` ```python -def get_crosslink_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]: +def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]: """ Return the crosslink committee at ``slot`` for ``index``. """ @@ -965,7 +965,7 @@ def get_attesting_indices(state: BeaconState, """ Return the set of attesting indices corresponding to ``data`` and ``bits``. 
""" - committee = get_crosslink_committee(state, data.slot, data.index) + committee = get_beacon_committee(state, data.slot, data.index) return set(index for i, index in enumerate(committee) if bits[i]) ``` @@ -1504,7 +1504,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH - committee = get_crosslink_committee(state, data.slot, data.index) + committee = get_beacon_committee(state, data.slot, data.index) assert len(attestation.aggregation_bits) == len(attestation.custody_bits) == len(committee) pending_attestation = PendingAttestation( diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 2fc4904ce..80a42cc14 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -624,7 +624,7 @@ def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> chunk_count = get_custody_chunk_count(attestation.data.crosslink) assert chunk_count == len(challenge.chunk_bits) # Verify custody bit is incorrect - committee = get_crosslink_committee(state, epoch, shard) + committee = get_beacon_committee(state, epoch, shard) custody_bit = attestation.custody_bits[committee.index(challenge.responder_index)] assert custody_bit != get_chunk_bits_root(challenge.chunk_bits) # Add new bit challenge record diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 9bd9c70ee..477dce44f 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -62,6 +62,7 @@ This document describes the shard transition function (data layer only) and the | Name | Value | | - | - | +| `SHARD_COUNT` | `2**10` (= 1,024) | | `MIN_BLOCK_BODY_PRICE` | `2**0` (= 1) | | `MAX_PERIOD_COMMITTEE_SIZE` | `2**7` (= 128) | | `SHARD_HEADER_SIZE` | `2**10` (= 1024) | diff --git 
a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 15edc1e28..454b99a41 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -150,7 +150,7 @@ def get_committee_assignment(state: BeaconState, start_slot = compute_start_slot_of_epoch(epoch) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): for index in range(get_committees_per_slot(state, Slot(slot))): - committee = get_crosslink_committee(state, Slot(slot), CommitteeIndex(index)) + committee = get_beacon_committee(state, Slot(slot), CommitteeIndex(index)) if validator_index in committee: return committee, CommitteeIndex(index), Slot(slot) return None @@ -166,7 +166,7 @@ def is_proposer(state: BeaconState, *Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot. -*Note*: `BeaconBlock` proposal is distinct from crosslink committee assignment, and in a given epoch each responsibility might occur at different a different slot. +*Note*: `BeaconBlock` proposal is distinct from beacon committee assignment, and in a given epoch each responsibility might occur at a different slot.
### Lookahead diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index afd51b0bd..e4540738a 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -46,13 +46,13 @@ def get_valid_attestation(spec, state, slot=None, index=None, signed=False): attestation_data = build_attestation_data(spec, state, slot, index) - crosslink_committee = spec.get_crosslink_committee( + beacon_committee = spec.get_beacon_committee( state, attestation_data.slot, attestation_data.index, ) - committee_size = len(crosslink_committee) + committee_size = len(beacon_committee) aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size)) custody_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size)) attestation = spec.Attestation( @@ -115,12 +115,12 @@ def get_attestation_signature(spec, state, attestation_data, privkey, custody_bi def fill_aggregate_attestation(spec, state, attestation): - crosslink_committee = spec.get_crosslink_committee( + beacon_committee = spec.get_beacon_committee( state, attestation.data.slot, attestation.data.index, ) - for i in range(len(crosslink_committee)): + for i in range(len(beacon_committee)): attestation.aggregation_bits[i] = True diff --git a/test_libs/pyspec/eth2spec/test/helpers/custody.py b/test_libs/pyspec/eth2spec/test/helpers/custody.py index 98659ee8e..205a335f4 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/custody.py +++ b/test_libs/pyspec/eth2spec/test/helpers/custody.py @@ -80,13 +80,13 @@ def bitlist_from_int(max_len, num_bits, n): def get_valid_bit_challenge(spec, state, attestation, invalid_custody_bit=False): - crosslink_committee = spec.get_crosslink_committee( + beacon_committee = spec.get_beacon_committee( state, attestation.data.slot, attestation.data.crosslink.shard, ) - responder_index = crosslink_committee[0] - challenger_index = 
crosslink_committee[-1] + responder_index = beacon_committee[0] + challenger_index = beacon_committee[-1] epoch = spec.get_randao_epoch_for_custody_period(attestation.data.target.epoch, responder_index) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py index 8fc3f9866..88470eb93 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py @@ -34,7 +34,7 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support if remaining_balance < 0: return - committee = spec.get_crosslink_committee(state, slot, index) + committee = spec.get_beacon_committee(state, slot, index) # Create a bitfield filled with the given count per attestation, # exactly on the right-most part of the committee field. From 7af2c232708425a7574d509fa028f50e2b3c1cb3 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 17 Oct 2019 10:47:39 +0900 Subject: [PATCH 207/250] remove refs to crosslinks --- specs/core/0_beacon-chain.md | 9 ++++----- specs/test_formats/epoch_processing/README.md | 1 - 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index bf941eabd..fd52b69f1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -123,7 +123,7 @@ This document represents the specification for Phase 0 of Ethereum 2.0 -- The Beacon Chain. At the core of Ethereum 2.0 is a system chain called the "beacon chain". The beacon chain stores and manages the registry of validators. In the initial deployment phases of Ethereum 2.0, the only mechanism to become a validator is to make a one-way ETH transaction to a deposit contract on Ethereum 1.0. 
Activation as a validator happens when Ethereum 1.0 deposit receipts are processed by the beacon chain, the activation balance is reached, and a queuing process is completed. Exit is either voluntary or done forcibly as a penalty for misbehavior. -The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block and proof-of-stake votes for a beacon block. A sufficient number of attestations for the same shard block create a "crosslink", confirming the shard segment up to that shard block into the beacon chain. Crosslinks also serve as infrastructure for asynchronous cross-shard communication. +The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block (Phase 1) and proof-of-stake votes for a beacon block (Phase 0). ## Notation @@ -169,7 +169,6 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `SHARD_COUNT` | `2**10` (= 1,024) | | `MAX_COMMITTEES_PER_SLOT` | `2**5` (= 32) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | | `MAX_VALIDATORS_PER_COMMITTEE` | `2**12` (= 4,096) | @@ -179,7 +178,7 @@ The following values are (non-configurable) constants used throughout the specif | `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `2**16` (= 65,536) | | `MIN_GENESIS_TIME` | `1578009600` (Jan 3, 2020) | -- For the safety of crosslinks, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) 
+- For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) ### Gwei values @@ -319,7 +318,7 @@ class AttestationData(Container): ```python class AttestationDataAndCustodyBit(Container): data: AttestationData - custody_bit: bit # Challengeable bit (SSZ-bool, 1 byte) for the custody of crosslink data + custody_bit: bit # Challengeable bit (SSZ-bool, 1 byte) for the custody of shard data ``` #### `IndexedAttestation` @@ -877,7 +876,7 @@ def get_committees_per_slot(state: BeaconState, slot: Slot) -> uint64: ```python def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]: """ - Return the crosslink committee at ``slot`` for ``index``. + Return the beacon committee at ``slot`` for ``index``. 
""" epoch = compute_epoch_of_slot(slot) committees_per_slot = get_committees_per_slot(state, slot) diff --git a/specs/test_formats/epoch_processing/README.md b/specs/test_formats/epoch_processing/README.md index d5b5e2c6d..7c5e2dc70 100644 --- a/specs/test_formats/epoch_processing/README.md +++ b/specs/test_formats/epoch_processing/README.md @@ -38,7 +38,6 @@ The provided pre-state is already transitioned to just before the specific sub-t Sub-transitions: - `justification_and_finalization` -- `crosslinks` - *`rewards_and_penalties` - planned testing extension* - `registry_updates` - `slashings` From 283a8cbf0d67e00950d2a1800c9e8803b447643b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 17 Oct 2019 10:49:49 +0900 Subject: [PATCH 208/250] remove refs to crosslinks --- test_generators/epoch_processing/main.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/test_generators/epoch_processing/main.py b/test_generators/epoch_processing/main.py index f0505ee94..52581d8c3 100644 --- a/test_generators/epoch_processing/main.py +++ b/test_generators/epoch_processing/main.py @@ -3,7 +3,6 @@ from typing import Iterable from eth2spec.phase0 import spec as spec_phase0 from eth2spec.phase1 import spec as spec_phase1 from eth2spec.test.phase_0.epoch_processing import ( - test_process_crosslinks, test_process_final_updates, test_process_justification_and_finalization, test_process_registry_updates, @@ -35,8 +34,6 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin if __name__ == "__main__": gen_runner.run_generator("epoch_processing", [ - create_provider('crosslinks', test_process_crosslinks, 'minimal'), - create_provider('crosslinks', test_process_crosslinks, 'mainnet'), create_provider('final_updates', test_process_final_updates, 'minimal'), create_provider('final_updates', test_process_final_updates, 'mainnet'), create_provider('justification_and_finalization', test_process_justification_and_finalization, 'minimal'), From 
8de3b8315766bc63fb5bfdc423b541c8a247989a Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 17 Oct 2019 10:39:21 +0800 Subject: [PATCH 209/250] Fix delay-based attestation inclusion reward Modify the delay-based reward function from the current `r = (65-d)/64` to `r = 1/d`. Rationale is that in the normal case delay is close to 1, so we want a larger incentive to get included more quickly to encourage stability of the fork choice. Particularly, in the status quo if you know that you will be a proposer <4 slots in the future, you can maximize earnings by delaying inclusion of your attestation until you can include it yourself and get the proposer reward; this adjustment fixes this in the normal case. --- specs/core/0_beacon-chain.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 681d82457..9a680dc7d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1365,9 +1365,7 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence rewards[attestation.proposer_index] += proposer_reward max_attester_reward = get_base_reward(state, index) - proposer_reward rewards[index] += Gwei( - max_attester_reward - * (SLOTS_PER_EPOCH + MIN_ATTESTATION_INCLUSION_DELAY - attestation.inclusion_delay) - // SLOTS_PER_EPOCH + max_attester_reward // attestation.inclusion_delay ) # Inactivity penalty From 437a65d3e121f8ff887a1fb20c0568e467ad686b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 17 Oct 2019 17:47:51 +0900 Subject: [PATCH 210/250] cleanup wrt Justin's comments --- configs/mainnet.yaml | 8 ++--- configs/minimal.yaml | 6 ++-- scripts/build_spec.py | 2 +- specs/core/0_beacon-chain.md | 36 +++++++++---------- specs/core/1_custody-game.md | 2 +- specs/validator/0_beacon-chain-validator.md | 10 +++--- .../eth2spec/test/helpers/attestations.py | 4 +-- .../pyspec/eth2spec/test/helpers/state.py | 4 +-- .../test_process_attestation.py | 2 +- 
..._process_justification_and_finalization.py | 2 +- 10 files changed, 37 insertions(+), 39 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 1210d8076..1a65084e2 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -5,12 +5,12 @@ # Misc # --------------------------------------------------------------- -# 2**5 (= 32) -MAX_COMMITTEES_PER_SLOT: 32 +# 2**6 (= 64) +MAX_COMMITTEES_PER_SLOT: 64 # 2**7 (= 128) TARGET_COMMITTEE_SIZE: 128 -# 2**12 (= 4,096) -MAX_VALIDATORS_PER_COMMITTEE: 4096 +# 2**10 (= 1,024) +MAX_VALIDATORS_PER_COMMITTEE: 1024 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 1cf35ca90..f9eb5c4fe 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -4,12 +4,12 @@ # Misc # --------------------------------------------------------------- -# [customized] Just 2 committees for slot for testing purposes +# [customized] Just 4 committees for slot for testing purposes MAX_COMMITTEES_PER_SLOT: 4 # [customized] unsecure, but fast TARGET_COMMITTEE_SIZE: 4 -# 2**12 (= 4,096) -MAX_VALIDATORS_PER_COMMITTEE: 4096 +# 2**10 (= 1,024) +MAX_VALIDATORS_PER_COMMITTEE: 1024 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index f35332e64..c47249fe4 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -118,7 +118,7 @@ def apply_constants_preset(preset: Dict[str, Any]) -> None: global_vars[k] = v # Deal with derived constants - global_vars['GENESIS_EPOCH'] = compute_epoch_of_slot(GENESIS_SLOT) + global_vars['GENESIS_EPOCH'] = compute_epoch_at_slot(GENESIS_SLOT) # Initialize SSZ types again, to account for changed lengths init_SSZ_types() diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index fd52b69f1..dced36a1b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -68,7 +68,7 @@ - 
[`compute_shuffled_index`](#compute_shuffled_index) - [`compute_proposer_index`](#compute_proposer_index) - [`compute_committee`](#compute_committee) - - [`compute_epoch_of_slot`](#compute_epoch_of_slot) + - [`compute_epoch_at_slot`](#compute_epoch_at_slot) - [`compute_start_slot_of_epoch`](#compute_start_slot_of_epoch) - [`compute_activation_exit_epoch`](#compute_activation_exit_epoch) - [`compute_domain`](#compute_domain) @@ -137,8 +137,7 @@ We define the following Python custom types for type hinting and readability: | - | - | - | | `Slot` | `uint64` | a slot number | | `Epoch` | `uint64` | an epoch number | -| `CommitteeIndex` | `uint64` | an index for a committee within a slot | -| `Shard` | `uint64` | a shard number | +| `CommitteeIndex` | `uint64` | a committee index at a slot | | `ValidatorIndex` | `uint64` | a validator registry index | | `Gwei` | `uint64` | an amount in Gwei | | `Hash` | `Bytes32` | a hash | @@ -169,9 +168,9 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `MAX_COMMITTEES_PER_SLOT` | `2**5` (= 32) | +| `MAX_COMMITTEES_PER_SLOT` | `2**6` (= 64) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | -| `MAX_VALIDATORS_PER_COMMITTEE` | `2**12` (= 4,096) | +| `MAX_VALIDATORS_PER_COMMITTEE` | `2**10` (= 1,024) | | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | | `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) | | `SHUFFLE_ROUND_COUNT` | `90` | @@ -304,13 +303,12 @@ class Validator(Container): ```python class AttestationData(Container): slot: Slot + index: CommitteeIndex # LMD GHOST vote beacon_block_root: Hash # FFG vote source: Checkpoint target: Checkpoint - # Committee Index - index: CommitteeIndex ``` #### `AttestationDataAndCustodyBit` @@ -489,7 +487,7 @@ class BeaconState(Container): # Registry validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] - # Shuffling + # Randomness randao_mixes: Vector[Hash, EPOCHS_PER_HISTORICAL_VECTOR] # 
Slashings slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances @@ -731,12 +729,12 @@ def compute_committee(indices: Sequence[ValidatorIndex], return [indices[compute_shuffled_index(ValidatorIndex(i), len(indices), seed)] for i in range(start, end)] ``` -#### `compute_epoch_of_slot` +#### `compute_epoch_at_slot` ```python -def compute_epoch_of_slot(slot: Slot) -> Epoch: +def compute_epoch_at_slot(slot: Slot) -> Epoch: """ - Return the epoch number of ``slot``. + Return the epoch number at ``slot``. """ return Epoch(slot // SLOTS_PER_EPOCH) ``` @@ -780,7 +778,7 @@ def get_current_epoch(state: BeaconState) -> Epoch: """ Return the current epoch. """ - return compute_epoch_of_slot(state.slot) + return compute_epoch_at_slot(state.slot) ``` #### `get_previous_epoch` @@ -857,14 +855,14 @@ def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Hash: return hash(domain_type + int_to_bytes(epoch, length=8) + mix) ``` -#### `get_committees_per_slot` +#### `get_committee_count_at_slot` ```python -def get_committees_per_slot(state: BeaconState, slot: Slot) -> uint64: +def get_committee_count_at_slot(state: BeaconState, slot: Slot) -> uint64: """ Return the number of committees at ``slot``. """ - epoch = compute_epoch_of_slot(slot) + epoch = compute_epoch_at_slot(slot) return max(1, min( MAX_COMMITTEES_PER_SLOT, len(get_active_validator_indices(state, epoch)) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, @@ -878,8 +876,8 @@ def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) """ Return the beacon committee at ``slot`` for ``index``. 
""" - epoch = compute_epoch_of_slot(slot) - committees_per_slot = get_committees_per_slot(state, slot) + epoch = compute_epoch_at_slot(slot) + committees_per_slot = get_committee_count_at_slot(state, slot) epoch_offset = index + (slot % SLOTS_PER_EPOCH) * committees_per_slot return compute_committee( @@ -1468,7 +1466,7 @@ def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSla assert is_slashable_validator(proposer, get_current_epoch(state)) # Signatures are valid for header in (proposer_slashing.header_1, proposer_slashing.header_2): - domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_of_slot(header.slot)) + domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(header.slot)) assert bls_verify(proposer.pubkey, signing_root(header), header.signature, domain) slash_validator(state, proposer_slashing.proposer_index) @@ -1499,7 +1497,7 @@ def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSla ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data - assert data.index < get_committees_per_slot(state, data.slot) + assert data.index < get_committee_count_at_slot(state, data.slot) assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 80a42cc14..224dfd5f2 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -547,7 +547,7 @@ def process_chunk_challenge(state: BeaconState, challenge: CustodyChunkChallenge # Verify the attestation assert is_valid_indexed_attestation(state, get_indexed_attestation(state, challenge.attestation)) # Verify it is not too late to challenge - assert (compute_epoch_of_slot(challenge.attestation.data.slot) + assert (compute_epoch_at_slot(challenge.attestation.data.slot) >= 
get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY) responder = state.validators[challenge.responder_index] assert responder.exit_epoch >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 454b99a41..331184699 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -149,7 +149,7 @@ def get_committee_assignment(state: BeaconState, start_slot = compute_start_slot_of_epoch(epoch) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): - for index in range(get_committees_per_slot(state, Slot(slot))): + for index in range(get_committee_count_at_slot(state, Slot(slot))): committee = get_beacon_committee(state, Slot(slot), CommitteeIndex(index)) if validator_index in committee: return committee, CommitteeIndex(index), Slot(slot) @@ -210,8 +210,8 @@ Set `block.randao_reveal = epoch_signature` where `epoch_signature` is obtained ```python def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature: - domain = get_domain(state, DOMAIN_RANDAO, compute_epoch_of_slot(block.slot)) - return bls_sign(privkey, hash_tree_root(compute_epoch_of_slot(block.slot)), domain) + domain = get_domain(state, DOMAIN_RANDAO, compute_epoch_at_slot(block.slot)) + return bls_sign(privkey, hash_tree_root(compute_epoch_at_slot(block.slot)), domain) ``` ##### Eth1 Data @@ -244,7 +244,7 @@ Set `header.signature = block_signature` where `block_signature` is obtained fro ```python def get_block_signature(state: BeaconState, header: BeaconBlockHeader, privkey: int) -> BLSSignature: - domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_of_slot(header.slot)) + domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(header.slot)) return bls_sign(privkey, signing_root(header), domain) ``` @@ -352,7 +352,7 @@ To avoid "proposer slashings", a validator must not sign two 
conflicting [`Beaco Specifically, when signing a `BeaconBlock`, a validator should perform the following steps in the following order: -1. Save a record to hard disk that a beacon block has been signed for the `epoch=compute_epoch_of_slot(block.slot)`. +1. Save a record to hard disk that a beacon block has been signed for the `epoch=compute_epoch_at_slot(block.slot)`. 2. Generate and broadcast the block. If the software crashes at some point within this routine, then when the validator comes back online, the hard disk has the record of the *potentially* signed/broadcast block and can effectively avoid slashing. diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index e4540738a..5e5073441 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -31,10 +31,10 @@ def build_attestation_data(spec, state, slot, index): return spec.AttestationData( slot=slot, + index=index, beacon_block_root=block_root, source=spec.Checkpoint(epoch=source_epoch, root=source_root), - target=spec.Checkpoint(epoch=spec.compute_epoch_of_slot(slot), root=epoch_boundary_root), - index=index, + target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root), ) diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py index 68b8283bc..c5a9bd4ce 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/state.py +++ b/test_libs/pyspec/eth2spec/test/helpers/state.py @@ -52,7 +52,7 @@ def next_epoch_with_attestations(spec, block = build_empty_block_for_next_slot(spec, post_state) if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 - committees_per_slot = spec.get_committees_per_slot(state, slot_to_attest) + committees_per_slot = spec.get_committee_count_at_slot(state, 
slot_to_attest) if slot_to_attest >= spec.compute_start_slot_of_epoch(spec.get_current_epoch(post_state)): for index in range(committees_per_slot): cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index) @@ -60,7 +60,7 @@ def next_epoch_with_attestations(spec, if fill_prev_epoch: slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 - committees_per_slot = spec.get_committees_per_slot(state, slot_to_attest) + committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest) for index in range(committees_per_slot): prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index) block.body.attestations.append(prev_attestation) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py index e3ccb5d9f..13faed3f4 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py @@ -138,7 +138,7 @@ def test_wrong_index_for_committee_signature(spec, state): @spec_state_test @never_bls def test_wrong_index_for_slot(spec, state): - committees_per_slot = spec.get_committees_per_slot(state, state.slot) + committees_per_slot = spec.get_committee_count_at_slot(state, state.slot) assert committees_per_slot < spec.MAX_COMMITTEES_PER_SLOT index = committees_per_slot diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py index 88470eb93..81f761a72 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py @@ -27,7 +27,7 @@ def 
add_mock_attestations(spec, state, epoch, source, target, sufficient_support start_slot = spec.compute_start_slot_of_epoch(epoch) for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH): - committees_per_slot = spec.get_committees_per_slot(state, slot) + committees_per_slot = spec.get_committee_count_at_slot(state, slot) for index in range(committees_per_slot): # Check if we already have had sufficient balance. (and undone if we don't want it). # If so, do not create more attestations. (we do not have empty pending attestations normally anyway) From fbcc2a4870f230a9744456702ca6ef5d48ff4e50 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 18 Oct 2019 12:05:43 +0900 Subject: [PATCH 211/250] fix comment re: proto --- specs/core/1_shard-data-chains.md | 2 +- test_libs/pyspec/eth2spec/test/helpers/block.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 477dce44f..6edc5daba 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -106,8 +106,8 @@ This document describes the shard transition function (data layer only) and the ### `Crosslink` ```python +# Crosslink is a placeholder to appease the build script until phase 1 is reworked class Crosslink(Container): - # STUB: placeholder data structure while reworking phase 0 shard: Shard ``` diff --git a/test_libs/pyspec/eth2spec/test/helpers/block.py b/test_libs/pyspec/eth2spec/test/helpers/block.py index 2682a0c82..779f2e1cf 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/block.py +++ b/test_libs/pyspec/eth2spec/test/helpers/block.py @@ -14,7 +14,7 @@ def sign_block(spec, state, block, proposer_index=None): if block.slot == state.slot: proposer_index = spec.get_beacon_proposer_index(state) else: - if spec.compute_epoch_of_slot(state.slot) + 1 > spec.compute_epoch_of_slot(block.slot): + if spec.compute_epoch_at_slot(state.slot) + 1 > spec.compute_epoch_at_slot(block.slot): 
print("warning: block slot far away, and no proposer index manually given." " Signing block is slow due to transition for proposer index calculation.") # use stub state to get proposer index of future slot @@ -26,10 +26,10 @@ def sign_block(spec, state, block, proposer_index=None): block.body.randao_reveal = bls_sign( privkey=privkey, - message_hash=hash_tree_root(spec.compute_epoch_of_slot(block.slot)), + message_hash=hash_tree_root(spec.compute_epoch_at_slot(block.slot)), domain=spec.get_domain( state, - message_epoch=spec.compute_epoch_of_slot(block.slot), + message_epoch=spec.compute_epoch_at_slot(block.slot), domain_type=spec.DOMAIN_RANDAO, ) ) @@ -39,7 +39,7 @@ def sign_block(spec, state, block, proposer_index=None): domain=spec.get_domain( state, spec.DOMAIN_BEACON_PROPOSER, - spec.compute_epoch_of_slot(block.slot))) + spec.compute_epoch_at_slot(block.slot))) def apply_empty_block(spec, state): From 58c28349de46c34fe36997725201485c43fbc566 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 18 Oct 2019 12:10:36 +0900 Subject: [PATCH 212/250] proto pr feedback --- specs/core/0_fork-choice.md | 3 +-- specs/validator/0_beacon-chain-validator.md | 9 +++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/core/0_fork-choice.md b/specs/core/0_fork-choice.md index 8e25fe8f3..552a3cb02 100644 --- a/specs/core/0_fork-choice.md +++ b/specs/core/0_fork-choice.md @@ -192,8 +192,7 @@ def on_attestation(store: Store, attestation: Attestation) -> None: # Attestations can only affect the fork choice of subsequent slots. # Delay consideration in the fork choice until their slot is in the past. 
- attestation_slot = attestation.data.slot - assert store.time >= (attestation_slot + 1) * SECONDS_PER_SLOT + assert store.time >= (attestation.data.slot + 1) * SECONDS_PER_SLOT # Get state at the `target` to validate attestation and calculate the committees indexed_attestation = get_indexed_attestation(target_state, attestation) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 331184699..1be028df4 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -285,6 +285,11 @@ First, the validator should construct `attestation_data`, an [`AttestationData`] - Let `head_block` be the result of running the fork choice during the assigned slot. - Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot using `process_slots(state, slot)`. +##### General + +* Set `attestation_data.slot = slot` where `slot` is the assigned slot. +* Set `attestation_data.index = index` where `index` is the index associated with the validator's committee. + ##### LMD GHOST vote Set `attestation_data.beacon_block_root = signing_root(head_block)`. @@ -299,10 +304,6 @@ Set `attestation_data.beacon_block_root = signing_root(head_block)`. - Let `start_slot = compute_start_slot_of_epoch(get_current_epoch(head_state))`. - Let `epoch_boundary_block_root = signing_root(head_block) if start_slot == head_state.slot else get_block_root(state, start_slot)`. -##### Index - -Set `attestation_data.index = index` where `index` is the index associated with the validator's committee. - #### Construct attestation Next, the validator creates `attestation`, an [`Attestation`](../core/0_beacon-chain.md#attestation) object. 
From d5a2535f98b48d9ef80d66ba684f16745ef787e2 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 18 Oct 2019 12:17:46 +0900 Subject: [PATCH 213/250] minor comments resolved from hww --- specs/core/0_beacon-chain.md | 10 ++++------ specs/core/1_beacon-chain-misc.md | 1 - 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index dced36a1b..ca968713d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -878,12 +878,10 @@ def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) """ epoch = compute_epoch_at_slot(slot) committees_per_slot = get_committee_count_at_slot(state, slot) - epoch_offset = index + (slot % SLOTS_PER_EPOCH) * committees_per_slot - return compute_committee( indices=get_active_validator_indices(state, epoch), seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), - index=epoch_offset, + index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index, count=committees_per_slot * SLOTS_PER_EPOCH, ) ``` @@ -1300,10 +1298,10 @@ def process_rewards_and_penalties(state: BeaconState) -> None: if get_current_epoch(state) == GENESIS_EPOCH: return - rewards1, penalties1 = get_attestation_deltas(state) + rewards, penalties = get_attestation_deltas(state) for index in range(len(state.validators)): - increase_balance(state, ValidatorIndex(index), rewards1[index]) - decrease_balance(state, ValidatorIndex(index), penalties1[index]) + increase_balance(state, ValidatorIndex(index), rewards[index]) + decrease_balance(state, ValidatorIndex(index), penalties[index]) ``` #### Registry updates diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 9f7a4d2d7..6dd6e19c3 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -31,7 +31,6 @@ | Name | Value | Unit | Duration | - | - | - | - | -| `MAX_EPOCHS_PER_CROSSLINK` | `2**6` (= 64) | epochs | ~7 hours | | `MAX_SHARD_RECEIPT_PROOFS` | 
`2**0` (= 1) | - | - | | `PERIOD_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months | | `MINOR_REWARD_QUOTIENT` | `2**8` (=256) | - | - | From a9961d4ce46d0653dd5d2e18a597282a54b905ca Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 18 Oct 2019 03:38:06 -0500 Subject: [PATCH 214/250] Simplify get_helper_indices --- specs/light_client/merkle_proofs.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index b021f1ac0..f6c77fffb 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -283,9 +283,7 @@ def get_helper_indices(indices: Sequence[GeneralizedIndex]) -> Sequence[Generali all_helper_indices = all_helper_indices.union(set(get_branch_indices(index))) all_path_indices = all_path_indices.union(set(get_path_indices(index))) - return sorted([ - x for x in all_helper_indices if x not in all_path_indices - ], reverse=True) + return sorted(all_helper_indices.difference(all_path_indices), reverse=True) ``` Now we provide the Merkle proof verification functions. 
First, for single item proofs: From a11b01296bc1d19deba3c01c26de82d5f88d474d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 20 Oct 2019 12:38:00 +0800 Subject: [PATCH 215/250] update constants for phase 0 simplification --- configs/mainnet.yaml | 12 ++++++------ configs/minimal.yaml | 6 +++--- specs/core/0_beacon-chain.md | 10 +++++----- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 1a65084e2..1a2e1ea83 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -9,8 +9,8 @@ MAX_COMMITTEES_PER_SLOT: 64 # 2**7 (= 128) TARGET_COMMITTEE_SIZE: 128 -# 2**10 (= 1,024) -MAX_VALIDATORS_PER_COMMITTEE: 1024 +# 2**11 (= 2,048) +MAX_VALIDATORS_PER_COMMITTEE: 2048 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) @@ -51,12 +51,12 @@ BLS_WITHDRAWAL_PREFIX: 0x00 # Time parameters # --------------------------------------------------------------- -# 6 seconds 6 seconds -SECONDS_PER_SLOT: 6 +# 12 seconds +SECONDS_PER_SLOT: 12 # 2**0 (= 1) slots 6 seconds MIN_ATTESTATION_INCLUSION_DELAY: 1 -# 2**6 (= 64) slots 6.4 minutes -SLOTS_PER_EPOCH: 64 +# 2**6 (= 32) slots 6.4 minutes +SLOTS_PER_EPOCH: 32 # 2**0 (= 1) epochs 6.4 minutes MIN_SEED_LOOKAHEAD: 1 # 2**2 (= 4) epochs 25.6 minutes diff --git a/configs/minimal.yaml b/configs/minimal.yaml index f9eb5c4fe..75b93f66a 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -8,8 +8,8 @@ MAX_COMMITTEES_PER_SLOT: 4 # [customized] unsecure, but fast TARGET_COMMITTEE_SIZE: 4 -# 2**10 (= 1,024) -MAX_VALIDATORS_PER_COMMITTEE: 1024 +# 2**11 (= 2,048) +MAX_VALIDATORS_PER_COMMITTEE: 2048 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) @@ -50,7 +50,7 @@ BLS_WITHDRAWAL_PREFIX: 0x00 # Time parameters # --------------------------------------------------------------- -# 6 seconds 6 seconds +# [customized] Faster for testing purposes SECONDS_PER_SLOT: 6 # 2**0 (= 1) slots 6 seconds MIN_ATTESTATION_INCLUSION_DELAY: 1 diff --git 
a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ca968713d..9be1f07d8 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -154,7 +154,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | | `FAR_FUTURE_EPOCH` | `Epoch(2**64 - 1)` | -| `BASE_REWARDS_PER_EPOCH` | `5` | +| `BASE_REWARDS_PER_EPOCH` | `4` | | `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) | | `SECONDS_PER_DAY` | `86400` | | `JUSTIFICATION_BITS_LENGTH` | `4` | @@ -170,7 +170,7 @@ The following values are (non-configurable) constants used throughout the specif | - | - | | `MAX_COMMITTEES_PER_SLOT` | `2**6` (= 64) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | -| `MAX_VALIDATORS_PER_COMMITTEE` | `2**10` (= 1,024) | +| `MAX_VALIDATORS_PER_COMMITTEE` | `2**11` (= 2,048) | | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | | `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) | | `SHUFFLE_ROUND_COUNT` | `90` | @@ -200,9 +200,9 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | Unit | Duration | | - | - | :-: | :-: | -| `SECONDS_PER_SLOT` | `6` | seconds | 6 seconds | -| `MIN_ATTESTATION_INCLUSION_DELAY` | `2**0` (= 1) | slots | 6 seconds | -| `SLOTS_PER_EPOCH` | `2**6` (= 64) | slots | 6.4 minutes | +| `SECONDS_PER_SLOT` | `12` | seconds | 12 seconds | +| `MIN_ATTESTATION_INCLUSION_DELAY` | `2**0` (= 1) | slots | 12 seconds | +| `SLOTS_PER_EPOCH` | `2**5` (= 32) | slots | 6.4 minutes | | `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes | | `MAX_SEED_LOOKAHEAD` | `2**2` (= 4) | epochs | 25.6 minutes | | `SLOTS_PER_ETH1_VOTING_PERIOD` | `2**10` (= 1,024) | slots | ~1.7 hours | From e86ff1ead7db5e3c87eefe4ace3d401521a4523e Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 20 Oct 2019 00:43:36 -0500 Subject: [PATCH 216/250] minor fix to mainnet config comments Co-Authored-By: Cayman --- configs/mainnet.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 1a2e1ea83..45d5b6894 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -55,7 +55,7 @@ BLS_WITHDRAWAL_PREFIX: 0x00 SECONDS_PER_SLOT: 12 # 2**0 (= 1) slots 6 seconds MIN_ATTESTATION_INCLUSION_DELAY: 1 -# 2**6 (= 32) slots 6.4 minutes +# 2**5 (= 32) slots 6.4 minutes SLOTS_PER_EPOCH: 32 # 2**0 (= 1) epochs 6.4 minutes MIN_SEED_LOOKAHEAD: 1 From bc8ff33a371768cc2347fc68bc154d3984203d60 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 22 Oct 2019 15:49:50 +0800 Subject: [PATCH 217/250] working through attestation aggregation --- specs/networking/p2p-interface.md | 4 +- specs/validator/0_beacon-chain-validator.md | 80 +++++++++++++++++++-- 2 files changed, 78 insertions(+), 6 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index c9bab8406..ed63281b4 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -164,9 +164,9 @@ Unaggregated and aggregated attestations from all shards are sent to the `beacon #### Mainnet -Shards are grouped into their own subnets (defined by a shard topic). The number of shard subnets is defined via `SHARD_SUBNET_COUNT` and the shard `shard_number % SHARD_SUBNET_COUNT` is assigned to the topic: `shard{shard_number % SHARD_SUBNET_COUNT}_beacon_attestation`. Unaggregated attestations are sent to the subnet topic. Aggregated attestations are sent to the `beacon_attestation` topic. +Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. The `CommitteeIndex`, `index`, is assigned to the topic: `index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`. -TODO: [aggregation strategy](https://github.com/ethereum/eth2.0-specs/issues/1331) +Unaggregated attestations are sent to the subnet topic, `index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`. 
Aggregated attestations are sent to the `beacon_attestation` topic. ### Messages diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 1be028df4..8e36e649f 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -37,7 +37,7 @@ - [Attestations](#attestations) - [Deposits](#deposits) - [Voluntary exits](#voluntary-exits) - - [Attestations](#attestations-1) + - [Attesting](#attesting) - [Attestation data](#attestation-data) - [LMD GHOST vote](#lmd-ghost-vote) - [FFG vote](#ffg-vote) @@ -46,6 +46,15 @@ - [Aggregation bits](#aggregation-bits) - [Custody bits](#custody-bits) - [Aggregate signature](#aggregate-signature) + - [Broadcast attestation](#broadcast-attestation) + - [Attestation aggregation](#attestation-aggregation) + - [Construct aggregate](#construct-aggregate) + - [Data](#data-1) + - [Aggregation bits](#aggregation-bits-1) + - [Custody bits](#custody-bits-1) + - [Aggregate signature](#aggregate-signature-1) + - [Broadcast aggregate](#broadcast-aggregate) + - [How to avoid slashing](#how-to-avoid-slashing) - [Proposer slashing](#proposer-slashing) - [Attester slashing](#attester-slashing) @@ -272,11 +281,11 @@ The `proof` for each deposit must be constructed against the deposit root contai Up to `MAX_VOLUNTARY_EXITS`, [`VoluntaryExit`](../core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](../core/0_beacon-chain.md#voluntary-exits). -### Attestations +### Attesting A validator is expected to create, sign, and broadcast an attestation during each epoch. The `committee`, assigned `index`, and assigned `slot` for which the validator performs this role during an epoch are defined by `get_committee_assignment(state, epoch, validator_index)`. 
-A validator should create and broadcast the attestation halfway through the `slot` during which the validator is assigned―that is, `SECONDS_PER_SLOT * 0.5` seconds after the start of `slot`. +A validator should create and broadcast the `attestation` to the associated attestation subnet one-third of the way through the `slot` during which the validator is assigned―that is, `SECONDS_PER_SLOT / 3` seconds after the start of `slot`. #### Attestation data @@ -314,7 +323,7 @@ Set `attestation.data = attestation_data` where `attestation_data` is the `Attes ##### Aggregation bits -- Let `attestation.aggregation_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` where the bits at the index in the aggregated validator's `committee` is set to `0b1`. +- Let `attestation.aggregation_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` of length `len(committee)`, where the bit of the index of the validator in the `committee` is set to `0b1`. *Note*: Calling `get_attesting_indices(state, attestation.data, attestation.aggregation_bits)` should return a list of length equal to 1, containing `validator_index`. @@ -339,6 +348,69 @@ def get_signed_attestation_data(state: BeaconState, attestation: IndexedAttestat return bls_sign(privkey, hash_tree_root(attestation_data_and_custody_bit), domain) ``` +#### Broadcast attestation + +Finally, the validator broadcasts `attestation` to the associated attestation subnet -- the `index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` pubsub topic. + +## Attestation aggregation + +The validator is expected to locally aggregate attestations with a similar `attestation_data` to their constructed `attestation` for the assigned `slot`. + +The validator then _might_ broadcast their best aggregate to a global attestation channel two-thirds of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`. 
+ +#### Construct aggregate + +Collect `attestations` seen via gossip during the `slot` that have an equivalent `attestation_data` to that constructed by the validator. + +The validator should create an `aggregate_attestation` with the following fields. + +##### Data + +Set `aggregate_attestation.data = attestation_data` where `attestation_data` is the `AttestationData` object that is the same for each individual attestation being aggregated. + +##### Aggregation bits + +Let `aggregate_attestation.aggregation_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` of length `len(committee)`, where each bit set from each individual attestation is set to `0b1`. + +##### Custody bits + +- Let `aggregate_attestation.custody_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` filled with zeros of length `len(committee)`. + +*Note*: This is a stub for Phase 0. + +##### Aggregate signature + +Set `aggregate_attestation.signature = aggregate_signature` where `aggregate_signature` is obtained from: + +```python +def get_aggregate_signature(attestations: Attestation) -> BLSSignature: + signatures = [attestation.signature for attestation in attestations] + aggregate_signature = bls_aggregate_signatures(signatures) + return aggregate_signature +``` + +#### Broadcast aggregate + +Starting two-thirds of the way into the slot, the validator begins running the following routine to decide if their best aggregate should be broadcast to the global attestation channel (`beacon_attestation`). `seen()` is defined as having seen an aggregate + +```python +def should_broadcast_aggregate(aggregate_attestation: Attestation, index: ValidatorIndex) -> bool: + if seen(index): + return False + if random.randrange(SIDED_DIE) == 0: + return True + time.sleep(WAIT // 1000) +``` + +Define `seen_better(aggregate_attestation, attestations)` as your aggregate +having more attestations included than any in the attestations you've seen on +the network so far. Could also make it better by _some_ amount. 
+ +```python +def have_better_aggregate(aggregate_attestation, attestations): + +``` + ## How to avoid slashing "Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed: [proposer slashing](#proposer-slashing) and [attester slashing](#attester-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed all together by remaining _consistent_ with respect to the messages a validator has previously signed. From f809b21241cedeeb6d287c921c0880feb3efebdc Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 23 Oct 2019 09:37:15 +0900 Subject: [PATCH 218/250] minor pr review --- specs/core/0_beacon-chain.md | 8 ++++---- specs/core/0_fork-choice.md | 8 ++++---- specs/core/1_beacon-chain-misc.md | 2 +- specs/light_client/sync_protocol.md | 4 ++-- specs/validator/0_beacon-chain-validator.md | 4 ++-- test_libs/pyspec/eth2spec/test/helpers/attestations.py | 2 +- test_libs/pyspec/eth2spec/test/helpers/state.py | 2 +- .../test_process_justification_and_finalization.py | 4 ++-- 8 files changed, 17 insertions(+), 17 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 9be1f07d8..8d8fd2c15 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -69,7 +69,7 @@ - [`compute_proposer_index`](#compute_proposer_index) - [`compute_committee`](#compute_committee) - [`compute_epoch_at_slot`](#compute_epoch_at_slot) - - [`compute_start_slot_of_epoch`](#compute_start_slot_of_epoch) + - [`compute_start_slot_at_epoch`](#compute_start_slot_at_epoch) - [`compute_activation_exit_epoch`](#compute_activation_exit_epoch) - [`compute_domain`](#compute_domain) - [Beacon state accessors](#beacon-state-accessors) @@ -739,10 +739,10 @@ def compute_epoch_at_slot(slot: Slot) -> Epoch: return Epoch(slot // SLOTS_PER_EPOCH) ``` -#### `compute_start_slot_of_epoch` +#### 
`compute_start_slot_at_epoch` ```python -def compute_start_slot_of_epoch(epoch: Epoch) -> Slot: +def compute_start_slot_at_epoch(epoch: Epoch) -> Slot: """ Return the start slot of ``epoch``. """ @@ -799,7 +799,7 @@ def get_block_root(state: BeaconState, epoch: Epoch) -> Hash: """ Return the block root at the start of a recent ``epoch``. """ - return get_block_root_at_slot(state, compute_start_slot_of_epoch(epoch)) + return get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch)) ``` #### `get_block_root_at_slot` diff --git a/specs/core/0_fork-choice.md b/specs/core/0_fork-choice.md index 552a3cb02..88edceaa0 100644 --- a/specs/core/0_fork-choice.md +++ b/specs/core/0_fork-choice.md @@ -118,7 +118,7 @@ def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei: def get_head(store: Store) -> Hash: # Execute the LMD-GHOST fork choice head = store.justified_checkpoint.root - justified_slot = compute_start_slot_of_epoch(store.justified_checkpoint.epoch) + justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch) while True: children = [ root for root in store.blocks.keys() @@ -156,7 +156,7 @@ def on_block(store: Store, block: BeaconBlock) -> None: store.finalized_checkpoint.root ) # Check that block is later than the finalized epoch slot - assert block.slot > compute_start_slot_of_epoch(store.finalized_checkpoint.epoch) + assert block.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) # Check the block is valid and compute the post-state state = state_transition(pre_state, block) # Add new state for this block to the store @@ -182,11 +182,11 @@ def on_attestation(store: Store, attestation: Attestation) -> None: # Attestations cannot be from future epochs. 
If they are, delay consideration until the epoch arrives base_state = store.block_states[target.root].copy() - assert store.time >= base_state.genesis_time + compute_start_slot_of_epoch(target.epoch) * SECONDS_PER_SLOT + assert store.time >= base_state.genesis_time + compute_start_slot_at_epoch(target.epoch) * SECONDS_PER_SLOT # Store target checkpoint state if not yet seen if target not in store.checkpoint_states: - process_slots(base_state, compute_start_slot_of_epoch(target.epoch)) + process_slots(base_state, compute_start_slot_at_epoch(target.epoch)) store.checkpoint_states[target] = base_state target_state = store.checkpoint_states[target] diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 6dd6e19c3..6a8d06d4a 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -203,7 +203,7 @@ class BeaconState(Container): ``` `period_committee_roots` values are initialized to `Bytes32()` (empty bytes value). -`next_shard_receipt_period` values are initialized to `compute_epoch_of_slot(PHASE_1_FORK_SLOT) // EPOCHS_PER_SHARD_PERIOD`. +`next_shard_receipt_period` values are initialized to `compute_epoch_at_slot(PHASE_1_FORK_SLOT) // EPOCHS_PER_SHARD_PERIOD`. #### `BeaconBlockBody` diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index c3b035270..ef9e2b7bc 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -84,7 +84,7 @@ def get_persistent_committee_pubkeys_and_balances(memory: LightClientMemory, """ Return pubkeys and balances for the persistent committee at ``epoch``. 
""" - current_period = compute_epoch_of_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD + current_period = compute_epoch_at_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD next_period = epoch // EPOCHS_PER_SHARD_PERIOD assert next_period in (current_period, current_period + 1) if next_period == current_period: @@ -114,7 +114,7 @@ The state of a light client is stored in a `memory` object of type `LightClientM ```python def update_memory(memory: LightClientMemory, update: LightClientUpdate) -> None: # Verify the update does not skip a period - current_period = compute_epoch_of_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD + current_period = compute_epoch_at_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD next_epoch = compute_epoch_of_shard_slot(update.header.slot) next_period = next_epoch // EPOCHS_PER_SHARD_PERIOD assert next_period in (current_period, current_period + 1) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 1be028df4..13c688468 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -147,7 +147,7 @@ def get_committee_assignment(state: BeaconState, next_epoch = get_current_epoch(state) + 1 assert epoch <= next_epoch - start_slot = compute_start_slot_of_epoch(epoch) + start_slot = compute_start_slot_at_epoch(epoch) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): for index in range(get_committee_count_at_slot(state, Slot(slot))): committee = get_beacon_committee(state, Slot(slot), CommitteeIndex(index)) @@ -301,7 +301,7 @@ Set `attestation_data.beacon_block_root = signing_root(head_block)`. *Note*: `epoch_boundary_block_root` can be looked up in the state using: -- Let `start_slot = compute_start_slot_of_epoch(get_current_epoch(head_state))`. +- Let `start_slot = compute_start_slot_at_epoch(get_current_epoch(head_state))`. 
- Let `epoch_boundary_block_root = signing_root(head_block) if start_slot == head_state.slot else get_block_root(state, start_slot)`. #### Construct attestation diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py index 5e5073441..f3454fe4c 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/attestations.py +++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py @@ -14,7 +14,7 @@ def build_attestation_data(spec, state, slot, index): else: block_root = spec.get_block_root_at_slot(state, slot) - current_epoch_start_slot = spec.compute_start_slot_of_epoch(spec.get_current_epoch(state)) + current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state)) if slot < current_epoch_start_slot: epoch_boundary_root = spec.get_block_root(state, spec.get_previous_epoch(state)) elif slot == current_epoch_start_slot: diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py index c5a9bd4ce..27e946cbb 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/state.py +++ b/test_libs/pyspec/eth2spec/test/helpers/state.py @@ -53,7 +53,7 @@ def next_epoch_with_attestations(spec, if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest) - if slot_to_attest >= spec.compute_start_slot_of_epoch(spec.get_current_epoch(post_state)): + if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)): for index in range(committees_per_slot): cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index) block.body.attestations.append(cur_attestation) diff --git a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py 
b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py index 81f761a72..002d3b169 100644 --- a/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py +++ b/test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py @@ -25,7 +25,7 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support total_balance = spec.get_total_active_balance(state) remaining_balance = total_balance * 2 // 3 - start_slot = spec.compute_start_slot_of_epoch(epoch) + start_slot = spec.compute_start_slot_at_epoch(epoch) for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH): committees_per_slot = spec.get_committee_count_at_slot(state, slot) for index in range(committees_per_slot): @@ -74,7 +74,7 @@ def get_checkpoints(spec, epoch): def put_checkpoints_in_block_roots(spec, state, checkpoints): for c in checkpoints: - state.block_roots[spec.compute_start_slot_of_epoch(c.epoch) % spec.SLOTS_PER_HISTORICAL_ROOT] = c.root + state.block_roots[spec.compute_start_slot_at_epoch(c.epoch) % spec.SLOTS_PER_HISTORICAL_ROOT] = c.root def finalize_on_234(spec, state, epoch, sufficient_support): From 13c3d9c6e9fbee8706e3cbbff131a630f86070c6 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 23 Oct 2019 12:10:43 +0800 Subject: [PATCH 219/250] Update ToCs --- specs/core/0_beacon-chain.md | 1 + specs/core/1_beacon-chain-misc.md | 1 + specs/core/1_custody-game.md | 2 +- specs/core/1_shard-data-chains.md | 3 ++- 4 files changed, 5 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 8d8fd2c15..24f944136 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -81,6 +81,7 @@ - [`get_active_validator_indices`](#get_active_validator_indices) - [`get_validator_churn_limit`](#get_validator_churn_limit) - [`get_seed`](#get_seed) + - 
[`get_committee_count_at_slot`](#get_committee_count_at_slot) - [`get_beacon_committee`](#get_beacon_committee) - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`get_total_balance`](#get_total_balance) diff --git a/specs/core/1_beacon-chain-misc.md b/specs/core/1_beacon-chain-misc.md index 6a8d06d4a..3fdafe3ea 100644 --- a/specs/core/1_beacon-chain-misc.md +++ b/specs/core/1_beacon-chain-misc.md @@ -9,6 +9,7 @@ - [Configuration](#configuration) - [Containers](#containers) - [`CompactCommittee`](#compactcommittee) + - [`ShardReceiptDelta`](#shardreceiptdelta) - [`ShardReceiptProof`](#shardreceiptproof) - [Helper functions](#helper-functions) - [`pack_compact_validator`](#pack_compact_validator) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 224dfd5f2..67e12a08c 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -336,7 +336,7 @@ def legendre_bit(a: int, q: int) -> int: return 0 ``` -### ```custody_subchunkify``` +### `custody_subchunkify` Given one proof of custody chunk, returns the proof of custody subchunks of the correct sizes. 
diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 6edc5daba..d9c88e72b 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -18,10 +18,11 @@ - [Rewards and penalties](#rewards-and-penalties) - [Signature domain types](#signature-domain-types) - [Containers](#containers) + - [`Crosslink`](#crosslink) - [`ShardBlock`](#shardblock) - [`ShardBlockHeader`](#shardblockheader) - [`ShardState`](#shardstate) - - [`ShardAttestationData`](#ShardAttestationData) + - [`ShardAttestationData`](#shardattestationdata) - [Helper functions](#helper-functions) - [Misc](#misc-1) - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot) From a9c21125715497dd8362a640a1155b048438f5ec Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 23 Oct 2019 17:13:01 +0900 Subject: [PATCH 220/250] add bulk of naive aggregation strategy --- specs/networking/p2p-interface.md | 32 +++++++++-- specs/validator/0_beacon-chain-validator.md | 61 ++++++++++++--------- 2 files changed, 62 insertions(+), 31 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index ed63281b4..68a46c504 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -119,6 +119,7 @@ This section outlines constants that are used in this spec. | `SHARD_SUBNET_COUNT` | `TODO` | The number of shard subnets used in the gossipsub protocol. | | `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). | | `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. | +| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `4` | The maximum number of slots during which an attestation can be propagated. | ## The gossip domain: gossipsub @@ -147,10 +148,27 @@ Topics are plain UTF-8 strings and are encoded on the wire as determined by prot Topic strings have form: `/eth2/TopicName/TopicEncoding`. 
This defines both the type of data being sent on the topic and how the data field of the message is encoded. (Further details can be found in [Messages](#Messages)). -There are two main topics used to propagate attestations and beacon blocks to all nodes on the network. Their `TopicName`s are: +There are two main topics used to propagate aggregate attestations and beacon blocks to all nodes on the network. Their `TopicName`s are: - `beacon_block` - This topic is used solely for propagating new beacon blocks to all nodes on the networks. Blocks are sent in their entirety. Clients MUST validate the block proposer signature before forwarding it across the network. -- `beacon_attestation` - This topic is used to propagate aggregated attestations (in their entirety) to subscribing nodes (typically block proposers) to be included in future blocks. Clients MUST validate that the block being voted for passes validation before forwarding the attestation on the network (TODO: [additional validations](https://github.com/ethereum/eth2.0-specs/issues/1332)). +- `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `AggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `aggregate_and_proof` on the network. + - Clients MUST validate that the block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation. + - Clients MUST validate that `aggregate_and_proof.aggregate.data.slot` is + within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. + - Clients MUST validate that the validator index is within the aggregate's + committee -- i.e. `aggregate_and_proof.index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`. + - Clients MUST validate that `aggregate_and_proof.selection_proof` selects + the validator as an aggregator for the slot -- i.e. 
`is_aggregator(state, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`. + - Clients MUST validate that the `aggregate_and_proof.selection_proof` is a + valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.index`. + - Clients MUST validate that the signature of `aggregate_and_proof.aggregate`. + +Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `TopicName`s are: + +- `index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` - This topic is used to propagate unaggregated attestations to subsections of the network (typically beacon and persistent committees) to be aggregated before being passed to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the network. + - Clients MUST validate that the block being voted for (`attestation.data.beacon_block_root`) passes validation. + - Clients MUST validate that `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. + - Clients MUST validate the signature of `attestation`. Additional topics are used to propagate lower frequency validator messages. Their `TopicName`s are: @@ -160,13 +178,15 @@ Additional topics are used to propagate lower frequency validator messages. Thei #### Interop -Unaggregated and aggregated attestations from all shards are sent to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing. +Unaggregated and aggregated attestations from all shards are sent as `Attestation` to the `beacon_aggregate_and_proof` topic. Clients are not required to publish aggregate attestations but must be able to process them. 
All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing. #### Mainnet Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. The `CommitteeIndex`, `index`, is assigned to the topic: `index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`. -Unaggregated attestations are sent to the subnet topic, `index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`. Aggregated attestations are sent to the `beacon_attestation` topic. +Unaggregated attestations are sent to the subnet topic, `index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` as `Attestation`s. + +Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s. ### Messages @@ -180,7 +200,7 @@ The payload is carried in the `data` field of a gossipsub message, and varies de | Topic | Message Type | |------------------------------|-------------------| | beacon_block | BeaconBlock | -| beacon_attestation | Attestation | +| beacon_aggregate_and_proof | Attestation | | shard{N}\_beacon_attestation | Attestation | | voluntary_exit | VoluntaryExit | | proposer_slashing | ProposerSlashing | @@ -200,7 +220,7 @@ Topics are post-fixed with an encoding. Encodings define how the payload of a go #### Mainnet -- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). Example: The beacon attestation topic string is `/eth2/beacon_attestation/ssz_snappy`, and the data field of a gossipsub message is an `Attestation` that has been SSZ-encoded and then compressed with Snappy. +- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). 
Example: The beacon aggregate attestation topic string is `/eth2/beacon_aggregate_and_proof/ssz_snappy`, and the data field of a gossipsub message is an `AggregateAndProof` that has been SSZ-encoded and then compressed with Snappy. Implementations MUST use a single encoding. Changing an encoding will require coordination between participating implementations. diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 27f0037fa..294145c0e 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -354,31 +354,46 @@ Finally, the validator broadcasts `attestation` to the associated attestation su ## Attestation aggregation -The validator is expected to locally aggregate attestations with a similar `attestation_data` to their constructed `attestation` for the assigned `slot`. +Some validators are selected to locally aggregate attestations with a similar `attestation_data` to their constructed `attestation` for the assigned `slot`. -The validator then _might_ broadcast their best aggregate to a global attestation channel two-thirds of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`. +### Aggregation selection -#### Construct aggregate +A validator is selected to aggregate based upon the following -Collect `attestations` seen via gossip during the `slot` that have an equivalent `attestation_data` to that constructed by the validator. +```python +def slot_signature(slot: Slot, privkey: int) -> BLSSignature: + domain = get_domain(state, DOMAIN_BEACON_AGGREGATOR, attestation.data.slot) + return bls_sign(prvkey, hash_tree_root(slot), domain) +``` -The validator should create an `aggregate_attestation` with the following fields. 
+```python +def is_aggregator(state: BeaconState, slot: Slot, committee_index: CommitteeIndex, slot_signature: BLSSignature) -> bool: + committee = get_beacon_committee(state, slot, committee_index) + modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE) + return bytes_to_int(hash(slot_signature)[0:8]) % modulo == 0 +``` -##### Data +### Construct aggregate + +If the validator is selected to aggregate (`is_aggregator()`), they construct an aggregate attestation via the following. + +Collect `attestations` seen via gossip during the `slot` that have an equivalent `attestation_data` to that constructed by the validator, and create an `aggregate_attestation` with the following fields. + +#### Data Set `aggregate_attestation.data = attestation_data` where `attestation_data` is the `AttestationData` object that is the same for each individual attestation being aggregated. -##### Aggregation bits +#### Aggregation bits Let `aggregate_attestation.aggregation_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` of length `len(committee)`, where each bit set from each individual attestation is set to `0b1`. -##### Custody bits +#### Custody bits - Let `aggregate_attestation.custody_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` filled with zeros of length `len(committee)`. *Note*: This is a stub for Phase 0. -##### Aggregate signature +#### Aggregate signature Set `aggregate_attestation.signature = aggregate_signature` where `aggregate_signature` is obtained from: @@ -389,27 +404,23 @@ def get_aggregate_signature(attestations: Attestation) -> BLSSignature: return aggregate_signature ``` -#### Broadcast aggregate +### Broadcast aggregate -Starting two-thirds of the way into the slot, the validator begins running the following routine to decide if their best aggregate should be broadcast to the global attestation channel (`beacon_attestation`). 
`seen()` is defined as having seen an aggregate +If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate to the global aggregate channel (`beacon_aggregate_and_proof`) two-thirds of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`. + +Aggregate attestations are broadcast as `AggregateAndProof` objects to prove to the gossip channel that the validator has been selected as an aggregator. ```python -def should_broadcast_aggregate(aggregate_attestation: Attestation, index: ValidatorIndex) -> bool: - if seen(index): - return False - if random.randrange(SIDED_DIE) == 0: - return True - time.sleep(WAIT // 1000) +class AggregateAndProof(Container): + index: ValidatorIndex + selection_proof: BLSSignature + aggregate: Attestation ``` -Define `seen_better(aggregate_attestation, attestations)` as your aggregate -having more attestations included than any in the attestations you've seen on -the network so far. Could also make it better by _some_ amount. - -```python -def have_better_aggregate(aggregate_attestation, attestations): - -``` +Where +* `index` is the validator's `validator_index`. +* `selection_proof` is the signature of the slot (`slot_signature()`). +* `aggregate` is the `aggregate_attestation` constructed in the previous section. 
## How to avoid slashing From 7fcb60795bcc7b7eeaa8652397ff8c54fc7c5ed7 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 23 Oct 2019 17:29:53 +0900 Subject: [PATCH 221/250] lint --- scripts/build_spec.py | 1 + specs/networking/p2p-interface.md | 8 ++++---- specs/validator/0_beacon-chain-validator.md | 13 ++++++++----- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/scripts/build_spec.py b/scripts/build_spec.py index c47249fe4..e05907014 100644 --- a/scripts/build_spec.py +++ b/scripts/build_spec.py @@ -28,6 +28,7 @@ from eth2spec.utils.ssz.ssz_typing import ( Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector, ) from eth2spec.utils.bls import ( + bls_aggregate_signatures, bls_aggregate_pubkeys, bls_verify, bls_verify_multiple, diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 68a46c504..d3a8c156b 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -116,7 +116,7 @@ This section outlines constants that are used in this spec. | `REQ_RESP_MAX_SIZE` | `TODO` | The maximum size of uncompressed req/resp messages that clients will allow. | | `SSZ_MAX_LIST_SIZE` | `TODO` | The maximum size of SSZ-encoded variable lists. | | `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum size of uncompressed gossip messages. | -| `SHARD_SUBNET_COUNT` | `TODO` | The number of shard subnets used in the gossipsub protocol. | +| `ATTESTATION_SUBNET_COUNT` | `TODO` | The number of shard subnets used in the gossipsub protocol. | | `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). | | `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. | | `ATTESTATION_PROPAGATION_SLOT_RANGE` | `4` | The maximum number of slots during which an attestation can be propagated. 
| @@ -200,8 +200,8 @@ The payload is carried in the `data` field of a gossipsub message, and varies de | Topic | Message Type | |------------------------------|-------------------| | beacon_block | BeaconBlock | -| beacon_aggregate_and_proof | Attestation | -| shard{N}\_beacon_attestation | Attestation | +| beacon_aggregate_and_proof | AggregateAndProof | +| index{N}\_beacon_attestation | Attestation | | voluntary_exit | VoluntaryExit | | proposer_slashing | ProposerSlashing | | attester_slashing | AttesterSlashing | @@ -648,7 +648,7 @@ No security or privacy guarantees are lost as a result of choosing plaintext top Furthermore, the Eth 2.0 topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. -### Why are there `SHARD_SUBNET_COUNT` subnets, and why is this not defined? +### Why are there `ATTESTATION_SUBNET_COUNT` subnets, and why is this not defined? Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel. The exact grouping will be dependent on more involved network tests. This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet). 
diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 294145c0e..3b4085ff3 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -78,6 +78,7 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph | Name | Value | Unit | Duration | | - | - | :-: | :-: | | `ETH1_FOLLOW_DISTANCE` | `2**10` (= 1,024) | blocks | ~4 hours | +| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | | ## Becoming a validator @@ -361,14 +362,14 @@ Some validators are selected to locally aggregate attestations with a similar `a A validator is selected to aggregate based upon the following ```python -def slot_signature(slot: Slot, privkey: int) -> BLSSignature: - domain = get_domain(state, DOMAIN_BEACON_AGGREGATOR, attestation.data.slot) - return bls_sign(prvkey, hash_tree_root(slot), domain) +def slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature: + domain = get_domain(state, DOMAIN_BEACON_ATTESTER, compute_epoch_at_slot(slot)) + return bls_sign(privkey, hash_tree_root(slot), domain) ``` ```python -def is_aggregator(state: BeaconState, slot: Slot, committee_index: CommitteeIndex, slot_signature: BLSSignature) -> bool: - committee = get_beacon_committee(state, slot, committee_index) +def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool: + committee = get_beacon_committee(state, slot, index) modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE) return bytes_to_int(hash(slot_signature)[0:8]) % modulo == 0 ``` @@ -410,6 +411,8 @@ If the validator is selected to aggregate (`is_aggregator`), then they broadcast Aggregate attestations are broadcast as `AggregateAndProof` objects to prove to the gossip channel that the validator has been selected as an aggregator. 
+### `AggregateAndProof` + ```python class AggregateAndProof(Container): index: ValidatorIndex From cf1d855be666b8f0221e4ca3aa42e9574a676ea9 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 23 Oct 2019 17:37:32 +0900 Subject: [PATCH 222/250] lint --- specs/networking/p2p-interface.md | 2 +- specs/validator/0_beacon-chain-validator.md | 26 ++++++++++----------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index d3a8c156b..6b7b84b64 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -178,7 +178,7 @@ Additional topics are used to propagate lower frequency validator messages. Thei #### Interop -Unaggregated and aggregated attestations from all shards are sent as `Attestation` to the `beacon_aggregate_and_proof` topic. Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing. +Unaggregated and aggregated attestations from all shards are sent as `Attestation`s to the `beacon_aggregate_and_proof` topic. Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing. 
#### Mainnet diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 3b4085ff3..b477c5156 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -54,7 +54,6 @@ - [Custody bits](#custody-bits-1) - [Aggregate signature](#aggregate-signature-1) - [Broadcast aggregate](#broadcast-aggregate) - - [How to avoid slashing](#how-to-avoid-slashing) - [Proposer slashing](#proposer-slashing) - [Attester slashing](#attester-slashing) @@ -353,13 +352,13 @@ def get_signed_attestation_data(state: BeaconState, attestation: IndexedAttestat Finally, the validator broadcasts `attestation` to the associated attestation subnet -- the `index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` pubsub topic. -## Attestation aggregation +### Attestation aggregation Some validators are selected to locally aggregate attestations with a similar `attestation_data` to their constructed `attestation` for the assigned `slot`. -### Aggregation selection +#### Aggregation selection -A validator is selected to aggregate based upon the following +A validator is selected to aggregate based upon the return value of `is_aggregator()`. ```python def slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature: @@ -374,44 +373,43 @@ def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_si return bytes_to_int(hash(slot_signature)[0:8]) % modulo == 0 ``` -### Construct aggregate +#### Construct aggregate If the validator is selected to aggregate (`is_aggregator()`), they construct an aggregate attestation via the following. Collect `attestations` seen via gossip during the `slot` that have an equivalent `attestation_data` to that constructed by the validator, and create an `aggregate_attestation` with the following fields. 
-#### Data +##### Data Set `aggregate_attestation.data = attestation_data` where `attestation_data` is the `AttestationData` object that is the same for each individual attestation being aggregated. -#### Aggregation bits +##### Aggregation bits Let `aggregate_attestation.aggregation_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` of length `len(committee)`, where each bit set from each individual attestation is set to `0b1`. -#### Custody bits +##### Custody bits - Let `aggregate_attestation.custody_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` filled with zeros of length `len(committee)`. *Note*: This is a stub for Phase 0. -#### Aggregate signature +##### Aggregate signature Set `aggregate_attestation.signature = aggregate_signature` where `aggregate_signature` is obtained from: ```python -def get_aggregate_signature(attestations: Attestation) -> BLSSignature: +def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature: signatures = [attestation.signature for attestation in attestations] - aggregate_signature = bls_aggregate_signatures(signatures) - return aggregate_signature + return bls_aggregate_signatures(signatures) ``` -### Broadcast aggregate +#### Broadcast aggregate If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate to the global aggregate channel (`beacon_aggregate_and_proof`) two-thirds of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`. Aggregate attestations are broadcast as `AggregateAndProof` objects to prove to the gossip channel that the validator has been selected as an aggregator. 
-### `AggregateAndProof` +##### `AggregateAndProof` ```python class AggregateAndProof(Container): From ce1ceee9af5c638d70295b2688321ef5f8caa953 Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Wed, 23 Oct 2019 17:23:46 +0800 Subject: [PATCH 223/250] Fix argument names, PR suggestion from @wemeetagain Co-Authored-By: Cayman --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index d8b8c2464..24d3b637b 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -329,7 +329,7 @@ def verify_merkle_multiproof(leaves: Sequence[Hash], proof: Sequence[Hash], indices: Sequence[GeneralizedIndex], root: Hash) -> bool: - return calculate_multi_merkle_root(leaf, proof, index) == root + return calculate_multi_merkle_root(leaves, proof, indices) == root ``` Note that the single-item proof is a special case of a multi-item proof; a valid single-item proof verifies correctly when put into the multi-item verification function (making the natural trivial changes to input arguments, `index -> [index]` and `leaf -> [leaf]`). Note also that `calculate_merkle_root` and `calculate_multi_merkle_root` can be used independently to compute the new Merkle root of a proof with leaves updated. From 2be850c4284d92c26e06f66834b89043549d73f2 Mon Sep 17 00:00:00 2001 From: Sly Gryphon Date: Wed, 23 Oct 2019 20:53:38 +1000 Subject: [PATCH 224/250] Add details for an SSZ project I have just published a first version (basic serialization & Merkleization only) of --- specs/simple-serialize.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 5e17962d1..8a8137f43 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -247,4 +247,5 @@ We similarly define "summary types" and "expansion types". 
For example, [`Beacon | Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/go-ssz](https://github.com/prysmaticlabs/go-ssz) | | Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) | | C# | | Jordan Andrews | [https://github.com/codingupastorm/csharp-ssz](https://github.com/codingupastorm/csharp-ssz) | +| C# | Cortex | Sly Gryphon | [https://www.nuget.org/packages/Cortex.SimpleSerialize](https://www.nuget.org/packages/Cortex.SimpleSerialize) | | C++ | | Jiyun Kim | [https://github.com/NAKsir-melody/cpp_ssz](https://github.com/NAKsir-melody/cpp_ssz) | From 1cda8c8a8b1f3e81c55d0e46b0ba16e3b83a898d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 24 Oct 2019 10:08:23 +0900 Subject: [PATCH 225/250] aggregation cleanups --- specs/networking/p2p-interface.md | 16 +++++++++++----- specs/validator/0_beacon-chain-validator.md | 2 ++ 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 6b7b84b64..7f9977be3 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -116,10 +116,10 @@ This section outlines constants that are used in this spec. | `REQ_RESP_MAX_SIZE` | `TODO` | The maximum size of uncompressed req/resp messages that clients will allow. | | `SSZ_MAX_LIST_SIZE` | `TODO` | The maximum size of SSZ-encoded variable lists. | | `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum size of uncompressed gossip messages. | -| `ATTESTATION_SUBNET_COUNT` | `TODO` | The number of shard subnets used in the gossipsub protocol. | +| `ATTESTATION_SUBNET_COUNT` | `64` | The number of shard subnets used in the gossipsub protocol. | | `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). | | `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. 
| -| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `4` | The maximum number of slots during which an attestation can be propagated. | +| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. | ## The gossip domain: gossipsub @@ -165,7 +165,7 @@ There are two main topics used to propagate aggregate attestations and beacon bl Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `TopicName`s are: -- `index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` - This topic is used to propagate unaggregated attestations to subsections of the network (typically beacon and persistent committees) to be aggregated before being passed to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the network. +- `index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` - These topics are used to propagate unaggregated attestations to subsections of the network (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the network. - Clients MUST validate that the block being voted for (`attestation.data.beacon_block_root`) passes validation. - Clients MUST validate that `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. - Clients MUST validate the signature of `attestation`. @@ -648,9 +648,15 @@ No security or privacy guarantees are lost as a result of choosing plaintext top Furthermore, the Eth 2.0 topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. -### Why are there `ATTESTATION_SUBNET_COUNT` subnets, and why is this not defined? +### Why are there `ATTESTATION_SUBNET_COUNT` subnets? 
-Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel. The exact grouping will be dependent on more involved network tests. This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet).
+Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel. The exact grouping will be dependent on more involved network tests. This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet). The value is currently set to be equal to `MAX_COMMITTEES_PER_SLOT` until network tests indicate otherwise.
+
+### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?
+
+Attestations can only be included on chain within an epoch's worth of slots so this is the natural cutoff. There is no utility to the chain to broadcast attestations older than one epoch, and because validators have a chance to make a new attestation each epoch, there is minimal utility to the fork choice to relay old attestations as a new latest message can soon be created by each validator.
+
+In addition to this, relaying attestations requires validating the attestation in the context of the `state` during which it was created. Thus, validating arbitrarily old attestations would put additional requirements on which states need to be readily available to the node. This would result in a higher resource burden and could serve as a DoS vector.
 
 ### Why are we sending entire objects in the pubsub and not just hashes?
 
diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index b477c5156..a79b178b3 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -48,12 +48,14 @@ - [Aggregate signature](#aggregate-signature) - [Broadcast attestation](#broadcast-attestation) - [Attestation aggregation](#attestation-aggregation) + - [Aggregation selection](#aggregation-selection) - [Construct aggregate](#construct-aggregate) - [Data](#data-1) - [Aggregation bits](#aggregation-bits-1) - [Custody bits](#custody-bits-1) - [Aggregate signature](#aggregate-signature-1) - [Broadcast aggregate](#broadcast-aggregate) + - [`AggregateAndProof`](#aggregateandproof) - [How to avoid slashing](#how-to-avoid-slashing) - [Proposer slashing](#proposer-slashing) - [Attester slashing](#attester-slashing) From 7804f94279294275579c69cf99bae0c6d998413d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 24 Oct 2019 10:23:37 +0900 Subject: [PATCH 226/250] explain why aggregate_and_proof for aggregate gossip in p2p faq --- specs/networking/p2p-interface.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 7f9977be3..4f8829cd8 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -11,7 +11,7 @@ It consists of four main sections: ## Table of contents - + @@ -658,6 +658,10 @@ Attestations can only be included on chain within an epoch's worth of slots so t In addition to this, relaying attestations requires validating the attestation in the context of the `state` during which it was created. Thus, validating arbitrarily old attestations would put additional requirements on which states need to be readily available to the node. This would result in a higher resource burden and could serve as a DoS vector. 
+### Why are aggregate attestations broadcast to the global topic as `AggregateAndProof`s rather than just as `Attestation`s? + +The dominant strategy for an individual validator is to always broadcast an aggregate containing their own attestation to the global channel to ensure that proposers see their attestation for inclusion. Using a private selection criteria and providing this proof of selection alongside the gossiped aggregate ensures that this dominant strategy will not flood the global channel. + ### Why are we sending entire objects in the pubsub and not just hashes? Entire objects should be sent to get the greatest propagation speeds. If only hashes are sent, then block and attestation propagation is dependent on recursive requests from each peer. In a hash-only scenario, peers could receive hashes without knowing who to download the actual contents from. Sending entire objects ensures that they get propagated through the entire network. From 69730cc2675cff10726ee424b42e6ce15c848743 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 24 Oct 2019 16:12:10 +0900 Subject: [PATCH 227/250] remove transfers entirely from phase 0 --- specs/core/0_beacon-chain.md | 51 --- .../pyspec/eth2spec/test/helpers/transfers.py | 53 --- .../block_processing/test_process_transfer.py | 368 ------------------ .../eth2spec/test/sanity/test_blocks.py | 34 -- 4 files changed, 506 deletions(-) delete mode 100644 test_libs/pyspec/eth2spec/test/helpers/transfers.py delete mode 100644 test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 24f944136..1819310a1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -39,7 +39,6 @@ - [`Attestation`](#attestation) - [`Deposit`](#deposit) - [`VoluntaryExit`](#voluntaryexit) - - [`Transfer`](#transfer) - [Beacon blocks](#beacon-blocks) - [`BeaconBlockBody`](#beaconblockbody) - [`BeaconBlock`](#beaconblock) @@ 
-115,7 +114,6 @@ - [Attestations](#attestations) - [Deposits](#deposits) - [Voluntary exits](#voluntary-exits) - - [Transfers](#transfers) @@ -242,7 +240,6 @@ The following values are (non-configurable) constants used throughout the specif | `MAX_ATTESTATIONS` | `2**7` (= 128) | | `MAX_DEPOSITS` | `2**4` (= 16) | | `MAX_VOLUNTARY_EXITS` | `2**4` (= 16) | -| `MAX_TRANSFERS` | `0` | ### Domain types @@ -255,7 +252,6 @@ The following types are defined, mapping into `DomainType` (little endian): | `DOMAIN_RANDAO` | `2` | | `DOMAIN_DEPOSIT` | `3` | | `DOMAIN_VOLUNTARY_EXIT` | `4` | -| `DOMAIN_TRANSFER` | `5` | ## Containers @@ -424,19 +420,6 @@ class VoluntaryExit(Container): signature: BLSSignature ``` -#### `Transfer` - -```python -class Transfer(Container): - sender: ValidatorIndex - recipient: ValidatorIndex - amount: Gwei - fee: Gwei - slot: Slot # Slot at which transfer must be processed - pubkey: BLSPubkey # Withdrawal pubkey - signature: BLSSignature # Signature checked against withdrawal pubkey -``` - ### Beacon blocks #### `BeaconBlockBody` @@ -452,7 +435,6 @@ class BeaconBlockBody(Container): attestations: List[Attestation, MAX_ATTESTATIONS] deposits: List[Deposit, MAX_DEPOSITS] voluntary_exits: List[VoluntaryExit, MAX_VOLUNTARY_EXITS] - transfers: List[Transfer, MAX_TRANSFERS] ``` #### `BeaconBlock` @@ -1436,8 +1418,6 @@ def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None: def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # Verify that outstanding deposits are processed up to the maximum number of deposits assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) - # Verify that there are no duplicate transfers - assert len(body.transfers) == len(set(body.transfers)) for operations, function in ( (body.proposer_slashings, process_proposer_slashing), @@ -1445,7 +1425,6 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: (body.attestations, 
process_attestation), (body.deposits, process_deposit), (body.voluntary_exits, process_voluntary_exit), - (body.transfers, process_transfer), # @process_shard_receipt_proofs ): for operation in operations: @@ -1584,33 +1563,3 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: # Initiate exit initiate_validator_exit(state, exit.validator_index) ``` - -##### Transfers - -```python -def process_transfer(state: BeaconState, transfer: Transfer) -> None: - # Verify the balance the covers amount and fee (with overflow protection) - assert state.balances[transfer.sender] >= max(transfer.amount + transfer.fee, transfer.amount, transfer.fee) - # A transfer is valid in only one slot - assert state.slot == transfer.slot - # Sender must satisfy at least one of the following: - assert ( - # 1) Never have been eligible for activation - state.validators[transfer.sender].activation_eligibility_epoch == FAR_FUTURE_EPOCH or - # 2) Be withdrawable - get_current_epoch(state) >= state.validators[transfer.sender].withdrawable_epoch or - # 3) Have a balance of at least MAX_EFFECTIVE_BALANCE after the transfer - state.balances[transfer.sender] >= transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE - ) - # Verify that the pubkey is valid - assert state.validators[transfer.sender].withdrawal_credentials == BLS_WITHDRAWAL_PREFIX + hash(transfer.pubkey)[1:] - # Verify that the signature is valid - assert bls_verify(transfer.pubkey, signing_root(transfer), transfer.signature, get_domain(state, DOMAIN_TRANSFER)) - # Process the transfer - decrease_balance(state, transfer.sender, transfer.amount + transfer.fee) - increase_balance(state, transfer.recipient, transfer.amount) - increase_balance(state, get_beacon_proposer_index(state), transfer.fee) - # Verify balances are not dust - assert not (0 < state.balances[transfer.sender] < MIN_DEPOSIT_AMOUNT) - assert not (0 < state.balances[transfer.recipient] < MIN_DEPOSIT_AMOUNT) -``` diff --git 
a/test_libs/pyspec/eth2spec/test/helpers/transfers.py b/test_libs/pyspec/eth2spec/test/helpers/transfers.py deleted file mode 100644 index 3d3b0f4e3..000000000 --- a/test_libs/pyspec/eth2spec/test/helpers/transfers.py +++ /dev/null @@ -1,53 +0,0 @@ -from eth2spec.test.helpers.keys import pubkeys, privkeys -from eth2spec.test.helpers.state import get_balance -from eth2spec.utils.bls import bls_sign -from eth2spec.utils.ssz.ssz_impl import signing_root - - -def get_valid_transfer(spec, state, slot=None, sender_index=None, - recipient_index=None, amount=None, fee=None, signed=False): - if slot is None: - slot = state.slot - current_epoch = spec.get_current_epoch(state) - if sender_index is None: - sender_index = spec.get_active_validator_indices(state, current_epoch)[-1] - if recipient_index is None: - recipient_index = spec.get_active_validator_indices(state, current_epoch)[0] - transfer_pubkey = pubkeys[-1] - transfer_privkey = privkeys[-1] - - if fee is None: - fee = get_balance(state, sender_index) // 32 - if amount is None: - amount = get_balance(state, sender_index) - fee - - transfer = spec.Transfer( - sender=sender_index, - recipient=recipient_index, - amount=amount, - fee=fee, - slot=slot, - pubkey=transfer_pubkey, - ) - if signed: - sign_transfer(spec, state, transfer, transfer_privkey) - - # ensure withdrawal_credentials reproducible - state.validators[transfer.sender].withdrawal_credentials = ( - spec.BLS_WITHDRAWAL_PREFIX + spec.hash(transfer.pubkey)[1:] - ) - - return transfer - - -def sign_transfer(spec, state, transfer, privkey): - transfer.signature = bls_sign( - message_hash=signing_root(transfer), - privkey=privkey, - domain=spec.get_domain( - state=state, - domain_type=spec.DOMAIN_TRANSFER, - message_epoch=spec.get_current_epoch(state), - ) - ) - return transfer diff --git a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py b/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py deleted file 
mode 100644 index 1b839562e..000000000 --- a/test_libs/pyspec/eth2spec/test/phase_0/block_processing/test_process_transfer.py +++ /dev/null @@ -1,368 +0,0 @@ -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases -from eth2spec.test.helpers.state import next_epoch -from eth2spec.test.helpers.block import apply_empty_block -from eth2spec.test.helpers.transfers import get_valid_transfer, sign_transfer - - -def run_transfer_processing(spec, state, transfer, valid=True): - """ - Run ``process_transfer``, yielding: - - pre-state ('pre') - - transfer ('transfer') - - post-state ('post'). - If ``valid == False``, run expecting ``AssertionError`` - """ - - yield 'pre', state - yield 'transfer', transfer - - if not valid: - expect_assertion_error(lambda: spec.process_transfer(state, transfer)) - yield 'post', None - return - - proposer_index = spec.get_beacon_proposer_index(state) - pre_transfer_sender_balance = state.balances[transfer.sender] - pre_transfer_recipient_balance = state.balances[transfer.recipient] - pre_transfer_proposer_balance = state.balances[proposer_index] - - spec.process_transfer(state, transfer) - yield 'post', state - - sender_balance = state.balances[transfer.sender] - recipient_balance = state.balances[transfer.recipient] - assert sender_balance == pre_transfer_sender_balance - transfer.amount - transfer.fee - assert recipient_balance == pre_transfer_recipient_balance + transfer.amount - assert state.balances[proposer_index] == pre_transfer_proposer_balance + transfer.fee - - -@with_all_phases -@spec_state_test -def test_success_non_activated(spec, state): - transfer = get_valid_transfer(spec, state, signed=True) - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer) - - -@with_all_phases -@spec_state_test -def test_success_withdrawable(spec, state): - 
next_epoch(spec, state) - apply_empty_block(spec, state) - - transfer = get_valid_transfer(spec, state, signed=True) - - # withdrawable_epoch in past so can transfer - state.validators[transfer.sender].withdrawable_epoch = spec.get_current_epoch(state) - 1 - - yield from run_transfer_processing(spec, state, transfer) - - -@with_all_phases -@spec_state_test -def test_success_active_above_max_effective(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0, signed=True) - - yield from run_transfer_processing(spec, state, transfer) - - -@with_all_phases -@spec_state_test -def test_success_active_above_max_effective_fee(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=0, fee=1, signed=True) - - yield from run_transfer_processing(spec, state, transfer) - - -@with_all_phases -@spec_state_test -@always_bls -def test_invalid_signature(spec, state): - transfer = get_valid_transfer(spec, state) - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_active_but_transfer_past_effective_balance(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - amount = spec.MAX_EFFECTIVE_BALANCE // 32 - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE - transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=amount, fee=0, signed=True) - - yield from run_transfer_processing(spec, state, transfer, False) - - 
-@with_all_phases -@spec_state_test -def test_incorrect_slot(spec, state): - transfer = get_valid_transfer(spec, state, slot=state.slot + 1, signed=True) - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_transfer_clean(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT - transfer = get_valid_transfer(spec, state, sender_index=sender_index, - amount=spec.MIN_DEPOSIT_AMOUNT, fee=0, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer) - - -@with_all_phases -@spec_state_test -def test_transfer_clean_split_to_fee(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT - transfer = get_valid_transfer(spec, state, sender_index=sender_index, - amount=spec.MIN_DEPOSIT_AMOUNT // 2, fee=spec.MIN_DEPOSIT_AMOUNT // 2, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_for_fee(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT - transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=0, fee=1, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from 
run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_for_fee_result_full(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - transfer = get_valid_transfer(spec, state, sender_index=sender_index, - amount=0, fee=state.balances[sender_index] + 1, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_for_amount_result_dust(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT - transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_for_amount_result_full(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - transfer = get_valid_transfer(spec, state, sender_index=sender_index, - amount=state.balances[sender_index] + 1, fee=0, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_for_combined_result_dust(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - # Enough to pay fee without dust, and amount without dust, but not both. 
- state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT + 1 - transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=1, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_for_combined_result_full(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - # Enough to pay fee fully without dust left, and amount fully without dust left, but not both. - state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT * 2 + 1 - transfer = get_valid_transfer(spec, state, sender_index=sender_index, - amount=spec.MIN_DEPOSIT_AMOUNT + 1, - fee=spec.MIN_DEPOSIT_AMOUNT + 1, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_for_combined_big_amount(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - # Enough to pay fee fully without dust left, and amount fully without dust left, but not both. - # Try to create a dust balance (off by 1) with combination of fee and amount. 
- state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT * 2 + 1 - transfer = get_valid_transfer(spec, state, sender_index=sender_index, - amount=spec.MIN_DEPOSIT_AMOUNT + 1, fee=1, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_for_combined_big_fee(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - # Enough to pay fee fully without dust left, and amount fully without dust left, but not both. - # Try to create a dust balance (off by 1) with combination of fee and amount. - state.balances[sender_index] = spec.MIN_DEPOSIT_AMOUNT * 2 + 1 - transfer = get_valid_transfer(spec, state, sender_index=sender_index, - amount=1, fee=spec.MIN_DEPOSIT_AMOUNT + 1, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_off_by_1_fee(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - # Enough to pay fee fully without dust left, and amount fully without dust left, but not both. - # Try to print money by using the full balance as amount, plus 1 for fee. 
- transfer = get_valid_transfer(spec, state, sender_index=sender_index, - amount=state.balances[sender_index], fee=1, signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_off_by_1_amount(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - # Enough to pay fee fully without dust left, and amount fully without dust left, but not both. - # Try to print money by using the full balance as fee, plus 1 for amount. - transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, - fee=state.balances[sender_index], signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_insufficient_balance_duplicate_as_fee_and_amount(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - # Enough to pay fee fully without dust left, and amount fully without dust left, but not both. - # Try to print money by using the full balance, twice. 
- transfer = get_valid_transfer(spec, state, sender_index=sender_index, - amount=state.balances[sender_index], - fee=state.balances[sender_index], signed=True) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_no_dust_sender(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - balance = state.balances[sender_index] - transfer = get_valid_transfer( - spec, - state, - sender_index=sender_index, - amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, - fee=0, - signed=True, - ) - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_no_dust_recipient(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0, signed=True) - state.balances[transfer.recipient] = 0 - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_non_existent_sender(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - transfer = get_valid_transfer(spec, state, sender_index=sender_index, amount=1, fee=0) - transfer.sender = len(state.validators) - sign_transfer(spec, state, transfer, 42) # mostly valid signature, but sender won't exist, use bogus key. 
- - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_non_existent_recipient(spec, state): - sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1 - transfer = get_valid_transfer(spec, state, sender_index=sender_index, - recipient_index=len(state.validators), amount=1, fee=0, signed=True) - - yield from run_transfer_processing(spec, state, transfer, False) - - -@with_all_phases -@spec_state_test -def test_invalid_pubkey(spec, state): - transfer = get_valid_transfer(spec, state, signed=True) - state.validators[transfer.sender].withdrawal_credentials = spec.Hash() - - # un-activate so validator can transfer - state.validators[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - yield from run_transfer_processing(spec, state, transfer, False) diff --git a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py index 886f9bf6a..5e919120d 100644 --- a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py +++ b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py @@ -378,40 +378,6 @@ def test_voluntary_exit(spec, state): assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH -# @with_all_phases -# @spec_state_test -# def test_transfer(spec, state): - # overwrite default 0 to test - # spec.MAX_TRANSFERS = 1 - - # sender_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] - # amount = get_balance(state, sender_index) - - # transfer = get_valid_transfer(spec, state, state.slot + 1, sender_index, amount, signed=True) - # recipient_index = transfer.recipient - # pre_transfer_recipient_balance = get_balance(state, recipient_index) - - # un-activate so validator can transfer - # state.validators[sender_index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH - - # yield 'pre', state - - # 
Add to state via block transition - # block = build_empty_block_for_next_slot(spec, state) - # block.body.transfers.append(transfer) - # sign_block(spec, state, block) - - # state_transition_and_sign_block(spec, state, block) - - # yield 'blocks', [block] - # yield 'post', state - - # sender_balance = get_balance(state, sender_index) - # recipient_balance = get_balance(state, recipient_index) - # assert sender_balance == 0 - # assert recipient_balance == pre_transfer_recipient_balance + amount - - @with_all_phases @spec_state_test def test_balance_driven_status_transitions(spec, state): From 70c2063cfa4bb5cb21cc1135c4094c5ef85fa083 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 24 Oct 2019 21:49:07 +0900 Subject: [PATCH 228/250] PR feedback --- configs/mainnet.yaml | 3 --- configs/minimal.yaml | 3 --- specs/core/0_beacon-chain.md | 4 ++-- specs/test_formats/operations/README.md | 1 - test_generators/operations/main.py | 5 ----- 5 files changed, 2 insertions(+), 14 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 45d5b6894..af446d575 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -116,8 +116,6 @@ MAX_ATTESTATIONS: 128 MAX_DEPOSITS: 16 # 2**4 (= 16) MAX_VOLUNTARY_EXITS: 16 -# Originally 2**4 (= 16), disabled for now. -MAX_TRANSFERS: 0 # Signature domains @@ -127,7 +125,6 @@ DOMAIN_BEACON_ATTESTER: 0x01000000 DOMAIN_RANDAO: 0x02000000 DOMAIN_DEPOSIT: 0x03000000 DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_TRANSFER: 0x05000000 DOMAIN_CUSTODY_BIT_CHALLENGE: 0x06000000 DOMAIN_SHARD_PROPOSER: 0x80000000 DOMAIN_SHARD_ATTESTER: 0x81000000 diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 75b93f66a..53599e83a 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -118,8 +118,6 @@ MAX_ATTESTATIONS: 128 MAX_DEPOSITS: 16 # 2**4 (= 16) MAX_VOLUNTARY_EXITS: 16 -# Originally 2**4 (= 16), disabled for now. 
-MAX_TRANSFERS: 0 # Signature domains @@ -129,7 +127,6 @@ DOMAIN_BEACON_ATTESTER: 0x01000000 DOMAIN_RANDAO: 0x02000000 DOMAIN_DEPOSIT: 0x03000000 DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_TRANSFER: 0x05000000 DOMAIN_CUSTODY_BIT_CHALLENGE: 0x06000000 DOMAIN_SHARD_PROPOSER: 0x80000000 DOMAIN_SHARD_ATTESTER: 0x81000000 diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1819310a1..394a6d783 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -285,14 +285,14 @@ class Checkpoint(Container): ```python class Validator(Container): pubkey: BLSPubkey - withdrawal_credentials: Hash # Commitment to pubkey for withdrawals and transfers + withdrawal_credentials: Hash # Commitment to pubkey for withdrawals effective_balance: Gwei # Balance at stake slashed: boolean # Status epochs activation_eligibility_epoch: Epoch # When criteria for activation were met activation_epoch: Epoch exit_epoch: Epoch - withdrawable_epoch: Epoch # When validator can withdraw or transfer funds + withdrawable_epoch: Epoch # When validator can withdraw funds ``` #### `AttestationData` diff --git a/specs/test_formats/operations/README.md b/specs/test_formats/operations/README.md index be009486d..f1ec0429a 100644 --- a/specs/test_formats/operations/README.md +++ b/specs/test_formats/operations/README.md @@ -46,7 +46,6 @@ Operations: | `block_header` | `Block` | **`block`** | `process_block_header(state, block)` | | `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` | | `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` | -| `transfer` | `Transfer` | `transfer` | `process_transfer(state, transfer)` | | `voluntary_exit` | `VoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` | Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here. 
diff --git a/test_generators/operations/main.py b/test_generators/operations/main.py index 995a626b4..de3eb7cf1 100644 --- a/test_generators/operations/main.py +++ b/test_generators/operations/main.py @@ -6,7 +6,6 @@ from eth2spec.test.phase_0.block_processing import ( test_process_block_header, test_process_deposit, test_process_proposer_slashing, - test_process_transfer, test_process_voluntary_exit, ) @@ -48,10 +47,6 @@ if __name__ == "__main__": create_provider('deposit', test_process_deposit, 'mainnet'), create_provider('proposer_slashing', test_process_proposer_slashing, 'minimal'), create_provider('proposer_slashing', test_process_proposer_slashing, 'mainnet'), - create_provider('transfer', test_process_transfer, 'minimal'), - # Disabled, due to the high amount of different transfer tests, this produces a shocking size of tests. - # Unnecessarily, as transfer are disabled currently, so not a priority. - # create_provider('transfer', test_process_transfer, 'mainnet'), create_provider('voluntary_exit', test_process_voluntary_exit, 'minimal'), create_provider('voluntary_exit', test_process_voluntary_exit, 'mainnet'), ]) From c2cebef64910de3a1b9e0f68f61b4f7f8d6dd3ef Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 25 Oct 2019 17:13:00 +0800 Subject: [PATCH 229/250] Update specs/validator/0_beacon-chain-validator.md Co-Authored-By: Hsiao-Wei Wang --- specs/validator/0_beacon-chain-validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index a79b178b3..3d679a991 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -379,7 +379,7 @@ def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_si If the validator is selected to aggregate (`is_aggregator()`), they construct an aggregate attestation via the following. 
-Collect `attestations` seen via gossip during the `slot` that have an equivalent `attestation_data` to that constructed by the validator, and create an `aggregate_attestation` with the following fields. +Collect `attestations` seen via gossip during the `slot` that have an equivalent `attestation_data` to that constructed by the validator, and create an `aggregate_attestation: Attestation` with the following fields. ##### Data From e984d10a0cc3b19fc9c03b9ef731a0ccac5dc6b6 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 25 Oct 2019 12:02:12 +0200 Subject: [PATCH 230/250] fix typo, and fix bitlist end-bit description --- specs/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 6e250fd81..fdd5a26ca 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -129,7 +129,7 @@ return bytes(array) ### `Bitlist[N]` -Note that from the offset coding, the length (in bytes) of the bitlist is known. An additional `1` bit is added at position `N` where `N` is the legnth of the bitlist so that the length in bits will also be known. +Note that from the offset coding, the length (in bytes) of the bitlist is known. An additional `1` bit is added to the end, at index `e` where `e` is the length of the bitlist (not the limit), so that the length in bits will also be known. ```python array = [0] * ((len(value) // 8) + 1) @@ -179,7 +179,7 @@ Deserialization can be implemented using a recursive algorithm. The deserializat * Using the first offset, we can compute the length of the list (divide by `BYTES_PER_LENGTH_OFFSET`), as it gives us the total number of bytes in the offset data. * The size of each object in the vector/list can be inferred from the difference of two offsets. 
To get the size of the last object, the total number of bytes has to be known (it is not generally possible to deserialize an SSZ object of unknown length) * Containers follow the same principles as vectors, with the difference that there may be fixed-size objects in a container as well. This means the `fixed_parts` data will contain offsets as well as fixed-size objects. -* In the case of bitlists, the length in bits cannot be uniquely inferred from the number of bytes in the object. Because of this, they have a bit in position `N` where `N` is the length of the list that is always set. This bit has to be used to infer the size of the bitlist in bits. +* In the case of bitlists, the length in bits cannot be uniquely inferred from the number of bytes in the object. Because of this, they have a bit at the end that is always set. This bit has to be used to infer the size of the bitlist in bits. Note that deserialization requires hardening against invalid inputs. A non-exhaustive list: From 2186c45f84b09fcd0733e81c0468b5d5feca9822 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 25 Oct 2019 13:22:59 +0200 Subject: [PATCH 231/250] implement (unpolished) solution for #1446, based on suggested use of eth1 hash --- specs/core/0_beacon-chain.md | 3 +++ test_libs/pyspec/eth2spec/test/helpers/genesis.py | 8 +++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 9a680dc7d..4a62342ed 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1113,6 +1113,9 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=len(deposits)), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), ) + # Set the initial RANDAO mixes to hashes seeded by the Eth1 hash, to limit deposit ordering committee bias. 
+ for i in range(MIN_SEED_LOOKAHEAD + 1): + state.randao_mixes[EPOCHS_PER_HISTORICAL_VECTOR - i - 1] = hash(eth1_block_hash + int_to_bytes(i, 8)) # Process deposits leaves = list(map(lambda deposit: deposit.data, deposits)) diff --git a/test_libs/pyspec/eth2spec/test/helpers/genesis.py b/test_libs/pyspec/eth2spec/test/helpers/genesis.py index 9e3c77b7b..6a9b8f9f1 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/genesis.py +++ b/test_libs/pyspec/eth2spec/test/helpers/genesis.py @@ -19,17 +19,23 @@ def build_mock_validator(spec, i: int, balance: int): def create_genesis_state(spec, num_validators): deposit_root = b'\x42' * 32 + eth1_block_hash = b'\xda' * 32 state = spec.BeaconState( genesis_time=0, eth1_deposit_index=num_validators, eth1_data=spec.Eth1Data( deposit_root=deposit_root, deposit_count=num_validators, - block_hash=spec.Hash(), + block_hash=eth1_block_hash, ), latest_block_header=spec.BeaconBlockHeader(body_root=spec.hash_tree_root(spec.BeaconBlockBody())), ) + # Set the initial RANDAO mixes to hashes seeded by the Eth1 hash, to limit deposit ordering committee bias. + for i in range(spec.MIN_SEED_LOOKAHEAD + 1): + state.randao_mixes[spec.EPOCHS_PER_HISTORICAL_VECTOR - i - 1] = \ + spec.hash(eth1_block_hash + spec.int_to_bytes(i, 8)) + # We "hack" in the initial validators, # as it is much faster than creating and processing genesis deposits for every single test case. 
state.balances = [spec.MAX_EFFECTIVE_BALANCE] * num_validators From 2cc643d2bb11297aedf715362c731191477f89b3 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 25 Oct 2019 13:39:50 +0200 Subject: [PATCH 232/250] update test_eth1_data_votes_no_consensus to not hardcode pre hash --- test_libs/pyspec/eth2spec/test/sanity/test_blocks.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py index 886f9bf6a..3676e3411 100644 --- a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py +++ b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py @@ -509,6 +509,8 @@ def test_eth1_data_votes_no_consensus(spec, state): if spec.SLOTS_PER_ETH1_VOTING_PERIOD > 16: return + pre_eth1_hash = state.eth1_data.block_hash + offset_block = build_empty_block(spec, state, slot=spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1) sign_block(spec, state, offset_block) state_transition_and_sign_block(spec, state, offset_block) @@ -528,7 +530,7 @@ def test_eth1_data_votes_no_consensus(spec, state): blocks.append(block) assert len(state.eth1_data_votes) == spec.SLOTS_PER_ETH1_VOTING_PERIOD - assert state.eth1_data.block_hash == b'\x00' * 32 + assert state.eth1_data.block_hash == pre_eth1_hash yield 'blocks', blocks yield 'post', state From db8347645e297ee44ca8ce5514e48774998b9f47 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 27 Oct 2019 12:18:13 +0900 Subject: [PATCH 233/250] add validation condition to aggregate gossip to prevent forwarding duplicate aggregates along --- specs/networking/p2p-interface.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 4f8829cd8..7921b8d72 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -152,6 +152,7 @@ There are two main topics used to propagate aggregate attestations and beacon bl - `beacon_block` - This topic is used solely 
for propagating new beacon blocks to all nodes on the networks. Blocks are sent in their entirety. Clients MUST validate the block proposer signature before forwarding it across the network. - `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `AggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `aggregate_and_proof` on the network. + - Clients MUST validate that the aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally). - Clients MUST validate that the block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation. - Clients MUST validate that `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. From e005bb04c56cc2d948de5f505f5c76613ba180db Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 27 Oct 2019 12:29:11 +0900 Subject: [PATCH 234/250] add unaggregated attestation validations --- specs/networking/p2p-interface.md | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 7921b8d72..c4bace2d7 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -154,19 +154,17 @@ There are two main topics used to propagate aggregate attestations and beacon bl - `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `AggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `aggregate_and_proof` on the network. 
- Clients MUST validate that the aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally). - Clients MUST validate that the block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation. - - Clients MUST validate that `aggregate_and_proof.aggregate.data.slot` is - within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. - - Clients MUST validate that the validator index is within the aggregate's - committee -- i.e. `aggregate_and_proof.index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`. - - Clients MUST validate that `aggregate_and_proof.selection_proof` selects - the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`. - - Clients MUST validate that the `aggregate_and_proof.selection_proof` is a - valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.index`. + - Clients MUST validate that `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. + - Clients MUST validate that the validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`. + - Clients MUST validate that `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`. 
+ - Clients MUST validate that the `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.index`. - Clients MUST validate that the signature of `aggregate_and_proof.aggregate`. Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `TopicName`s are: -- `index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` - These topics are used to propagate unaggregated attestations to subsections of the network (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the network. +- `committee_index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` - These topics are used to propagate unaggregated attestations to subsections of the network (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the network. + - Clients MUST validate that the attestation's committee index (`attestation.data.index`) is for the correct subnet. + - Clients MUST validate that the attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`). - Clients MUST validate that the block being voted for (`attestation.data.beacon_block_root`) passes validation. - Clients MUST validate that `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. - Clients MUST validate the signature of `attestation`. 
From a700e7a86504e074a7089a5780537d4032c5c450 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 27 Oct 2019 12:32:28 +0900 Subject: [PATCH 235/250] fmt --- specs/networking/p2p-interface.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index c4bace2d7..2afd2276c 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -152,22 +152,22 @@ There are two main topics used to propagate aggregate attestations and beacon bl - `beacon_block` - This topic is used solely for propagating new beacon blocks to all nodes on the networks. Blocks are sent in their entirety. Clients MUST validate the block proposer signature before forwarding it across the network. - `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `AggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `aggregate_and_proof` on the network. - - Clients MUST validate that the aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally). - - Clients MUST validate that the block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation. - - Clients MUST validate that `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. - - Clients MUST validate that the validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`. - - Clients MUST validate that `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. 
`is_aggregator(state, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`. - - Clients MUST validate that the `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.index`. - - Clients MUST validate that the signature of `aggregate_and_proof.aggregate`. + - The aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally). + - The block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation. + - `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. + - The validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`. + - `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`. + - The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.index`. + - The signature of `aggregate_and_proof.aggregate` is valid. Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `TopicName`s are: - `committee_index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` - These topics are used to propagate unaggregated attestations to subsections of the network (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the network. 
- - Clients MUST validate that the attestation's committee index (`attestation.data.index`) is for the correct subnet. - - Clients MUST validate that the attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`). - - Clients MUST validate that the block being voted for (`attestation.data.beacon_block_root`) passes validation. - - Clients MUST validate that `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. - - Clients MUST validate the signature of `attestation`. + - The attestation's committee index (`attestation.data.index`) is for the correct subnet. + - The attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`). + - The block being voted for (`attestation.data.beacon_block_root`) passes validation. + - `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. + - The signature of `attestation` is valid. Additional topics are used to propagate lower frequency validator messages. Their `TopicName`s are: From 6a62cfd3f56bf0823d2edf5ce3cf72ccc5f0e49a Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 27 Oct 2019 12:38:38 +0900 Subject: [PATCH 236/250] gossip cleanup --- specs/networking/p2p-interface.md | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 2afd2276c..ae84cb147 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -177,13 +177,13 @@ Additional topics are used to propagate lower frequency validator messages. Thei #### Interop -Unaggregated and aggregated attestations from all shards are sent as `Attestation`s to the `beacon_aggregate_and_proof` topic. 
Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing. +Unaggregated and aggregated attestations from all shards are sent as `Attestation`s to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing. #### Mainnet -Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. The `CommitteeIndex`, `index`, is assigned to the topic: `index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`. +Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. The `CommitteeIndex`, `index`, is assigned to the topic: `committee_index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`. -Unaggregated attestations are sent to the subnet topic, `index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` as `Attestation`s. +Unaggregated attestations are sent to the subnet topic, `committee_index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` as `Attestation`s. Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s. @@ -196,19 +196,22 @@ Clients MUST reject (fail validation) messages that are over this size limit. 
Li The payload is carried in the `data` field of a gossipsub message, and varies depending on the topic: -| Topic | Message Type | -|------------------------------|-------------------| -| beacon_block | BeaconBlock | -| beacon_aggregate_and_proof | AggregateAndProof | -| index{N}\_beacon_attestation | Attestation | -| voluntary_exit | VoluntaryExit | -| proposer_slashing | ProposerSlashing | -| attester_slashing | AttesterSlashing | +| Topic | Message Type | +|----------------------------------------|-------------------| +| beacon_block | BeaconBlock | +| beacon_aggregate_and_proof | AggregateAndProof | +| beacon_attestation\* | Attestation | +| committee_index{N}\_beacon_attestation | Attestation | +| voluntary_exit | VoluntaryExit | +| proposer_slashing | ProposerSlashing | +| attester_slashing | AttesterSlashing | Clients MUST reject (fail validation) messages containing an incorrect type, or invalid payload. When processing incoming gossip, clients MAY descore or disconnect peers who fail to observe these constraints. +\* The `beacon_attestation` topic is only for interop and will be removed prior to mainnet. + ### Encodings Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded. 
From d6563bce2d3bee99ad5597a34bb8b561e9bf218d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 27 Oct 2019 12:50:55 +0900 Subject: [PATCH 237/250] minor edits to p2p faq --- specs/networking/p2p-interface.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index ae84cb147..3f02757f9 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -11,7 +11,7 @@ It consists of four main sections: ## Table of contents - + @@ -650,7 +650,7 @@ No security or privacy guarantees are lost as a result of choosing plaintext top Furthermore, the Eth 2.0 topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. -### Why are there `ATTESTATION_SUBNET_COUNT` subnets? +### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets? Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel. The exact grouping will be dependent on more involved network tests. This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet). The value is currently set to to be equal `MAX_COMMITTEES_PER_SLOT` until network tests indicate otherwise. @@ -664,6 +664,8 @@ In addition to this, relaying attestations requires validating the attestation i The dominant strategy for an individual validator is to always broadcast an aggregate containing their own attestation to the global channel to ensure that proposers see their attestation for inclusion. Using a private selection criteria and providing this proof of selection alongside the gossiped aggregate ensures that this dominant strategy will not flood the global channel. 
+Also, an attacker can create any number of honest-looking aggregates and broadcast them to the global pubsub channel. Thus without some sort of proof of selection as an aggregator, the global channel can trivially be spammed. + ### Why are we sending entire objects in the pubsub and not just hashes? Entire objects should be sent to get the greatest propagation speeds. If only hashes are sent, then block and attestation propagation is dependent on recursive requests from each peer. In a hash-only scenario, peers could receive hashes without knowing who to download the actual contents from. Sending entire objects ensures that they get propagated through the entire network. From e6e203ce851f3467472a2f691edb39b59447cd12 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 27 Oct 2019 16:20:35 +0100 Subject: [PATCH 238/250] randaomixes init now with eth1 hash repeat --- specs/core/0_beacon-chain.md | 4 +--- test_libs/pyspec/eth2spec/test/helpers/genesis.py | 6 +----- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4a62342ed..10640badc 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1112,10 +1112,8 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash, genesis_time=eth1_timestamp - eth1_timestamp % SECONDS_PER_DAY + 2 * SECONDS_PER_DAY, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=len(deposits)), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), + randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # to limit deposit order bias in early epochs ) - # Set the initial RANDAO mixes to hashes seeded by the Eth1 hash, to limit deposit ordering committee bias. 
- for i in range(MIN_SEED_LOOKAHEAD + 1): - state.randao_mixes[EPOCHS_PER_HISTORICAL_VECTOR - i - 1] = hash(eth1_block_hash + int_to_bytes(i, 8)) # Process deposits leaves = list(map(lambda deposit: deposit.data, deposits)) diff --git a/test_libs/pyspec/eth2spec/test/helpers/genesis.py b/test_libs/pyspec/eth2spec/test/helpers/genesis.py index 6a9b8f9f1..7e747e34e 100644 --- a/test_libs/pyspec/eth2spec/test/helpers/genesis.py +++ b/test_libs/pyspec/eth2spec/test/helpers/genesis.py @@ -29,13 +29,9 @@ def create_genesis_state(spec, num_validators): block_hash=eth1_block_hash, ), latest_block_header=spec.BeaconBlockHeader(body_root=spec.hash_tree_root(spec.BeaconBlockBody())), + randao_mixes=[eth1_block_hash] * spec.EPOCHS_PER_HISTORICAL_VECTOR, ) - # Set the initial RANDAO mixes to hashes seeded by the Eth1 hash, to limit deposit ordering committee bias. - for i in range(spec.MIN_SEED_LOOKAHEAD + 1): - state.randao_mixes[spec.EPOCHS_PER_HISTORICAL_VECTOR - i - 1] = \ - spec.hash(eth1_block_hash + spec.int_to_bytes(i, 8)) - # We "hack" in the initial validators, # as it is much faster than creating and processing genesis deposits for every single test case. 
state.balances = [spec.MAX_EFFECTIVE_BALANCE] * num_validators From 09339f2f1721e52c82accd7b75d87e9294ece4ea Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 27 Oct 2019 18:18:48 +0000 Subject: [PATCH 239/250] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 10640badc..f9b7e1d60 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1112,7 +1112,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash, genesis_time=eth1_timestamp - eth1_timestamp % SECONDS_PER_DAY + 2 * SECONDS_PER_DAY, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=len(deposits)), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), - randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # to limit deposit order bias in early epochs + randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy ) # Process deposits From 2d59ca6d575f7e48ba3a9907753212018f6121d2 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Mon, 28 Oct 2019 12:43:29 +0800 Subject: [PATCH 240/250] Update ToC --- specs/validator/0_beacon-chain-validator.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 3d679a991..c6623dd09 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -39,6 +39,7 @@ - [Voluntary exits](#voluntary-exits) - [Attesting](#attesting) - [Attestation data](#attestation-data) + - [General](#general) - [LMD GHOST vote](#lmd-ghost-vote) - [FFG vote](#ffg-vote) - [Construct attestation](#construct-attestation) From e8fc84eadc730bf5d3feaa3fdd131e3a7b071b8f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 28 Oct 2019 15:00:55 +0800 Subject: [PATCH 241/250] Update specs/networking/p2p-interface.md 
Co-Authored-By: Hsiao-Wei Wang --- specs/networking/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 50a24a83a..9659dac5a 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -115,7 +115,7 @@ This section outlines constants that are used in this spec. |---|---|---| | `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. | | `MAX_CHUNK_SIZE` | `2**20` (1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. | -| `ATTESTATION_SUBNET_COUNT` | `64` | The number of shard subnets used in the gossipsub protocol. | +| `ATTESTATION_SUBNET_COUNT` | `64` | The number of attestation subnets used in the gossipsub protocol. | | `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). | | `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. | | `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. 
| From 6fc1feaa2a87e47bc8bb1b09642cdb7b3b177274 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 28 Oct 2019 16:15:26 +0900 Subject: [PATCH 242/250] rearrange some of the gossip info in response to hwwhww review --- specs/networking/p2p-interface.md | 97 +++++++++++++++---------------- 1 file changed, 47 insertions(+), 50 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 9659dac5a..2338aaf88 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -11,7 +11,7 @@ It consists of four main sections: ## Table of contents - + @@ -141,60 +141,14 @@ The following gossipsub [parameters](https://github.com/libp2p/specs/tree/master - `gossip_history` (number of heartbeat intervals to retain message IDs): 5 - `heartbeat_interval` (frequency of heartbeat, seconds): 1 -### Topics +### Topics and messages -Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages). +Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages). Topic strings have form: `/eth2/TopicName/TopicEncoding`. This defines both the type of data being sent on the topic and how the data field of the message is encoded. -Topic strings have form: `/eth2/TopicName/TopicEncoding`. This defines both the type of data being sent on the topic and how the data field of the message is encoded. (Further details can be found in [Messages](#Messages)). - -There are two main topics used to propagate aggregate attestations and beacon blocks to all nodes on the network. Their `TopicName`s are: - -- `beacon_block` - This topic is used solely for propagating new beacon blocks to all nodes on the networks. Blocks are sent in their entirety. Clients MUST validate the block proposer signature before forwarding it across the network. 
-- `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `AggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `aggregate_and_proof` on the network. - - The aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally). - - The block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation. - - `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. - - The validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`. - - `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`. - - The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.index`. - - The signature of `aggregate_and_proof.aggregate` is valid. - -Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `TopicName`s are: - -- `committee_index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` - These topics are used to propagate unaggregated attestations to subsections of the network (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the network. - - The attestation's committee index (`attestation.data.index`) is for the correct subnet. 
- - The attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`). - - The block being voted for (`attestation.data.beacon_block_root`) passes validation. - - `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. - - The signature of `attestation` is valid. - -Additional topics are used to propagate lower frequency validator messages. Their `TopicName`s are: - -- `voluntary_exit` - This topic is used solely for propagating voluntary validator exits to proposers on the network. Voluntary exits are sent in their entirety. Clients who receive a voluntary exit on this topic MUST validate the conditions within `process_voluntary_exit` before forwarding it across the network. -- `proposer_slashing` - This topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. Clients who receive a proposer slashing on this topic MUST validate the conditions within `process_proposer_slashing` before forwarding it across the network. -- `attester_slashing` - This topic is used solely for propagating attester slashings to proposers on the network. Attester slashings are sent in their entirety. Clients who receive an attester slashing on this topic MUST validate the conditions within `process_attester_slashing` before forwarding it across the network. - -#### Interop - -Unaggregated and aggregated attestations from all shards are sent as `Attestation`s to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing. - -#### Mainnet - -Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. 
The `CommitteeIndex`, `index`, is assigned to the topic: `committee_index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`. - -Unaggregated attestations are sent to the subnet topic, `committee_index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` as `Attestation`s. - -Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s. - -### Messages - -Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. - -Clients MUST reject (fail validation) messages that are over this size limit. Likewise, clients MUST NOT emit or propagate messages larger than this limit. +Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. Clients MUST reject (fail validation) messages that are over this size limit. Likewise, clients MUST NOT emit or propagate messages larger than this limit. The payload is carried in the `data` field of a gossipsub message, and varies depending on the topic: - | Topic | Message Type | |----------------------------------------|-------------------| | beacon_block | BeaconBlock | @@ -211,6 +165,49 @@ When processing incoming gossip, clients MAY descore or disconnect peers who fai \* The `beacon_attestation` topic is only for interop and will be removed prior to mainnet. +#### Global topics + +There are two primary global topics used to propagate beacon blocks and aggregate attestations to all nodes on the network. Their `TopicName`s are: + +- `beacon_block` - This topic is used solely for propagating new beacon blocks to all nodes on the networks. Blocks are sent in their entirety. Clients MUST validate the block proposer signature before forwarding it across the network. 
+- `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `AggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `aggregate_and_proof` on the network. + - The aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally). + - The block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation. + - `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. + - The validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`. + - `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`. + - The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.index`. + - The signature of `aggregate_and_proof.aggregate` is valid. + +Additional global topics are used to propagate lower frequency validator messages. Their `TopicName`s are: + +- `voluntary_exit` - This topic is used solely for propagating voluntary validator exits to proposers on the network. Voluntary exits are sent in their entirety. Clients who receive a voluntary exit on this topic MUST validate the conditions within `process_voluntary_exit` before forwarding it across the network. +- `proposer_slashing` - This topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. 
Clients who receive a proposer slashing on this topic MUST validate the conditions within `process_proposer_slashing` before forwarding it across the network. +- `attester_slashing` - This topic is used solely for propagating attester slashings to proposers on the network. Attester slashings are sent in their entirety. Clients who receive an attester slashing on this topic MUST validate the conditions within `process_attester_slashing` before forwarding it across the network. + +#### Attestation subnets + +Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `TopicName`s are: + +- `committee_index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` - These topics are used to propagate unaggregated attestations to subsections of the network (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the network. + - The attestation's committee index (`attestation.data.index`) is for the correct subnet. + - The attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`). + - The block being voted for (`attestation.data.beacon_block_root`) passes validation. + - `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. + - The signature of `attestation` is valid. + +#### Interop + +Unaggregated and aggregated attestations from all shards are sent as `Attestation`s to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing. + +#### Mainnet + +Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. 
The `CommitteeIndex`, `index`, is assigned to the topic: `committee_index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`. + +Unaggregated attestations are sent to the subnet topic, `committee_index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` as `Attestation`s. + +Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s. + ### Encodings Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded. From 257fcd9c7908ad8a6cdcbd1c318fe1d37748ae5a Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Mon, 28 Oct 2019 08:29:01 +0100 Subject: [PATCH 243/250] typo --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index dfb7fbdb6..655775d22 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -569,7 +569,7 @@ def int_to_bytes(n: uint64, length: uint64) -> bytes: ```python def bytes_to_int(data: bytes) -> uint64: """ - Return the integer deserialization of ``data`` intepretted as ``ENDIANNESS``-endian. + Return the integer deserialization of ``data`` interpreted as ``ENDIANNESS``-endian. 
""" return int.from_bytes(data, ENDIANNESS) ``` From bb5a721707e3088c5b549bb55c40c6af6c9fa2cf Mon Sep 17 00:00:00 2001 From: terence tsao Date: Wed, 24 Jul 2019 15:41:26 -0700 Subject: [PATCH 244/250] Update 0_fork-choice.md --- specs/core/0_fork-choice.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_fork-choice.md b/specs/core/0_fork-choice.md index 88edceaa0..7d89e3044 100644 --- a/specs/core/0_fork-choice.md +++ b/specs/core/0_fork-choice.md @@ -158,7 +158,7 @@ def on_block(store: Store, block: BeaconBlock) -> None: # Check that block is later than the finalized epoch slot assert block.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) # Check the block is valid and compute the post-state - state = state_transition(pre_state, block) + state = state_transition(pre_state, block, true) # Add new state for this block to the store store.block_states[signing_root(block)] = state From 34ba645f7e62b41d3e523ac09d3744eb726a6dbe Mon Sep 17 00:00:00 2001 From: terence tsao Date: Wed, 24 Jul 2019 19:26:39 -0700 Subject: [PATCH 245/250] Update specs/core/0_fork-choice.md Co-Authored-By: Danny Ryan --- specs/core/0_fork-choice.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_fork-choice.md b/specs/core/0_fork-choice.md index 7d89e3044..b909ce732 100644 --- a/specs/core/0_fork-choice.md +++ b/specs/core/0_fork-choice.md @@ -158,7 +158,7 @@ def on_block(store: Store, block: BeaconBlock) -> None: # Check that block is later than the finalized epoch slot assert block.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) # Check the block is valid and compute the post-state - state = state_transition(pre_state, block, true) + state = state_transition(pre_state, block, True) # Add new state for this block to the store store.block_states[signing_root(block)] = state From 43132a30827371709482272a644e0ec747e35ebe Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 28 Oct 2019 15:53:10 
+0800 Subject: [PATCH 246/250] [reopen] Eth2 shorthand standardized (#1452) * Update README.md * Update README.md * Update p2p-interface.md * Update simple-serialize.md * Update README.md * Update initialization.md * Update README.md * Update 0_beacon-chain-validator.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md --- README.md | 12 +++++----- configs/README.md | 7 +++--- specs/networking/p2p-interface.md | 24 ++++++++++---------- specs/simple-serialize.md | 2 +- specs/test_formats/README.md | 2 +- specs/test_formats/genesis/initialization.md | 2 +- specs/test_formats/ssz_static/README.md | 2 +- specs/validator/0_beacon-chain-validator.md | 8 +++---- test_generators/README.md | 4 ++-- test_generators/bls/README.md | 2 +- test_generators/shuffling/README.md | 2 +- test_generators/ssz_static/README.md | 2 +- test_libs/config_helpers/README.md | 2 +- test_libs/gen_helpers/README.md | 2 +- test_libs/pyspec/README.md | 4 ++-- 15 files changed, 38 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index fdfbd1c5e..a6f23db9d 100644 --- a/README.md +++ b/README.md @@ -4,12 +4,12 @@ To learn more about sharding and Ethereum 2.0 (Serenity), see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQ) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm). -This repository hosts the current Eth 2.0 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed-upon changes to the spec can be made through pull requests. +This repository hosts the current Eth2 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed-upon changes to the spec can be made through pull requests. ## Specs -Core specifications for Eth 2.0 client validation can be found in [specs/core](specs/core). 
These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are: +Core specifications for Eth2 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are: ### Phase 0 * [The Beacon Chain](specs/core/0_beacon-chain.md) @@ -26,7 +26,7 @@ Core specifications for Eth 2.0 client validation can be found in [specs/core](s Phase 2 is still actively in R&D and does not yet have any formal specifications. -See the [Eth 2.0 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) for current progress, discussions, and definitions regarding this work. +See the [Eth2 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) for current progress, discussions, and definitions regarding this work. ### Accompanying documents can be found in [specs](specs) and include: @@ -40,9 +40,9 @@ See the [Eth 2.0 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) fo Additional specifications and standards outside of requisite client functionality can be found in the following repos: -* [Eth2.0 APIs](https://github.com/ethereum/eth2.0-apis) -* [Eth2.0 Metrics](https://github.com/ethereum/eth2.0-metrics/) -* [Interop Standards in Eth2.0-pm](https://github.com/ethereum/eth2.0-pm/tree/master/interop) +* [Eth2 APIs](https://github.com/ethereum/eth2.0-apis) +* [Eth2 Metrics](https://github.com/ethereum/eth2.0-metrics/) +* [Interop Standards in Eth2 PM](https://github.com/ethereum/eth2.0-pm/tree/master/interop) ## Design goals diff --git a/configs/README.md b/configs/README.md index 8adb939c8..4ca54e014 100644 --- a/configs/README.md +++ b/configs/README.md @@ -3,7 +3,7 @@ This directory contains a set of constants presets used for testing, testnets, and mainnet. A preset file contains all the constants known for its target. -Later-fork constants can be ignored, e.g. 
ignore phase1 constants as a client that only supports phase 0 currently. +Later-fork constants can be ignored, e.g. ignore Phase 1 constants as a client that only supports Phase 0 currently. ## Forking @@ -14,9 +14,8 @@ Instead, for forks that introduce changes in a constant, the constant name is pr Over time, the need to sync an older state may be deprecated. In this case, the prefix on the new constant may be removed, and the old constant will keep a special name before completely being removed. -A previous iteration of forking made use of "timelines", but this collides with the definitions used in the spec (constants for special forking slots etc.), - and was not integrated sufficiently in any of the spec tools or implementations. -Instead, the config essentially doubles as fork definition now, changing the value for e.g. `PHASE_1_GENESIS_SLOT` changes the fork. +A previous iteration of forking made use of "timelines", but this collides with the definitions used in the spec (constants for special forking slots, etc.), and was not integrated sufficiently in any of the spec tools or implementations. +Instead, the config essentially doubles as fork definition now, e.g. changing the value for `PHASE_1_GENESIS_SLOT` changes the fork. Another reason to prefer forking through constants is the ability to program a forking moment based on context, instead of being limited to a static slot number. diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 4f71ed6d9..78043b159 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -5,9 +5,9 @@ This document contains the networking specification for Ethereum 2.0 clients. It consists of four main sections: 1. A specification of the network fundamentals detailing the two network configurations: interoperability test network and mainnet launch. -2. 
A specification of the three network interaction *domains* of Eth 2.0: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain. +2. A specification of the three network interaction *domains* of Eth2: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain. 3. The rationale and further explanation for the design choices made in the previous two sections. -4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which Eth 2.0 clients are being developed. +4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which Eth2 clients are being developed. ## Table of contents @@ -21,7 +21,7 @@ It consists of four main sections: - [Encryption and identification](#encryption-and-identification) - [Protocol negotiation](#protocol-negotiation) - [Multiplexing](#multiplexing) -- [Eth 2.0 network interaction domains](#eth-20-network-interaction-domains) +- [Eth2 network interaction domains](#eth2-network-interaction-domains) - [Configuration](#configuration) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [The Req/Resp domain](#the-reqresp-domain) @@ -83,7 +83,7 @@ The following SecIO parameters MUST be supported by all stacks: [Noise Framework](http://www.noiseprotocol.org/) handshakes will be used for mainnet. libp2p Noise support [is in the process of being standardized](https://github.com/libp2p/specs/issues/195) in the libp2p project. -Noise support will presumably include IX, IK, and XX handshake patterns, and may rely on Curve25519 keys, ChaCha20 and Poly1305 ciphers, and SHA-256 as a hash function. These aspects are being actively debated in the referenced issue (Eth 2.0 implementers are welcome to comment and contribute to the discussion). +Noise support will presumably include IX, IK, and XX handshake patterns, and may rely on Curve25519 keys, ChaCha20 and Poly1305 ciphers, and SHA-256 as a hash function. 
These aspects are being actively debated in the referenced issue (Eth2 implementers are welcome to comment and contribute to the discussion). ## Protocol Negotiation @@ -105,7 +105,7 @@ Two multiplexers are commonplace in libp2p implementations: [mplex](https://gith Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux must take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs. -# Eth 2.0 network interaction domains +# Eth2 network interaction domains ## Configuration @@ -475,9 +475,9 @@ Specifications of these parameters can be found in the [ENR Specification](http: #### Interop -In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry Eth 2.0 capability information, as it would be superfluous. +In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry Eth2 capability information, as it would be superfluous. -Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an Eth 2.0 peer, in order to eschew connecting to Eth 1.0 peers. +Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an Eth2 peer, in order to eschew connecting to Eth 1.0 peers. 
#### Mainnet @@ -514,7 +514,7 @@ Clients may support other transports such as libp2p QUIC, WebSockets, and WebRTC The libp2p QUIC transport inherently relies on TLS 1.3 per requirement in section 7 of the [QUIC protocol specification](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7) and the accompanying [QUIC-TLS document](https://tools.ietf.org/html/draft-ietf-quic-tls-22). -The usage of one handshake procedure or the other shall be transparent to the Eth 2.0 application layer, once the libp2p Host/Node object has been configured appropriately. +The usage of one handshake procedure or the other shall be transparent to the Eth2 application layer, once the libp2p Host/Node object has been configured appropriately. ### What are the advantages of using TCP/QUIC/Websockets? @@ -524,7 +524,7 @@ QUIC is a new protocol that’s in the final stages of specification by the IETF QUIC is being adopted as the underlying protocol for HTTP/3. This has the potential to award us censorship resistance via deep packet inspection for free. Provided that we use the same port numbers and encryption mechanisms as HTTP/3, our traffic may be indistinguishable from standard web traffic, and we may only become subject to standard IP-based firewall filtering—something we can counteract via other mechanisms. -WebSockets and/or WebRTC transports are necessary for interaction with browsers, and will become increasingly important as we incorporate browser-based light clients to the Eth 2.0 network. +WebSockets and/or WebRTC transports are necessary for interaction with browsers, and will become increasingly important as we incorporate browser-based light clients to the Eth2 network. ### Why do we not just support a single transport? @@ -652,7 +652,7 @@ Topic names have a hierarchical structure. 
In the future, gossipsub may support No security or privacy guarantees are lost as a result of choosing plaintext topic names, since the domain is finite anyway, and calculating a digest's preimage would be trivial. -Furthermore, the Eth 2.0 topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. +Furthermore, the Eth2 topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. ### Why are there `SHARD_SUBNET_COUNT` subnets, and why is this not defined? @@ -683,7 +683,7 @@ Requests are segregated by protocol ID to: 3. Enable clients to select the individual requests/versions they support. It would no longer be a strict requirement to support all requests, and clients, in principle, could support a subset of requests and variety of versions. 4. Enable flexibility and agility for clients adopting spec changes that impact the request, by signalling to peers exactly which subset of new/old requests they support. 5. Enable clients to explicitly choose backwards compatibility at the request granularity. Without this, clients would be forced to support entire versions of the coarser request protocol. -6. Parallelise RFCs (or Eth 2.0 EIPs). By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently without relying on a synchronization point to version the general top-level protocol. +6. Parallelise RFCs (or Eth2 EIPs). By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently without relying on a synchronization point to version the general top-level protocol. 1. This has the benefit that clients can explicitly choose which RFCs to deploy without buying into all other RFCs that may be included in that top-level version. 2. 
Affording this level of granularity with a top-level protocol would imply creating as many variants (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2). 7. Allow us to simplify the payload of requests. Request-id’s and method-ids no longer need to be sent. The encoding/request type and version can all be handled by the framework. @@ -795,4 +795,4 @@ For specific ad-hoc testing scenarios, you can use the [plaintext/2.0.0 secure c # libp2p implementations matrix -This section will soon contain a matrix showing the maturity/state of the libp2p features required by this spec across the languages in which Eth 2.0 clients are being developed. +This section will soon contain a matrix showing the maturity/state of the libp2p features required by this spec across the languages in which Eth2 clients are being developed. diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index cb6134793..7c4667ec8 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -1,6 +1,6 @@ # SimpleSerialize (SSZ) -**Notice**: This document is a work-in-progress describing typing, serialization, and Merkleization of Eth 2.0 objects. +**Notice**: This document is a work-in-progress describing typing, serialization, and Merkleization of Eth2 objects. ## Table of contents diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index aaf636d2c..63b9a5390 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -1,6 +1,6 @@ # General test format -This document defines the YAML format and structure used for Eth 2.0 testing. +This document defines the YAML format and structure used for Eth2 testing. 
## Table of contents diff --git a/specs/test_formats/genesis/initialization.md b/specs/test_formats/genesis/initialization.md index 17c87f66e..428abb5bd 100644 --- a/specs/test_formats/genesis/initialization.md +++ b/specs/test_formats/genesis/initialization.md @@ -6,7 +6,7 @@ Tests the initialization of a genesis state based on Eth1 data. ### `eth1_block_hash.yaml` -A `Bytes32` hex encoded, with prefix 0x. The root of the Eth-1 block. +A `Bytes32` hex encoded, with prefix 0x. The root of the Eth1 block. Also available as `eth1_block_hash.ssz`. diff --git a/specs/test_formats/ssz_static/README.md b/specs/test_formats/ssz_static/README.md index 1dfe0c23f..78df2fc9a 100644 --- a/specs/test_formats/ssz_static/README.md +++ b/specs/test_formats/ssz_static/README.md @@ -1,7 +1,7 @@ # SSZ, static tests This set of test-suites provides static testing for SSZ: - to instantiate just the known Eth 2.0 SSZ types from binary data. + to instantiate just the known Eth2 SSZ types from binary data. This series of tests is based on the spec-maintained `eth2spec/utils/ssz/ssz_impl.py`, i.e. fully consistent with the SSZ spec. diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 96adef20e..5550e358f 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -106,7 +106,7 @@ To submit a deposit: ### Process deposit -Deposits cannot be processed into the beacon chain until the Eth 1.0 block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth 1.0 blocks (~4 hours) plus `ETH1_DATA_VOTING_PERIOD` epochs (~1.7 hours). Once the requisite Eth 1.0 data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validators` within an epoch or two. The validator is then in a queue to be activated. 
+Deposits cannot be processed into the beacon chain until the Eth1 block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth1 blocks (~4 hours) plus `ETH1_DATA_VOTING_PERIOD` epochs (~1.7 hours). Once the requisite Eth1 data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validators` within an epoch or two. The validator is then in a queue to be activated. ### Validator index @@ -217,9 +217,9 @@ def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> ##### Eth1 Data -The `block.eth1_data` field is for block proposers to vote on recent Eth 1.0 data. This recent data contains an Eth 1.0 block hash as well as the associated deposit root (as calculated by the `get_deposit_root()` method of the deposit contract) and deposit count after execution of the corresponding Eth 1.0 block. If over half of the block proposers in the current Eth 1.0 voting period vote for the same `eth1_data` then `state.eth1_data` updates at the end of the voting period. Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`. +The `block.eth1_data` field is for block proposers to vote on recent Eth1 data. This recent data contains an Eth1 block hash as well as the associated deposit root (as calculated by the `get_deposit_root()` method of the deposit contract) and deposit count after execution of the corresponding Eth1 block. If over half of the block proposers in the current Eth1 voting period vote for the same `eth1_data` then `state.eth1_data` updates at the end of the voting period. Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`. 
-Let `get_eth1_data(distance: uint64) -> Eth1Data` be the (subjective) function that returns the Eth 1.0 data at distance `distance` relative to the Eth 1.0 head at the start of the current Eth 1.0 voting period. Let `previous_eth1_distance` be the distance relative to the Eth 1.0 block corresponding to `state.eth1_data.block_hash` at the start of the current Eth 1.0 voting period. An honest block proposer sets `block.eth1_data = get_eth1_vote(state, previous_eth1_distance)` where: +Let `get_eth1_data(distance: uint64) -> Eth1Data` be the (subjective) function that returns the Eth1 data at distance `distance` relative to the Eth1 head at the start of the current Eth1 voting period. Let `previous_eth1_distance` be the distance relative to the Eth1 block corresponding to `state.eth1_data.block_hash` at the start of the current Eth1 voting period. An honest block proposer sets `block.eth1_data = get_eth1_vote(state, previous_eth1_distance)` where: ```python def get_eth1_vote(state: BeaconState, previous_eth1_distance: uint64) -> Eth1Data: @@ -265,7 +265,7 @@ Up to `MAX_ATTESTATIONS`, aggregate attestations can be included in the `block`. ##### Deposits -If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. `state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth 1.0 deposit contract](../core/0_deposit-contract.md) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits). +If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. 
`state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1 deposit contract](../core/0_deposit-contract.md) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits). The `proof` for each deposit must be constructed against the deposit root contained in `state.eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation. diff --git a/test_generators/README.md b/test_generators/README.md index abcb8a1ee..9b1aab29c 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -1,6 +1,6 @@ -# Eth 2.0 Test Generators +# Eth2 test generators -This directory contains all the generators for tests, consumed by Eth 2.0 client implementations. +This directory contains all the generators for tests, consumed by Eth2 client implementations. Any issues with the generators and/or generated tests should be filed in the repository that hosts the generator outputs, here: [ethereum/eth2.0-spec-tests](https://github.com/ethereum/eth2.0-spec-tests). 
diff --git a/test_generators/bls/README.md b/test_generators/bls/README.md index 2bf46e9ea..39261771b 100644 --- a/test_generators/bls/README.md +++ b/test_generators/bls/README.md @@ -9,7 +9,7 @@ The base unit is bytes48 of which only 381 bits are used ## Resources -- [Eth2.0 spec](../../specs/bls_signature.md) +- [Eth2 spec](../../specs/bls_signature.md) - [Finite Field Arithmetic](http://www.springeronline.com/sgw/cda/pageitems/document/cda_downloaddocument/0,11996,0-0-45-110359-0,00.pdf) - Chapter 2 of [Elliptic Curve Cryptography](http://cacr.uwaterloo.ca/ecc/). Darrel Hankerson, Alfred Menezes, and Scott Vanstone - [Zcash BLS parameters](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381) diff --git a/test_generators/shuffling/README.md b/test_generators/shuffling/README.md index a8f0cbdb4..fae06ad7e 100644 --- a/test_generators/shuffling/README.md +++ b/test_generators/shuffling/README.md @@ -1,6 +1,6 @@ # Shuffling Tests -Tests for the swap-or-not shuffling in ETH 2.0. +Tests for the swap-or-not shuffling in Eth2. Tips for initial shuffling write: - run with `round_count = 1` first, do the same with pyspec. diff --git a/test_generators/ssz_static/README.md b/test_generators/ssz_static/README.md index 453d6d0e5..2a5040192 100644 --- a/test_generators/ssz_static/README.md +++ b/test_generators/ssz_static/README.md @@ -1,6 +1,6 @@ # SSZ-static The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ: - the serialization and hashing of ETH 2.0 data types. + the serialization and hashing of Eth2 data types. Test-format documentation can be found [here](../../specs/test_formats/ssz_static/README.md). 
diff --git a/test_libs/config_helpers/README.md b/test_libs/config_helpers/README.md index eaa3f3b40..20dfcf7bf 100644 --- a/test_libs/config_helpers/README.md +++ b/test_libs/config_helpers/README.md @@ -1,4 +1,4 @@ -# ETH 2.0 config helpers +# Eth2 config helpers `preset_loader`: A util to load constants-presets with. See [Constants-presets documentation](../../configs/constants_presets/README.md). diff --git a/test_libs/gen_helpers/README.md b/test_libs/gen_helpers/README.md index dfda434c3..1d74a31d3 100644 --- a/test_libs/gen_helpers/README.md +++ b/test_libs/gen_helpers/README.md @@ -1,4 +1,4 @@ -# ETH 2.0 test generator helpers +# Eth2 test generator helpers ## `gen_base` diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md index 53750517d..ea994c71b 100644 --- a/test_libs/pyspec/README.md +++ b/test_libs/pyspec/README.md @@ -1,6 +1,6 @@ -# Eth 2.0 Executable Python Spec (PySpec) +# Eth2 Executable Python Spec (PySpec) -The executable Python spec is built from the Eth 2.0 specification, +The executable Python spec is built from the Eth2 specification, complemented with the necessary helper functions for hashing, BLS, and more. 
With this executable spec, From 96d4516889fd561d42bba4e5d43843ea93107432 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 28 Oct 2019 09:35:19 +0100 Subject: [PATCH 247/250] fix forkchoice tests: fix test decorators, fix block state roots, clean up some logic --- test_libs/pyspec/eth2spec/test/context.py | 4 +- .../test/fork_choice/test_get_head.py | 14 ++--- .../test/fork_choice/test_on_attestation.py | 46 +++++++-------- .../test/fork_choice/test_on_block.py | 59 +++++++++++-------- 4 files changed, 62 insertions(+), 61 deletions(-) diff --git a/test_libs/pyspec/eth2spec/test/context.py b/test_libs/pyspec/eth2spec/test/context.py index b06d2984d..3177cd0b8 100644 --- a/test_libs/pyspec/eth2spec/test/context.py +++ b/test_libs/pyspec/eth2spec/test/context.py @@ -143,7 +143,9 @@ def bls_switch(fn): def entry(*args, **kw): old_state = bls.bls_active bls.bls_active = kw.pop('bls_active', DEFAULT_BLS_ACTIVE) - yield from fn(*args, **kw) + res = fn(*args, **kw) + if res is not None: + yield from res bls.bls_active = old_state return entry diff --git a/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py b/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py index 6ac46ba6c..ff5a822fb 100644 --- a/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py +++ b/test_libs/pyspec/eth2spec/test/fork_choice/test_get_head.py @@ -1,4 +1,4 @@ -from eth2spec.test.context import with_all_phases, with_state, bls_switch +from eth2spec.test.context import with_all_phases, spec_state_test from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.state import state_transition_and_sign_block @@ -27,8 +27,7 @@ def add_attestation_to_store(spec, store, attestation): @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_genesis(spec, state): # Initialization store = spec.get_genesis_store(state) @@ -37,8 +36,7 @@ def test_genesis(spec, state): 
@with_all_phases -@with_state -@bls_switch +@spec_state_test def test_chain_no_attestations(spec, state): # Initialization store = spec.get_genesis_store(state) @@ -59,8 +57,7 @@ def test_chain_no_attestations(spec, state): @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_split_tie_breaker_no_attestations(spec, state): genesis_state = state.copy() @@ -88,8 +85,7 @@ def test_split_tie_breaker_no_attestations(spec, state): @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_shorter_chain_but_heavier_weight(spec, state): genesis_state = state.copy() diff --git a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py index ee1c04219..70375ef27 100644 --- a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py +++ b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_attestation.py @@ -1,8 +1,10 @@ -from eth2spec.test.context import with_all_phases, with_state, bls_switch, with_phases + +from eth2spec.test.context import with_all_phases, spec_state_test, with_phases + from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.attestations import get_valid_attestation -from eth2spec.test.helpers.state import next_slot +from eth2spec.test.helpers.state import state_transition_and_sign_block def run_on_attestation(spec, state, store, attestation, valid=True): @@ -26,27 +28,24 @@ def run_on_attestation(spec, state, store, attestation, valid=True): @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_on_attestation(spec, state): store = spec.get_genesis_store(state) time = 100 spec.on_tick(store, time) - block = build_empty_block_for_next_slot(spec, state, signed=True) + block = build_empty_block_for_next_slot(spec, state) + state_transition_and_sign_block(spec, state, block) # store block in store spec.on_block(store, block) - next_slot(spec, state) - attestation = 
get_valid_attestation(spec, state, slot=block.slot) run_on_attestation(spec, state, store, attestation) @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_on_attestation_target_not_in_store(spec, state): store = spec.get_genesis_store(state) time = 100 @@ -55,28 +54,27 @@ def test_on_attestation_target_not_in_store(spec, state): # move to next epoch to make block new target state.slot += spec.SLOTS_PER_EPOCH - block = build_empty_block_for_next_slot(spec, state, signed=True) + block = build_empty_block_for_next_slot(spec, state) + state_transition_and_sign_block(spec, state, block) # do not add block to store - next_slot(spec, state) attestation = get_valid_attestation(spec, state, slot=block.slot) run_on_attestation(spec, state, store, attestation, False) @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_on_attestation_future_epoch(spec, state): store = spec.get_genesis_store(state) time = 3 * spec.SECONDS_PER_SLOT spec.on_tick(store, time) - block = build_empty_block_for_next_slot(spec, state, signed=True) + block = build_empty_block_for_next_slot(spec, state) + state_transition_and_sign_block(spec, state, block) # store block in store spec.on_block(store, block) - next_slot(spec, state) # move state forward but not store attestation_slot = block.slot + spec.SLOTS_PER_EPOCH @@ -87,36 +85,34 @@ def test_on_attestation_future_epoch(spec, state): @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_on_attestation_same_slot(spec, state): store = spec.get_genesis_store(state) time = 1 * spec.SECONDS_PER_SLOT spec.on_tick(store, time) - block = build_empty_block_for_next_slot(spec, state, signed=True) + block = build_empty_block_for_next_slot(spec, state) + state_transition_and_sign_block(spec, state, block) spec.on_block(store, block) - next_slot(spec, state) attestation = get_valid_attestation(spec, state, slot=block.slot) run_on_attestation(spec, state, store, attestation, False) @with_phases(['phase0']) 
-@with_state -@bls_switch +@spec_state_test def test_on_attestation_invalid_attestation(spec, state): store = spec.get_genesis_store(state) time = 3 * spec.SECONDS_PER_SLOT spec.on_tick(store, time) - block = build_empty_block_for_next_slot(spec, state, signed=True) + block = build_empty_block_for_next_slot(spec, state) + state_transition_and_sign_block(spec, state, block) spec.on_block(store, block) - next_slot(spec, state) attestation = get_valid_attestation(spec, state, slot=block.slot) - # make attestation invalid - attestation.custody_bits[0:8] = [0, 0, 0, 0, 1, 1, 1, 1] + # make attestation invalid by setting a phase1-only custody bit + attestation.custody_bits[0] = 1 run_on_attestation(spec, state, store, attestation, False) diff --git a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py index 90f161fa2..918c0f79e 100644 --- a/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py +++ b/test_libs/pyspec/eth2spec/test/fork_choice/test_on_block.py @@ -1,11 +1,12 @@ +from copy import deepcopy from eth2spec.utils.ssz.ssz_impl import signing_root -from eth2spec.test.context import with_all_phases, with_state, bls_switch -from eth2spec.test.helpers.block import build_empty_block_for_next_slot -from eth2spec.test.helpers.state import next_epoch, next_epoch_with_attestations +from eth2spec.test.context import with_all_phases, spec_state_test +from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block +from eth2spec.test.helpers.state import next_epoch, next_epoch_with_attestations, state_transition_and_sign_block -def run_on_block(spec, state, store, block, valid=True): +def run_on_block(spec, store, block, valid=True): if not valid: try: spec.on_block(store, block) @@ -19,19 +20,18 @@ def run_on_block(spec, state, store, block, valid=True): def apply_next_epoch_with_attestations(spec, state, store): - _, new_blocks, state = next_epoch_with_attestations(spec, state, 
True, False) + _, new_blocks, post_state = next_epoch_with_attestations(spec, state, True, False) for block in new_blocks: block_root = signing_root(block) store.blocks[block_root] = block - store.block_states[block_root] = state + store.block_states[block_root] = post_state last_block = block spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT) - return state, store, last_block + return post_state, store, last_block @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_basic(spec, state): # Initialization store = spec.get_genesis_store(state) @@ -41,21 +41,22 @@ def test_basic(spec, state): # On receiving a block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, state) - run_on_block(spec, state, store, block) + state_transition_and_sign_block(spec, state, block) + run_on_block(spec, store, block) # On receiving a block of next epoch store.time = time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH block = build_empty_block_for_next_slot(spec, state) block.slot += spec.SLOTS_PER_EPOCH + state_transition_and_sign_block(spec, state, block) - run_on_block(spec, state, store, block) + run_on_block(spec, store, block) # TODO: add tests for justified_root and finalized_root @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_on_block_checkpoints(spec, state): # Initialization store = spec.get_genesis_store(state) @@ -70,18 +71,18 @@ def test_on_block_checkpoints(spec, state): last_block_root = signing_root(last_block) # Mock the finalized_checkpoint - store.block_states[last_block_root].finalized_checkpoint = ( + fin_state = store.block_states[last_block_root] + fin_state.finalized_checkpoint = ( store.block_states[last_block_root].current_justified_checkpoint ) - # On receiving a block of `GENESIS_SLOT + 1` slot - block = build_empty_block_for_next_slot(spec, state) - run_on_block(spec, state, store, block) + block = build_empty_block_for_next_slot(spec, fin_state) + 
state_transition_and_sign_block(spec, deepcopy(fin_state), block) + run_on_block(spec, store, block) @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_on_block_future_block(spec, state): # Initialization store = spec.get_genesis_store(state) @@ -90,12 +91,12 @@ def test_on_block_future_block(spec, state): # Fail receiving block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, state) - run_on_block(spec, state, store, block, False) + state_transition_and_sign_block(spec, state, block) + run_on_block(spec, store, block, False) @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_on_block_bad_parent_root(spec, state): # Initialization store = spec.get_genesis_store(state) @@ -104,13 +105,18 @@ def test_on_block_bad_parent_root(spec, state): # Fail receiving block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, state) + spec.state_transition(state, block) + block.state_root = state.hash_tree_root() + block.parent_root = b'\x45' * 32 - run_on_block(spec, state, store, block, False) + + sign_block(spec, state, block) + + run_on_block(spec, store, block, False) @with_all_phases -@with_state -@bls_switch +@spec_state_test def test_on_block_before_finalized(spec, state): # Initialization store = spec.get_genesis_store(state) @@ -124,4 +130,5 @@ def test_on_block_before_finalized(spec, state): # Fail receiving block of `GENESIS_SLOT + 1` slot block = build_empty_block_for_next_slot(spec, state) - run_on_block(spec, state, store, block, False) + state_transition_and_sign_block(spec, state, block) + run_on_block(spec, store, block, False) From 3bb6aec3aba680b4493df26139d96cf71a545a4b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 28 Oct 2019 17:10:48 +0800 Subject: [PATCH 248/250] Clarify committee_index subnets by defining and using "subnet" var Co-Authored-By: Hsiao-Wei Wang --- specs/networking/p2p-interface.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 2338aaf88..787ecafb8 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -154,7 +154,7 @@ The payload is carried in the `data` field of a gossipsub message, and varies de | beacon_block | BeaconBlock | | beacon_aggregate_and_proof | AggregateAndProof | | beacon_attestation\* | Attestation | -| committee_index{N}\_beacon_attestation | Attestation | +| committee_index{subnet_id}\_beacon_attestation | Attestation | | voluntary_exit | VoluntaryExit | | proposer_slashing | ProposerSlashing | | attester_slashing | AttesterSlashing | @@ -189,7 +189,7 @@ Additional global topics are used to propagate lower frequency validator message Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `TopicName`s are: -- `committee_index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` - These topics are used to propagate unaggregated attestations to subsections of the network (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the network. +- `committee_index{subnet_id}_beacon_attestation` - These topics are used to propagate unaggregated attestations to the subnet `subnet_id` (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the subnet. - The attestation's committee index (`attestation.data.index`) is for the correct subnet. - The attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`). - The block being voted for (`attestation.data.beacon_block_root`) passes validation. 
@@ -202,7 +202,7 @@ Unaggregated and aggregated attestations from all shards are sent as `Attestatio #### Mainnet -Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. The `CommitteeIndex`, `index`, is assigned to the topic: `committee_index{index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`. +Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. For the `committee_index{subnet_id}_beacon_attestation` topics, `subnet_id` is set to `index % ATTESTATION_SUBNET_COUNT`, where `index` is the `CommitteeIndex` of the given committee. Unaggregated attestations are sent to the subnet topic, `committee_index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` as `Attestation`s. From 04c20aeb28d4eaec28f11aa2b37baf0c4051b7b6 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 28 Oct 2019 18:18:24 +0900 Subject: [PATCH 249/250] add expicit ranges for valid attesation propogation range --- specs/networking/p2p-interface.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/networking/p2p-interface.md b/specs/networking/p2p-interface.md index 787ecafb8..1872b5e5d 100644 --- a/specs/networking/p2p-interface.md +++ b/specs/networking/p2p-interface.md @@ -11,7 +11,7 @@ It consists of four main sections: ## Table of contents - + @@ -173,7 +173,7 @@ There are two primary global topics used to propagate beacon blocks and aggregat - `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `AggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `aggregate_and_proof` on the network. 
- The aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally). - The block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation. - - `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. + - `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (`aggregate_and_proof.aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate_and_proof.aggregate.data.slot`). - The validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`. - `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`. - The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.index`. @@ -193,7 +193,7 @@ Attestation subnets are used to propagate unaggregated attestations to subsectio - The attestation's committee index (`attestation.data.index`) is for the correct subnet. - The attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`). - The block being voted for (`attestation.data.beacon_block_root`) passes validation. - - `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots. 
+ - `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (`attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot`). - The signature of `attestation` is valid. #### Interop From 3d17c1057843103be85514c708b34e5e5d3b6006 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 28 Oct 2019 22:44:41 +0900 Subject: [PATCH 250/250] fix ruemel.yaml dependency issue --- test_libs/gen_helpers/requirements.txt | 2 +- test_libs/gen_helpers/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test_libs/gen_helpers/requirements.txt b/test_libs/gen_helpers/requirements.txt index 557cae631..dc3f61904 100644 --- a/test_libs/gen_helpers/requirements.txt +++ b/test_libs/gen_helpers/requirements.txt @@ -1,2 +1,2 @@ -ruamel.yaml==0.15.96 +ruamel.yaml==0.16.5 eth-utils==1.6.0 diff --git a/test_libs/gen_helpers/setup.py b/test_libs/gen_helpers/setup.py index ee2c815c7..b674dbfb6 100644 --- a/test_libs/gen_helpers/setup.py +++ b/test_libs/gen_helpers/setup.py @@ -4,7 +4,7 @@ setup( name='gen_helpers', packages=['gen_base', 'gen_from_tests'], install_requires=[ - "ruamel.yaml==0.15.96", + "ruamel.yaml==0.16.5", "eth-utils==1.6.0" ] )