Merge branch 'dev' into SyncCommitteeSignature-to-SyncCommitteeMessage
commit 65f8d3d296
setup.py
@@ -596,7 +596,7 @@ def objects_to_spec(preset_name: str,
 
 def format_config_var(name: str, vardef: VariableDefinition) -> str:
     if vardef.type_name is None:
-        out = f'{name}={vardef.value}'
+        out = f'{name}={vardef.value},'
     else:
         out = f'{name}={vardef.type_name}({vardef.value}),'
     if vardef.comment is not None:

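The trailing comma added above keeps each generated entry a complete argument when the formatted lines are later joined into the generated configuration object (the joining code is not shown in this hunk). A minimal sketch of the fixed behaviour, using a simplified stand-in for `VariableDefinition` rather than the real `setup.py` class:

```python
# Minimal sketch (not the real setup.py): a simplified VariableDefinition stand-in
# to illustrate why both branches now emit a trailing comma.
from typing import NamedTuple, Optional

class VariableDefinition(NamedTuple):  # simplified stand-in
    type_name: Optional[str]
    value: str
    comment: Optional[str] = None

def format_config_var(name: str, vardef: VariableDefinition) -> str:
    if vardef.type_name is None:
        out = f'{name}={vardef.value},'
    else:
        out = f'{name}={vardef.type_name}({vardef.value}),'
    if vardef.comment is not None:
        out += f'  # {vardef.comment}'
    return out

entries = [
    format_config_var('PRESET_BASE', VariableDefinition(None, "'mainnet'")),
    format_config_var('MIN_GENESIS_TIME', VariableDefinition('uint64', '1606824000')),
]
# Every line now ends in ',' so the joined output is a valid argument list.
print('\n'.join(entries))
```
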
@@ -1017,7 +1017,7 @@ setup(
         "py_ecc==5.2.0",
         "milagro_bls_binding==1.6.3",
         "dataclasses==0.6",
-        "remerkleable==0.1.19",
+        "remerkleable==0.1.20",
         RUAMEL_YAML_VERSION,
         "lru-dict==1.1.6",
         MARKO_VERSION,

@@ -120,7 +120,7 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64
     return sync_committee.pubkeys[i:i + sync_subcommittee_size]
 ```
 
-- _[IGNORE]_ The contribution's slot is for the current slot, i.e. `contribution.slot == current_slot`.
+- _[IGNORE]_ The contribution's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `contribution.slot == current_slot`.
 - _[IGNORE]_ The block being signed over (`contribution.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
 - _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`.
 - _[IGNORE]_ The sync committee contribution is the first valid contribution received for the aggregator with index `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and subcommittee index `contribution.subcommittee_index`.

@@ -141,7 +141,7 @@ The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync
 
 The following validations MUST pass before forwarding the `sync_committee_message` on the network:
 
-- _[IGNORE]_ The signature's slot is for the current slot, i.e. `sync_committee_message.slot == current_slot`.
+- _[IGNORE]_ The signature's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `sync_committee_message.slot == current_slot`.
 - _[IGNORE]_ The block being signed over (`sync_committee_message.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
 - _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_message.validator_index`.
 - _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index)`.

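Both changed bullets above relax the strict `slot == current_slot` check into a tolerance window. A minimal sketch of how a client might apply `MAXIMUM_GOSSIP_CLOCK_DISPARITY` (500 ms in the phase 0 networking spec) when deciding whether a message's slot counts as "current"; the helper name and the time plumbing are illustrative, not spec code:

```python
# Illustrative helper, not spec code: treat a message slot as "current" if local
# time, adjusted by the allowed clock disparity, falls within that slot's window.
SECONDS_PER_SLOT = 12                      # mainnet preset value
MAXIMUM_GOSSIP_CLOCK_DISPARITY = 0.5       # seconds (500 ms in the phase 0 p2p spec)

def is_current_slot(message_slot: int, genesis_time: int, now: float) -> bool:
    slot_start = genesis_time + message_slot * SECONDS_PER_SLOT
    slot_end = slot_start + SECONDS_PER_SLOT
    return (slot_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY
            <= now
            < slot_end + MAXIMUM_GOSSIP_CLOCK_DISPARITY)

# Example: a sync committee message for slot 100 arriving 200 ms before that slot
# starts locally is still accepted under the disparity allowance.
genesis = 1_600_000_000  # arbitrary example genesis time
assert is_current_slot(100, genesis, genesis + 100 * SECONDS_PER_SLOT - 0.2)
```
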
@@ -216,6 +216,7 @@ class ShardBlobHeader(Container):
     # Slot and shard that this header is intended for
     slot: Slot
     shard: Shard
+    # SSZ-summary of ShardBlobBody
     body_summary: ShardBlobBodySummary
     # Proposer of the shard-blob
     proposer_index: ValidatorIndex

@@ -253,7 +254,7 @@ class ShardBlobReference(Container):
     # Slot and shard that this reference is intended for
     slot: Slot
     shard: Shard
-    # Hash-tree-root of commitment data
+    # Hash-tree-root of ShardBlobBody
     body_root: Root
     # Proposer of the shard-blob
     proposer_index: ValidatorIndex

@@ -17,9 +17,11 @@
 - [SignedShardBlob](#signedshardblob)
 - [Gossip domain](#gossip-domain)
 - [Topics and messages](#topics-and-messages)
-- [Shard blobs: `shard_blob_{subnet_id}`](#shard-blobs-shard_blob_subnet_id)
-- [Shard header: `shard_header`](#shard-header-shard_header)
-- [Shard proposer slashing: `shard_proposer_slashing`](#shard-proposer-slashing-shard_proposer_slashing)
+- [Shard blob subnets](#shard-blob-subnets)
+- [`shard_blob_{subnet_id}`](#shard_blob_subnet_id)
+- [Global topics](#global-topics)
+- [`shard_header`](#shard_header)
+- [`shard_proposer_slashing`](#shard_proposer_slashing)
 
 <!-- END doctoc generated TOC please keep comment here to allow auto update -->
 <!-- /TOC -->

@@ -64,6 +66,7 @@ class ShardBlob(Container):
     # Slot and shard that this blob is intended for
     slot: Slot
     shard: Shard
+    # Shard data with related commitments and beacon anchor
     body: ShardBlobBody
     # Proposer of the shard-blob
     proposer_index: ValidatorIndex

@@ -88,12 +91,16 @@ Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.
 | Name                             | Message Type              |
 |----------------------------------|---------------------------|
 | `shard_blob_{subnet_id}`         | `SignedShardBlob`         |
-| `shard_header`                   | `SignedShardHeader`       |
+| `shard_header`                   | `SignedShardBlobHeader`   |
 | `shard_proposer_slashing`        | `ShardProposerSlashing`   |
 
 The [DAS network specification](./das-p2p.md) defines additional topics.
 
-#### Shard blobs: `shard_blob_{subnet_id}`
+#### Shard blob subnets
+
+Shard blob subnets are used to propagate shard blobs to subsections of the network.
+
+##### `shard_blob_{subnet_id}`
 
 Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{subnet_id}` subnets.
 

@@ -129,19 +136,23 @@ The following validations MUST pass before forwarding the `signed_blob` (with in
   the block MAY be queued for later processing while proposers for the blob's branch are calculated --
   in such a case _do not_ `REJECT`, instead `IGNORE` this message.
 
-#### Shard header: `shard_header`
+#### Global topics
+
+There are two additional global topics for Sharding, one is used to propagate shard blob headers (`shard_header`) to
+all nodes on the network. Another one is used to propagate validator message (`shard_proposer_slashing`).
+
+##### `shard_header`
 
 Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_header` subnet.
 
-The following validations MUST pass before forwarding the `signed_shard_header` (with inner `message` as `header`) on the network.
+The following validations MUST pass before forwarding the `signed_shard_blob_header` (with inner `message` as `header`) on the network.
 - _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
   i.e. validate that `header.slot <= current_slot`
   (a client MAY queue future headers for processing at the appropriate slot).
 - _[IGNORE]_ The `header` is new enough to be still be processed --
   i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)`
 - _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination.
-- _[REJECT]_ The proposer signature, `signed_shard_header.signature`, is valid with respect to the `proposer_index` pubkey.
+- _[REJECT]_ The proposer signature, `signed_shard_blob_header.signature`, is valid with respect to the `proposer_index` pubkey.
 - _[REJECT]_ The header is proposed by the expected `proposer_index` for the block's slot
   in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`).
   If the `proposer_index` cannot immediately be verified against the expected shuffling,

@@ -149,7 +160,7 @@ The following validations MUST pass before forwarding the `signed_shard_header`
   in such a case _do not_ `REJECT`, instead `IGNORE` this message.
 
 
-#### Shard proposer slashing: `shard_proposer_slashing`
+##### `shard_proposer_slashing`
 
 Shard proposer slashings, in the form of `ShardProposerSlashing`, are published to the global `shard_proposer_slashing` topic.
 

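Several of the `_[IGNORE]_` rules in the hunks above are first-seen de-duplication checks keyed on message fields; for `shard_header` the key is the `(header.proposer_index, header.slot, header.shard)` tuple. A small illustrative cache, purely an implementation sketch rather than spec code:

```python
# Illustrative first-seen tracking for the shard_header IGNORE rule; the key shape
# comes from the spec text above, the cache itself is a client implementation choice
# (a real client would also prune entries for old slots).
from typing import Set, Tuple

seen_headers: Set[Tuple[int, int, int]] = set()

def should_process_header(proposer_index: int, slot: int, shard: int) -> bool:
    """Return True only for the first valid header seen for this combination."""
    key = (proposer_index, slot, shard)
    if key in seen_headers:
        return False  # IGNORE: a valid header for this key was already received
    seen_headers.add(key)
    return True

assert should_process_header(7, 1024, 3) is True
assert should_process_header(7, 1024, 3) is False  # duplicate is ignored
```
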
@@ -17,10 +17,10 @@
 - [Serialization](#serialization)
 - [`uintN`](#uintn)
 - [`boolean`](#boolean)
-- [`null`](#null)
 - [`Bitvector[N]`](#bitvectorn)
 - [`Bitlist[N]`](#bitlistn)
-- [Vectors, containers, lists, unions](#vectors-containers-lists-unions)
+- [Vectors, containers, lists](#vectors-containers-lists)
+- [Union](#union)
 - [Deserialization](#deserialization)
 - [Merkleization](#merkleization)
 - [Summaries and expansions](#summaries-and-expansions)

@@ -61,7 +61,7 @@
 * **bitlist**: ordered variable-length collection of `boolean` values, limited to `N` bits
     * notation `Bitlist[N]`
 * **union**: union type containing one of the given subtypes
-    * notation `Union[type_0, type_1, ...]`, e.g. `union[null, uint64]`
+    * notation `Union[type_0, type_1, ...]`, e.g. `union[None, uint64, uint32]`
 
 *Note*: Both `Vector[boolean, N]` and `Bitvector[N]` are valid, yet distinct due to their different serialization requirements. Similarly, both `List[boolean, N]` and `Bitlist[N]` are valid, yet distinct. Generally `Bitvector[N]`/`Bitlist[N]` are preferred because of their serialization efficiencies.
 

@@ -77,7 +77,6 @@ For convenience we alias:
 * `byte` to `uint8` (this is a basic type)
 * `BytesN` and `ByteVector[N]` to `Vector[byte, N]` (this is *not* a basic type)
 * `ByteList[N]` to `List[byte, N]`
-* `null`: `{}`
 
 ### Default values
 Assuming a helper function `default(type)` which returns the default value for `type`, we can recursively define the default value for all types.

@@ -101,7 +100,7 @@ An SSZ object is called zeroed (and thus, `is_zero(object)` returns true) if it
 
 - Empty vector types (`Vector[type, 0]`, `Bitvector[0]`) are illegal.
 - Containers with no fields are illegal.
-- The `null` type is only legal as the first type in a union subtype (i.e. with type index zero).
+- The `None` type option in a `Union` type is only legal as the first option (i.e. with index zero).
 
 ## Serialization
 

@@ -123,12 +122,6 @@ assert value in (True, False)
 return b"\x01" if value is True else b"\x00"
 ```
 
-### `null`
-
-```python
-return b""
-```
-
 ### `Bitvector[N]`
 
 ```python

@@ -150,7 +143,7 @@ array[len(value) // 8] |= 1 << (len(value) % 8)
 return bytes(array)
 ```
 
-### Vectors, containers, lists, unions
+### Vectors, containers, lists
 
 ```python
 # Recursively serialize

@@ -170,14 +163,26 @@ fixed_parts = [part if part != None else variable_offsets[i] for i, part in enum
 return b"".join(fixed_parts + variable_parts)
 ```
 
-If `value` is a union type:
+### Union
 
-Define value as an object that has properties `value.value` with the contained value, and `value.type_index` which indexes the type.
+A `value` as `Union[T...]` type has properties `value.value` with the contained value, and `value.selector` which indexes the selected `Union` type option `T`.
 
+A `Union`:
+- May have multiple selectors with the same type.
+- Should not use selectors above 127 (i.e. highest bit is set), these are reserved for backwards compatible extensions.
+- Must have at least 1 type option.
+- May have `None` as first type option, i.e. `selector == 0`
+- Must have at least 2 type options if the first is `None`
+- Is always considered a variable-length type, even if all type options have an equal fixed-length.
+
 ```python
-serialized_bytes = serialize(value.value)
-serialized_type_index = value.type_index.to_bytes(BYTES_PER_LENGTH_OFFSET, "little")
-return serialized_type_index + serialized_bytes
+if value.value is None:
+    assert value.selector == 0
+    return b"\x00"
+else:
+    serialized_bytes = serialize(value.value)
+    serialized_selector_index = value.selector.to_bytes(1, "little")
+    return serialized_selector_index + serialized_bytes
 ```
 
 ## Deserialization

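The new `### Union` serialization rule above can be exercised end to end with a toy stand-in. The sketch below assumes a value object with `.selector` and `.value` attributes (as the spec text defines) and hard-codes `uint64` as the only non-`None` option; it is illustrative, not a library API:

```python
# Toy model of Union[None, uint64] serialization following the rules above.
from dataclasses import dataclass
from typing import Optional

@dataclass
class UnionValue:
    selector: int          # index of the selected type option
    value: Optional[int]   # None for the None option, otherwise a uint64 here

def serialize_union(union: UnionValue) -> bytes:
    if union.value is None:
        assert union.selector == 0      # None is only legal as option 0
        return b"\x00"
    serialized_selector_index = union.selector.to_bytes(1, "little")
    serialized_bytes = union.value.to_bytes(8, "little")  # serialize(uint64)
    return serialized_selector_index + serialized_bytes

assert serialize_union(UnionValue(0, None)) == b"\x00"
assert serialize_union(UnionValue(1, 2)) == b"\x01" + (2).to_bytes(8, "little")
```
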
@@ -191,12 +196,14 @@ Deserialization can be implemented using a recursive algorithm. The deserializat
 * The size of each object in the vector/list can be inferred from the difference of two offsets. To get the size of the last object, the total number of bytes has to be known (it is not generally possible to deserialize an SSZ object of unknown length)
 * Containers follow the same principles as vectors, with the difference that there may be fixed-size objects in a container as well. This means the `fixed_parts` data will contain offsets as well as fixed-size objects.
 * In the case of bitlists, the length in bits cannot be uniquely inferred from the number of bytes in the object. Because of this, they have a bit at the end that is always set. This bit has to be used to infer the size of the bitlist in bits.
+* In the case of unions, the first byte of the deserialization scope is deserialized as type selector, the remainder of the scope is deserialized as the selected type.
 
 Note that deserialization requires hardening against invalid inputs. A non-exhaustive list:
 
 - Offsets: out of order, out of range, mismatching minimum element size.
 - Scope: Extra unused bytes, not aligned with element size.
 - More elements than a list limit allows. Part of enforcing consensus.
+- An out-of-bounds selected index in an `Union`
 
 Efficient algorithms for computing this object can be found in [the implementations](#implementations).
 

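Continuing the toy `Union[None, uint64]` example, the new deserialization bullet and the out-of-bounds-selector hardening item above look roughly like this in practice (illustrative code, not spec or library API):

```python
# First byte of the scope is the selector; the rest is the selected option's data.
def deserialize_union_none_uint64(scope: bytes):
    assert len(scope) >= 1
    selector = scope[0]
    if selector == 0:
        assert scope == b"\x00"         # the None option carries no further payload
        return None
    assert selector == 1, "out-of-bounds selector must be rejected"
    assert len(scope) == 1 + 8          # uint64 payload is exactly 8 bytes
    return int.from_bytes(scope[1:], "little")

assert deserialize_union_none_uint64(b"\x00") is None
assert deserialize_union_none_uint64(b"\x01" + (2).to_bytes(8, "little")) == 2
```
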
@@ -227,7 +234,7 @@ We first define helper functions:
     - If `1` chunk: the root is the chunk itself.
     - If `> 1` chunks: merkleize as binary tree.
 * `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` little-endian serialization) return `hash(root + length)`.
-* `mix_in_type`: Given a Merkle root `root` and a type_index `type_index` (`"uint256"` little-endian serialization) return `hash(root + type_index)`.
+* `mix_in_selector`: Given a Merkle root `root` and a type selector `selector` (`"uint256"` little-endian serialization) return `hash(root + selector)`.
 
 We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:
 

@@ -237,7 +244,8 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi
 * `mix_in_length(merkleize(pack_bits(value), limit=chunk_count(type)), len(value))` if `value` is a bitlist.
 * `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container.
 * `mix_in_length(merkleize([hash_tree_root(element) for element in value], limit=chunk_count(type)), len(value))` if `value` is a list of composite objects.
-* `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type.
+* `mix_in_selector(hash_tree_root(value.value), value.selector)` if `value` is of union type, and `value.value` is not `None`
+* `mix_in_selector(Bytes32(), 0)` if `value` is of union type, and `value.value` is `None`
 
 ## Summaries and expansions
 

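The two union `hash_tree_root` rules above can be checked with a few lines of standalone code: `mix_in_selector` follows directly from its definition earlier in this document, and the single-chunk root of a lone `uint64` is its little-endian bytes padded to 32 bytes. This is an illustrative sketch, not a library API:

```python
import hashlib

def hash_fn(data: bytes) -> bytes:
    return hashlib.sha256(data).digest()

def mix_in_selector(root: bytes, selector: int) -> bytes:
    # hash(root + selector), with the selector as a uint256 little-endian value.
    return hash_fn(root + selector.to_bytes(32, "little"))

def uint64_root(v: int) -> bytes:
    # A lone uint64 packs into a single 32-byte chunk, which is its own root.
    return v.to_bytes(8, "little") + b"\x00" * 24

# Union[None, uint64]:
root_none = mix_in_selector(b"\x00" * 32, 0)    # value is None -> zero chunk, selector 0
root_some = mix_in_selector(uint64_root(2), 1)  # uint64 option -> value root, selector 1
assert root_none != root_some
```
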
@@ -4,7 +4,7 @@ from eth2spec.utils.ssz.ssz_typing import Bitvector
 from eth2spec.test.helpers.block import build_empty_block
 from eth2spec.test.helpers.keys import pubkey_to_privkey
 from eth2spec.test.helpers.state import transition_to
-from eth2spec.utils import bls
+from eth2spec.test.helpers.sync_committee import compute_sync_committee_signature
 from eth2spec.utils.bls import only_with_bls
 from eth2spec.test.context import (
     with_altair_and_later,

@@ -85,12 +85,9 @@ def _get_sync_committee_signature(
     pubkey = state.current_sync_committee.pubkeys[sync_committee_index]
     privkey = pubkey_to_privkey[pubkey]
 
-    domain = spec.get_domain(
-        state,
-        spec.DOMAIN_SYNC_COMMITTEE,
+    return compute_sync_committee_signature(
+        spec, state, target_slot, privkey, block_root=target_block_root
     )
-    signing_data = spec.compute_signing_root(target_block_root, domain)
-    return bls.Sign(privkey, spec.hash_tree_root(signing_data))
 
 
 @only_with_bls()

@@ -2,6 +2,7 @@
 # Ignore linter: This module makes importing SSZ types easy, and hides away the underlying library from the spec.
 
 from remerkleable.complex import Container, Vector, List
+from remerkleable.union import Union
 from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32, uint64, uint128, uint256
 from remerkleable.bitfields import Bitvector, Bitlist
 from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList