Mirror of https://github.com/status-im/eth2.0-specs.git (synced 2025-01-12 03:34:20 +00:00)

Commit c01995436e: Merge branch 'v08x' into n_zero
@@ -21,6 +21,12 @@ Core specifications for Eth 2.0 client validation can be found in [specs/core](s
 * [Custody Game](specs/core/1_custody-game.md)
 * [Shard Data Chains](specs/core/1_shard-data-chains.md)
 
+### Phase 2
+
+Phase 2 is still actively in R&D and does not yet have any formal specifications.
+
+See the [Eth 2.0 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) for current progress, discussions, and definitions regarding this work.
+
 ### Accompanying documents can be found in [specs](specs) and include:
 
 * [SimpleSerialize (SSZ) spec](specs/simple-serialize.md)
@@ -128,3 +128,6 @@ DOMAIN_ATTESTATION: 0x02000000
 DOMAIN_DEPOSIT: 0x03000000
 DOMAIN_VOLUNTARY_EXIT: 0x04000000
 DOMAIN_TRANSFER: 0x05000000
+DOMAIN_CUSTODY_BIT_CHALLENGE: 0x06000000
+DOMAIN_SHARD_PROPOSER: 0x80000000
+DOMAIN_SHARD_ATTESTER: 0x81000000
@@ -125,4 +125,7 @@ DOMAIN_RANDAO: 0x01000000
 DOMAIN_ATTESTATION: 0x02000000
 DOMAIN_DEPOSIT: 0x03000000
 DOMAIN_VOLUNTARY_EXIT: 0x04000000
 DOMAIN_TRANSFER: 0x05000000
+DOMAIN_CUSTODY_BIT_CHALLENGE: 0x06000000
+DOMAIN_SHARD_PROPOSER: 0x80000000
+DOMAIN_SHARD_ATTESTER: 0x81000000
@@ -49,9 +49,9 @@
 - [`BeaconState`](#beaconstate)
 - [Helper functions](#helper-functions)
   - [Math](#math)
-    - [`int_to_bytes`](#int_to_bytes)
     - [`integer_squareroot`](#integer_squareroot)
     - [`xor`](#xor)
+    - [`int_to_bytes`](#int_to_bytes)
     - [`bytes_to_int`](#bytes_to_int)
   - [Crypto](#crypto)
     - [`hash`](#hash)
@@ -207,6 +207,7 @@ The following values are (non-configurable) constants used throughout the specif
 
 | Name | Value | Unit | Duration |
 | - | - | :-: | :-: |
 | `SECONDS_PER_SLOT` | `6` | seconds | 6 seconds |
+| `MIN_ATTESTATION_INCLUSION_DELAY` | `2**0` (= 1) | slots | 6 seconds |
 | `SLOTS_PER_EPOCH` | `2**6` (= 64) | slots | 6.4 minutes |
 | `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes |
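As a quick sanity check of the Duration column, using the config values from the table above:

```python
# Sanity check of the Duration column, from the config values above.
SECONDS_PER_SLOT = 6
SLOTS_PER_EPOCH = 2**6  # 64

assert SECONDS_PER_SLOT * 1 == 6                  # MIN_ATTESTATION_INCLUSION_DELAY: 1 slot = 6 s
assert SECONDS_PER_SLOT * SLOTS_PER_EPOCH == 384  # 1 epoch = 384 s
assert 384 / 60 == 6.4                            # = 6.4 minutes, as listed
```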
@@ -540,8 +541,6 @@ class BeaconState(Container):
 
 ### Math
 
-#### `int_to_bytes`
-
 #### `integer_squareroot`
 
 ```python
@@ -560,13 +559,15 @@ def integer_squareroot(n: uint64) -> uint64:
 #### `xor`
 
 ```python
-def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32:
+def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32:
     """
     Return the exclusive-or of two 32-byte strings.
     """
-    return Bytes32(a ^ b for a, b in zip(bytes1, bytes2))
+    return Bytes32(a ^ b for a, b in zip(bytes_1, bytes_2))
 ```
 
 #### `int_to_bytes`
 
 ```python
 def int_to_bytes(n: uint64, length: uint64) -> bytes:
     """
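The `xor` change above is a pure rename of the parameters; a minimal standalone check of the helper's behavior, with `Bytes32` stubbed as plain `bytes`:

```python
# Standalone check of the xor helper, with Bytes32 stubbed as plain bytes.
def xor(bytes_1: bytes, bytes_2: bytes) -> bytes:
    return bytes(a ^ b for a, b in zip(bytes_1, bytes_2))

a = b"\x01" * 32
b = b"\x03" * 32
assert xor(a, b) == b"\x02" * 32  # bitwise XOR, byte by byte
```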
@@ -653,7 +654,7 @@ def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationDa
 ```python
 def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
     """
-    Verify validity of ``indexed_attestation``.
+    Check if ``indexed_attestation`` has valid indices and signature.
     """
     bit_0_indices = indexed_attestation.custody_bit_0_indices
     bit_1_indices = indexed_attestation.custody_bit_1_indices
@@ -865,7 +866,7 @@ def get_seed(state: BeaconState, epoch: Epoch) -> Hash:
     """
     Return the seed at ``epoch``.
     """
-    mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD))  # Avoid underflow
+    mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1))  # Avoid underflow
     active_index_root = state.active_index_roots[epoch % EPOCHS_PER_HISTORICAL_VECTOR]
     return hash(mix + active_index_root + int_to_bytes(epoch, length=32))
 ```
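Since `get_randao_mix` indexes its vector modulo `EPOCHS_PER_HISTORICAL_VECTOR`, adding the vector length before subtracting keeps the argument non-negative in `uint64` arithmetic; the extra `- 1` shifts the lookup one epoch earlier. A sketch of the index arithmetic (constant values assumed for illustration only):

```python
# Illustration only: the vector index the patched expression resolves to.
EPOCHS_PER_HISTORICAL_VECTOR = 2**16  # assumed value for illustration
MIN_SEED_LOOKAHEAD = 1

def mix_index(epoch: int) -> int:
    # epoch + vector - lookahead - 1 never goes below zero, so no uint64 underflow
    return (epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1) % EPOCHS_PER_HISTORICAL_VECTOR

assert mix_index(0) == EPOCHS_PER_HISTORICAL_VECTOR - 2  # safe even at epoch 0
assert mix_index(100) == 100 - MIN_SEED_LOOKAHEAD - 1    # one epoch earlier than pre-patch
```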
@@ -989,7 +990,7 @@ def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei:
     """
     Return the combined effective balance of the ``indices``. (1 Gwei minimum to avoid divisions by zero.)
     """
-    return Gwei(max(sum([state.validators[index].effective_balance for index in indices]), 1))
+    return Gwei(max(1, sum([state.validators[index].effective_balance for index in indices])))
 ```
 
 #### `get_total_active_balance`
@@ -1216,7 +1217,7 @@ def process_slot(state: BeaconState) -> None:
     previous_state_root = hash_tree_root(state)
     state.state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root
     # Cache latest block header state root
-    if state.latest_block_header.state_root == Hash():
+    if state.latest_block_header.state_root == Bytes32():
         state.latest_block_header.state_root = previous_state_root
     # Cache block root
     previous_block_root = signing_root(state.latest_block_header)
@@ -1270,7 +1271,7 @@ def get_unslashed_attesting_indices(state: BeaconState,
     output = set()  # type: Set[ValidatorIndex]
     for a in attestations:
         output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits))
-    return set(filter(lambda index: not state.validators[index].slashed, list(output)))
+    return set(filter(lambda index: not state.validators[index].slashed, output))
 ```
 
 ```python
@@ -1283,10 +1284,10 @@ def get_winning_crosslink_and_attesting_indices(state: BeaconState,
                                                 epoch: Epoch,
                                                 shard: Shard) -> Tuple[Crosslink, Set[ValidatorIndex]]:
     attestations = [a for a in get_matching_source_attestations(state, epoch) if a.data.crosslink.shard == shard]
-    crosslinks = list(filter(
+    crosslinks = filter(
         lambda c: hash_tree_root(state.current_crosslinks[shard]) in (c.parent_root, hash_tree_root(c)),
         [a.data.crosslink for a in attestations]
-    ))
+    )
     # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically)
     winning_crosslink = max(crosslinks, key=lambda c: (
         get_attesting_balance(state, [a for a in attestations if a.data.crosslink == c]), c.data_root
@@ -1482,7 +1483,9 @@ def process_slashings(state: BeaconState) -> None:
     total_balance = get_total_active_balance(state)
     for index, validator in enumerate(state.validators):
         if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
-            penalty = validator.effective_balance * min(sum(state.slashings) * 3, total_balance) // total_balance
+            increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from penalty numerator to avoid uint64 overflow
+            penalty_numerator = validator.effective_balance // increment * min(sum(state.slashings) * 3, total_balance)
+            penalty = penalty_numerator // total_balance * increment
             decrease_balance(state, ValidatorIndex(index), penalty)
 ```
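The factoring matters because `effective_balance * min(sum(state.slashings) * 3, total_balance)` can exceed `2**64 - 1` in Gwei arithmetic; dividing the balance by `EFFECTIVE_BALANCE_INCREMENT` first keeps the numerator in range. A sketch with assumed values:

```python
# Illustration with assumed Gwei values: why the penalty numerator is factored.
UINT64_MAX = 2**64 - 1
EFFECTIVE_BALANCE_INCREMENT = 10**9      # 1 ETH in Gwei (assumed)
effective_balance = 32 * 10**9           # a 32 ETH validator
total_balance = 10_000_000 * 10**9       # assumed ~10M ETH staked
slashed_sum_x3 = total_balance           # min(...) capped at total_balance

naive_numerator = effective_balance * slashed_sum_x3
assert naive_numerator > UINT64_MAX      # would overflow a uint64

factored = effective_balance // EFFECTIVE_BALANCE_INCREMENT * slashed_sum_x3
assert factored <= UINT64_MAX            # fits after factoring out the increment
penalty = factored // total_balance * EFFECTIVE_BALANCE_INCREMENT
```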
@@ -1546,8 +1549,9 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
     state.latest_block_header = BeaconBlockHeader(
         slot=block.slot,
         parent_root=block.parent_root,
-        state_root=Hash(),  # Overwritten in the next `process_slot` call
+        # state_root: zeroed, overwritten in the next `process_slot` call
         body_root=hash_tree_root(block.body),
+        # signature is always zeroed
     )
     # Verify proposer is not slashed
     proposer = state.validators[get_beacon_proposer_index(state)]
@@ -1670,7 +1674,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
     assert data.crosslink.parent_root == hash_tree_root(parent_crosslink)
     assert data.crosslink.start_epoch == parent_crosslink.end_epoch
     assert data.crosslink.end_epoch == min(data.target.epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)
-    assert data.crosslink.data_root == Hash()  # [to be removed in phase 1]
+    assert data.crosslink.data_root == Bytes32()  # [to be removed in phase 1]
 
     # Check signature
     assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
@@ -8,8 +8,6 @@
 - [Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice](#ethereum-20-phase-0----beacon-chain-fork-choice)
   - [Table of contents](#table-of-contents)
   - [Introduction](#introduction)
-  - [Configuration](#configuration)
-    - [Time parameters](#time-parameters)
   - [Fork choice](#fork-choice)
     - [Helpers](#helpers)
       - [`LatestMessage`](#latestmessage)
@@ -29,14 +27,6 @@
 
 This document is the beacon chain fork choice spec, part of Ethereum 2.0 Phase 0. It assumes the [beacon chain state transition function spec](./0_beacon-chain.md).
 
-## Configuration
-
-### Time parameters
-
-| Name | Value | Unit | Duration |
-| - | - | :-: | :-: |
-| `SECONDS_PER_SLOT` | `6` | seconds | 6 seconds |
-
 ## Fork choice
 
 The head block root associated with a `store` is defined as `get_head(store)`. At genesis, let `store = get_genesis_store(genesis_state)` and update `store` by running:
@@ -101,8 +91,12 @@ def get_genesis_store(genesis_state: BeaconState) -> Store:
 ```python
 def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
     block = store.blocks[root]
-    assert block.slot >= slot
-    return root if block.slot == slot else get_ancestor(store, block.parent_root, slot)
+    if block.slot > slot:
+        return get_ancestor(store, block.parent_root, slot)
+    elif block.slot == slot:
+        return root
+    else:
+        return Bytes32()  # root is older than queried slot: no results.
 ```
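The rewrite replaces the hard `assert` with an explicit three-way branch; a toy walkthrough of the new behavior, with `Store` and blocks stubbed out via `SimpleNamespace` and roots as plain bytes:

```python
# Toy walkthrough of the patched get_ancestor, with Store and blocks stubbed.
from types import SimpleNamespace

blocks = {
    b"genesis": SimpleNamespace(slot=0, parent_root=None),
    b"a": SimpleNamespace(slot=1, parent_root=b"genesis"),
    b"b": SimpleNamespace(slot=3, parent_root=b"a"),
}
store = SimpleNamespace(blocks=blocks)

def get_ancestor(store, root, slot):
    block = store.blocks[root]
    if block.slot > slot:
        return get_ancestor(store, block.parent_root, slot)
    elif block.slot == slot:
        return root
    else:
        return b"\x00" * 32  # root is older than the queried slot: no result

assert get_ancestor(store, b"b", 3) == b"b"                # equal slot: identity
assert get_ancestor(store, b"b", 0) == b"genesis"          # walks back through parents
assert get_ancestor(store, b"genesis", 2) == b"\x00" * 32  # older than slot: zero hash
```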
 
 #### `get_latest_attesting_balance`
@@ -15,7 +15,7 @@
   - [Time parameters](#time-parameters)
   - [Max operations per block](#max-operations-per-block)
   - [Reward and penalty quotients](#reward-and-penalty-quotients)
-  - [Signature domains](#signature-domains)
+  - [Signature domain types](#signature-domain-types)
   - [TODO PLACEHOLDER](#todo-placeholder)
 - [Data structures](#data-structures)
   - [Custody objects](#custody-objects)
@@ -156,7 +156,7 @@ class CustodyChunkChallengeRecord(Container):
     challenger_index: ValidatorIndex
     responder_index: ValidatorIndex
     inclusion_epoch: Epoch
-    data_root: Bytes32
+    data_root: Hash
     depth: uint64
     chunk_index: uint64
 ```
@@ -169,9 +169,9 @@ class CustodyBitChallengeRecord(Container):
     challenger_index: ValidatorIndex
     responder_index: ValidatorIndex
     inclusion_epoch: Epoch
-    data_root: Bytes32
+    data_root: Hash
     chunk_count: uint64
-    chunk_bits_merkle_root: Bytes32
+    chunk_bits_merkle_root: Hash
     responder_key: BLSSignature
 ```
 
@@ -182,9 +182,9 @@ class CustodyResponse(Container):
     challenge_index: uint64
     chunk_index: uint64
     chunk: Vector[Bytes[PLACEHOLDER], BYTES_PER_CUSTODY_CHUNK]
-    data_branch: List[Bytes32, PLACEHOLDER]
-    chunk_bits_branch: List[Bytes32, PLACEHOLDER]
-    chunk_bits_leaf: Bytes32
+    data_branch: List[Hash, PLACEHOLDER]
+    chunk_bits_branch: List[Hash, PLACEHOLDER]
+    chunk_bits_leaf: Hash
 ```
 
 ### New beacon operations
@@ -296,7 +296,7 @@ def get_custody_chunk_bit(key: BLSSignature, chunk: bytes) -> bool:
 ### `get_chunk_bits_root`
 
 ```python
-def get_chunk_bits_root(chunk_bits: bytes) -> Bytes32:
+def get_chunk_bits_root(chunk_bits: bytes) -> Hash:
     aggregated_bits = bytearray([0] * 32)
     for i in range(0, len(chunk_bits), 32):
         for j in range(32):
@@ -13,7 +13,7 @@
   - [Misc](#misc)
   - [Initial values](#initial-values)
   - [Time parameters](#time-parameters)
-  - [Signature domains](#signature-domains)
+  - [Signature domain types](#signature-domain-types)
   - [TODO PLACEHOLDER](#todo-placeholder)
 - [Data structures](#data-structures)
   - [`ShardBlockBody`](#shardblockbody)
@@ -61,7 +61,6 @@ This document describes the shard data layer and the shard fork choice rule in P
 | Name | Value | Unit | Duration |
 | - | - | :-: | :-: |
-| `CROSSLINK_LOOKBACK` | `2**0` (= 1) | epochs | 6.2 minutes |
 | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days |
 
 ### Signature domain types
 
@@ -94,7 +93,7 @@ class ShardAttestation(Container):
     class data(Container):
         slot: Slot
         shard: Shard
-        shard_block_root: Bytes32
+        shard_block_root: Hash
     aggregation_bits: Bitlist[PLACEHOLDER]
     aggregate_signature: BLSSignature
 ```
@@ -105,10 +104,10 @@ class ShardAttestation(Container):
 class ShardBlock(Container):
     slot: Slot
     shard: Shard
-    beacon_chain_root: Bytes32
-    parent_root: Bytes32
+    beacon_chain_root: Hash
+    parent_root: Hash
     data: ShardBlockBody
-    state_root: Bytes32
+    state_root: Hash
    attestations: List[ShardAttestation, PLACEHOLDER]
     signature: BLSSignature
 ```
@@ -119,10 +118,10 @@ class ShardBlock(Container):
 class ShardBlockHeader(Container):
     slot: Slot
     shard: Shard
-    beacon_chain_root: Bytes32
-    parent_root: Bytes32
-    body_root: Bytes32
-    state_root: Bytes32
+    beacon_chain_root: Hash
+    parent_root: Hash
+    body_root: Hash
+    state_root: Hash
     attestations: List[ShardAttestation, PLACEHOLDER]
     signature: BLSSignature
 ```
@@ -250,7 +249,7 @@ def verify_shard_attestation_signature(state: BeaconState,
 ### `compute_crosslink_data_root`
 
 ```python
-def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Bytes32:
+def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash:
     def is_power_of_two(value: uint64) -> bool:
         return (value > 0) and (value & (value - 1) == 0)
 
@@ -259,7 +258,7 @@ def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Bytes32:
             values.append(b'\x00' * BYTES_PER_SHARD_BLOCK_BODY)
         return values
 
-    def hash_tree_root_of_bytes(data: bytes) -> bytes:
+    def hash_tree_root_of_bytes(data: bytes) -> Hash:
         return hash_tree_root([data[i:i + 32] for i in range(0, len(data), 32)])
 
     def zpad(data: bytes, length: uint64) -> bytes:
@@ -25,8 +25,6 @@
 - [Vectors, containers, lists, unions](#vectors-containers-lists-unions)
 - [Deserialization](#deserialization)
 - [Merkleization](#merkleization)
-  - [`Bitvector[N]`](#bitvectorn-1)
-  - [`Bitlist[N]`](#bitlistn-1)
 - [Self-signed containers](#self-signed-containers)
 - [Implementations](#implementations)
 
@@ -120,8 +118,10 @@ return b""
 ### `Bitvector[N]`
 
 ```python
-as_integer = sum([value[i] << i for i in range(len(value))])
-return as_integer.to_bytes((N + 7) // 8, "little")
+array = [0] * ((N + 7) // 8)
+for i in range(N):
+    array[i // 8] |= value[i] << (i % 8)
+return bytes(array)
 ```
 
 ### `Bitlist[N]`
@@ -129,8 +129,11 @@ return as_integer.to_bytes((N + 7) // 8, "little")
 
 Note that from the offset coding, the length (in bytes) of the bitlist is known. An additional leading `1` bit is added so that the length in bits will also be known.
 
 ```python
-as_integer = (1 << len(value)) + sum([value[i] << i for i in range(len(value))])
-return as_integer.to_bytes((as_integer.bit_length() + 7) // 8, "little")
+array = [0] * ((len(value) // 8) + 1)
+for i in range(len(value)):
+    array[i // 8] |= value[i] << (i % 8)
+array[len(value) // 8] |= 1 << (len(value) % 8)
+return bytes(array)
 ```
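A self-contained sketch of the new byte-packing, including how the delimiter bit lets a reader recover the bit-length (bits represented here as 0/1 integers):

```python
# Sketch of the patched Bitlist packing and its inverse (bits as 0/1 ints).
def serialize_bitlist(value: list) -> bytes:
    array = [0] * ((len(value) // 8) + 1)
    for i in range(len(value)):
        array[i // 8] |= value[i] << (i % 8)
    array[len(value) // 8] |= 1 << (len(value) % 8)  # leading length-delimiter bit
    return bytes(array)

def deserialize_bitlist(data: bytes) -> list:
    as_integer = int.from_bytes(data, "little")
    length = as_integer.bit_length() - 1             # strip the delimiter bit
    return [(as_integer >> i) & 1 for i in range(length)]

bits = [1, 0, 1, 1, 0, 0, 0, 1, 1]
assert deserialize_bitlist(serialize_bitlist(bits)) == bits
```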
 
 ### Vectors, containers, lists, unions
@@ -177,38 +180,37 @@ Note that deserialization requires hardening against invalid inputs. A non-exhau
 
 We first define helper functions:
 
 * `size_of(B)`, where `B` is a basic type: the length, in bytes, of the serialized form of the basic type.
+* `chunk_count(type)`: calculate the amount of leafs for merkleization of the type.
+  * all basic types: `1`
+  * `Bitlist[N]` and `Bitvector[N]`: `(N + 255) // 256` (dividing by chunk size, rounding up)
+  * `List[B, N]` and `Vector[B, N]`, where `B` is a basic type: `(N * size_of(B) + 31) // 32` (dividing by chunk size, rounding up)
+  * `List[C, N]` and `Vector[C, N]`, where `C` is a composite type: `N`
+  * containers: `len(fields)`
+* `bitfield_bytes(bits)`: return the bits of the bitlist or bitvector, packed in bytes, aligned to the start. Exclusive length-delimiting bit for bitlists.
 * `pack`: Given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks.
 * `next_pow_of_two(i)`: get the next power of 2 of `i`, if not already a power of 2, with 0 mapping to 1. Examples: `0->1, 1->1, 2->2, 3->4, 4->4, 6->8, 9->16`
-* `merkleize(data, pad_for=1)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, if necessary append zero chunks so that the number of chunks is a power of two, Merkleize the chunks, and return the root.
-  * The merkleization depends on the effective input, which can be padded: if `pad_for=L`, then pad the `data` with zeroed chunks to `next_pow_of_two(L)` (virtually for memory efficiency).
-  * If `1` chunk: A single chunk is simply that chunk, i.e. the identity when the number of chunks is one.
-  * If `> 1` chunks: pad to `next_pow_of_two(len(chunks))`, merkleize as binary tree.
+* `merkleize(chunks, limit=None)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, merkleize the chunks, and return the root:
+  * The merkleization depends on the effective input, which can be padded/limited:
+    - if no limit: pad the `chunks` with zeroed chunks to `next_pow_of_two(len(chunks))` (virtually for memory efficiency).
+    - if `limit > len(chunks)`, pad the `chunks` with zeroed chunks to `next_pow_of_two(limit)` (virtually for memory efficiency).
+    - if `limit < len(chunks)`: do not merkleize, input exceeds limit. Raise an error instead.
+  * Then, merkleize the chunks (empty input is padded to 1 zero chunk):
+    - If `1` chunk: the root is the chunk itself.
+    - If `> 1` chunks: merkleize as binary tree.
 * `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` little-endian serialization) return `hash(root + length)`.
 * `mix_in_type`: Given a Merkle root `root` and a type_index `type_index` (`"uint256"` little-endian serialization) return `hash(root + type_index)`.
 
 We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:
 
 * `merkleize(pack(value))` if `value` is a basic object or a vector of basic objects.
+* `merkleize(bitfield_bytes(value), limit=chunk_count(type))` if `value` is a bitvector.
-* `mix_in_length(merkleize(pack(value), pad_for=(N * elem_size / BYTES_PER_CHUNK)), len(value))` if `value` is a list of basic objects.
+* `mix_in_length(merkleize(pack(value), limit=chunk_count(type)), len(value))` if `value` is a list of basic objects.
+* `mix_in_length(merkleize(bitfield_bytes(value), limit=chunk_count(type)), len(value))` if `value` is a bitlist.
 * `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container.
-* `mix_in_length(merkleize([hash_tree_root(element) for element in value], pad_for=N), len(value))` if `value` is a list of composite objects.
+* `mix_in_length(merkleize([hash_tree_root(element) for element in value], limit=chunk_count(type)), len(value))` if `value` is a list of composite objects.
 * `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type.
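A minimal, non-normative sketch of the `merkleize(chunks, limit=None)` semantics described above (SHA-256 assumed as the hash function; the real spec binds `hash` elsewhere):

```python
# Minimal illustration of merkleize(chunks, limit=None) as described above.
import hashlib

BYTES_PER_CHUNK = 32
ZERO_CHUNK = b"\x00" * BYTES_PER_CHUNK

def hash_(a: bytes, b: bytes) -> bytes:
    return hashlib.sha256(a + b).digest()

def next_pow_of_two(i: int) -> int:
    return 1 if i <= 1 else 1 << (i - 1).bit_length()

def merkleize(chunks: list, limit: int = None) -> bytes:
    if limit is not None and limit < len(chunks):
        raise ValueError("input exceeds limit")
    target = next_pow_of_two(limit if limit is not None else len(chunks))
    padded = chunks + [ZERO_CHUNK] * (target - len(chunks))  # virtual zero padding
    while len(padded) > 1:
        padded = [hash_(padded[i], padded[i + 1]) for i in range(0, len(padded), 2)]
    return padded[0]

assert merkleize([b"\x01" * 32]) == b"\x01" * 32          # single chunk is its own root
assert merkleize([], limit=2) == hash_(ZERO_CHUNK, ZERO_CHUNK)
```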
-### `Bitvector[N]`
-
-```python
-as_integer = sum([value[i] << i for i in range(len(value))])
-return merkleize(pack(as_integer.to_bytes((N + 7) // 8, "little")))
-```
-
-### `Bitlist[N]`
-
-```python
-as_integer = sum([value[i] << i for i in range(len(value))])
-return mix_in_length(merkleize(pack(as_integer.to_bytes((N + 7) // 8, "little"))), len(value))
-```
-
 ## Self-signed containers
 
 Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signing_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`.
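Conceptually, `truncate_last` just drops the trailing signature field before hashing; a schematic sketch, with the container stood in for by an ordered `dict` and `hash_tree_root` replaced by a stub for illustration only:

```python
# Schematic only: the truncate_last convention behind signing_root.
from hashlib import sha256

def hash_tree_root_stub(fields: dict) -> bytes:
    # Stand-in for a real SSZ hash_tree_root, for illustration only.
    return sha256(repr(sorted(fields.items())).encode()).digest()

def signing_root(fields: dict) -> bytes:
    *body, _signature = list(fields.items())  # truncate_last: drop the final (signature) field
    return hash_tree_root_stub(dict(body))

block = {"slot": 1, "parent_root": b"\x00" * 32, "signature": b"\x00" * 96}
assert signing_root(block) == hash_tree_root_stub({"slot": 1, "parent_root": b"\x00" * 32})
```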
@@ -322,13 +322,13 @@ Set `attestation.data = attestation_data` where `attestation_data` is the `Attes
 
 ##### Aggregation bits
 
-- Let `attestation.aggregation_bits` be a `Bitlist[MAX_INDICES_PER_ATTESTATION]` where the bits at the index in the aggregated validator's `committee` is set to `0b1`.
+- Let `attestation.aggregation_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` where the bits at the index in the aggregated validator's `committee` is set to `0b1`.
 
 *Note*: Calling `get_attesting_indices(state, attestation.data, attestation.aggregation_bits)` should return a list of length equal to 1, containing `validator_index`.
 
 ##### Custody bits
 
-- Let `attestation.custody_bits` be a `Bitlist[MAX_INDICES_PER_ATTESTATION]` filled with zeros of length `len(committee)`.
+- Let `attestation.custody_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` filled with zeros of length `len(committee)`.
 
 *Note*: This is a stub for Phase 0.
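For intuition, a toy version of setting the aggregation bit (the `Bitlist` stubbed as a plain Python list; the committee indices are assumed values for illustration):

```python
# Toy illustration: one validator's aggregation bit within its committee.
committee = [13, 42, 7, 99]          # assumed validator indices for this committee
validator_index = 7

aggregation_bits = [0] * len(committee)
aggregation_bits[committee.index(validator_index)] = 1
assert aggregation_bits == [0, 0, 1, 0]

custody_bits = [0] * len(committee)  # Phase 0 stub: all zeros
```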
@@ -5,7 +5,9 @@ BLS test vectors generator
 from typing import Tuple
 
 from eth_utils import (
-    to_tuple, int_to_big_endian
+    encode_hex,
+    int_to_big_endian,
+    to_tuple,
 )
 from gen_base import gen_runner, gen_suite, gen_typing
 
@@ -20,7 +22,7 @@ def int_to_hex(n: int, byte_length: int=None) -> str:
     byte_value = int_to_big_endian(n)
     if byte_length:
         byte_value = byte_value.rjust(byte_length, b'\x00')
-    return '0x' + byte_value.hex()
+    return encode_hex(byte_value)
 
 
 def hex_to_int(x: str) -> int:
@@ -28,11 +30,9 @@ def hex_to_int(x: str) -> int:
 
 
 DOMAINS = [
-    0,
-    1,
-    1234,
-    2**32-1,
-    2**64-1
+    b'\x00\x00\x00\x00\x00\x00\x00\x00',
+    b'\x00\x00\x00\x00\x00\x00\x00\x01',
+    b'\xff\xff\xff\xff\xff\xff\xff\xff'
 ]
 
 MESSAGES = [
@@ -51,12 +51,12 @@ PRIVKEYS = [
 
 
 def hash_message(msg: bytes,
-                 domain: int) ->Tuple[Tuple[str, str], Tuple[str, str], Tuple[str, str]]:
+                 domain: bytes) ->Tuple[Tuple[str, str], Tuple[str, str], Tuple[str, str]]:
     """
     Hash message
     Input:
-    - Message as bytes
-    - domain as uint64
+    - Message as bytes32
+    - domain as bytes8
     Output:
     - Message hash as a G2 point
     """
@@ -69,12 +69,12 @@ def hash_message(msg: bytes,
     ]
 
 
-def hash_message_compressed(msg: bytes, domain: int) -> Tuple[str, str]:
+def hash_message_compressed(msg: bytes, domain: bytes) -> Tuple[str, str]:
     """
     Hash message
     Input:
-    - Message as bytes
-    - domain as uint64
+    - Message as bytes32
+    - domain as bytes8
     Output:
     - Message hash as a compressed G2 point
     """
@@ -88,8 +88,8 @@ def case01_message_hash_G2_uncompressed():
     for domain in DOMAINS:
         yield {
             'input': {
-                'message': '0x' + msg.hex(),
-                'domain': int_to_hex(domain, byte_length=8)
+                'message': encode_hex(msg),
+                'domain': encode_hex(domain),
             },
             'output': hash_message(msg, domain)
         }
@@ -100,8 +100,8 @@ def case02_message_hash_G2_compressed():
     for domain in DOMAINS:
         yield {
             'input': {
-                'message': '0x' + msg.hex(),
-                'domain': int_to_hex(domain, byte_length=8)
+                'message': encode_hex(msg),
+                'domain': encode_hex(domain),
             },
             'output': hash_message_compressed(msg, domain)
         }
@@ -125,10 +125,10 @@ def case04_sign_messages():
         yield {
             'input': {
                 'privkey': int_to_hex(privkey),
-                'message': '0x' + message.hex(),
-                'domain': int_to_hex(domain, byte_length=8)
+                'message': encode_hex(message),
+                'domain': encode_hex(domain),
             },
-            'output': '0x' + sig.hex()
+            'output': encode_hex(sig)
         }
 
 # TODO: case05_verify_messages: Verify messages signed in case04
@@ -141,17 +141,17 @@ def case06_aggregate_sigs():
     for message in MESSAGES:
         sigs = [bls.sign(message, privkey, domain) for privkey in PRIVKEYS]
         yield {
-            'input': ['0x' + sig.hex() for sig in sigs],
-            'output': '0x' + bls.aggregate_signatures(sigs).hex(),
+            'input': [encode_hex(sig) for sig in sigs],
+            'output': encode_hex(bls.aggregate_signatures(sigs)),
         }
 
 @to_tuple
 def case07_aggregate_pubkeys():
     pubkeys = [bls.privtopub(privkey) for privkey in PRIVKEYS]
-    pubkeys_serial = ['0x' + pubkey.hex() for pubkey in pubkeys]
+    pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys]
     yield {
         'input': pubkeys_serial,
-        'output': '0x' + bls.aggregate_pubkeys(pubkeys).hex(),
+        'output': encode_hex(bls.aggregate_pubkeys(pubkeys)),
     }
 
@@ -1,3 +1,3 @@
-py-ecc==1.7.0
+py_ecc==1.7.1
 eth-utils==1.6.0
 ../../test_libs/gen_helpers
@@ -66,8 +66,13 @@ def test_small_penalty(spec, state):
     spec.process_slashings(state)
     yield 'post', state
 
-    assert state.balances[0] == pre_slash_balances[0] - (state.validators[0].effective_balance
-                                                         * 3 * total_penalties // total_balance)
+    expected_penalty = (
+        state.validators[0].effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT
+        * (3 * total_penalties)
+        // total_balance
+        * spec.EFFECTIVE_BALANCE_INCREMENT
+    )
+    assert state.balances[0] == pre_slash_balances[0] - expected_penalty
 
 
 @with_all_phases
@@ -121,5 +126,10 @@ def test_scaled_penalties(spec, state):
 
     for i in slashed_indices:
         v = state.validators[i]
-        penalty = v.effective_balance * total_penalties * 3 // total_balance
-        assert state.balances[i] == pre_slash_balances[i] - penalty
+        expected_penalty = (
+            v.effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT
+            * (3 * total_penalties)
+            // (total_balance)
+            * spec.EFFECTIVE_BALANCE_INCREMENT
+        )
+        assert state.balances[i] == pre_slash_balances[i] - expected_penalty
@@ -24,13 +24,13 @@ def only_with_bls(alt_return=None):
 @only_with_bls(alt_return=True)
 def bls_verify(pubkey, message_hash, signature, domain):
     return bls.verify(message_hash=message_hash, pubkey=pubkey,
-                      signature=signature, domain=int.from_bytes(domain, byteorder='little'))
+                      signature=signature, domain=domain)
 
 
 @only_with_bls(alt_return=True)
 def bls_verify_multiple(pubkeys, message_hashes, signature, domain):
     return bls.verify_multiple(pubkeys=pubkeys, message_hashes=message_hashes,
-                               signature=signature, domain=int.from_bytes(domain, byteorder='little'))
+                               signature=signature, domain=domain)
 
 
 @only_with_bls(alt_return=STUB_PUBKEY)
@@ -46,4 +46,4 @@ def bls_aggregate_signatures(signatures):
 @only_with_bls(alt_return=STUB_SIGNATURE)
 def bls_sign(message_hash, privkey, domain):
     return bls.sign(message_hash=message_hash, privkey=privkey,
-                    domain=int.from_bytes(domain, byteorder='little'))
+                    domain=domain)
@@ -1,4 +1,4 @@
-from .hash_function import hash
+from eth2spec.utils.hash_function import hash
 from math import log2
 
 
@@ -21,6 +21,8 @@ def calc_merkle_tree_from_leaves(values, layer_count=32):
 
 
 def get_merkle_root(values, pad_to=1):
+    if pad_to == 0:
+        return zerohashes[0]
     layer_count = int(log2(pad_to))
     if len(values) == 0:
         return zerohashes[layer_count]
@@ -35,10 +37,21 @@ def get_merkle_proof(tree, item_index):
     return proof
 
 
-def merkleize_chunks(chunks, pad_to: int=1):
+def merkleize_chunks(chunks, limit=None):
+    # If no limit is defined, we are just merkleizing chunks (e.g. SSZ container).
+    if limit is None:
+        limit = len(chunks)
+
     count = len(chunks)
+    # See if the input is within expected size.
+    # If not, a list-limit is set incorrectly, or a value is unexpectedly large.
+    assert count <= limit
+
+    if limit == 0:
+        return zerohashes[0]
+
     depth = max(count - 1, 0).bit_length()
-    max_depth = max(depth, (pad_to - 1).bit_length())
+    max_depth = (limit - 1).bit_length()
     tmp = [None for _ in range(max_depth + 1)]
 
     def merge(h, i):
@@ -41,11 +41,14 @@ def serialize(obj: SSZValue):
     if isinstance(obj, BasicValue):
         return serialize_basic(obj)
     elif isinstance(obj, Bitvector):
-        as_integer = sum([obj[i] << i for i in range(len(obj))])
-        return as_integer.to_bytes((len(obj) + 7) // 8, "little")
+        return obj.as_bytes()
     elif isinstance(obj, Bitlist):
-        as_integer = (1 << len(obj)) + sum([obj[i] << i for i in range(len(obj))])
-        return as_integer.to_bytes((as_integer.bit_length() + 7) // 8, "little")
+        as_bytearray = list(obj.as_bytes())
+        if len(obj) % 8 == 0:
+            as_bytearray.append(1)
+        else:
+            as_bytearray[len(obj) // 8] |= 1 << (len(obj) % 8)
+        return bytes(as_bytearray)
     elif isinstance(obj, Series):
         return encode_series(obj)
     else:
@@ -92,12 +95,10 @@ def encode_series(values: Series):
 def pack(values: Series):
     if isinstance(values, bytes):  # Bytes and BytesN are already packed
         return values
-    elif isinstance(values, Bitvector):
-        as_integer = sum([values[i] << i for i in range(len(values))])
-        return as_integer.to_bytes((values.length + 7) // 8, "little")
-    elif isinstance(values, Bitlist):
-        as_integer = sum([values[i] << i for i in range(len(values))])
-        return as_integer.to_bytes((values.length + 7) // 8, "little")
+    elif isinstance(values, Bits):
+        # packs the bits in bytes, left-aligned.
+        # Exclusive length delimiting bits for bitlists.
+        return values.as_bytes()
     return b''.join([serialize_basic(value) for value in values])
 
 
@@ -126,6 +127,7 @@ def item_length(typ: SSZType) -> int:
 
 
 def chunk_count(typ: SSZType) -> int:
+    # note that for lists, .length *on the type* describes the list limit.
     if isinstance(typ, BasicType):
         return 1
     elif issubclass(typ, Bits):
@@ -150,7 +152,7 @@ def hash_tree_root(obj: SSZValue):
         raise Exception(f"Type not supported: {type(obj)}")
 
     if isinstance(obj, (List, Bytes, Bitlist)):
-        return mix_in_length(merkleize_chunks(leaves, pad_to=chunk_count(obj.type())), len(obj))
+        return mix_in_length(merkleize_chunks(leaves, limit=chunk_count(obj.type())), len(obj))
     else:
        return merkleize_chunks(leaves)
 
@@ -354,7 +354,12 @@ class BitElementsType(ElementsType):
 
 
 class Bits(BaseList, metaclass=BitElementsType):
-    pass
+
+    def as_bytes(self):
+        as_bytearray = [0] * ((len(self) + 7) // 8)
+        for i in range(len(self)):
+            as_bytearray[i // 8] |= int(self[i]) << (i % 8)
+        return bytes(as_bytearray)
 
 
 class Bitlist(Bits):
@@ -8,7 +8,8 @@ def h(a: bytes, b: bytes) -> bytes:
 
 
 def e(v: int) -> bytes:
-    return v.to_bytes(length=32, byteorder='little')
+    # prefix with 0xfff... to make it non-zero
+    return b'\xff' * 28 + v.to_bytes(length=4, byteorder='little')
 
 
 def z(i: int) -> bytes:
@@ -16,44 +17,64 @@ def z(i: int) -> bytes:
 
 
 cases = [
-    (0, 0, 1, z(0)),
-    (0, 1, 1, e(0)),
-    (1, 0, 2, h(z(0), z(0))),
-    (1, 1, 2, h(e(0), z(0))),
-    (1, 2, 2, h(e(0), e(1))),
-    (2, 0, 4, h(h(z(0), z(0)), z(1))),
-    (2, 1, 4, h(h(e(0), z(0)), z(1))),
-    (2, 2, 4, h(h(e(0), e(1)), z(1))),
-    (2, 3, 4, h(h(e(0), e(1)), h(e(2), z(0)))),
-    (2, 4, 4, h(h(e(0), e(1)), h(e(2), e(3)))),
-    (3, 0, 8, h(h(h(z(0), z(0)), z(1)), z(2))),
-    (3, 1, 8, h(h(h(e(0), z(0)), z(1)), z(2))),
-    (3, 2, 8, h(h(h(e(0), e(1)), z(1)), z(2))),
-    (3, 3, 8, h(h(h(e(0), e(1)), h(e(2), z(0))), z(2))),
-    (3, 4, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), z(2))),
-    (3, 5, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), z(0)), z(1)))),
-    (3, 6, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(z(0), z(0))))),
-    (3, 7, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), z(0))))),
-    (3, 8, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7))))),
-    (4, 0, 16, h(h(h(h(z(0), z(0)), z(1)), z(2)), z(3))),
-    (4, 1, 16, h(h(h(h(e(0), z(0)), z(1)), z(2)), z(3))),
-    (4, 2, 16, h(h(h(h(e(0), e(1)), z(1)), z(2)), z(3))),
-    (4, 3, 16, h(h(h(h(e(0), e(1)), h(e(2), z(0))), z(2)), z(3))),
-    (4, 4, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), z(2)), z(3))),
-    (4, 5, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), z(0)), z(1))), z(3))),
-    (4, 6, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(z(0), z(0)))), z(3))),
-    (4, 7, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), z(0)))), z(3))),
-    (4, 8, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7)))), z(3))),
-    (4, 9, 16,
-     h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7)))), h(h(h(e(8), z(0)), z(1)), z(2)))),
+    # limit 0: always zero hash
+    (0, 0, z(0)),
+    (1, 0, None),  # cut-off due to limit
+    (2, 0, None),  # cut-off due to limit
+    # limit 1: padded to 1 element if not already. Returned (like identity func)
+    (0, 1, z(0)),
+    (1, 1, e(0)),
+    (2, 1, None),  # cut-off due to limit
+    (1, 1, e(0)),
+    (0, 2, h(z(0), z(0))),
+    (1, 2, h(e(0), z(0))),
+    (2, 2, h(e(0), e(1))),
+    (3, 2, None),  # cut-off due to limit
+    (16, 2, None),  # bigger cut-off due to limit
+    (0, 4, h(h(z(0), z(0)), z(1))),
+    (1, 4, h(h(e(0), z(0)), z(1))),
+    (2, 4, h(h(e(0), e(1)), z(1))),
+    (3, 4, h(h(e(0), e(1)), h(e(2), z(0)))),
+    (4, 4, h(h(e(0), e(1)), h(e(2), e(3)))),
+    (5, 4, None),  # cut-off due to limit
+    (0, 8, h(h(h(z(0), z(0)), z(1)), z(2))),
+    (1, 8, h(h(h(e(0), z(0)), z(1)), z(2))),
+    (2, 8, h(h(h(e(0), e(1)), z(1)), z(2))),
+    (3, 8, h(h(h(e(0), e(1)), h(e(2), z(0))), z(2))),
+    (4, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), z(2))),
+    (5, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), z(0)), z(1)))),
+    (6, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(z(0), z(0))))),
+    (7, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), z(0))))),
+    (8, 8, h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7))))),
+    (9, 8, None),  # cut-off due to limit
+    (0, 16, h(h(h(h(z(0), z(0)), z(1)), z(2)), z(3))),
+    (1, 16, h(h(h(h(e(0), z(0)), z(1)), z(2)), z(3))),
+    (2, 16, h(h(h(h(e(0), e(1)), z(1)), z(2)), z(3))),
+    (3, 16, h(h(h(h(e(0), e(1)), h(e(2), z(0))), z(2)), z(3))),
+    (4, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), z(2)), z(3))),
+    (5, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), z(0)), z(1))), z(3))),
+    (6, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(z(0), z(0)))), z(3))),
+    (7, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), z(0)))), z(3))),
+    (8, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7)))), z(3))),
+    (9, 16, h(h(h(h(e(0), e(1)), h(e(2), e(3))), h(h(e(4), e(5)), h(e(6), e(7)))), h(h(h(e(8), z(0)), z(1)), z(2)))),
 ]
 
 
 @pytest.mark.parametrize(
-    'depth,count,pow2,value',
+    'count,limit,value',
     cases,
 )
-def test_merkleize_chunks_and_get_merkle_root(depth, count, pow2, value):
+def test_merkleize_chunks_and_get_merkle_root(count, limit, value):
     chunks = [e(i) for i in range(count)]
-    assert merkleize_chunks(chunks, pad_to=pow2) == value
-    assert get_merkle_root(chunks, pad_to=pow2) == value
+    if value is None:
+        bad = False
+        try:
+            merkleize_chunks(chunks, limit=limit)
+            bad = True
+        except AssertionError:
+            pass
+        if bad:
+            assert False, "expected merkleization to be invalid"
+    else:
+        assert merkleize_chunks(chunks, limit=limit) == value
+        assert get_merkle_root(chunks, pad_to=limit) == value
@@ -1,6 +1,6 @@
 eth-utils>=1.3.0,<2
 eth-typing>=2.1.0,<3.0.0
 pycryptodome==3.7.3
-py_ecc>=1.6.0
+py_ecc==1.7.1
 dataclasses==0.6
 ssz==0.1.0a10
@@ -8,7 +8,7 @@ setup(
         "eth-utils>=1.3.0,<2",
         "eth-typing>=2.1.0,<3.0.0",
         "pycryptodome==3.7.3",
-        "py_ecc>=1.6.0",
+        "py_ecc==1.7.1",
         "ssz==0.1.0a10",
         "dataclasses==0.6",
     ]