# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# Uncategorized helper functions from the spec

{.push raises: [Defect].}
import
# Standard lib
std/[algorithm, math, sequtils, sets, tables],
# Status libraries
stew/[bitops2, byteutils, endians2],
chronicles,
# Internal
./datatypes/[phase0, altair, bellatrix],
"."/[eth2_merkleization, forks, ssz_codec]

# TODO although eth2_merkleization already exports ssz_codec, *sometimes* code
# fails to compile if the export is not done here also
export
forks, eth2_merkleization, ssz_codec

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#integer_squareroot
func integer_squareroot*(n: SomeInteger): SomeInteger =
## Return the largest integer ``x`` such that ``x**2 <= n``.
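  ## For example, ``integer_squareroot(26) == 5``: 5*5 = 25 <= 26, while 6*6 = 36 > 26.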
doAssert n >= 0'u64
var
x = n
y = (x + 1) div 2
while y < x:
x = y
y = (x + n div x) div 2
x

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/ssz/merkle-proofs.md#generalized_index_sibling
template generalized_index_sibling*(
    index: GeneralizedIndex): GeneralizedIndex =
  index xor 1.GeneralizedIndex

template generalized_index_sibling_left(
    index: GeneralizedIndex): GeneralizedIndex =
  index and not 1.GeneralizedIndex

template generalized_index_sibling_right(
    index: GeneralizedIndex): GeneralizedIndex =
  index or 1.GeneralizedIndex

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/ssz/merkle-proofs.md#generalized_index_parent
template generalized_index_parent*(
    index: GeneralizedIndex): GeneralizedIndex =
  index shr 1
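
# Worked example of the index arithmetic above: node 6 has sibling 7
# (6 xor 1); the left node of its pair is 6 (6 and not 1), the right node
# is 7 (6 or 1), and its parent is 3 (6 shr 1).
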
# https://github.com/ethereum/consensus-specs/blob/v1.1.9/ssz/merkle-proofs.md#merkle-multiproofs
iterator get_branch_indices*(
tree_index: GeneralizedIndex): GeneralizedIndex =
## Get the generalized indices of the sister chunks along the path
## from the chunk with the given tree index to the root.
var index = tree_index
while index > 1.GeneralizedIndex:
yield generalized_index_sibling(index)
index = generalized_index_parent(index)

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/ssz/merkle-proofs.md#merkle-multiproofs
iterator get_path_indices*(
tree_index: GeneralizedIndex): GeneralizedIndex =
## Get the generalized indices of the chunks along the path
## from the chunk with the given tree index to the root.
var index = tree_index
while index > 1.GeneralizedIndex:
yield index
index = generalized_index_parent(index)
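
# For example, for tree index 9 (binary 1001), `get_path_indices` yields
# 9, 4, 2, while `get_branch_indices` yields the corresponding siblings
# 8, 5, 3.
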
# https://github.com/ethereum/consensus-specs/blob/v1.1.9/ssz/merkle-proofs.md#merkle-multiproofs
func get_helper_indices*(
indices: openArray[GeneralizedIndex]): seq[GeneralizedIndex] =
## Get the generalized indices of all "extra" chunks in the tree needed
## to prove the chunks with the given generalized indices. Note that the
## decreasing order is chosen deliberately to ensure equivalence to the order
## of hashes in a regular single-item Merkle proof in the single-item case.
var all_helper_indices = initHashSet[GeneralizedIndex]()
for index in indices:
for idx in get_branch_indices(index):
all_helper_indices.incl idx
for index in indices:
for idx in get_path_indices(index):
all_helper_indices.excl idx

  var res = newSeqOfCap[GeneralizedIndex](all_helper_indices.len)
for idx in all_helper_indices:
res.add idx
res.sort(SortOrder.Descending)
res
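
# For example, `get_helper_indices([4, 6])` returns `[7, 5]` (descending):
# siblings 7 and 5 must accompany a proof for chunks 4 and 6, while their
# ancestors 2, 3 and the root 1 are computed along the way.
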
# https://github.com/ethereum/consensus-specs/blob/v1.1.9/ssz/merkle-proofs.md#merkle-multiproofs
func check_multiproof_acceptable*(
indices: openArray[GeneralizedIndex]): Result[void, string] =
# Check that proof verification won't allocate excessive amounts of memory.
const max_multiproof_complexity = nextPowerOfTwo(256)
if indices.len > max_multiproof_complexity:
trace "Max multiproof complexity exceeded",
num_indices=indices.len, max_multiproof_complexity
return err("Unsupported multiproof complexity (" & $indices.len & ")")
if indices.len == 0:
return err("No indices specified")
if indices.anyIt(it == 0.GeneralizedIndex):
return err("Invalid index specified")
ok()

func calculate_multi_merkle_root_impl(
leaves: openArray[Eth2Digest],
proof: openArray[Eth2Digest],
indices: openArray[GeneralizedIndex],
helper_indices: openArray[GeneralizedIndex]): Result[Eth2Digest, string] =
  # All callers have already verified the checks in check_multiproof_acceptable,
  # as well as that the lengths of leaves/indices and proof/helper_indices match.
# Helper to retrieve a value from a table that is statically known to exist.
template getExisting[A, B](t: var Table[A, B], key: A): var B =
try: t[key]
except KeyError: raiseAssert "Unreachable"
# Populate data structure with all leaves.
# This data structure only scales with the number of `leaves`,
# in contrast to the spec one that also scales with the number of `proof`
# items and the number of all intermediate roots, potentially the entire tree.
let capacity = nextPowerOfTwo(leaves.len)
var objects = initTable[GeneralizedIndex, Eth2Digest](capacity)
for i, index in indices:
if objects.mgetOrPut(index, leaves[i]) != leaves[i]:
return err("Conflicting roots for same index")
# Create list with keys of all active nodes that need to be visited.
# This list is sorted in descending order, same as `helper_indices`.
# Pulling from `objects` instead of from `indices` deduplicates the list.
var keys = newSeqOfCap[GeneralizedIndex](objects.len)
for index in objects.keys:
if index > 1.GeneralizedIndex: # For the root, no work needs to be done.
keys.add index
keys.sort(SortOrder.Descending)
# The merkle tree is processed from bottom to top, pulling in helper
# indices from `proof` as needed. During processing, the `keys` list
# may temporarily end up being split into two parts, sorted individually.
# An additional index tracks the current maximum element of the list.
var
completed = 0 # All key indices before this are fully processed.
maxIndex = completed # Index of the list's largest key.
helper = 0 # Helper index from `proof` to be pulled next.
# Processing is done when there are no more keys to process.
while completed < keys.len:
let
k = keys[maxIndex]
sibling = generalized_index_sibling(k)
left = generalized_index_sibling_left(k)
right = generalized_index_sibling_right(k)
parent = generalized_index_parent(k)
parentRight = generalized_index_sibling_right(parent)
# Keys need to be processed in descending order to ensure that intermediate
# roots remain available until they are no longer needed. This ensures that
# conflicting roots are detected in all cases.
keys[maxIndex] =
if not objects.hasKey(k):
        # A previous computation already merged this key with its sibling.
0.GeneralizedIndex
else:
# Compute expected root for parent. This deletes child roots.
# Because the list is sorted in descending order, they are not needed.
let root = withEth2Hash:
if helper < helper_indices.len and helper_indices[helper] == sibling:
# The next proof item is required to form the parent hash.
if sibling == left:
h.update proof[helper].data
h.update objects.getExisting(right).data; objects.del right
else:
h.update objects.getExisting(left).data; objects.del left
h.update proof[helper].data
inc helper
else:
# Both siblings are already known.
h.update objects.getExisting(left).data; objects.del left
h.update objects.getExisting(right).data; objects.del right
# Store parent root, and replace the current list entry with its parent.
if objects.hasKeyOrPut(parent, root):
if objects.getExisting(parent) != root:
return err("Conflicting roots for same index")
0.GeneralizedIndex
elif parent > 1.GeneralizedIndex:
# Note that the list may contain further nodes that are on a layer
# beneath the parent, so this may break the strictly descending order
# of the list. For example, given [12, 9], this will lead to [6, 9].
# This will resolve itself after the additional nodes are processed,
# i.e., [6, 9] -> [6, 4] -> [3, 4] -> [3, 2] -> [1].
parent
else:
0.GeneralizedIndex
if keys[maxIndex] != 0.GeneralizedIndex:
# The list may have been temporarily split up into two parts that are
# individually sorted in descending order. Have to first process further
# nodes until the list is sorted once more.
inc maxIndex
# Determine whether descending sort order has been restored.
let isSorted =
if maxIndex == completed: true
else:
while maxIndex < keys.len and keys[maxIndex] == 0.GeneralizedIndex:
inc maxIndex
maxIndex >= keys.len or keys[maxIndex] <= parentRight
if isSorted:
# List is sorted once more. Reset `maxIndex` to its start.
while completed < keys.len and keys[completed] == 0.GeneralizedIndex:
inc completed
maxIndex = completed
# Proof is guaranteed to provide all info needed to reach the root.
doAssert helper == helper_indices.len
doAssert objects.len == 1
ok(objects.getExisting(1.GeneralizedIndex))

func calculate_multi_merkle_root*(
leaves: openArray[Eth2Digest],
proof: openArray[Eth2Digest],
indices: openArray[GeneralizedIndex],
helper_indices: openArray[GeneralizedIndex]): Result[Eth2Digest, string] =
doAssert proof.len == helper_indices.len
if leaves.len != indices.len:
return err("Length mismatch for leaves and indices")
? check_multiproof_acceptable(indices)
calculate_multi_merkle_root_impl(
leaves, proof, indices, helper_indices)

func calculate_multi_merkle_root*(
leaves: openArray[Eth2Digest],
proof: openArray[Eth2Digest],
indices: openArray[GeneralizedIndex]): Result[Eth2Digest, string] =
if leaves.len != indices.len:
return err("Length mismatch for leaves and indices")
? check_multiproof_acceptable(indices)
calculate_multi_merkle_root_impl(
leaves, proof, indices, get_helper_indices(indices))

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/ssz/merkle-proofs.md#merkle-multiproofs
func verify_merkle_multiproof*(
leaves: openArray[Eth2Digest],
proof: openArray[Eth2Digest],
indices: openArray[GeneralizedIndex],
helper_indices: openArray[GeneralizedIndex],
root: Eth2Digest): bool =
let calc = calculate_multi_merkle_root(leaves, proof, indices, helper_indices)
if calc.isErr: return false
calc.get == root

func verify_merkle_multiproof*(
leaves: openArray[Eth2Digest],
proof: openArray[Eth2Digest],
indices: openArray[GeneralizedIndex],
root: Eth2Digest): bool =
let calc = calculate_multi_merkle_root(leaves, proof, indices)
if calc.isErr: return false
calc.get == root
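
# Usage sketch (hypothetical digests): continuing the example above, chunks
# at generalized indices 4 and 6 verify against proof hashes for helper
# indices 7 and 5, supplied in that (descending) order:
#   verify_merkle_multiproof(
#     @[chunk4, chunk6], @[proof7, proof5],
#     @[4.GeneralizedIndex, 6.GeneralizedIndex], root)
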
# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#is_valid_merkle_branch
func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openArray[Eth2Digest],
depth: int, index: uint64,
root: Eth2Digest): bool =
## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and
## ``branch``.
var
value = leaf
buf: array[64, byte]
for i in 0 ..< depth:
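    # Bit ``i`` of ``index`` tells whether the current node is a right child;
    # if so, the sibling from ``branch`` is hashed on the left.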
if (index div (1'u64 shl i)) mod 2 != 0:
buf[0..31] = branch[i].data
buf[32..63] = value.data
else:
buf[0..31] = value.data
buf[32..63] = branch[i].data
value = eth2digest(buf)
value == root

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/tests/core/pyspec/eth2spec/test/helpers/merkle.py#L4-L21
func build_proof_impl(anchor: object, leaf_index: uint64,
proof: var openArray[Eth2Digest]) =
let
bottom_length = nextPow2(typeof(anchor).totalSerializedFields.uint64)
tree_depth = log2trunc(bottom_length)
parent_index =
if leaf_index < bottom_length shl 1:
0'u64
else:
var i = leaf_index
while i >= bottom_length shl 1:
i = i shr 1
i
var
prefix_len = 0
proof_len = log2trunc(leaf_index)
cache = newSeq[Eth2Digest](bottom_length shl 1)
block:
var i = bottom_length
anchor.enumInstanceSerializedFields(fieldNameVar, fieldVar):
if i == parent_index:
when fieldVar is object:
prefix_len = log2trunc(leaf_index) - tree_depth
proof_len -= prefix_len
let
bottom_bits = leaf_index and not (uint64.high shl prefix_len)
prefix_leaf_index = (1'u64 shl prefix_len) + bottom_bits
build_proof_impl(fieldVar, prefix_leaf_index, proof)
else: raiseAssert "Invalid leaf_index"
cache[i] = hash_tree_root(fieldVar)
i += 1
for i in countdown(bottom_length - 1, 1):
cache[i] = withEth2Hash:
h.update cache[i shl 1].data
h.update cache[i shl 1 + 1].data
var i = if parent_index != 0: parent_index
else: leaf_index
doAssert i > 0 and i < bottom_length shl 1
for proof_index in prefix_len ..< prefix_len + proof_len:
let b = (i and 1) != 0
i = i shr 1
proof[proof_index] = if b: cache[i shl 1]
else: cache[i shl 1 + 1]

func build_proof*(anchor: object, leaf_index: uint64,
proof: var openArray[Eth2Digest]) =
doAssert leaf_index > 0
doAssert proof.len == log2trunc(leaf_index)
build_proof_impl(anchor, leaf_index, proof)
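
# Usage sketch (hypothetical `anchor` and `gindex`): the proof buffer is
# sized from the generalized index before being filled:
#   var proof = newSeq[Eth2Digest](log2trunc(gindex))
#   build_proof(anchor, gindex, proof)
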
# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#is_active_validator
func is_active_validator*(validator: Validator, epoch: Epoch): bool =
## Check if ``validator`` is active
validator.activation_epoch <= epoch and epoch < validator.exit_epoch

func is_exited_validator*(validator: Validator, epoch: Epoch): bool =
## Check if ``validator`` is exited
validator.exit_epoch <= epoch

func is_withdrawable_validator*(validator: Validator, epoch: Epoch): bool =
epoch >= validator.withdrawable_epoch

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#get_active_validator_indices
iterator get_active_validator_indices*(state: ForkyBeaconState, epoch: Epoch):
ValidatorIndex =
for idx in 0..<state.validators.len:
if is_active_validator(state.validators[idx], epoch):
yield idx.ValidatorIndex

func get_active_validator_indices*(state: ForkyBeaconState, epoch: Epoch):
seq[ValidatorIndex] =
## Return the sequence of active validator indices at ``epoch``.
var res = newSeqOfCap[ValidatorIndex](state.validators.len)
for idx in get_active_validator_indices(state, epoch):
res.add idx
res

func get_active_validator_indices_len*(state: ForkyBeaconState, epoch: Epoch):
uint64 =
for idx in 0..<state.validators.len:
if is_active_validator(state.validators[idx], epoch):
inc result

func get_active_validator_indices_len*(
state: ForkedHashedBeaconState; epoch: Epoch): uint64 =
withState(state):
get_active_validator_indices_len(state.data, epoch)

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#get_current_epoch
func get_current_epoch*(state: ForkyBeaconState): Epoch =
## Return the current epoch.
state.slot.epoch

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#get_current_epoch
func get_current_epoch*(state: ForkedHashedBeaconState): Epoch =
## Return the current epoch.
withState(state): get_current_epoch(state.data)

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#get_randao_mix
func get_randao_mix*(state: ForkyBeaconState, epoch: Epoch): Eth2Digest =
## Returns the randao mix at a recent ``epoch``.
state.randao_mixes[epoch mod EPOCHS_PER_HISTORICAL_VECTOR]

func bytes_to_uint64*(data: openArray[byte]): uint64 =
doAssert data.len == 8
# Little-endian data representation
uint64.fromBytesLE(data)

func uint_to_bytes*(x: uint64): array[8, byte] = toBytesLE(x)
func uint_to_bytes*(x: uint32): array[4, byte] = toBytesLE(x)
func uint_to_bytes*(x: uint16): array[2, byte] = toBytesLE(x)
func uint_to_bytes*(x: uint8): array[1, byte] = toBytesLE(x)

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#compute_domain
func compute_domain*(
domain_type: DomainType,
fork_version: Version,
genesis_validators_root: Eth2Digest = ZERO_HASH): Eth2Domain =
## Return the domain for the ``domain_type`` and ``fork_version``.
let fork_data_root =
compute_fork_data_root(fork_version, genesis_validators_root)
result[0..3] = domain_type.data
result[4..31] = fork_data_root.data.toOpenArray(0, 27)
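
# The resulting domain is ``domain_type.data`` (4 bytes) followed by the
# first 28 bytes of ``fork_data_root``, 32 bytes in total.
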
# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#get_domain
func get_domain*(
fork: Fork,
domain_type: DomainType,
epoch: Epoch,
genesis_validators_root: Eth2Digest): Eth2Domain =
## Return the signature domain (fork version concatenated with domain type)
## of a message.
let fork_version =
if epoch < fork.epoch:
fork.previous_version
else:
fork.current_version
compute_domain(domain_type, fork_version, genesis_validators_root)

func get_domain*(
state: ForkyBeaconState, domain_type: DomainType, epoch: Epoch): Eth2Domain =
## Return the signature domain (fork version concatenated with domain type)
## of a message.
get_domain(state.fork, domain_type, epoch, state.genesis_validators_root)

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#compute_signing_root
func compute_signing_root*(ssz_object: auto, domain: Eth2Domain): Eth2Digest =
## Return the signing root of an object by calculating the root of the
## object-domain tree.
let domain_wrapped_object = SigningData(
object_root: hash_tree_root(ssz_object),
domain: domain
)
hash_tree_root(domain_wrapped_object)

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#get_seed
func get_seed*(state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType):
Eth2Digest =
## Return the seed at ``epoch``.

  var seed_input: array[4+8+32, byte]

  # Detect potential underflow
  static:
    doAssert EPOCHS_PER_HISTORICAL_VECTOR > MIN_SEED_LOOKAHEAD

  seed_input[0..3] = domain_type.data
seed_input[4..11] = uint_to_bytes(epoch.uint64)
seed_input[12..43] =
get_randao_mix(state, # Avoid underflow
epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1).data
eth2digest(seed_input)
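
# Note on the lookup above: adding EPOCHS_PER_HISTORICAL_VECTOR before
# subtracting keeps ``epoch - MIN_SEED_LOOKAHEAD - 1`` from underflowing;
# `get_randao_mix` reduces the result mod EPOCHS_PER_HISTORICAL_VECTOR again.
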
# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/altair/beacon-chain.md#add_flag
func add_flag*(flags: ParticipationFlags, flag_index: int): ParticipationFlags =
let flag = ParticipationFlags(1'u8 shl flag_index)
flags or flag

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/altair/beacon-chain.md#has_flag
func has_flag*(flags: ParticipationFlags, flag_index: int): bool =
let flag = ParticipationFlags(1'u8 shl flag_index)
(flags and flag) == flag
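
# For example, `add_flag(0, 2)` sets bit 2 and returns 4; `has_flag(4, 2)`
# is then true while `has_flag(4, 0)` is false.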

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/altair/sync-protocol.md#get_subtree_index
func get_subtree_index*(idx: GeneralizedIndex): uint64 =
doAssert idx > 0
uint64(idx mod (type(idx)(1) shl log2trunc(idx)))
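
# For example, `get_subtree_index(105) == 41`: log2trunc(105) == 6, so the
# result is 105 mod 64 == 41, the node's offset within its layer.
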
# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
func is_merge_transition_complete*(state: bellatrix.BeaconState): bool =
state.latest_execution_payload_header != default(ExecutionPayloadHeader)

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#is_merge_transition_block
func is_merge_transition_block(
state: bellatrix.BeaconState,
body: bellatrix.BeaconBlockBody | bellatrix.TrustedBeaconBlockBody |
bellatrix.SigVerifiedBeaconBlockBody): bool =
not is_merge_transition_complete(state) and
body.execution_payload != default(bellatrix.ExecutionPayload)

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#is_execution_enabled
func is_execution_enabled*(
state: bellatrix.BeaconState,
body: bellatrix.BeaconBlockBody | bellatrix.TrustedBeaconBlockBody |
bellatrix.SigVerifiedBeaconBlockBody): bool =
is_merge_transition_block(state, body) or is_merge_transition_complete(state)

# https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot
func compute_timestamp_at_slot*(state: ForkyBeaconState, slot: Slot): uint64 =
# Note: This function is unsafe with respect to overflows and underflows.
let slots_since_genesis = slot - GENESIS_SLOT
state.genesis_time + slots_since_genesis * SECONDS_PER_SLOT
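
# For example, with mainnet's SECONDS_PER_SLOT == 12, slot ``GENESIS_SLOT + n``
# maps to timestamp ``state.genesis_time + 12 * n``.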