# beacon_chain
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  std/[options, random],
  chronicles,
  eth/keys,
  stew/endians2,
  ../beacon_chain/consensus_object_pools/sync_committee_msg_pool,
  ../beacon_chain/spec/datatypes/bellatrix,
  ../beacon_chain/spec/[
    beaconstate, helpers, keystore, signatures, state_transition, validator]

from eth/common/eth_types import EMPTY_ROOT_HASH
from eth/common/eth_types_rlp import rlpHash
from eth/eip1559 import EIP1559_INITIAL_BASE_FEE

type
  MockPrivKeysT = object
  MockPubKeysT = object

const
  MockPrivKeys* = MockPrivKeysT()
  MockPubKeys* = MockPubKeysT()

# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/tests/core/pyspec/eth2spec/test/helpers/keys.py
func `[]`*(_: MockPrivKeysT, index: ValidatorIndex|uint64): ValidatorPrivKey =
  # 0 is not a valid BLS private key - adding 1000 to the index helps interop
  # with the Rust BLS library used by Lighthouse. EF tests use an offset of 1
  # instead of 1000.
  var bytes = (index.uint64 + 1000'u64).toBytesLE()
  static: doAssert sizeof(bytes) <= sizeof(result)
  copyMem(addr result, addr bytes, sizeof(bytes))

func `[]`*(_: MockPubKeysT, index: ValidatorIndex|uint64): ValidatorPubKey =
  MockPrivKeys[index].toPubKey().toPubKey()

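# A minimal usage sketch (assumes only the interop key scheme above): the mock
# containers act like infinite arrays of deterministic keypairs, so tests can
# look up the key for any validator index without keystores.
#
#   let
#     sk = MockPrivKeys[0.ValidatorIndex]  # BLS secret scalar 1000, little-endian
#     pk = MockPubKeys[0.ValidatorIndex]
#   doAssert pk == sk.toPubKey().toPubKey()
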
func makeFakeHash*(i: int): Eth2Digest =
  var bytes = uint64(i).toBytesLE()
  static: doAssert sizeof(bytes) <= sizeof(result.data)
  copyMem(addr result.data[0], addr bytes[0], sizeof(bytes))

func makeDeposit*(
    i: int,
    flags: UpdateFlags = {},
    cfg = defaultRuntimeConfig): DepositData =
  let
    privkey = MockPrivKeys[i.ValidatorIndex]
    pubkey = MockPubKeys[i.ValidatorIndex]
    withdrawal_credentials = makeWithdrawalCredentials(pubkey)

  result = DepositData(
    pubkey: pubkey,
    withdrawal_credentials: withdrawal_credentials,
    amount: MAX_EFFECTIVE_BALANCE)

  if skipBlsValidation notin flags:
    result.signature = get_deposit_signature(cfg, result, privkey).toValidatorSig()

func makeInitialDeposits*(
    n = SLOTS_PER_EPOCH, flags: UpdateFlags = {}, cfg = defaultRuntimeConfig): seq[DepositData] =
  for i in 0..<n.int:
    result.add makeDeposit(i, flags, cfg = cfg)

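# Usage sketch (hedged; mirrors how tests typically call these helpers): build
# deposit data for a small genesis validator set while skipping the slow BLS
# deposit signatures.
#
#   let deposits = makeInitialDeposits(n = 64, flags = {skipBlsValidation})
#   doAssert deposits.len == 64
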
func signBlock(
    fork: Fork, genesis_validators_root: Eth2Digest, forked: ForkedBeaconBlock,
    privKey: ValidatorPrivKey, flags: UpdateFlags = {}): ForkedSignedBeaconBlock =
  let
    slot = withBlck(forked): blck.slot
    root = hash_tree_root(forked)
    signature =
      if skipBlsValidation notin flags:
        get_block_signature(
          fork, genesis_validators_root, slot, root, privKey).toValidatorSig()
      else:
        ValidatorSig()
  ForkedSignedBeaconBlock.init(forked, root, signature)

proc build_empty_merge_execution_payload(state: bellatrix.BeaconState):
    bellatrix.ExecutionPayload =
  ## Assuming a pre-state of the same slot, build a valid ExecutionPayload
  ## without any transactions from a non-merged block.

  doAssert not is_merge_transition_complete(state)

  let
    latest = state.latest_execution_payload_header
    timestamp = compute_timestamp_at_slot(state, state.slot)
    randao_mix = get_randao_mix(state, get_current_epoch(state))

  var payload = bellatrix.ExecutionPayload(
    parent_hash: latest.block_hash,
    state_root: latest.state_root, # no changes to the state
    receipts_root: EMPTY_ROOT_HASH,
    block_number: latest.block_number + 1,
    prev_randao: randao_mix,
    gas_limit: 30000000, # retain same limit
    gas_used: 0, # empty block, 0 gas
    timestamp: timestamp,
    base_fee_per_gas: EIP1559_INITIAL_BASE_FEE)

  payload.block_hash = rlpHash payloadToBlockHeader(payload)

  payload

proc addTestBlock*(
    state: var ForkedHashedBeaconState,
    cache: var StateCache,
    eth1_data: Eth1Data = Eth1Data(),
    attestations: seq[Attestation] = newSeq[Attestation](),
    deposits: seq[Deposit] = newSeq[Deposit](),
    sync_aggregate: SyncAggregate = SyncAggregate.init(),
    graffiti: GraffitiBytes = default(GraffitiBytes),
    flags: set[UpdateFlag] = {},
    nextSlot: bool = true,
    cfg: RuntimeConfig = defaultRuntimeConfig): ForkedSignedBeaconBlock =
  # Create and add a block to state - state will advance by one slot!
  if nextSlot:
    var info = ForkedEpochInfo()
    process_slots(
      cfg, state, getStateField(state, slot) + 1, cache, info, flags).expect(
        "can advance 1")

  let
    proposer_index = get_beacon_proposer_index(
      state, cache, getStateField(state, slot)).expect("valid proposer index")
    privKey = MockPrivKeys[proposer_index]
    randao_reveal =
      if skipBlsValidation notin flags:
        get_epoch_signature(
          getStateField(state, fork),
          getStateField(state, genesis_validators_root),
          getStateField(state, slot).epoch, privKey).toValidatorSig()
      else:
        ValidatorSig()

  let execution_payload =
    if cfg.CAPELLA_FORK_EPOCH != FAR_FUTURE_EPOCH:
      # Can't keep correctly doing this once Capella happens, but LVH search
      # test relies on merging. So, merge only if no Capella transition.
      default(bellatrix.ExecutionPayload)
    else:
      withState(state):
        when stateFork == ConsensusFork.Bellatrix:
          # Merge shortly after Bellatrix
          if forkyState.data.slot >
              cfg.BELLATRIX_FORK_EPOCH * SLOTS_PER_EPOCH + 10:
            if is_merge_transition_complete(forkyState.data):
              const feeRecipient = default(Eth1Address)
              build_empty_execution_payload(forkyState.data, feeRecipient)
            else:
              build_empty_merge_execution_payload(forkyState.data)
          else:
            default(bellatrix.ExecutionPayload)
        else:
          default(bellatrix.ExecutionPayload)

  let
    message = makeBeaconBlock(
      cfg,
      state,
      proposer_index,
      randao_reveal,
      # Keep deposit counts internally consistent.
      Eth1Data(
        deposit_root: eth1_data.deposit_root,
        deposit_count: getStateField(state, eth1_deposit_index) + deposits.lenu64,
        block_hash: eth1_data.block_hash),
      graffiti,
      attestations,
      deposits,
      BeaconBlockValidatorChanges(),
      sync_aggregate,
      execution_payload,
      (static(default(deneb.KZGCommitmentList))),
      noRollback,
      cache,
      verificationFlags = {skipBlsValidation})

  if message.isErr:
    raiseAssert "Failed to create a block: " & $message.error

  let
    new_block = signBlock(
      getStateField(state, fork),
      getStateField(state, genesis_validators_root), message.get(), privKey,
      flags)

  new_block

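# Usage sketch (hedged; `genesisState` stands in for whatever state a test
# fixture provides): extend a mutable forked state by one signed block - note
# that the state itself advances by a slot as a side effect.
#
#   var cache = StateCache()
#   let state = assignClone(genesisState)
#   let blck = addTestBlock(state[], cache, cfg = defaultRuntimeConfig)
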
proc makeTestBlock*(
    state: ForkedHashedBeaconState,
    cache: var StateCache,
    eth1_data = Eth1Data(),
    attestations = newSeq[Attestation](),
    deposits = newSeq[Deposit](),
    sync_aggregate = SyncAggregate.init(),
    graffiti = default(GraffitiBytes),
    cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock =
  # Create a block for `state.slot + 1` - like a block proposer would do!
  # It's a bit awkward - in order to produce a block for N+1, we need to
  # calculate what the state will look like after that block has been applied,
  # because the block includes the state root.
  let tmpState = assignClone(state)
  addTestBlock(
    tmpState[], cache, eth1_data,
    attestations, deposits, sync_aggregate, graffiti, cfg = cfg)

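# Usage sketch (hedged): propose a block on top of `state` without mutating
# it - handy when a test needs the block but wants to apply it itself.
#
#   let blck = makeTestBlock(state, cache)
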
func makeAttestationData*(
    state: ForkyBeaconState, slot: Slot, committee_index: CommitteeIndex,
    beacon_block_root: Eth2Digest): AttestationData =
  let
    current_epoch = get_current_epoch(state)
    start_slot = start_slot(current_epoch)
    epoch_boundary_block_root =
      if start_slot == state.slot: beacon_block_root
      else: get_block_root_at_slot(state, start_slot)

  doAssert slot.epoch == current_epoch,
    "Computed epoch was " & $slot.epoch &
    " while the state current_epoch was " & $current_epoch

  # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/phase0/validator.md#attestation-data
  AttestationData(
    slot: slot,
    index: committee_index.uint64,
    beacon_block_root: beacon_block_root,
    source: state.current_justified_checkpoint,
    target: Checkpoint(
      epoch: current_epoch,
      root: epoch_boundary_block_root
    )
  )

func makeAttestationSig*(
    fork: Fork, genesis_validators_root: Eth2Digest, data: AttestationData,
    committee: openArray[ValidatorIndex],
    bits: CommitteeValidatorsBits): ValidatorSig =
  let signing_root = compute_attestation_signing_root(
    fork, genesis_validators_root, data)

  var
    agg {.noinit.}: AggregateSignature
    first = true

  for i in 0..<bits.len():
    if not bits[i]: continue
    let sig = blsSign(MockPrivKeys[committee[i]], signing_root.data)

    if first:
      agg.init(sig)
      first = false
    else:
      agg.aggregate(sig)

  if first:
    ValidatorSig.infinity()
  else:
    agg.finish().toValidatorSig()

func makeAttestationData*(
    state: ForkedHashedBeaconState, slot: Slot, committee_index: CommitteeIndex,
    beacon_block_root: Eth2Digest): AttestationData =
  ## Create an attestation / vote for the block `beacon_block_root` using the
  ## data in `state` to fill in the rest of the fields.
  ## `state` is the state corresponding to the `beacon_block_root` advanced to
  ## the slot we're attesting to.
  withState(state):
    makeAttestationData(
      forkyState.data, slot, committee_index, beacon_block_root)

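# Usage sketch (hedged): vote data for the current head at the state's slot,
# for the first committee.
#
#   let data = makeAttestationData(
#     state, getStateField(state, slot), CommitteeIndex(0), beacon_block_root)
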
func makeAttestation*(
    state: ForkedHashedBeaconState, beacon_block_root: Eth2Digest,
    committee: seq[ValidatorIndex], slot: Slot, committee_index: CommitteeIndex,
    validator_index: ValidatorIndex, cache: var StateCache,
    flags: UpdateFlags = {}): Attestation =
  # Avoids state_sim silliness; as it's responsible for all validators,
  # transforming from monotonic enumerable index -> committee index ->
  # monotonic enumerable index is wasteful and slow. Most test callers
  # want ValidatorIndex, so that's supported too.
  let
    index_in_committee = committee.find(validator_index)
    data = makeAttestationData(state, slot, committee_index, beacon_block_root)

  doAssert index_in_committee != -1, "find_beacon_committee should guarantee this"

  var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
  aggregation_bits.setBit index_in_committee

  let sig = if skipBlsValidation in flags:
    ValidatorSig()
  else:
    makeAttestationSig(
      getStateField(state, fork),
      getStateField(state, genesis_validators_root),
      data, committee, aggregation_bits)

  Attestation(
    data: data,
    aggregation_bits: aggregation_bits,
    signature: sig
  )

func find_beacon_committee(
    state: ForkedHashedBeaconState, validator_index: ValidatorIndex,
    cache: var StateCache): auto =
  let epoch = epoch(getStateField(state, slot))
  for epoch_committee_index in 0'u64 ..< get_committee_count_per_slot(
      state, epoch, cache) * SLOTS_PER_EPOCH:
    let
      slot = ((epoch_committee_index mod SLOTS_PER_EPOCH) +
        epoch.start_slot.uint64).Slot
      index = CommitteeIndex(epoch_committee_index div SLOTS_PER_EPOCH)
      committee = get_beacon_committee(state, slot, index, cache)
    if validator_index in committee:
      return (committee, slot, index)
  doAssert false

func makeAttestation*(
    state: ForkedHashedBeaconState, beacon_block_root: Eth2Digest,
    validator_index: ValidatorIndex, cache: var StateCache): Attestation =
  let (committee, slot, index) =
    find_beacon_committee(state, validator_index, cache)
  makeAttestation(state, beacon_block_root, committee, slot, index,
    validator_index, cache)

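# Usage sketch (hedged): a single-validator vote for the current head; the
# committee lookup above is done internally.
#
#   let att = makeAttestation(state, beacon_block_root, 0.ValidatorIndex, cache)
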
func makeFullAttestations*(
    state: ForkedHashedBeaconState, beacon_block_root: Eth2Digest, slot: Slot,
    cache: var StateCache,
    flags: UpdateFlags = {}): seq[Attestation] =
  # Create one attestation per committee assigned to the given slot, with
  # every committee member participating
  let committees_per_slot = get_committee_count_per_slot(
    state, slot.epoch, cache)
  for committee_index in get_committee_indices(committees_per_slot):
    let
      committee = get_beacon_committee(state, slot, committee_index, cache)
      data = makeAttestationData(state, slot, committee_index, beacon_block_root)

    doAssert committee.len() >= 1
    var attestation = Attestation(
      aggregation_bits: CommitteeValidatorsBits.init(committee.len),
      data: data)
    for i in 0..<committee.len:
      attestation.aggregation_bits.setBit(i)

    attestation.signature = makeAttestationSig(
      getStateField(state, fork),
      getStateField(state, genesis_validators_root), data, committee,
      attestation.aggregation_bits)

    result.add attestation

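# Usage sketch (hedged): full-participation attestations for the head block at
# the state's current slot, as used by the block-production iterator below.
#
#   let atts = makeFullAttestations(
#     state, beacon_block_root, getStateField(state, slot), cache)
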
proc makeSyncAggregate(
    state: ForkedHashedBeaconState,
    syncCommitteeRatio: float,
    cfg: RuntimeConfig): SyncAggregate =
  if syncCommitteeRatio <= 0.0:
    return SyncAggregate.init()

  let
    syncCommittee =
      withState(state):
        when stateFork >= ConsensusFork.Altair:
          if (forkyState.data.slot + 1).is_sync_committee_period():
            forkyState.data.next_sync_committee
          else:
            forkyState.data.current_sync_committee
        else:
          return SyncAggregate.init()
    fork =
      getStateField(state, fork)
    genesis_validators_root =
      getStateField(state, genesis_validators_root)
    slot =
      getStateField(state, slot)
    latest_block_root =
      withState(state): forkyState.latest_block_root
    syncCommitteePool = newClone(SyncCommitteeMsgPool.init(keys.newRng()))

  type
    Aggregator = object
      subcommitteeIdx: SyncSubcommitteeIndex
      validatorIdx: ValidatorIndex
      selectionProof: ValidatorSig

  let
    minActiveParticipants =
      if syncCommitteeRatio >= 2.0 / 3: # Ensure supermajority is hit
        (SYNC_COMMITTEE_SIZE * 2 + 2) div 3
      else:
        0
    maxActiveParticipants = (syncCommitteeRatio * SYNC_COMMITTEE_SIZE).int
  var
    aggregators: seq[Aggregator]
    numActiveParticipants = 0
  for subcommitteeIdx in SyncSubcommitteeIndex:
    let
      firstKeyIdx = subcommitteeIdx.int * SYNC_SUBCOMMITTEE_SIZE
      lastKeyIdx = firstKeyIdx + SYNC_SUBCOMMITTEE_SIZE - 1
    var processedKeys = initHashSet[ValidatorPubKey]()
    for idx, validatorKey in syncCommittee.pubkeys[firstKeyIdx .. lastKeyIdx]:
      if validatorKey in processedKeys:
        continue
      processedKeys.incl validatorKey
      let
        validatorIdx =
          block:
            var res = 0
            for i, validator in getStateField(state, validators):
              if validator.pubkey == validatorKey:
                res = i
                break
            res.ValidatorIndex
        selectionProofSig = get_sync_committee_selection_proof(
          fork, genesis_validators_root,
          slot, subcommitteeIdx,
          MockPrivKeys[validatorIdx])
      if is_sync_committee_aggregator(selectionProofSig.toValidatorSig):
        aggregators.add Aggregator(
          subcommitteeIdx: subcommitteeIdx,
          validatorIdx: validatorIdx,
          selectionProof: selectionProofSig.toValidatorSig)

      if numActiveParticipants >= minActiveParticipants and
          rand(1.0) > syncCommitteeRatio:
        continue
      var positions: seq[uint64]
      for pos, key in syncCommittee.pubkeys[firstKeyIdx + idx .. lastKeyIdx]:
        if numActiveParticipants >= maxActiveParticipants:
          break
        if key == validatorKey:
          positions.add (idx + pos).uint64
          inc numActiveParticipants
      if positions.len == 0:
        continue

      let signature = get_sync_committee_message_signature(
        fork, genesis_validators_root,
        slot, latest_block_root,
        MockPrivKeys[validatorIdx])
      syncCommitteePool[].addSyncCommitteeMessage(
        slot,
        latest_block_root,
        uint64 validatorIdx,
        signature,
        subcommitteeIdx,
        positions)

  for aggregator in aggregators:
    var contribution: SyncCommitteeContribution
    if syncCommitteePool[].produceContribution(
        slot, latest_block_root, aggregator.subcommitteeIdx, contribution):
      let
        contributionAndProof = ContributionAndProof(
          aggregator_index: uint64 aggregator.validatorIdx,
          contribution: contribution,
          selection_proof: aggregator.selectionProof)
        contributionSig = get_contribution_and_proof_signature(
          fork, genesis_validators_root,
          contributionAndProof,
          MockPrivKeys[aggregator.validatorIdx])
        signedContributionAndProof = SignedContributionAndProof(
          message: contributionAndProof,
          signature: contributionSig.toValidatorSig)
      syncCommitteePool[].addContribution(
        signedContributionAndProof, contribution.signature.load.get)

  syncCommitteePool[].produceSyncAggregate(latest_block_root)

iterator makeTestBlocks*(
    state: ForkedHashedBeaconState,
    cache: var StateCache,
    blocks: int,
    attested: bool,
    syncCommitteeRatio = 0.0,
    cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock =
  var
    state = assignClone(state)
  for _ in 0..<blocks:
    let
      parent_root = withState(state[]): forkyState.latest_block_root
      attestations =
        if attested:
          makeFullAttestations(
            state[], parent_root, getStateField(state[], slot), cache)
        else:
          @[]
      sync_aggregate = makeSyncAggregate(state[], syncCommitteeRatio, cfg)

    yield addTestBlock(state[], cache,
      attestations = attestations, sync_aggregate = sync_aggregate, cfg = cfg)

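# Usage sketch (hedged; `genesisState` is a placeholder for a fixture-provided
# state): produce a short chain of signed blocks with full attestation
# participation and roughly 80% sync-committee participation.
#
#   var cache = StateCache()
#   for blck in makeTestBlocks(
#       genesisState, cache, blocks = 3, attested = true,
#       syncCommitteeRatio = 0.8):
#     discard blck  # e.g. feed into a block processor under test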