# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/[sets, tables],
  chronicles,
  stew/endians2,
  ../beacon_chain/consensus_object_pools/sync_committee_msg_pool,
  ../beacon_chain/spec/datatypes/bellatrix,
  ../beacon_chain/spec/[
    beaconstate, helpers, keystore, signatures, state_transition, validator]

# TODO remove this dependency
from std/random import rand

from eth/common/eth_types_rlp import rlpHash

type
  MockPrivKeysT = object
  MockPubKeysT = object

const
  MockPrivKeys* = MockPrivKeysT()
  MockPubKeys* = MockPubKeysT()

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/tests/core/pyspec/eth2spec/test/helpers/keys.py
func `[]`*(sk: MockPrivKeysT, index: ValidatorIndex|uint64): ValidatorPrivKey =
  var bytes = (index.uint64 + 1'u64).toBytesLE()  # Consistent with EF tests
  static: doAssert sizeof(bytes) <= sizeof(result)
  copyMem(addr result, addr bytes, sizeof(bytes))

proc `[]`*(pk: MockPubKeysT, index: uint64): ValidatorPubKey =
  var cache {.threadvar.}: Table[uint64, ValidatorPubKey]
  cache.withValue(index, key) do:
    return key[]
  do:
    let key = MockPrivKeys[index].toPubKey().toPubKey()
    cache[index] = key
    return key

proc `[]`*(pk: MockPubKeysT, index: ValidatorIndex): ValidatorPubKey =
  pk[index.uint64]

func makeFakeHash*(i: int): Eth2Digest =
  var bytes = uint64(i).toBytesLE()
  static: doAssert sizeof(bytes) <= sizeof(result.data)
  copyMem(addr result.data[0], addr bytes[0], sizeof(bytes))

proc makeDeposit*(
    i: int,
    flags: UpdateFlags = {},
    cfg = defaultRuntimeConfig): DepositData =
  let
    privkey = MockPrivKeys[i.ValidatorIndex]
    pubkey = MockPubKeys[i.ValidatorIndex]
    withdrawal_credentials = makeWithdrawalCredentials(pubkey)

  result = DepositData(
    pubkey: pubkey,
    withdrawal_credentials: withdrawal_credentials,
    amount: MAX_EFFECTIVE_BALANCE.Gwei)

  if skipBlsValidation notin flags:
    result.signature =
      get_deposit_signature(cfg, result, privkey).toValidatorSig()

proc makeInitialDeposits*(
    n = SLOTS_PER_EPOCH, flags: UpdateFlags = {},
    cfg = defaultRuntimeConfig): seq[DepositData] =
  for i in 0 ..< n.int:
    result.add makeDeposit(i, flags, cfg = cfg)

func signBlock(
    fork: Fork, genesis_validators_root: Eth2Digest, forked: ForkedBeaconBlock,
    privKey: ValidatorPrivKey,
    flags: UpdateFlags = {}): ForkedSignedBeaconBlock =
  # Sign the given unsigned block with the proposer key, unless BLS validation
  # is being skipped for the test.
  let
    slot = withBlck(forked): forkyBlck.slot
    root = withBlck(forked): hash_tree_root(forkyBlck)
    signature =
      if skipBlsValidation notin flags:
        get_block_signature(
          fork, genesis_validators_root, slot, root, privKey).toValidatorSig()
      else:
        ValidatorSig()
  ForkedSignedBeaconBlock.init(forked, root, signature)

proc addTestBlock*(
    state: var ForkedHashedBeaconState,
    cache: var StateCache,
    eth1_data = Eth1Data(),
    attestations = newSeq[phase0.Attestation](),
    deposits = newSeq[Deposit](),
    sync_aggregate = SyncAggregate.init(),
    graffiti = default(GraffitiBytes),
    flags: UpdateFlags = {},
    nextSlot = true,
    cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock =
  # Create and add a block to state - state will advance by one slot!
  if nextSlot:
    var info = ForkedEpochInfo()
    process_slots(
      cfg, state, getStateField(state, slot) + 1, cache, info, flags).expect(
        "can advance to the proposal slot")

  let
    proposer_index =
      withState(state):
        get_beacon_proposer_index(
          forkyState.data, cache, forkyState.data.slot).expect(
            "valid proposer index")
    privKey = MockPrivKeys[proposer_index]
    randao_reveal =
      if skipBlsValidation notin flags:
        get_epoch_signature(
          getStateField(state, fork),
          getStateField(state, genesis_validators_root),
          getStateField(state, slot).epoch, privKey).toValidatorSig()
      else:
        ValidatorSig()

  let message = withState(state):
    let execution_payload =
      when consensusFork > ConsensusFork.Bellatrix:
        default(consensusFork.ExecutionPayloadForSigning)
      elif consensusFork == ConsensusFork.Bellatrix:
        if cfg.CAPELLA_FORK_EPOCH != FAR_FUTURE_EPOCH:
          # Can't keep correctly doing this once Capella happens, but LVH search
          # test relies on merging. So, merge only if no Capella transition.
          default(bellatrix.ExecutionPayloadForSigning)
        else:
          # Merge shortly after Bellatrix
          if forkyState.data.slot >
              cfg.BELLATRIX_FORK_EPOCH * SLOTS_PER_EPOCH + 10:
            if is_merge_transition_complete(forkyState.data):
              const feeRecipient = default(Eth1Address)
              build_empty_execution_payload(forkyState.data, feeRecipient)
            else:
              build_empty_merge_execution_payload(forkyState.data)
          else:
            default(bellatrix.ExecutionPayloadForSigning)
      else:
        default(bellatrix.ExecutionPayloadForSigning)

    debugComment "addTestBlock Electra attestation support"
    makeBeaconBlock(
      cfg,
      state,
      proposer_index,
      randao_reveal,
      # Keep deposit counts internally consistent.
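# Example usage (a minimal, illustrative sketch): advancing a state by a couple
# of empty blocks in a test. It assumes a genesis `ForkedHashedBeaconState` is
# already available as `genesisState` (e.g. prepared by the test suite's genesis
# helpers) and only demonstrates the calling convention of `addTestBlock`:
#
#   var
#     state = newClone(genesisState)
#     cache = StateCache()
#   for _ in 0 ..< 2:
#     let signedBlock = addTestBlock(state[], cache, cfg = defaultRuntimeConfig)
#     withBlck(signedBlock):
#       doAssert forkyBlck.message.slot == getStateField(state[], slot)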
      Eth1Data(
        deposit_root: eth1_data.deposit_root,
        deposit_count: forkyState.data.eth1_deposit_index + deposits.lenu64,
        block_hash: eth1_data.block_hash),
      graffiti,
      when consensusFork == ConsensusFork.Electra:
        default(seq[electra.Attestation])
      else:
        attestations,
      deposits,
      BeaconBlockValidatorChanges(),
      sync_aggregate,
      execution_payload,
      noRollback,
      cache,
      verificationFlags = {skipBlsValidation})

  if message.isErr:
    raiseAssert "Failed to create a block: " & $message.error

  let new_block = signBlock(
    getStateField(state, fork),
    getStateField(state, genesis_validators_root), message.get(), privKey,
    flags)

  new_block

proc makeTestBlock*(
    state: ForkedHashedBeaconState,
    cache: var StateCache,
    eth1_data = Eth1Data(),
    attestations = newSeq[phase0.Attestation](),
    deposits = newSeq[Deposit](),
    sync_aggregate = SyncAggregate.init(),
    graffiti = default(GraffitiBytes),
    cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock =
  # Create a block for `state.slot + 1` - like a block proposer would do!
  # It's a bit awkward - in order to produce a block for N+1, we need to
  # calculate what the state will look like after that block has been applied,
  # because the block includes the state root.
  let tmpState = assignClone(state)
  addTestBlock(
    tmpState[], cache, eth1_data, attestations, deposits, sync_aggregate,
    graffiti, cfg = cfg)

func makeAttestationData*(
    state: ForkyBeaconState, slot: Slot, committee_index: CommitteeIndex,
    beacon_block_root: Eth2Digest): AttestationData =
  let
    current_epoch = get_current_epoch(state)
    start_slot = start_slot(current_epoch)
    epoch_boundary_block_root =
      if start_slot == state.slot: beacon_block_root
      else: get_block_root_at_slot(state, start_slot)

  doAssert slot.epoch == current_epoch,
    "Computed epoch was " & $slot.epoch &
    " while the state current_epoch was " & $current_epoch

  # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#attestation-data
  AttestationData(
    slot: slot,
    index: committee_index.uint64,
    beacon_block_root: beacon_block_root,
    source: state.current_justified_checkpoint,
    target: Checkpoint(
      epoch: current_epoch,
      root: epoch_boundary_block_root
    )
  )

func makeAttestationSig(
    fork: Fork, genesis_validators_root: Eth2Digest, data: AttestationData,
    committee: openArray[ValidatorIndex],
    bits: CommitteeValidatorsBits): ValidatorSig =
  let signing_root = compute_attestation_signing_root(
    fork, genesis_validators_root, data)

  var
    agg {.noinit.}: AggregateSignature
    first = true

  # Aggregate the signatures of all committee members whose bit is set.
  for i in 0 ..< bits.len():
    if not bits[i]:
      continue
    let sig = blsSign(MockPrivKeys[committee[i]], signing_root.data)
    if first:
      agg.init(sig)
      first = false
    else:
      agg.aggregate(sig)

  agg.finish().toValidatorSig()

func makeFullAttestations*(
    state: ForkedHashedBeaconState, beacon_block_root: Eth2Digest, slot: Slot,
    cache: var StateCache): seq[phase0.Attestation] =
  # Create attestations in which the full committee participates, for every
  # committee assigned to the given slot.
  withState(state):
    let committees_per_slot =
      get_committee_count_per_slot(forkyState.data, slot.epoch, cache)

    for committee_index in get_committee_indices(committees_per_slot):
      let
        committee = get_beacon_committee(
          forkyState.data, slot, committee_index, cache)
        data = makeAttestationData(
          forkyState.data, slot, committee_index, beacon_block_root)

      doAssert committee.len() >= 1
      var attestation = phase0.Attestation(
        aggregation_bits: CommitteeValidatorsBits.init(committee.len),
        data: data)
      for i in 0 ..< committee.len:
        attestation.aggregation_bits.setBit(i)

      attestation.signature = makeAttestationSig(
        getStateField(state, fork),
        getStateField(state, genesis_validators_root), data,
        committee, attestation.aggregation_bits)

      result.add attestation
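# Worked example for the sync-committee participation floor used in
# `makeSyncAggregate` below: with the mainnet preset, SYNC_COMMITTEE_SIZE is
# 512, so `(SYNC_COMMITTEE_SIZE * 2 + 2) div 3` evaluates to 342 - the smallest
# participant count that still exceeds two thirds of the committee
# (2/3 * 512 = 341.33...).
static: doAssert (512 * 2 + 2) div 3 == 342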
proc makeSyncAggregate(
    state: ForkedHashedBeaconState,
    syncCommitteeRatio: float,
    cfg: RuntimeConfig): SyncAggregate =
  if syncCommitteeRatio <= 0.0:
    return SyncAggregate.init()
  let
    syncCommittee =
      withState(state):
        when consensusFork >= ConsensusFork.Altair:
          if (forkyState.data.slot + 1).is_sync_committee_period():
            forkyState.data.next_sync_committee
          else:
            forkyState.data.current_sync_committee
        else:
          return SyncAggregate.init()
    fork = getStateField(state, fork)
    genesis_validators_root = getStateField(state, genesis_validators_root)
    slot = getStateField(state, slot)
    latest_block_id = withState(state): forkyState.latest_block_id
    rng = HmacDrbgContext.new()
    syncCommitteePool = newClone(SyncCommitteeMsgPool.init(rng, cfg))

  type
    Aggregator = object
      subcommitteeIdx: SyncSubcommitteeIndex
      validatorIdx: ValidatorIndex
      selectionProof: ValidatorSig

  let
    minActiveParticipants =
      if syncCommitteeRatio >= 2.0 / 3: # Ensure supermajority is hit
        (SYNC_COMMITTEE_SIZE * 2 + 2) div 3
      else:
        0
    maxActiveParticipants = (syncCommitteeRatio * SYNC_COMMITTEE_SIZE).int
  var
    aggregators: seq[Aggregator]
    numActiveParticipants = 0
  for subcommitteeIdx in SyncSubcommitteeIndex:
    let
      firstKeyIdx = subcommitteeIdx.int * SYNC_SUBCOMMITTEE_SIZE
      lastKeyIdx = firstKeyIdx + SYNC_SUBCOMMITTEE_SIZE - 1
    var processedKeys = initHashSet[ValidatorPubKey]()
    for idx, validatorKey in syncCommittee.pubkeys[firstKeyIdx .. lastKeyIdx]:
      if validatorKey in processedKeys:
        continue
      processedKeys.incl validatorKey
      let
        validatorIdx = block:
          var res = 0
          for i, validator in getStateField(state, validators):
            if validator.pubkey == validatorKey:
              res = i
              break
          res.ValidatorIndex
        selectionProofSig = get_sync_committee_selection_proof(
          fork, genesis_validators_root, slot,
          subcommitteeIdx, MockPrivKeys[validatorIdx])
      if is_sync_committee_aggregator(selectionProofSig.toValidatorSig):
        aggregators.add Aggregator(
          subcommitteeIdx: subcommitteeIdx,
          validatorIdx: validatorIdx,
          selectionProof: selectionProofSig.toValidatorSig)
      if numActiveParticipants >= minActiveParticipants and
          rand(1.0) > syncCommitteeRatio:
        continue
      var positions: seq[uint64]
      for pos, key in syncCommittee.pubkeys[firstKeyIdx + idx .. lastKeyIdx]:
        if numActiveParticipants >= maxActiveParticipants:
          break
        if key == validatorKey:
          positions.add (idx + pos).uint64
          inc numActiveParticipants
      if positions.len == 0:
        continue

      let signature = get_sync_committee_message_signature(
        fork, genesis_validators_root, slot,
        latest_block_id.root, MockPrivKeys[validatorIdx])
      syncCommitteePool[].addSyncCommitteeMessage(
        slot,
        latest_block_id,
        uint64 validatorIdx,
        signature,
        subcommitteeIdx,
        positions)

  for aggregator in aggregators:
    var contribution: SyncCommitteeContribution
    if syncCommitteePool[].produceContribution(
        slot, latest_block_id, aggregator.subcommitteeIdx, contribution):
      let
        contributionAndProof = ContributionAndProof(
          aggregator_index: uint64 aggregator.validatorIdx,
          contribution: contribution,
          selection_proof: aggregator.selectionProof)
        contributionSig = get_contribution_and_proof_signature(
          fork, genesis_validators_root, contributionAndProof,
          MockPrivKeys[aggregator.validatorIdx])
        signedContributionAndProof = SignedContributionAndProof(
          message: contributionAndProof,
          signature: contributionSig.toValidatorSig)
      syncCommitteePool[].addContribution(
        signedContributionAndProof, latest_block_id,
        contribution.signature.load.get)

  syncCommitteePool[].produceSyncAggregate(latest_block_id, slot + 1)

iterator makeTestBlocks*(
    state: ForkedHashedBeaconState,
    cache: var StateCache,
    blocks: int,
    eth1_data = Eth1Data(),
    attested = false,
    allDeposits = newSeq[Deposit](),
    syncCommitteeRatio = 0.0,
    graffiti = default(GraffitiBytes),
    cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock =
  var state = assignClone(state)
  for _ in 0 ..< blocks:
    let
      parent_root = withState(state[]): forkyState.latest_block_root
      attestations =
        if attested:
          makeFullAttestations(
            state[], parent_root, getStateField(state[], slot), cache)
        else:
          newSeq[phase0.Attestation]()
      stateEth1 = getStateField(state[], eth1_data)
      stateDepositIndex = getStateField(state[], eth1_deposit_index)
      deposits =
        if stateDepositIndex < stateEth1.deposit_count:
          # Consume pending deposits, at most a block's worth at a time.
          let
            lowIndex = stateDepositIndex
            numDeposits = min(MAX_DEPOSITS, stateEth1.deposit_count - lowIndex)
            highIndex = lowIndex + numDeposits - 1
          allDeposits[lowIndex .. highIndex]
        else:
          newSeq[Deposit]()
      sync_aggregate = makeSyncAggregate(state[], syncCommitteeRatio, cfg)

    yield addTestBlock(
      state[], cache,
      eth1_data = eth1_data,
      attestations = attestations,
      deposits = deposits,
      sync_aggregate = sync_aggregate,
      graffiti = graffiti,
      cfg = cfg)
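# Example usage (a minimal, illustrative sketch): driving the iterator from a
# test. It assumes a genesis state built from `makeInitialDeposits` is available
# as `genesisState` (e.g. via the test suite's genesis helpers):
#
#   var cache = StateCache()
#   for signedBlock in makeTestBlocks(
#       genesisState[], cache, blocks = 3,
#       attested = true, cfg = defaultRuntimeConfig):
#     withBlck(signedBlock):
#       doAssert forkyBlck.message.slot > GENESIS_SLOT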