# nimbus-eth2/tests/test_gossip_validation.nim

# beacon_chain
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.used.}
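
# Unit tests for gossip-level validation of phase0 attestations and Altair
# sync committee messages: the happy path, the various rejection reasons and
# the batched signature-verification behaviour.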
import
  # Standard library
  std/sequtils,
  # Status lib
  unittest2,
  chronos,
  eth/keys, taskpools,
  # Internal
  ../beacon_chain/[beacon_clock],
  ../beacon_chain/gossip_processing/[gossip_validation, batch_validation],
  ../beacon_chain/fork_choice/fork_choice,
  ../beacon_chain/consensus_object_pools/[
    block_quarantine, blockchain_dag, block_clearance, attestation_pool,
    sync_committee_msg_pool],
  ../beacon_chain/spec/datatypes/[phase0, altair],
  ../beacon_chain/spec/[state_transition, helpers, network, validator],
  ../beacon_chain/validators/validator_pool,
  # Test utilities
  ./testutil, ./testdbutil, ./testblockutil

proc pruneAtFinalization(dag: ChainDAGRef, attPool: AttestationPool) =
  if dag.needStateCachesAndForkChoicePruning():
    dag.pruneStateCachesDAG()
    # pool[].prune() # We test logic without attestation pool / fork choice pruning
suite "Gossip validation " & preset():
setup:
# Genesis state that results in 3 members per committee
var
validatorMonitor = newClone(ValidatorMonitor.init())
dag = init(
ChainDAGRef, defaultRuntimeConfig, makeTestDB(SLOTS_PER_EPOCH * 3),
validatorMonitor, {})
taskpool = Taskpool.new()
verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)
quarantine = newClone(Quarantine.init())
pool = newClone(AttestationPool.init(dag, quarantine))
state = newClone(dag.headState)
cache = StateCache()
info = ForkedEpochInfo()
batchCrypto = BatchCrypto.new(
keys.newRng(), eager = proc(): bool = false,
genesis_validators_root = dag.genesis_validators_root, taskpool)
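
    # `eager` returning false makes BatchCrypto defer signature checks so
    # they can be verified together in batches (exercised below).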

    # Slot 0 is a finalized slot - won't be making attestations for it.
    check:
      process_slots(
        defaultRuntimeConfig, state[], getStateField(state[], slot) + 1,
        cache, info, {}).isOk()
test "Empty committee when no committee for slot":
template committee(idx: uint64): untyped =
get_beacon_committee(
dag.headState, dag.head.slot, idx.CommitteeIndex, cache)
template committeeLen(idx: uint64): untyped =
get_beacon_committee_len(
dag.headState, dag.head.slot, idx.CommitteeIndex, cache)
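
    # With so few validators there is no committee at index 63 for this slot;
    # the lookup helpers return an empty committee rather than erroring.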
    check:
      committee(0).len > 0
      committee(63).len == 0

    check:
      committeeLen(2) > 0
      committeeLen(63) == 0
test "validateAttestation":
    var cache: StateCache
    for blck in makeTestBlocks(
        dag.headState, cache, int(SLOTS_PER_EPOCH * 5), attested = false):
      let added = dag.addHeadBlock(verifier, blck.phase0Data) do (
          blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
          epochRef: EpochRef, unrealized: FinalityCheckpoints):
        # Callback: add the block to fork choice if it was valid
        pool[].addForkChoice(
          epochRef, blckRef, unrealized, signedBlock.message,
          blckRef.slot.start_beacon_time)

      check: added.isOk()
      dag.updateHead(added[], quarantine[], [])
      pruneAtFinalization(dag, pool[])

    var
      # Create attestations for slot 1
      beacon_committee = get_beacon_committee(
        dag.headState, dag.head.slot, 0.CommitteeIndex, cache)
      att_1_0 = makeAttestation(
        dag.headState, dag.head.root, beacon_committee[0], cache)
      att_1_1 = makeAttestation(
        dag.headState, dag.head.root, beacon_committee[1], cache)

      committees_per_slot =
        get_committee_count_per_slot(
          dag.headState, att_1_0.data.slot.epoch, cache)

      subnet = compute_subnet_for_attestation(
        committees_per_slot,
        att_1_0.data.slot, att_1_0.data.index.CommitteeIndex)

      beaconTime = att_1_0.data.slot.start_beacon_time()
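
    # Each attestation maps to exactly one gossip subnet, derived from its
    # slot and committee index; validation checks that it arrived there.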
    check:
      validateAttestation(pool, batchCrypto, att_1_0, beaconTime, subnet, true).waitFor().isOk

      # Same validator again
      validateAttestation(pool, batchCrypto, att_1_0, beaconTime, subnet, true).waitFor().error()[0] ==
        ValidationResult.Ignore

    pool[].nextAttestationEpoch.setLen(0) # reset for test
    check:
      # Wrong subnet
      validateAttestation(
        pool, batchCrypto, att_1_0, beaconTime, SubnetId(subnet.uint8 + 1), true).waitFor().isErr

    pool[].nextAttestationEpoch.setLen(0) # reset for test
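    # Gossip accepts attestations only within a narrow window around their
    # slot (ATTESTATION_PROPAGATION_SLOT_RANGE plus clock disparity), so
    # skewing the wall clock in either direction must fail validation.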
    check:
      # Too far in the future
      validateAttestation(
        pool, batchCrypto, att_1_0, beaconTime - 1.seconds, subnet, true).waitFor().isErr

    pool[].nextAttestationEpoch.setLen(0) # reset for test
    check:
      # Too far in the past
      validateAttestation(
        pool, batchCrypto, att_1_0,
        beaconTime - (SECONDS_PER_SLOT * SLOTS_PER_EPOCH - 1).int.seconds,
        subnet, true).waitFor().isErr

    block:
      var broken = att_1_0
      broken.signature.blob[0] += 1
      pool[].nextAttestationEpoch.setLen(0) # reset for test
      check:
        # Invalid signature
        validateAttestation(
          pool, batchCrypto, broken, beaconTime, subnet, true).waitFor().
            error()[0] == ValidationResult.Reject
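
    # The next two blocks submit a broken and a valid attestation together so
    # that both land in the same signature-verification batch; the batch must
    # reject only the broken one.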
    block:
      var broken = att_1_0
      broken.signature.blob[5] += 1
      pool[].nextAttestationEpoch.setLen(0) # reset for test
      # One invalid, one valid (batched)
      let
        fut_1_0 = validateAttestation(
          pool, batchCrypto, broken, beaconTime, subnet, true)
        fut_1_1 = validateAttestation(
          pool, batchCrypto, att_1_1, beaconTime, subnet, true)

      check:
        fut_1_0.waitFor().error()[0] == ValidationResult.Reject
        fut_1_1.waitFor().isOk()

    block:
      var broken = att_1_0
      # This shouldn't deserialize, which is a different way to break it
      broken.signature.blob = default(type broken.signature.blob)
      pool[].nextAttestationEpoch.setLen(0) # reset for test
      # One invalid, one valid (batched)
      let
        fut_1_0 = validateAttestation(
          pool, batchCrypto, broken, beaconTime, subnet, true)
        fut_1_1 = validateAttestation(
          pool, batchCrypto, att_1_1, beaconTime, subnet, true)

      check:
        fut_1_0.waitFor().error()[0] == ValidationResult.Reject
        fut_1_1.waitFor().isOk()

suite "Gossip validation - Extra": # Not based on preset config
test "validateSyncCommitteeMessage":
const num_validators = SLOTS_PER_EPOCH
let
cfg = block:
var cfg = defaultRuntimeConfig
cfg.ALTAIR_FORK_EPOCH = (GENESIS_EPOCH + 1).Epoch
cfg
taskpool = Taskpool.new()
quarantine = newClone(Quarantine.init())
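
    # With ALTAIR_FORK_EPOCH set to epoch 1, the blocks applied below cross
    # the Altair transition, so the head state has an active sync committee.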
    var
      verifier = BatchVerifier(rng: keys.newRng(), taskpool: Taskpool.new())
      dag = block:
        let
          validatorMonitor = newClone(ValidatorMonitor.init())
          dag = ChainDAGRef.init(
            cfg, makeTestDB(num_validators), validatorMonitor, {})
        var cache = StateCache()
        for blck in makeTestBlocks(
            dag.headState, cache, int(SLOTS_PER_EPOCH),
            attested = false, cfg = cfg):
          let added =
            case blck.kind
            of ConsensusFork.Phase0:
              const nilCallback = OnPhase0BlockAdded(nil)
              dag.addHeadBlock(verifier, blck.phase0Data, nilCallback)
            of ConsensusFork.Altair:
              const nilCallback = OnAltairBlockAdded(nil)
              dag.addHeadBlock(verifier, blck.altairData, nilCallback)
            of ConsensusFork.Bellatrix:
              const nilCallback = OnBellatrixBlockAdded(nil)
              dag.addHeadBlock(verifier, blck.bellatrixData, nilCallback)
            of ConsensusFork.Capella:
              const nilCallback = OnCapellaBlockAdded(nil)
              dag.addHeadBlock(verifier, blck.capellaData, nilCallback)
            of ConsensusFork.Deneb:
              const nilCallback = OnDenebBlockAdded(nil)
              dag.addHeadBlock(verifier, blck.denebData, nilCallback)
          check: added.isOk()
          dag.updateHead(added[], quarantine[], [])
        dag

    let batchCrypto = BatchCrypto.new(
      keys.newRng(), eager = proc(): bool = false,
      genesis_validators_root = dag.genesis_validators_root, taskpool)

    var
      state = assignClone(dag.headState.altairData)
      slot = state[].data.slot

      subcommitteeIdx = 0.SyncSubcommitteeIndex
      syncCommittee = @(dag.syncCommitteeParticipants(slot))
      subcommittee = toSeq(syncCommittee.syncSubcommittee(subcommitteeIdx))
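
      # Sync committee messages are gossiped per subcommittee: the committee
      # is split into SYNC_COMMITTEE_SUBNET_COUNT slices, and with few
      # validators the same index can appear multiple times in a slice.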
      index = subcommittee[0]
      expectedCount = subcommittee.count(index)
      pubkey = state[].data.validators.item(index).pubkey
      keystoreData = KeystoreData(kind: KeystoreKind.Local,
                                  pubkey: pubkey,
                                  privateKey: MockPrivKeys[index])
      validator = AttachedValidator(
        kind: ValidatorKind.Local, data: keystoreData, index: Opt.some index)
      resMsg = waitFor getSyncCommitteeMessage(
        validator, state[].data.fork, state[].data.genesis_validators_root, slot,
        state[].root)
      msg = resMsg.get()

      syncCommitteeMsgPool = newClone(SyncCommitteeMsgPool.init(keys.newRng()))
      res = waitFor validateSyncCommitteeMessage(
        dag, batchCrypto, syncCommitteeMsgPool, msg, subcommitteeIdx,
        slot.start_beacon_time(), true)
      (positions, cookedSig) = res.get()

    syncCommitteeMsgPool[].addSyncCommitteeMessage(
      msg.slot,
      msg.beacon_block_root,
      msg.validator_index,
      cookedSig,
      subcommitteeIdx,
      positions)
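
    # Once the message is in the pool, an aggregator can produce a signed
    # contribution for the subcommittee and fold it into a sync aggregate.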
    let
      contribution = block:
        let contribution = (ref SignedContributionAndProof)()
        check:
          syncCommitteeMsgPool[].produceContribution(
            slot, state[].root, subcommitteeIdx,
            contribution.message.contribution)
        syncCommitteeMsgPool[].addContribution(
          contribution[], contribution.message.contribution.signature.load.get)
        let signRes = waitFor validator.getContributionAndProofSignature(
          state[].data.fork, state[].data.genesis_validators_root,
          contribution[].message)
        doAssert(signRes.isOk())
        contribution[].signature = signRes.get()
        contribution
      aggregate = syncCommitteeMsgPool[].produceSyncAggregate(state[].root)

    check:
      expectedCount > 1 # Cover edge case
      res.isOk
      contribution.message.contribution.aggregation_bits.countOnes == expectedCount
      aggregate.sync_committee_bits.countOnes == expectedCount

      # Same message twice should be ignored
      validateSyncCommitteeMessage(
        dag, batchCrypto, syncCommitteeMsgPool, msg, subcommitteeIdx,
        state[].data.slot.start_beacon_time(), true).waitFor().isErr()