accelerate `getShufflingRef` (#4911)
When an uncached `ShufflingRef` is requested, we currently replay state which can take several seconds. Acceleration is possible by: 1. Start from any state with locked-in `get_active_validator_indices`. Any blocks / slots applied to such a state can only affect that result for future epochs, so are viable for querying target epoch. `compute_activation_exit_epoch(state.slot.epoch) > target.epoch` 2. Determine highest common ancestor among `state` and `target.blck`. At the ancestor slot, same rules re `get_active_validator_indices`. `compute_activation_exit_epoch(ancestorSlot.epoch) > target.epoch` 3. We now have a `state` that shares history with `target.blck` up through a common ancestor slot. Any blocks / slots that the `state` contains, which are not part of the `target.blck` history, affect `get_active_validator_indices` at epochs _after_ `target.epoch`. 4. Select `state.randao_mixes[N]` that is closest to common ancestor. Either direction is fine (above / below ancestor). 5. From that RANDAO mix, mix in / out all RANDAO reveals from blocks in-between. This is just an XOR operation, so fully reversible. `mix = mix xor SHA256(blck.message.body.randao_reveal)` 6. Compute the attester dependent slot from `target.epoch`. `if epoch >= 2: (target.epoch - 1).start_slot - 1 else: GENESIS_SLOT` 7. Trace back from `target.blck` to the attester dependent slot. We now have the destination for which we want to obtain RANDAO. 8. Mix in all RANDAO reveals from blocks up through the `dependentBlck`. Same method, no special handling necessary for epoch transitions. 9. Combine `get_active_validator_indices` from `state` at `target.epoch` with the recovered RANDAO value at `dependentBlck` to obtain the requested shuffling, and construct the `ShufflingRef` without replay. 
* more tests and simplify logic * test with different number of deposits per branch * Update beacon_chain/consensus_object_pools/blockchain_dag.nim Co-authored-by: Jacek Sieka <jacek@status.im> * `commonAncestor` tests * lint --------- Co-authored-by: Jacek Sieka <jacek@status.im>
This commit is contained in:
parent
d263f7f0cb
commit
ea97e93e74
|
@ -99,10 +99,11 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
|
|||
OK: 2/2 Fail: 0/2 Skip: 0/2
|
||||
## BlockRef and helpers
|
||||
```diff
|
||||
+ commonAncestor sanity OK
|
||||
+ get_ancestor sanity OK
|
||||
+ isAncestorOf sanity OK
|
||||
```
|
||||
OK: 2/2 Fail: 0/2 Skip: 0/2
|
||||
OK: 3/3 Fail: 0/3 Skip: 0/3
|
||||
## BlockSlot and helpers
|
||||
```diff
|
||||
+ atSlot sanity OK
|
||||
|
@ -445,6 +446,11 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
|
|||
+ RestErrorMessage writer tests OK
|
||||
```
|
||||
OK: 2/2 Fail: 0/2 Skip: 0/2
|
||||
## Shufflings
|
||||
```diff
|
||||
+ Accelerated shuffling computation OK
|
||||
```
|
||||
OK: 1/1 Fail: 0/1 Skip: 0/1
|
||||
## Slashing Interchange tests [Preset: mainnet]
|
||||
```diff
|
||||
+ Slashing test: duplicate_pubkey_not_slashable.json OK
|
||||
|
@ -676,4 +682,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
|
|||
OK: 9/9 Fail: 0/9 Skip: 0/9
|
||||
|
||||
---TOTAL---
|
||||
OK: 385/390 Fail: 0/390 Skip: 5/390
|
||||
OK: 387/392 Fail: 0/392 Skip: 5/392
|
||||
|
|
|
@ -149,6 +149,30 @@ func get_ancestor*(blck: BlockRef, slot: Slot,
|
|||
|
||||
blck = blck.parent
|
||||
|
||||
func commonAncestor*(a, b: BlockRef, lowSlot: Slot): Opt[BlockRef] =
  ## Return the common ancestor with highest slot of two non-nil `BlockRef`,
  ## limited by `lowSlot` (`err` if the walk would descend below it).
  doAssert a != nil
  doAssert b != nil
  var
    x = a
    y = b
  if x.slot < lowSlot or y.slot < lowSlot:
    return err()
  while x != y:
    # Keep `x` as the node at the higher (or equal) slot, then step it up;
    # this mirrors advancing whichever side is currently deeper.
    if x.slot < y.slot:
      swap x, y
    x = x.parent
    doAssert x != nil, "All `BlockRef` lead to `finalizedHead`"
    if x.slot < lowSlot:
      return err()
  ok x
|
||||
|
||||
func atSlot*(blck: BlockRef, slot: Slot): BlockSlot =
|
||||
## Return a BlockSlot at a given slot, with the block set to the closest block
|
||||
## available. If slot comes from before the block, a suitable block ancestor
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
|
||||
import
|
||||
std/[algorithm, sequtils, tables, sets],
|
||||
stew/[assign2, byteutils, results],
|
||||
stew/[arrayops, assign2, byteutils, results],
|
||||
metrics, snappy, chronicles,
|
||||
../spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers,
|
||||
state_transition, validator],
|
||||
|
@ -399,6 +399,17 @@ func nextTimestamp[I, T](cache: var LRUCache[I, T]): uint32 =
|
|||
inc cache.timestamp
|
||||
cache.timestamp
|
||||
|
||||
template peekIt[I, T](cache: var LRUCache[I, T], predicate: untyped): Opt[T] =
  ## Return the first cached entry matching `predicate`, or `none`.
  ## Unlike a lookup via `findIt`, this does not touch `cache.timestamp`,
  ## so the entry's LRU position is left unchanged ("peek" semantics).
  ## `it` is injected into `predicate`'s scope and refers to the entry value.
  block:
    var res: Opt[T]
    for i in 0 ..< I:
      template e: untyped = cache.entries[i]
      template it: untyped {.inject, used.} = e.value
      # `lastUsed == 0` appears to mark an unoccupied slot - skip those.
      if e.lastUsed != 0 and predicate:
        res.ok it
        break
    res
|
||||
|
||||
template findIt[I, T](cache: var LRUCache[I, T], predicate: untyped): Opt[T] =
|
||||
block:
|
||||
var res: Opt[T]
|
||||
|
@ -475,17 +486,8 @@ func epochKey(dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochKey] =
|
|||
|
||||
Opt.some(EpochKey(bid: bsi.bid, epoch: epoch))
|
||||
|
||||
func shufflingDependentSlot*(epoch: Epoch): Slot =
  ## Slot whose block root determines the attester shuffling for `epoch`:
  ## the last slot of `epoch - 2`, or `Slot(0)` for epochs 0 and 1
  ## (where there is no full lookahead period yet).
  if epoch >= 2: (epoch - 1).start_slot() - 1 else: Slot(0)
|
||||
|
||||
func putShufflingRef*(dag: ChainDAGRef, shufflingRef: ShufflingRef) =
|
||||
## Store shuffling in the cache
|
||||
|
@ -496,6 +498,30 @@ func putShufflingRef*(dag: ChainDAGRef, shufflingRef: ShufflingRef) =
|
|||
|
||||
dag.shufflingRefs.put shufflingRef
|
||||
|
||||
func findShufflingRef*(
    dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[ShufflingRef] =
  ## Lookup a shuffling in the cache, returning `none` if it's not present - see
  ## `getShufflingRef` for a version that creates a new instance if it's missing.
  ## Checks both the dedicated `ShufflingRef` cache and, as a fallback, the
  ## shufflings embedded in cached `EpochRef`s.
  let
    dependent_slot = epoch.shufflingDependentSlot
    dependent_bsi = ? dag.atSlot(bid, dependent_slot)

  # Check `ShufflingRef` cache
  let shufflingRef = dag.shufflingRefs.findIt(
    it.epoch == epoch and it.attester_dependent_root == dependent_bsi.bid.root)
  if shufflingRef.isOk:
    return shufflingRef

  # Check `EpochRef` cache - `peekIt` avoids promoting the `EpochRef` in its
  # own LRU; the found shuffling is copied into the `ShufflingRef` cache so
  # the next lookup hits the fast path above.
  let epochRef = dag.epochRefs.peekIt(
    it.shufflingRef.epoch == epoch and
    it.shufflingRef.attester_dependent_root == dependent_bsi.bid.root)
  if epochRef.isOk:
    dag.putShufflingRef(epochRef.get.shufflingRef)
    return ok epochRef.get.shufflingRef

  err()
|
||||
|
||||
func findEpochRef*(
|
||||
dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochRef] =
|
||||
## Lookup an EpochRef in the cache, returning `none` if it's not present - see
|
||||
|
@ -1314,23 +1340,272 @@ proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef =
|
|||
dag.finalizedHead.blck, dag.finalizedHead.slot.epoch, false).expect(
|
||||
"getEpochRef for finalized head should always succeed")
|
||||
|
||||
func ancestorSlotForShuffling*(
    dag: ChainDAGRef, state: ForkyHashedBeaconState,
    blck: BlockRef, epoch: Epoch): Opt[Slot] =
  ## Return slot of `blck` ancestor to which `state` can be rewound
  ## so that RANDAO at `epoch.shufflingDependentSlot` can be computed.
  ## Return `err` if `state` is unviable to compute shuffling for `blck@epoch`.

  # A state must be somewhat recent so that `get_active_validator_indices`
  # for the queried `epoch` cannot be affected by any such skipped processing.
  # `numDelayEpochs` is the activation/exit lookahead measured from genesis.
  const numDelayEpochs = compute_activation_exit_epoch(GENESIS_EPOCH).uint64
  let
    lowEpoch = max(epoch, (numDelayEpochs - 1).Epoch) - (numDelayEpochs - 1)
    lowSlot = lowEpoch.start_slot
  if state.data.slot < lowSlot or blck.slot < lowSlot:
    return err()

  # Check that state is related to the information stored in the DAG,
  # and determine the corresponding `BlockRef`, or `finalizedHead` if finalized
  let
    stateBid = state.latest_block_id
    stateBlck =
      if dag.finalizedHead.blck == nil:
        return err()
      elif stateBid.slot > dag.finalizedHead.blck.slot:
        # Unfinalized: the state's block must be known to the DAG
        ? dag.getBlockRef(stateBid.root)
      elif stateBid.slot == dag.finalizedHead.blck.slot:
        if stateBid.root != dag.finalizedHead.blck.root:
          return err()
        dag.finalizedHead.blck
      else:
        # Finalized: verify the state's block is on the canonical chain
        let bsi = ? dag.getBlockIdAtSlot(stateBid.slot)
        if bsi.bid != stateBid:
          return err()
        dag.finalizedHead.blck

  # Check that history up to `lowSlot` is included in `state`,
  # otherwise `get_active_validator_indices` may still change
  if lowSlot <= dag.finalizedHead.blck.slot:
    let
      bsi = ? dag.getBlockIdAtSlot(lowSlot)
      stateLowBlockRoot =
        if state.data.slot == lowSlot:
          stateBid.root
        else:
          state.data.get_block_root_at_slot(lowSlot)
    if stateLowBlockRoot != bsi.bid.root:
      return err()

  # Compute ancestor slot for starting RANDAO recovery
  let
    ancestorBlck =
      if stateBlck == dag.finalizedHead.blck:
        dag.finalizedHead.blck
      else:
        ? commonAncestor(blck, stateBlck, lowSlot)
    dependentSlot = epoch.shufflingDependentSlot
  doAssert dependentSlot >= lowSlot
  # Clamp to the earliest of: state's own block, the common ancestor, and the
  # shuffling-dependent slot - RANDAO recovery starts no later than any of them.
  ok min(min(stateBid.slot, ancestorBlck.slot), dependentSlot)
|
||||
|
||||
proc mixRandao(
    dag: ChainDAGRef, mix: var Eth2Digest,
    bid: BlockId): Opt[void] =
  ## Mix in/out the RANDAO reveal from the given block.
  ## XOR is self-inverse, so the same call both applies and reverts a reveal;
  ## `err` if the block body cannot be loaded from the DAG.
  let bdata = ? dag.getForkedBlock(bid)
  withBlck(bdata): # See `process_randao` / `process_randao_mixes_reset`
    mix.data.mxor eth2digest(blck.message.body.randao_reveal.toRaw()).data
  ok()
|
||||
|
||||
proc computeRandaoMix*(
    dag: ChainDAGRef, state: ForkyHashedBeaconState,
    blck: BlockRef, epoch: Epoch
): Opt[tuple[dependentBid: BlockId, mix: Eth2Digest]] =
  ## Compute the requested RANDAO mix for `blck@epoch` based on `state`.
  ## `state` must have the correct `get_active_validator_indices` for `epoch`.
  ## RANDAO reveals of blocks from `state.data.slot` back to `ancestorSlot` are
  ## mixed out from `state.data.randao_mixes`, and RANDAO reveals from blocks
  ## up through `epoch.shufflingDependentSlot` are mixed in.
  let
    stateSlot = state.data.slot
    dependentSlot = epoch.shufflingDependentSlot
    # Check `state` has locked-in `get_active_validator_indices` for `epoch`
    ancestorSlot = ? dag.ancestorSlotForShuffling(state, blck, epoch)
  doAssert ancestorSlot <= stateSlot
  doAssert ancestorSlot <= dependentSlot

  # Load initial mix: pick whichever stored `randao_mixes` entry is closest to
  # `ancestorSlot` (above or below), to minimize the number of reveals to XOR
  var mix {.noinit.}: Eth2Digest
  let
    stateEpoch = stateSlot.epoch
    ancestorEpoch = ancestorSlot.epoch
    highRandaoSlot =
      # `randao_mixes[ancestorEpoch]`
      if stateEpoch == ancestorEpoch:
        stateSlot
      else:
        (ancestorEpoch + 1).start_slot - 1
    startSlot =
      if ancestorEpoch == GENESIS_EPOCH:
        # Can only move backward
        mix = state.data.get_randao_mix(ancestorEpoch)
        highRandaoSlot
      else:
        # `randao_mixes[ancestorEpoch - 1]`
        let lowRandaoSlot = ancestorEpoch.start_slot - 1
        if highRandaoSlot - ancestorSlot < ancestorSlot - lowRandaoSlot:
          mix = state.data.get_randao_mix(ancestorEpoch)
          highRandaoSlot
        else:
          mix = state.data.get_randao_mix(ancestorEpoch - 1)
          lowRandaoSlot
    slotsToMix =
      # The (inclusive) slot range whose block reveals must be XORed to bring
      # `mix` from `startSlot` to `ancestorSlot` - direction does not matter
      if startSlot > ancestorSlot:
        (ancestorSlot + 1) .. startSlot
      else:
        (startSlot + 1) .. ancestorSlot
    highRoot =
      if slotsToMix.b == stateSlot:
        state.latest_block_root
      else:
        doAssert slotsToMix.b < stateSlot
        state.data.get_block_root_at_slot(slotsToMix.b)

  # Move `mix` from `startSlot` to `ancestorSlot`
  var bid =
    if slotsToMix.b >= dag.finalizedHead.slot:
      # Unfinalized: walk `BlockRef` parents
      var b = ? dag.getBlockRef(highRoot)
      let lowSlot = max(slotsToMix.a, dag.finalizedHead.slot)
      while b.bid.slot > lowSlot:
        ? dag.mixRandao(mix, b.bid)
        b = b.parent
        doAssert b != nil
      b.bid
    else:
      # Finalized: resolve the block id at `highSlot`, preferring the state's
      # own `block_roots` window over a DB lookup where still available
      var highSlot = slotsToMix.b
      const availableSlots = SLOTS_PER_HISTORICAL_ROOT
      let lowSlot = max(state.data.slot, availableSlots.Slot) - availableSlots
      # Skip over empty slots: identical adjacent roots mean no block there
      while highSlot > lowSlot and
          state.data.get_block_root_at_slot(highSlot - 1) == highRoot:
        dec highSlot
      if highSlot + SLOTS_PER_HISTORICAL_ROOT > state.data.slot:
        BlockId(slot: highSlot, root: highRoot)
      else:
        let bsi = ? dag.getBlockIdAtSlot(highSlot)
        doAssert bsi.bid.root == highRoot
        bsi.bid
  while bid.slot >= slotsToMix.a:
    ? dag.mixRandao(mix, bid)
    bid = ? dag.parent(bid)

  # Move `mix` from `ancestorSlot` to `dependentSlot`
  var dependentBid {.noinit.}: BlockId
  bid =
    if dependentSlot >= dag.finalizedHead.slot:
      var b = blck.get_ancestor(dependentSlot)
      doAssert b != nil
      dependentBid = b.bid
      let lowSlot = max(ancestorSlot, dag.finalizedHead.slot)
      while b.bid.slot > lowSlot:
        ? dag.mixRandao(mix, b.bid)
        b = b.parent
        doAssert b != nil
      b.bid
    else:
      let bsi = ? dag.getBlockIdAtSlot(dependentSlot)
      dependentBid = bsi.bid
      bsi.bid
  while bid.slot > ancestorSlot:
    ? dag.mixRandao(mix, bid)
    bid = ? dag.parent(bid)

  ok (dependentBid: dependentBid, mix: mix)
|
||||
|
||||
proc computeShufflingRefFromState*(
    dag: ChainDAGRef, state: ForkyHashedBeaconState,
    blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] =
  ## Build a `ShufflingRef` for `blck@epoch` from `state` without state
  ## replay, using the accelerated RANDAO mix recovery; `err` if `state`
  ## is not viable for this target.
  let (dependentBid, mix) = ? dag.computeRandaoMix(state, blck, epoch)
  ok ShufflingRef(
    epoch: epoch,
    attester_dependent_root: dependentBid.root,
    shuffled_active_validator_indices:
      state.data.get_shuffled_active_validator_indices(epoch, mix))
|
||||
|
||||
proc computeShufflingRefFromMemory*(
    dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] =
  ## Compute `ShufflingRef` from states available in memory (up to ~5 ms).
  ## Tries each in-memory state in turn; the `return` inside the template
  ## exits this proc on the first viable state.
  template tryWithState(state: ForkedHashedBeaconState) =
    block:
      withState(state):
        let shufflingRef =
          dag.computeShufflingRefFromState(forkyState, blck, epoch)
        if shufflingRef.isOk:
          return shufflingRef
  tryWithState dag.headState
  tryWithState dag.epochRefState
  tryWithState dag.clearanceState
  # Falls through: the zero-initialized `result` is presumably the `err`
  # value of `Opt[ShufflingRef]` - NOTE(review): confirm against `Opt` impl
|
||||
|
||||
proc computeShufflingRefFromDatabase*(
    dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] =
  ## Load state from DB, for when DAG states are unviable (up to ~500 ms).
  ## Walks epoch boundaries backward from the dependent slot, stopping once
  ## a stored state is found whose active-validator set is still locked in
  ## for `epoch` (`compute_activation_exit_epoch(e) > epoch`).
  let
    dependentSlot = epoch.shufflingDependentSlot
    state = newClone(dag.headState)
  var
    e = dependentSlot.epoch
    b = blck
  while e > GENESIS_EPOCH and compute_activation_exit_epoch(e) > epoch:
    let boundaryBlockSlot = e.start_slot - 1
    b = b.get_ancestor(boundaryBlockSlot) # nil if < finalized head
    let
      bid =
        if b != nil:
          b.bid
        else:
          # Below finalized head: resolve via the canonical chain
          let bsi = ? dag.getBlockIdAtSlot(boundaryBlockSlot)
          bsi.bid
      bsi = BlockSlotId.init(bid, boundaryBlockSlot + 1)
    if not dag.getState(bsi, state[]):
      # No stored state at this boundary - try one epoch earlier
      dec e
      continue

    return withState(state[]):
      dag.computeShufflingRefFromState(forkyState, blck, epoch)
  err()
|
||||
|
||||
proc computeShufflingRef*(
    dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] =
  ## Compute `ShufflingRef` for `blck@epoch` without full state replay:
  ## first from in-memory states (fast), then from database states (slower).
  # Try to compute `ShufflingRef` from states available in memory;
  # the `return` inside the template exits this proc on first success
  template tryWithState(state: ForkedHashedBeaconState) =
    withState(state):
      let shufflingRef =
        dag.computeShufflingRefFromState(forkyState, blck, epoch)
      if shufflingRef.isOk:
        return shufflingRef
  tryWithState dag.headState
  tryWithState dag.epochRefState
  tryWithState dag.clearanceState

  # Fall back to database
  dag.computeShufflingRefFromDatabase(blck, epoch)
|
||||
|
||||
proc getShufflingRef*(
    dag: ChainDAGRef, blck: BlockRef, epoch: Epoch,
    preFinalized: bool): Opt[ShufflingRef] =
  ## Return the shuffling in the given history and epoch - this potentially is
  ## faster than returning a full EpochRef because the shuffling is determined
  ## an epoch in advance and therefore is less sensitive to reorgs.
  ## Lookup order: caches, accelerated computation, full `EpochRef` replay.
  var shufflingRef = dag.findShufflingRef(blck.bid, epoch)
  if shufflingRef.isSome:
    return shufflingRef

  # Use existing states to quickly compute the shuffling
  shufflingRef = dag.computeShufflingRef(blck, epoch)
  if shufflingRef.isSome:
    dag.putShufflingRef(shufflingRef.get)
    return shufflingRef

  # Last resort, this can take several seconds as this may replay states
  # TODO here, we could check the existing cached states and see if any one
  # has the right dependent root - unlike EpochRef, we don't need an _exact_
  # epoch match
  let epochRef = dag.getEpochRef(blck, epoch, preFinalized).valueOr:
    return Opt.none ShufflingRef
  dag.putShufflingRef(epochRef.shufflingRef)
  Opt.some epochRef.shufflingRef
|
||||
|
||||
func stateCheckpoint*(dag: ChainDAGRef, bsi: BlockSlotId): BlockSlotId =
|
||||
## The first ancestor BlockSlot that is a state checkpoint
|
||||
|
|
|
@ -181,23 +181,24 @@ func compute_signing_root*(ssz_object: auto, domain: Eth2Domain): Eth2Digest =
|
|||
hash_tree_root(domain_wrapped_object)
|
||||
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#get_seed
|
||||
func get_seed*(
    state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType,
    mix: Eth2Digest): Eth2Digest =
  ## Return the seed at ``epoch``, computed from an explicitly supplied
  ## RANDAO ``mix`` instead of looking one up in `state.randao_mixes` -
  ## used when the mix was recovered without state replay.
  var seed_input: array[4+8+32, byte]
  seed_input[0..3] = domain_type.data
  seed_input[4..11] = uint_to_bytes(epoch.uint64)
  seed_input[12..43] = mix.data
  eth2digest(seed_input)
|
||||
|
||||
func get_seed*(state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType):
    Eth2Digest =
  ## Return the seed at ``epoch``, per the spec: the RANDAO mix is looked up
  ## in `state.randao_mixes` with the standard lookahead offset, then fed to
  ## the mix-taking `get_seed` overload.
  # Detect potential underflow
  static: doAssert EPOCHS_PER_HISTORICAL_VECTOR > MIN_SEED_LOOKAHEAD
  let mix = get_randao_mix(state, # Avoid underflow
    epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)
  state.get_seed(epoch, domain_type, mix)
|
||||
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#add_flag
|
||||
func add_flag*(flags: ParticipationFlags, flag_index: int): ParticipationFlags =
|
||||
let flag = ParticipationFlags(1'u8 shl flag_index)
|
||||
|
|
|
@ -124,15 +124,23 @@ func shuffle_list*(input: var seq[ValidatorIndex], seed: Eth2Digest) =
|
|||
|
||||
shuffle
|
||||
|
||||
func get_shuffled_active_validator_indices*(
    state: ForkyBeaconState, epoch: Epoch,
    mix: Eth2Digest): seq[ValidatorIndex] =
  ## Non-spec function, to cache a data structure from which one can cheaply
  ## compute both get_active_validator_indexes() and get_beacon_committee(),
  ## seeded from an explicitly supplied RANDAO `mix`.
  result = get_active_validator_indices(state, epoch)
  shuffle_list(result, get_seed(state, epoch, DOMAIN_BEACON_ATTESTER, mix))
|
||||
|
||||
func get_shuffled_active_validator_indices*(
    state: ForkyBeaconState, epoch: Epoch): seq[ValidatorIndex] =
  ## Non-spec function, to cache a data structure from which one can cheaply
  ## compute both get_active_validator_indexes() and get_beacon_committee().
  ## Uses the spec seed derived from `state.randao_mixes`.
  var active_validator_indices = get_active_validator_indices(state, epoch)
  let seed = get_seed(state, epoch, DOMAIN_BEACON_ATTESTER)
  shuffle_list(active_validator_indices, seed)
  active_validator_indices
|
||||
|
||||
func get_shuffled_active_validator_indices*(
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# beacon_chain
|
||||
# Copyright (c) 2018-2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2018-2023 Status Research & Development GmbH
|
||||
# Licensed and distributed under either of
|
||||
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
||||
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
||||
|
@ -51,6 +51,260 @@ suite "BlockRef and helpers":
|
|||
s4.get_ancestor(Slot(3)) == s2
|
||||
s4.get_ancestor(Slot(4)) == s4
|
||||
|
||||
test "commonAncestor sanity":
|
||||
# s0
|
||||
# / \
|
||||
# s1 s3
|
||||
# / \
|
||||
# s2 s6
|
||||
# / \ \
|
||||
# s4 s5 s7
|
||||
# \
|
||||
# s8
|
||||
# \
|
||||
# s9
|
||||
let
|
||||
s0 = BlockRef(bid: BlockId(slot: Slot(0)))
|
||||
s1 = BlockRef(bid: BlockId(slot: Slot(1)), parent: s0)
|
||||
s2 = BlockRef(bid: BlockId(slot: Slot(2)), parent: s1)
|
||||
s3 = BlockRef(bid: BlockId(slot: Slot(3)), parent: s0)
|
||||
s4 = BlockRef(bid: BlockId(slot: Slot(4)), parent: s2)
|
||||
s5 = BlockRef(bid: BlockId(slot: Slot(5)), parent: s2)
|
||||
s6 = BlockRef(bid: BlockId(slot: Slot(6)), parent: s3)
|
||||
s7 = BlockRef(bid: BlockId(slot: Slot(7)), parent: s6)
|
||||
s8 = BlockRef(bid: BlockId(slot: Slot(8)), parent: s4)
|
||||
s9 = BlockRef(bid: BlockId(slot: Slot(9)), parent: s8)
|
||||
|
||||
check:
|
||||
commonAncestor(s0, s0, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s0, s1, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s0, s2, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s0, s3, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s0, s4, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s0, s5, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s0, s6, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s0, s7, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s0, s8, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s0, s9, Slot(0)) == Opt.some(s0)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
||||
check commonAncestor(s0, b, Slot(1)) == Opt.none(BlockRef)
|
||||
|
||||
check:
|
||||
commonAncestor(s1, s0, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s1, s1, Slot(0)) == Opt.some(s1)
|
||||
commonAncestor(s1, s2, Slot(0)) == Opt.some(s1)
|
||||
commonAncestor(s1, s3, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s1, s4, Slot(0)) == Opt.some(s1)
|
||||
commonAncestor(s1, s5, Slot(0)) == Opt.some(s1)
|
||||
commonAncestor(s1, s6, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s1, s7, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s1, s8, Slot(0)) == Opt.some(s1)
|
||||
commonAncestor(s1, s9, Slot(0)) == Opt.some(s1)
|
||||
for b in [s0, s3, s6, s7]:
|
||||
check commonAncestor(s1, b, Slot(1)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
||||
check commonAncestor(s1, b, Slot(2)) == Opt.none(BlockRef)
|
||||
|
||||
check:
|
||||
commonAncestor(s2, s0, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s2, s1, Slot(0)) == Opt.some(s1)
|
||||
commonAncestor(s2, s2, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s2, s3, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s2, s4, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s2, s5, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s2, s6, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s2, s7, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s2, s8, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s2, s9, Slot(0)) == Opt.some(s2)
|
||||
for b in [s0, s3, s6, s7]:
|
||||
check commonAncestor(s2, b, Slot(1)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s3, s6, s7]:
|
||||
check commonAncestor(s2, b, Slot(2)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
||||
check commonAncestor(s2, b, Slot(3)) == Opt.none(BlockRef)
|
||||
|
||||
check:
|
||||
commonAncestor(s3, s0, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s3, s1, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s3, s2, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s3, s3, Slot(0)) == Opt.some(s3)
|
||||
commonAncestor(s3, s4, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s3, s5, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s3, s6, Slot(0)) == Opt.some(s3)
|
||||
commonAncestor(s3, s7, Slot(0)) == Opt.some(s3)
|
||||
commonAncestor(s3, s8, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s3, s9, Slot(0)) == Opt.some(s0)
|
||||
for b in [s0, s1, s2, s4, s5, s8, s9]:
|
||||
check commonAncestor(s3, b, Slot(1)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s4, s5, s8, s9]:
|
||||
check commonAncestor(s3, b, Slot(2)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s4, s5, s8, s9]:
|
||||
check commonAncestor(s3, b, Slot(3)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
||||
check commonAncestor(s3, b, Slot(4)) == Opt.none(BlockRef)
|
||||
|
||||
check:
|
||||
commonAncestor(s4, s0, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s4, s1, Slot(0)) == Opt.some(s1)
|
||||
commonAncestor(s4, s2, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s4, s3, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s4, s4, Slot(0)) == Opt.some(s4)
|
||||
commonAncestor(s4, s5, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s4, s6, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s4, s7, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s4, s8, Slot(0)) == Opt.some(s4)
|
||||
commonAncestor(s4, s9, Slot(0)) == Opt.some(s4)
|
||||
for b in [s0, s3, s6, s7]:
|
||||
check commonAncestor(s4, b, Slot(1)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s3, s6, s7]:
|
||||
check commonAncestor(s4, b, Slot(2)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s5, s6, s7]:
|
||||
check commonAncestor(s4, b, Slot(3)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s5, s6, s7]:
|
||||
check commonAncestor(s4, b, Slot(4)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
||||
check commonAncestor(s4, b, Slot(5)) == Opt.none(BlockRef)
|
||||
|
||||
check:
|
||||
commonAncestor(s5, s0, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s5, s1, Slot(0)) == Opt.some(s1)
|
||||
commonAncestor(s5, s2, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s5, s3, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s5, s4, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s5, s5, Slot(0)) == Opt.some(s5)
|
||||
commonAncestor(s5, s6, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s5, s7, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s5, s8, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s5, s9, Slot(0)) == Opt.some(s2)
|
||||
for b in [s0, s3, s6, s7]:
|
||||
check commonAncestor(s5, b, Slot(1)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s3, s6, s7]:
|
||||
check commonAncestor(s5, b, Slot(2)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s6, s7, s8, s9]:
|
||||
check commonAncestor(s5, b, Slot(3)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s6, s7, s8, s9]:
|
||||
check commonAncestor(s5, b, Slot(4)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s6, s7, s8, s9]:
|
||||
check commonAncestor(s5, b, Slot(5)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
||||
check commonAncestor(s5, b, Slot(6)) == Opt.none(BlockRef)
|
||||
|
||||
check:
|
||||
commonAncestor(s6, s0, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s6, s1, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s6, s2, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s6, s3, Slot(0)) == Opt.some(s3)
|
||||
commonAncestor(s6, s4, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s6, s5, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s6, s6, Slot(0)) == Opt.some(s6)
|
||||
commonAncestor(s6, s7, Slot(0)) == Opt.some(s6)
|
||||
commonAncestor(s6, s8, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s6, s9, Slot(0)) == Opt.some(s0)
|
||||
for b in [s0, s1, s2, s4, s5, s8, s9]:
|
||||
check commonAncestor(s6, b, Slot(1)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s4, s5, s8, s9]:
|
||||
check commonAncestor(s6, b, Slot(2)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s4, s5, s8, s9]:
|
||||
check commonAncestor(s6, b, Slot(3)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
|
||||
check commonAncestor(s6, b, Slot(4)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
|
||||
check commonAncestor(s6, b, Slot(5)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
|
||||
check commonAncestor(s6, b, Slot(6)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
||||
check commonAncestor(s6, b, Slot(7)) == Opt.none(BlockRef)
|
||||
|
||||
check:
|
||||
commonAncestor(s7, s0, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s7, s1, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s7, s2, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s7, s3, Slot(0)) == Opt.some(s3)
|
||||
commonAncestor(s7, s4, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s7, s5, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s7, s6, Slot(0)) == Opt.some(s6)
|
||||
commonAncestor(s7, s7, Slot(0)) == Opt.some(s7)
|
||||
commonAncestor(s7, s8, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s7, s9, Slot(0)) == Opt.some(s0)
|
||||
for b in [s0, s1, s2, s4, s5, s8, s9]:
|
||||
check commonAncestor(s7, b, Slot(1)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s4, s5, s8, s9]:
|
||||
check commonAncestor(s7, b, Slot(2)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s4, s5, s8, s9]:
|
||||
check commonAncestor(s7, b, Slot(3)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
|
||||
check commonAncestor(s7, b, Slot(4)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
|
||||
check commonAncestor(s7, b, Slot(5)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
|
||||
check commonAncestor(s7, b, Slot(6)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s8, s9]:
|
||||
check commonAncestor(s7, b, Slot(7)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
||||
check commonAncestor(s7, b, Slot(8)) == Opt.none(BlockRef)
|
||||
|
||||
check:
|
||||
commonAncestor(s8, s0, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s8, s1, Slot(0)) == Opt.some(s1)
|
||||
commonAncestor(s8, s2, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s8, s3, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s8, s4, Slot(0)) == Opt.some(s4)
|
||||
commonAncestor(s8, s5, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s8, s6, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s8, s7, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s8, s8, Slot(0)) == Opt.some(s8)
|
||||
commonAncestor(s8, s9, Slot(0)) == Opt.some(s8)
|
||||
for b in [s0, s3, s6, s7]:
|
||||
check commonAncestor(s8, b, Slot(1)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s3, s6, s7]:
|
||||
check commonAncestor(s8, b, Slot(2)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s5, s6, s7]:
|
||||
check commonAncestor(s8, b, Slot(3)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s5, s6, s7]:
|
||||
check commonAncestor(s8, b, Slot(4)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
|
||||
check commonAncestor(s8, b, Slot(5)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
|
||||
check commonAncestor(s8, b, Slot(6)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
|
||||
check commonAncestor(s8, b, Slot(7)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
|
||||
check commonAncestor(s8, b, Slot(8)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
||||
check commonAncestor(s8, b, Slot(9)) == Opt.none(BlockRef)
|
||||
|
||||
check:
|
||||
commonAncestor(s9, s0, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s9, s1, Slot(0)) == Opt.some(s1)
|
||||
commonAncestor(s9, s2, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s9, s3, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s9, s4, Slot(0)) == Opt.some(s4)
|
||||
commonAncestor(s9, s5, Slot(0)) == Opt.some(s2)
|
||||
commonAncestor(s9, s6, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s9, s7, Slot(0)) == Opt.some(s0)
|
||||
commonAncestor(s9, s8, Slot(0)) == Opt.some(s8)
|
||||
commonAncestor(s9, s9, Slot(0)) == Opt.some(s9)
|
||||
for b in [s0, s3, s6, s7]:
|
||||
check commonAncestor(s9, b, Slot(1)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s3, s6, s7]:
|
||||
check commonAncestor(s9, b, Slot(2)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s5, s6, s7]:
|
||||
check commonAncestor(s9, b, Slot(3)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s5, s6, s7]:
|
||||
check commonAncestor(s9, b, Slot(4)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
|
||||
check commonAncestor(s9, b, Slot(5)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
|
||||
check commonAncestor(s9, b, Slot(6)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
|
||||
check commonAncestor(s9, b, Slot(7)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
|
||||
check commonAncestor(s9, b, Slot(8)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8]:
|
||||
check commonAncestor(s9, b, Slot(9)) == Opt.none(BlockRef)
|
||||
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
||||
check commonAncestor(s9, b, Slot(10)) == Opt.none(BlockRef)
|
||||
|
||||
suite "BlockSlot and helpers":
|
||||
test "atSlot sanity":
|
||||
let
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# beacon_chain
|
||||
# Copyright (c) 2018-2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2018-2023 Status Research & Development GmbH
|
||||
# Licensed and distributed under either of
|
||||
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
||||
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
||||
|
@ -8,8 +8,10 @@
|
|||
{.used.}
|
||||
|
||||
import
|
||||
std/[random, sequtils],
|
||||
unittest2,
|
||||
eth/keys, taskpools,
|
||||
../beacon_chain/eth1/merkle_minimal,
|
||||
../beacon_chain/spec/datatypes/base,
|
||||
../beacon_chain/spec/[beaconstate, forks, helpers, signatures, state_transition],
|
||||
../beacon_chain/[beacon_chain_db],
|
||||
|
@ -573,8 +575,7 @@ suite "chain DAG finalization tests" & preset():
|
|||
|
||||
test "init with gaps" & preset():
|
||||
for blck in makeTestBlocks(
|
||||
dag.headState, cache, int(SLOTS_PER_EPOCH * 6 - 2),
|
||||
true):
|
||||
dag.headState, cache, int(SLOTS_PER_EPOCH * 6 - 2), attested = true):
|
||||
let added = dag.addHeadBlock(verifier, blck.phase0Data, nilPhase0Callback)
|
||||
check: added.isOk()
|
||||
dag.updateHead(added[], quarantine, [])
|
||||
|
@ -1165,3 +1166,135 @@ suite "Pruning":
|
|||
check:
|
||||
dag.tail.slot == Epoch(EPOCHS_PER_STATE_SNAPSHOT).start_slot - 1
|
||||
not db.containsBlock(blocks[1].root)
|
||||
|
||||
suite "Shufflings":
|
||||
const
|
||||
numValidators = SLOTS_PER_EPOCH
|
||||
targetNumValidators = 20 * SLOTS_PER_EPOCH * MAX_DEPOSITS
|
||||
let cfg = defaultRuntimeConfig
|
||||
var deposits = newSeqOfCap[Deposit](targetNumValidators)
|
||||
for depositIndex in 0 ..< targetNumValidators:
|
||||
deposits.add Deposit(data: makeDeposit(depositIndex.int, cfg = cfg))
|
||||
let
|
||||
eth1Data = Eth1Data(
|
||||
deposit_root: deposits.attachMerkleProofs(),
|
||||
deposit_count: deposits.lenu64)
|
||||
validatorMonitor = newClone(ValidatorMonitor.init())
|
||||
dag = ChainDAGRef.init(
|
||||
cfg, makeTestDB(
|
||||
numValidators, eth1Data = Opt.some(eth1Data),
|
||||
flags = {}, cfg = cfg),
|
||||
validatorMonitor, {})
|
||||
quarantine = newClone(Quarantine.init())
|
||||
taskpool = Taskpool.new()
|
||||
|
||||
var
|
||||
verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)
|
||||
graffiti: GraffitiBytes
|
||||
proc addBlocks(blocks: uint64, attested: bool, cache: var StateCache) =
|
||||
inc distinctBase(graffiti)[0] # Avoid duplicate blocks across branches
|
||||
for blck in makeTestBlocks(
|
||||
dag.headState, cache, blocks.int, eth1_data = eth1Data,
|
||||
attested = attested, allDeposits = deposits,
|
||||
graffiti = graffiti, cfg = cfg):
|
||||
let added =
|
||||
case blck.kind
|
||||
of ConsensusFork.Phase0:
|
||||
const nilCallback = OnPhase0BlockAdded(nil)
|
||||
dag.addHeadBlock(verifier, blck.phase0Data, nilCallback)
|
||||
of ConsensusFork.Altair:
|
||||
const nilCallback = OnAltairBlockAdded(nil)
|
||||
dag.addHeadBlock(verifier, blck.altairData, nilCallback)
|
||||
of ConsensusFork.Bellatrix:
|
||||
const nilCallback = OnBellatrixBlockAdded(nil)
|
||||
dag.addHeadBlock(verifier, blck.bellatrixData, nilCallback)
|
||||
of ConsensusFork.Capella:
|
||||
const nilCallback = OnCapellaBlockAdded(nil)
|
||||
dag.addHeadBlock(verifier, blck.capellaData, nilCallback)
|
||||
of ConsensusFork.Deneb:
|
||||
const nilCallback = OnDenebBlockAdded(nil)
|
||||
dag.addHeadBlock(verifier, blck.denebData, nilCallback)
|
||||
check added.isOk()
|
||||
dag.updateHead(added[], quarantine[], [])
|
||||
|
||||
var states: seq[ref ForkedHashedBeaconState]
|
||||
|
||||
# Genesis state
|
||||
states.add newClone(dag.headState)
|
||||
|
||||
# Create a segment and cache the post state (0.75 epochs + empty slots)
|
||||
proc createSegment(attested: bool, delaySlots = 0.uint64) =
|
||||
var cache: StateCache
|
||||
|
||||
# Add some empty slots to have different deposit history
|
||||
if delaySlots > 0:
|
||||
var info: ForkedEpochInfo
|
||||
check cfg.process_slots(
|
||||
dag.headState,
|
||||
getStateField(dag.headState, slot) + delaySlots,
|
||||
cache, info, flags = {}).isOk
|
||||
|
||||
# Add 0.75 epochs
|
||||
addBlocks((SLOTS_PER_EPOCH * 3) div 4, attested = attested, cache)
|
||||
states.add newClone(dag.headState)
|
||||
|
||||
# Linear part of history (3.75 epochs)
|
||||
for _ in 0 ..< 5:
|
||||
createSegment(attested = true)
|
||||
|
||||
# Start branching (6 epochs + up to 0.5 epoch)
|
||||
func numDelaySlots(branchId: int): uint64 =
|
||||
branchId.uint64 * SLOTS_PER_EPOCH div 8
|
||||
for a in 0 ..< 2:
|
||||
let oldHead = dag.head
|
||||
createSegment(attested = false, delaySlots = a.numDelaySlots)
|
||||
for b in 0 ..< 2:
|
||||
let oldHead = dag.head
|
||||
createSegment(attested = false, delaySlots = b.numDelaySlots)
|
||||
for _ in 0 ..< 3:
|
||||
createSegment(attested = false, delaySlots = a.numDelaySlots)
|
||||
createSegment(attested = false, delaySlots = b.numDelaySlots)
|
||||
dag.updateHead(oldHead, quarantine[], [])
|
||||
dag.updateHead(oldHead, quarantine[], [])
|
||||
|
||||
# Cover entire range of epochs plus some extra
|
||||
const maxEpochOfInterest = compute_activation_exit_epoch(11.Epoch) + 2
|
||||
|
||||
test "Accelerated shuffling computation":
|
||||
randomize()
|
||||
let forkBlocks = dag.forkBlocks.toSeq()
|
||||
for _ in 0 ..< 150: # Number of random tests (against _all_ cached states)
|
||||
let
|
||||
blck = sample(forkBlocks).data
|
||||
epoch = rand(GENESIS_EPOCH .. maxEpochOfInterest)
|
||||
checkpoint "blck: " & $shortLog(blck) & " / epoch: " & $shortLog(epoch)
|
||||
|
||||
let epochRef = dag.getEpochRef(blck, epoch, true)
|
||||
check epochRef.isOk
|
||||
|
||||
proc checkShuffling(computedShufflingRef: Opt[ShufflingRef]) =
|
||||
## Check that computed shuffling matches the one from `EpochRef`.
|
||||
if computedShufflingRef.isOk:
|
||||
check computedShufflingRef.get[] == epochRef.get.shufflingRef[]
|
||||
|
||||
# If shuffling is computable from DAG, check its correctness
|
||||
checkShuffling dag.computeShufflingRefFromMemory(blck, epoch)
|
||||
|
||||
# If shuffling is computable from DB, check its correctness
|
||||
checkShuffling dag.computeShufflingRefFromDatabase(blck, epoch)
|
||||
|
||||
# Shuffling should be correct when starting from any cached state
|
||||
for state in states:
|
||||
withState(state[]):
|
||||
let
|
||||
shufflingRef =
|
||||
dag.computeShufflingRefFromState(forkyState, blck, epoch)
|
||||
stateEpoch = forkyState.data.get_current_epoch
|
||||
blckEpoch = blck.bid.slot.epoch
|
||||
minEpoch = min(stateEpoch, blckEpoch)
|
||||
if compute_activation_exit_epoch(minEpoch) <= epoch or
|
||||
dag.ancestorSlotForShuffling(forkyState, blck, epoch).isNone:
|
||||
check shufflingRef.isErr
|
||||
else:
|
||||
check shufflingRef.isOk
|
||||
checkShuffling shufflingRef
|
||||
|
|
|
@ -74,10 +74,9 @@ suite "Gossip validation " & preset():
|
|||
committeeLen(63) == 0
|
||||
|
||||
test "validateAttestation":
|
||||
var
|
||||
cache: StateCache
|
||||
var cache: StateCache
|
||||
for blck in makeTestBlocks(
|
||||
dag.headState, cache, int(SLOTS_PER_EPOCH * 5), false):
|
||||
dag.headState, cache, int(SLOTS_PER_EPOCH * 5), attested = false):
|
||||
let added = dag.addHeadBlock(verifier, blck.phase0Data) do (
|
||||
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
|
||||
epochRef: EpochRef, unrealized: FinalityCheckpoints):
|
||||
|
@ -196,7 +195,8 @@ suite "Gossip validation - Extra": # Not based on preset config
|
|||
cfg, makeTestDB(num_validators), validatorMonitor, {})
|
||||
var cache = StateCache()
|
||||
for blck in makeTestBlocks(
|
||||
dag.headState, cache, int(SLOTS_PER_EPOCH), false, cfg = cfg):
|
||||
dag.headState, cache, int(SLOTS_PER_EPOCH),
|
||||
attested = false, cfg = cfg):
|
||||
let added =
|
||||
case blck.kind
|
||||
of ConsensusFork.Phase0:
|
||||
|
|
|
@ -60,8 +60,9 @@ suite "Light client" & preset():
|
|||
|
||||
# Create blocks for final few epochs
|
||||
let blocks = min(targetSlot - slot, maxAttestedSlotsPerPeriod)
|
||||
for blck in makeTestBlocks(dag.headState, cache, blocks.int,
|
||||
attested, syncCommitteeRatio, cfg):
|
||||
for blck in makeTestBlocks(
|
||||
dag.headState, cache, blocks.int, attested = attested,
|
||||
syncCommitteeRatio = syncCommitteeRatio, cfg = cfg):
|
||||
let added =
|
||||
case blck.kind
|
||||
of ConsensusFork.Phase0:
|
||||
|
|
|
@ -47,8 +47,9 @@ suite "Light client processor" & preset():
|
|||
|
||||
var cache: StateCache
|
||||
proc addBlocks(blocks: uint64, syncCommitteeRatio: float) =
|
||||
for blck in makeTestBlocks(dag.headState, cache, blocks.int,
|
||||
attested = true, syncCommitteeRatio, cfg):
|
||||
for blck in makeTestBlocks(
|
||||
dag.headState, cache, blocks.int, attested = true,
|
||||
syncCommitteeRatio = syncCommitteeRatio, cfg = cfg):
|
||||
let added =
|
||||
case blck.kind
|
||||
of ConsensusFork.Phase0:
|
||||
|
|
|
@ -519,11 +519,13 @@ iterator makeTestBlocks*(
|
|||
state: ForkedHashedBeaconState,
|
||||
cache: var StateCache,
|
||||
blocks: int,
|
||||
attested: bool,
|
||||
eth1_data = Eth1Data(),
|
||||
attested = false,
|
||||
allDeposits = newSeq[Deposit](),
|
||||
syncCommitteeRatio = 0.0,
|
||||
graffiti = default(GraffitiBytes),
|
||||
cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock =
|
||||
var
|
||||
state = assignClone(state)
|
||||
var state = assignClone(state)
|
||||
for _ in 0..<blocks:
|
||||
let
|
||||
parent_root = withState(state[]): forkyState.latest_block_root
|
||||
|
@ -533,7 +535,24 @@ iterator makeTestBlocks*(
|
|||
state[], parent_root, getStateField(state[], slot), cache)
|
||||
else:
|
||||
@[]
|
||||
stateEth1 = getStateField(state[], eth1_data)
|
||||
stateDepositIndex = getStateField(state[], eth1_deposit_index)
|
||||
deposits =
|
||||
if stateDepositIndex < stateEth1.deposit_count:
|
||||
let
|
||||
lowIndex = stateDepositIndex
|
||||
numDeposits = min(MAX_DEPOSITS, stateEth1.deposit_count - lowIndex)
|
||||
highIndex = lowIndex + numDeposits - 1
|
||||
allDeposits[lowIndex .. highIndex]
|
||||
else:
|
||||
newSeq[Deposit]()
|
||||
sync_aggregate = makeSyncAggregate(state[], syncCommitteeRatio, cfg)
|
||||
|
||||
yield addTestBlock(state[], cache,
|
||||
attestations = attestations, sync_aggregate = sync_aggregate, cfg = cfg)
|
||||
yield addTestBlock(
|
||||
state[], cache,
|
||||
eth1_data = eth1_data,
|
||||
attestations = attestations,
|
||||
deposits = deposits,
|
||||
sync_aggregate = sync_aggregate,
|
||||
graffiti = graffiti,
|
||||
cfg = cfg)
|
||||
|
|
|
@ -17,7 +17,10 @@ import
|
|||
export beacon_chain_db, testblockutil, kvstore, kvstore_sqlite3
|
||||
|
||||
proc makeTestDB*(
|
||||
validators: Natural, cfg = defaultRuntimeConfig): BeaconChainDB =
|
||||
validators: Natural,
|
||||
eth1Data = Opt.none(Eth1Data),
|
||||
flags: UpdateFlags = {skipBlsValidation},
|
||||
cfg = defaultRuntimeConfig): BeaconChainDB =
|
||||
let
|
||||
genState = (ref ForkedHashedBeaconState)(
|
||||
kind: ConsensusFork.Phase0,
|
||||
|
@ -25,8 +28,14 @@ proc makeTestDB*(
|
|||
cfg,
|
||||
ZERO_HASH,
|
||||
0,
|
||||
makeInitialDeposits(validators.uint64, flags = {skipBlsValidation}),
|
||||
{skipBlsValidation}))
|
||||
makeInitialDeposits(validators.uint64, flags),
|
||||
flags))
|
||||
|
||||
# Override Eth1Data on request, skipping the lengthy Eth1 voting process
|
||||
if eth1Data.isOk:
|
||||
withState(genState[]):
|
||||
forkyState.data.eth1_data = eth1Data.get
|
||||
forkyState.root = hash_tree_root(forkyState.data)
|
||||
|
||||
result = BeaconChainDB.new("", cfg = cfg, inMemory = true)
|
||||
ChainDAGRef.preInit(result, genState[])
|
||||
|
|
Loading…
Reference in New Issue