share validator keys in EpochRef (#1486)

Jacek Sieka 2020-08-11 21:39:53 +02:00 committed by GitHub
parent a746b4ae7a
commit 8b0f2cc96f
4 changed files with 47 additions and 12 deletions

View File

@ -140,10 +140,10 @@ type
beacon_proposers*: array[
SLOTS_PER_EPOCH, Option[(ValidatorIndex, ValidatorPubKey)]]
shuffled_active_validator_indices*: seq[ValidatorIndex]
# This is an expensive cache that could probably be shared among epochref
# instances - in particular, validators keep their keys and locations in the
# structure
validator_keys*: seq[ValidatorPubKey]
# This is an expensive cache that is sometimes shared among epochref
# instances - in particular, validators keep their keys and locations in the
# validator list in each particular history.
validator_key_store*: (Eth2Digest, ref seq[ValidatorPubKey])
BlockRef* = ref object
## Node in object graph guaranteed to lead back to tail block, and to have
@ -193,6 +193,8 @@ type
blckRef: BlockRef, blck: SignedBeaconBlock,
state: HashedBeaconState) {.raises: [Defect], gcsafe.}
template validator_keys*(e: EpochRef): untyped = e.validator_key_store[1][]
proc shortLog*(v: BlockSlot): string =
if v.blck.slot == v.slot:
v.blck.root.data[0..3].toHex() & ":" & $v.blck.slot
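
To make the new field easier to follow outside the diff, here is a hypothetical, self-contained Nim sketch of the sharing mechanics - plain strings and ints stand in for Eth2Digest and ValidatorPubKey, and only the tuple-plus-template shape mirrors the diff:

type
  KeyStoreSketch = (string, ref seq[int])   # stand-in for (Eth2Digest, ref seq[ValidatorPubKey])
  EpochRefSketch = ref object
    validator_key_store: KeyStoreSketch

# Same shape as the template added above: unwrap the ref seq behind the store.
template validator_keys(e: EpochRefSketch): untyped = e.validator_key_store[1][]

var keys: ref seq[int]
new(keys)
keys[] = @[11, 22, 33]

let
  a = EpochRefSketch(validator_key_store: ("root-A", keys))
  b = EpochRefSketch(validator_key_store: ("root-A", keys))  # same ref seq -> shared storage

doAssert a.validator_key_store[1] == b.validator_key_store[1]  # identical ref, not a copy
doAssert a.validator_keys.len == 3                             # template reads through the ref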

View File

@ -61,7 +61,7 @@ func parent*(bs: BlockSlot): BlockSlot =
slot: bs.slot - 1
)
proc init*(T: type EpochRef, state: BeaconState, cache: var StateCache): T =
proc init*(T: type EpochRef, state: BeaconState, cache: var StateCache, prevEpoch: EpochRef): T =
let
epoch = state.get_current_epoch()
epochRef = EpochRef(
@ -77,7 +77,17 @@ proc init*(T: type EpochRef, state: BeaconState, cache: var StateCache): T =
epochRef.beacon_proposers[i] =
some((idx.get(), state.validators[idx.get].pubkey))
epochRef.validator_keys = mapIt(state.validators.toSeq, it.pubkey)
if prevEpoch != nil and
(prevEpoch.validator_key_store[0] == hash_tree_root(state.validators)):
# Validator sets typically don't change between epochs - a more efficient
# scheme could be devised where parts of the validator key set are reused
# between epochs because in a single history, the validator set only
# grows - this, however, is a trivially implementable compromise.
epochRef.validator_key_store = prevEpoch.validator_key_store
else:
epochRef.validator_key_store = (
hash_tree_root(state.validators),
newClone(mapIt(state.validators.toSeq, it.pubkey)))
epochRef
func link*(parent, child: BlockRef) =
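
A hypothetical, standalone sketch of the reuse rule above: the registry root is modelled with std/hashes over a seq[int] instead of hash_tree_root over the validator registry, and all names are illustrative rather than the real API:

import std/hashes

type
  EpochRefSketch = ref object
    validator_key_store: (Hash, ref seq[int])

proc initEpochRefSketch(validators: seq[int], prevEpoch: EpochRefSketch): EpochRefSketch =
  result = EpochRefSketch()
  let root = hash(validators)   # stand-in for hash_tree_root(state.validators)
  if prevEpoch != nil and prevEpoch.validator_key_store[0] == root:
    # Registry unchanged since the previous epoch: share the existing key seq.
    result.validator_key_store = prevEpoch.validator_key_store
  else:
    # Registry changed (or no previous epoch known): build a fresh copy.
    var keys: ref seq[int]
    new(keys)
    keys[] = validators
    result.validator_key_store = (root, keys)

let
  e1 = initEpochRefSketch(@[10, 20, 30], nil)
  e2 = initEpochRefSketch(@[10, 20, 30], e1)      # same registry -> key store is shared
  e3 = initEpochRefSketch(@[10, 20, 30, 40], e2)  # registry grew -> a fresh store is built

doAssert e1.validator_key_store[1] == e2.validator_key_store[1]
doAssert e2.validator_key_store[1] != e3.validator_key_store[1]
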
@ -153,7 +163,21 @@ proc getEpochInfo*(blck: BlockRef, state: BeaconState, cache: var StateCache): E
matching_epochinfo = blck.epochsInfo.filterIt(it.epoch == state_epoch)
if matching_epochinfo.len == 0:
let epochInfo = EpochRef.init(state, cache)
# When creating an epochref, we can sometimes reuse some of the information
# from an earlier epoch in the same history - if we're processing slots
# only, the epochref of an earlier slot of the same block will be the most
# similar
var prevEpochRefs = blck.epochsInfo.filterIt(it.epoch < state_epoch)
var prevEpochRef: EpochRef = nil # nil ok
if prevEpochRefs.len > 0:
prevEpochRef = prevEpochRefs[^1]
elif state_epoch > 0:
let parent = blck.atEpochEnd((state_epoch - 1))
if parent.blck != nil and parent.blck.epochsInfo.len > 0:
prevEpochRef = parent.blck.epochsInfo[0]
let epochInfo = EpochRef.init(state, cache, prevEpochRef)
# Don't use BlockRef caching as far as the epoch where the active
# validator indices can diverge.
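
A hypothetical sketch of the lookup order used to pick prevEpochRef above: prefer the newest cached entry for an earlier epoch on the same block, otherwise fall back to whatever the parent chain cached for the previous epoch (atEpochEnd is simplified to a single parent hop, and all names here are made up):

import std/sequtils

type
  EpochInfoSketch = ref object
    epoch: int
  BlockRefSketch = ref object
    parent: BlockRefSketch
    epochsInfo: seq[EpochInfoSketch]

proc pickPrevEpochRef(blck: BlockRefSketch, stateEpoch: int): EpochInfoSketch =
  # Newest earlier-epoch entry cached on this block wins...
  let earlier = blck.epochsInfo.filterIt(it.epoch < stateEpoch)
  if earlier.len > 0:
    return earlier[^1]
  # ...otherwise try the parent - a stand-in for atEpochEnd(state_epoch - 1).
  if stateEpoch > 0 and blck.parent != nil and blck.parent.epochsInfo.len > 0:
    return blck.parent.epochsInfo[0]
  # Nothing to reuse: result stays nil and the caller builds a fresh key store.

# Example: a block that already cached an EpochRef for epoch 3, asked at epoch 5.
let blck = BlockRefSketch(epochsInfo: @[EpochInfoSketch(epoch: 3)])
doAssert pickPrevEpochRef(blck, 5).epoch == 3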

View File

@ -54,6 +54,9 @@ proc addResolvedBlock(
blockRoot = signedBlock.root
blockRef = BlockRef.init(blockRoot, signedBlock.message)
blockEpoch = blockRef.slot.compute_epoch_at_slot()
link(parent, blockRef)
if parent.slot.compute_epoch_at_slot() == blockEpoch:
# If the parent and child blocks are from the same epoch, we can reuse
# the epoch cache - but we'll only use the current epoch because the new
@ -63,8 +66,6 @@ proc addResolvedBlock(
# Ensure we collect the epoch info if it's missing
discard getEpochInfo(blockRef, state.data, cache)
link(parent, blockRef)
dag.blocks[blockRoot] = blockRef
trace "Populating block dag", key = blockRoot, val = blockRef
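
One plausible reading of moving link(parent, blockRef) ahead of the epoch-cache handling (an inference, not stated in the commit): the fallback added to getEpochInfo walks the new block's parent pointer, so the parent has to be linked before that lookup runs. A toy ordering sketch with made-up names:

type BlockRefSketch = ref object
  parent: BlockRefSketch

proc canReuseParentCache(blck: BlockRefSketch): bool =
  # Stand-in for the parent/atEpochEnd walk performed by getEpochInfo.
  blck.parent != nil

let parent = BlockRefSketch()
var blockRef = BlockRefSketch()

doAssert not blockRef.canReuseParentCache()  # before linking, nothing can be reused
blockRef.parent = parent                     # link(parent, blockRef)
doAssert blockRef.canReuseParentCache()      # after linking, the parent chain is reachable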

View File

@ -10,7 +10,7 @@
import
options, sequtils, unittest,
./testutil, ./testblockutil,
../beacon_chain/spec/[datatypes, digest, state_transition, presets],
../beacon_chain/spec/[datatypes, digest, helpers, state_transition, presets],
../beacon_chain/[beacon_node_types, ssz],
../beacon_chain/block_pools/[chain_dag, quarantine, clearance]
@ -132,6 +132,9 @@ suiteReport "Block pool processing" & preset():
b2Add[].root == b2Get.get().refs.root
dag.heads.len == 1
dag.heads[0] == b2Add[]
# both should have the same epoch ref instance because they're from the
# same epoch
addr(b2Add[].epochsInfo[0][]) == addr(b1Add[].epochsInfo[0][])
# Skip one slot to get a gap
check:
@ -148,8 +151,6 @@ suiteReport "Block pool processing" & preset():
var blocks: array[3, BlockRef]
check:
dag.getBlockRange(Slot(0), 1, blocks.toOpenArray(0, 0)) == 0
blocks[0..<1] == [dag.tail]
@ -325,6 +326,13 @@ suiteReport "chain DAG finalization tests" & preset():
check:
dag.heads.len() == 1
# Epochrefs should share the validator key set when the validator set is
# stable
addr(dag.heads[0].epochsInfo[0].validator_key_store[1][]) ==
addr(dag.heads[0].atEpochEnd(
dag.heads[0].slot.compute_epoch_at_slot() - 1).
blck.epochsInfo[0].validator_key_store[1][])
block:
# The late block is a block whose parent was finalized long ago and thus
# is no longer a viable head candidate
@ -376,7 +384,7 @@ suiteReport "chain DAG finalization tests" & preset():
# let
# pool2 = BlockPool.init(db)
# # check that the state reloaded from database resembles what we had before
# check:
# pool2.dag.tail.root == dag.tail.root