[Split fork choice PR] Derisk-ed attestation checks changes (#1154)

* Derisked attestation pool improvements

* tune down frequent logs

* VoteTracker logging
This commit is contained in:
Mamy Ratsimbazafy 2020-06-10 08:58:12 +02:00 committed by GitHub
parent 4517bf5dd4
commit ce897fe83f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 383 additions and 250 deletions

View File

@ -8,10 +8,10 @@
{.push raises: [Defect].}
import
options,
options, chronicles,
./spec/[beaconstate, datatypes, crypto, digest, helpers, validator,
state_transition_block],
./attestation_pool, ./beacon_node_types
./block_pool, ./attestation_pool, ./beacon_node_types, ./ssz
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregation-selection
func is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex,
@ -69,3 +69,81 @@ proc aggregate_attestations*(
selection_proof: slot_signature))
none(AggregateAndProof)
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#attestation-subnets
proc isValidAttestation*(
pool: AttestationPool, attestation: Attestation, current_slot: Slot,
topicCommitteeIndex: uint64): bool =
# The attestation's committee index (attestation.data.index) is for the
# correct subnet.
if attestation.data.index != topicCommitteeIndex:
debug "isValidAttestation: attestation's committee index not for the correct subnet",
topicCommitteeIndex = topicCommitteeIndex,
attestation_data_index = attestation.data.index
return false
if not (attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >=
current_slot and current_slot >= attestation.data.slot):
debug "isValidAttestation: attestation.data.slot not within ATTESTATION_PROPAGATION_SLOT_RANGE"
return false
# The attestation is unaggregated -- that is, it has exactly one
# participating validator (len([bit for bit in attestation.aggregation_bits
# if bit == 0b1]) == 1).
# TODO a cleverer algorithm, along the lines of countOnes() in nim-stew
# But that belongs in nim-stew, since it'd break abstraction layers, to
# use details of its representation from nim-beacon-chain.
var onesCount = 0
for aggregation_bit in attestation.aggregation_bits:
if not aggregation_bit:
continue
onesCount += 1
if onesCount > 1:
debug "isValidAttestation: attestation has too many aggregation bits",
aggregation_bits = attestation.aggregation_bits
return false
if onesCount != 1:
debug "isValidAttestation: attestation has too few aggregation bits"
return false
# The attestation is the first valid attestation received for the
# participating validator for the slot, attestation.data.slot.
let maybeAttestationsSeen = getAttestationsForSlot(pool, attestation.data.slot)
if maybeAttestationsSeen.isSome:
for attestationEntry in maybeAttestationsSeen.get.attestations:
if attestation.data != attestationEntry.data:
continue
# Attestations might be aggregated eagerly or lazily; allow for both.
for validation in attestationEntry.validations:
if attestation.aggregation_bits.isSubsetOf(validation.aggregation_bits):
debug "isValidAttestation: attestation already exists at slot",
attestation_data_slot = attestation.data.slot,
attestation_aggregation_bits = attestation.aggregation_bits,
attestation_pool_validation = validation.aggregation_bits
return false
# The block being voted for (attestation.data.beacon_block_root) passes
# validation.
# We rely on the block pool to have been validated, so check for the
# existence of the block in the pool.
# TODO: consider a "slush pool" of attestations whose blocks have not yet
# propagated - i.e. imagine that attestations are smaller than blocks and
# therefore propagate faster, thus reordering their arrival in some nodes
if pool.blockPool.get(attestation.data.beacon_block_root).isNone():
debug "isValidAttestation: block doesn't exist in block pool",
attestation_data_beacon_block_root = attestation.data.beacon_block_root
return false
# The signature of attestation is valid.
# TODO need to know above which validator anyway, and this is too general
# as it supports aggregated attestations (which this can't be)
var cache = get_empty_per_epoch_cache()
if not is_valid_indexed_attestation(
pool.blockPool.headState.data.data,
get_indexed_attestation(
pool.blockPool.headState.data.data, attestation, cache), {}):
debug "isValidAttestation: signature verification failed"
return false
true

View File

@ -11,25 +11,29 @@ import
deques, sequtils, tables, options,
chronicles, stew/[byteutils], json_serialization/std/sets,
./spec/[beaconstate, datatypes, crypto, digest, helpers, validator],
./extras, ./block_pool, ./block_pools/candidate_chains, ./beacon_node_types
./extras, ./block_pool, ./block_pools/candidate_chains, ./beacon_node_types,
./fork_choice/[fork_choice_types, fork_choice]
logScope: topics = "attpool"
func init*(T: type AttestationPool, blockPool: BlockPool): T =
## Initialize an AttestationPool from the blockPool `headState`
## The `finalized_root` works around the finalized_checkpoint of the genesis block
## holding a zero_root.
# TODO blockPool is only used when resolving orphaned attestations - it should
# probably be removed as a dependency of AttestationPool (or some other
# smart refactoring)
T(
slots: initDeque[SlotData](),
mapSlotsToAttestations: initDeque[AttestationsSeen](),
blockPool: blockPool,
unresolved: initTable[Eth2Digest, UnresolvedAttestation](),
latestAttestations: initTable[ValidatorPubKey, BlockRef]()
)
proc combine*(tgt: var Attestation, src: Attestation, flags: UpdateFlags) =
## Combine the signature and participation bitfield, with the assumption that
## the same data is being signed - if the signatures overlap, they are not
## combined.
# TODO: Exported only for testing, all usages are internal
doAssert tgt.data == src.data
@ -44,43 +48,6 @@ proc combine*(tgt: var Attestation, src: Attestation, flags: UpdateFlags) =
else:
trace "Ignoring overlapping attestations"
# TODO remove/merge with p2p-interface validation
proc validate(
state: BeaconState, attestation: Attestation): bool =
# TODO what constitutes a valid attestation when it's about to be added to
# the pool? we're interested in attestations that will become viable
# for inclusion in blocks in the future and on any fork, so we need to
# consider that validations might happen using the state of a different
# fork.
# Some things are always invalid (like out-of-bounds issues etc), but
# others are more subtle - how do we validate the signature for example?
# It might be valid on one fork but not another. One thing that helps
# is that committees are stable per epoch and that it should be OK to
# include an attestation in a block even if the corresponding validator
# was slashed in the same epoch - there's no penalty for doing this and
# the vote counting logic will take care of any ill effects (TODO verify)
let data = attestation.data
# TODO re-enable check
#if not (data.crosslink.shard < SHARD_COUNT):
# notice "Attestation shard too high",
# attestation_shard = data.crosslink.shard
# return
# Without this check, we can't get a slot number for the attestation as
# certain helpers will assert
# TODO this could probably be avoided by being smart about the specific state
# used to validate the attestation: most likely if we pick the state of
# the beacon block being voted for and a slot in the target epoch
# of the attestation, we'll be safe!
# TODO the above state selection logic should probably live here in the
# attestation pool
if not (data.target.epoch == get_previous_epoch(state) or
data.target.epoch == get_current_epoch(state)):
notice "Target epoch not current or previous epoch"
return
true
proc slotIndex(
pool: var AttestationPool, state: BeaconState, attestationSlot: Slot): int =
@ -101,7 +68,7 @@ proc slotIndex(
", attestationSlot: " & $shortLog(attestationSlot) &
", startingSlot: " & $shortLog(pool.startingSlot)
if pool.slots.len == 0:
if pool.mapSlotsToAttestations.len == 0:
# Because the first attestations may arrive in any order, we'll make sure
# to start counting at the last finalized epoch start slot - anything
# earlier than that is thrown out by the above check
@ -111,15 +78,15 @@ proc slotIndex(
pool.startingSlot =
state.finalized_checkpoint.epoch.compute_start_slot_at_epoch()
if pool.startingSlot + pool.slots.len.uint64 <= attestationSlot:
if pool.startingSlot + pool.mapSlotsToAttestations.len.uint64 <= attestationSlot:
trace "Growing attestation pool",
attestationSlot = $shortLog(attestationSlot),
startingSlot = $shortLog(pool.startingSlot),
cat = "caching"
# Make sure there's a pool entry for every slot, even when there's a gap
while pool.startingSlot + pool.slots.len.uint64 <= attestationSlot:
pool.slots.addLast(SlotData())
while pool.startingSlot + pool.mapSlotsToAttestations.len.uint64 <= attestationSlot:
pool.mapSlotsToAttestations.addLast(AttestationsSeen())
if pool.startingSlot <
state.finalized_checkpoint.epoch.compute_start_slot_at_epoch():
@ -133,7 +100,7 @@ proc slotIndex(
# TODO there should be a better way to remove a whole epoch of stuff..
while pool.startingSlot <
state.finalized_checkpoint.epoch.compute_start_slot_at_epoch():
pool.slots.popFirst()
pool.mapSlotsToAttestations.popFirst()
pool.startingSlot += 1
int(attestationSlot - pool.startingSlot)
@ -168,21 +135,36 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
# reasonable to involve the head being voted for as well as the intended
# slot of the attestation - double-check this with spec
# A basic check is that the attestation is at least as new as the block being
# voted for..
# TODO: How fast is state rewind?
#       Can this be a DOS vector?
# TODO: filter valid attestations as much as possible before state rewind
# TODO: the below check does not respect the inclusion delay
# we should use isValidAttestationSlot instead
if blck.slot > attestation.data.slot:
notice "Invalid attestation (too new!)",
attestation = shortLog(attestation),
blockSlot = shortLog(blck.slot)
return
# if not isValidAttestationSlot(attestation.data.slot, blck.slot):
# # Logging in isValidAttestationSlot
# return
# Get a temporary state at the (block, slot) targeted by the attestation
updateStateData(
pool.blockPool, pool.blockPool.tmpState,
BlockSlot(blck: blck, slot: attestation.data.slot))
template state(): BeaconState = pool.blockPool.tmpState.data.data
if not validate(state, attestation):
# Check that the attestation is indeed valid
# TODO: we might want to split checks that depend
# on the state and those that don't to cheaply
# discard invalid attestations before rewinding state.
# TODO: stateCache usage
var stateCache = get_empty_per_epoch_cache()
if not isValidAttestationTargetEpoch(state, attestation):
notice "Invalid attestation",
attestation = shortLog(attestation),
current_epoch = get_current_epoch(state),
@ -195,7 +177,7 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
let
attestationSlot = attestation.data.slot
idx = pool.slotIndex(state, attestationSlot)
slotData = addr pool.slots[idx]
attestationsSeen = addr pool.mapSlotsToAttestations[idx]
validation = Validation(
aggregation_bits: attestation.aggregation_bits,
aggregate_signature: attestation.signature)
@ -203,7 +185,7 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
state, attestation.data, validation.aggregation_bits, cache)
var found = false
for a in slotData.attestations.mitems():
for a in attestationsSeen.attestations.mitems():
if a.data == attestation.data:
for v in a.validations:
if validation.aggregation_bits.isSubsetOf(v.aggregation_bits):
@ -251,7 +233,7 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
break
if not found:
slotData.attestations.add(AttestationEntry(
attestationsSeen.attestations.add(AttestationEntry(
data: attestation.data,
blck: blck,
validations: @[validation]
@ -266,29 +248,33 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
cat = "filtering"
proc add*(pool: var AttestationPool, attestation: Attestation) =
## Add a verified attestation to the fork choice context
logScope: pcs = "atp_add_attestation"
# Fetch the target block or notify the block pool that it's needed
let blck = pool.blockPool.getOrResolve(attestation.data.beacon_block_root)
# If the block exist, add it to the fork choice context
# Otherwise delay until it resolves
if blck.isNil:
pool.addUnresolved(attestation)
return
pool.addResolved(blck, attestation)
proc getAttestationsForSlot(pool: AttestationPool, newBlockSlot: Slot):
Option[SlotData] =
proc getAttestationsForSlot*(pool: AttestationPool, newBlockSlot: Slot):
Option[AttestationsSeen] =
if newBlockSlot < (GENESIS_SLOT + MIN_ATTESTATION_INCLUSION_DELAY):
debug "Too early for attestations",
newBlockSlot = shortLog(newBlockSlot),
cat = "query"
return none(SlotData)
return none(AttestationsSeen)
if pool.slots.len == 0: # startingSlot not set yet!
if pool.mapSlotsToAttestations.len == 0: # startingSlot not set yet!
info "No attestations found (pool empty)",
newBlockSlot = shortLog(newBlockSlot),
cat = "query"
return none(SlotData)
return none(AttestationsSeen)
let
# TODO in theory we could include attestations from other slots also, but
@ -299,16 +285,16 @@ proc getAttestationsForSlot(pool: AttestationPool, newBlockSlot: Slot):
attestationSlot = newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY
if attestationSlot < pool.startingSlot or
attestationSlot >= pool.startingSlot + pool.slots.len.uint64:
attestationSlot >= pool.startingSlot + pool.mapSlotsToAttestations.len.uint64:
info "No attestations matching the slot range",
attestationSlot = shortLog(attestationSlot),
startingSlot = shortLog(pool.startingSlot),
endingSlot = shortLog(pool.startingSlot + pool.slots.len.uint64),
endingSlot = shortLog(pool.startingSlot + pool.mapSlotsToAttestations.len.uint64),
cat = "query"
return none(SlotData)
return none(AttestationsSeen)
let slotDequeIdx = int(attestationSlot - pool.startingSlot)
some(pool.slots[slotDequeIdx])
some(pool.mapSlotsToAttestations[slotDequeIdx])
proc getAttestationsForBlock*(pool: AttestationPool,
state: BeaconState): seq[Attestation] =
@ -354,21 +340,17 @@ proc getAttestationsForBlock*(pool: AttestationPool,
signature: a.validations[0].aggregate_signature
)
if not validate(state, attestation):
warn "Attestation no longer validates...",
cat = "query"
continue
# TODO what's going on here is that when producing a block, we need to
# include only such attestations that will not cause block validation
# to fail. How this interacts with voting and the acceptance of
# attestations into the pool in general is an open question that needs
# revisiting - for example, when attestations are added, against which
# state should they be validated, if at all?
# TODO we're checking signatures here every time which is very slow - this
# is needed because validate does nothing for now and we don't want
# TODO we're checking signatures here every time which is very slow and we don't want
# to include a broken attestation
if not check_attestation(state, attestation, {}, cache):
warn "Attestation no longer validates...",
cat = "query"
continue
for v in a.validations[1..^1]:
@ -391,6 +373,8 @@ proc getAttestationsForBlock*(pool: AttestationPool,
return
proc resolve*(pool: var AttestationPool) =
## Check attestations in our unresolved deque
## if they can be integrated to the fork choice
logScope: pcs = "atp_resolve"
var
@ -479,88 +463,3 @@ proc selectHead*(pool: AttestationPool): BlockRef =
lmdGhost(pool, pool.blockPool.justifiedState.data.data, justifiedHead.blck)
newHead
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#attestation-subnets
proc isValidAttestation*(
pool: AttestationPool, attestation: Attestation, current_slot: Slot,
topicCommitteeIndex: uint64, flags: UpdateFlags): bool =
# The attestation's committee index (attestation.data.index) is for the
# correct subnet.
if attestation.data.index != topicCommitteeIndex:
debug "isValidAttestation: attestation's committee index not for the correct subnet",
topicCommitteeIndex = topicCommitteeIndex,
attestation_data_index = attestation.data.index
return false
if not (attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >=
current_slot and current_slot >= attestation.data.slot):
debug "isValidAttestation: attestation.data.slot not within ATTESTATION_PROPAGATION_SLOT_RANGE"
return false
# The attestation is unaggregated -- that is, it has exactly one
# participating validator (len([bit for bit in attestation.aggregation_bits
# if bit == 0b1]) == 1).
# TODO a cleverer algorithm, along the lines of countOnes() in nim-stew
# But that belongs in nim-stew, since it'd break abstraction layers, to
# use details of its representation from nim-beacon-chain.
var onesCount = 0
for aggregation_bit in attestation.aggregation_bits:
if not aggregation_bit:
continue
onesCount += 1
if onesCount > 1:
debug "isValidAttestation: attestation has too many aggregation bits",
aggregation_bits = attestation.aggregation_bits
return false
if onesCount != 1:
debug "isValidAttestation: attestation has too few aggregation bits"
return false
# The attestation is the first valid attestation received for the
# participating validator for the slot, attestation.data.slot.
let maybeSlotData = getAttestationsForSlot(pool, attestation.data.slot)
if maybeSlotData.isSome:
for attestationEntry in maybeSlotData.get.attestations:
if attestation.data != attestationEntry.data:
continue
# Attestations might be aggregated eagerly or lazily; allow for both.
for validation in attestationEntry.validations:
if attestation.aggregation_bits.isSubsetOf(validation.aggregation_bits):
debug "isValidAttestation: attestation already exists at slot",
attestation_data_slot = attestation.data.slot,
attestation_aggregation_bits = attestation.aggregation_bits,
attestation_pool_validation = validation.aggregation_bits
return false
# Temporary: on Witti testnet, attestations and blocks routinely come in out
# of order. TODO we need to support this case but to enable participation in
# Witti, before https://github.com/status-im/nim-beacon-chain/issues/1106 is
# fixed, just disable these validations for now.
const kludge = true
if kludge:
return true
# The block being voted for (attestation.data.beacon_block_root) passes
# validation.
# We rely on the block pool to have been validated, so check for the
# existence of the block in the pool.
# TODO: consider a "slush pool" of attestations whose blocks have not yet
# propagated - i.e. imagine that attestations are smaller than blocks and
# therefore propagate faster, thus reordering their arrival in some nodes
if pool.blockPool.get(attestation.data.beacon_block_root).isNone():
debug "isValidAttestation: block doesn't exist in block pool",
attestation_data_beacon_block_root = attestation.data.beacon_block_root
return false
# The signature of attestation is valid.
# TODO need to know above which validator anyway, and this is too general
# as it supports aggregated attestations (which this can't be)
var cache = get_empty_per_epoch_cache()
if not is_valid_indexed_attestation(
pool.blockPool.headState.data.data,
get_indexed_attestation(
pool.blockPool.headState.data.data, attestation, cache), {}):
debug "isValidAttestation: signature verification failed"
return false
true

View File

@ -27,7 +27,7 @@ import
mainchain_monitor, version, ssz/[merkleization],
sync_protocol, request_manager, validator_keygen, interop, statusbar,
sync_manager, state_transition,
validator_duties, validator_api
validator_duties, validator_api, attestation_aggregation
const
genesisFile* = "genesis.ssz"
@ -387,7 +387,7 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
beacon_head_slot.set slot.int64
# Time passes in here..
head = await node.handleValidatorDuties(head, lastSlot, slot)
asyncCheck node.handleValidatorDuties(lastSlot, slot)
let
nextSlotStart = saturate(node.beaconClock.fromNow(nextSlot))
@ -652,7 +652,7 @@ proc installAttestationHandlers(node: BeaconNode) =
let (afterGenesis, slot) = node.beaconClock.now().toSlot()
if not afterGenesis:
return false
node.attestationPool.isValidAttestation(attestation, slot, ci, {})))
node.attestationPool.isValidAttestation(attestation, slot, ci)))
when ETH2_SPEC == "v0.11.3":
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#interop-3
@ -666,7 +666,7 @@ proc installAttestationHandlers(node: BeaconNode) =
# isValidAttestation checks attestation.data.index == topicCommitteeIndex
# which doesn't make sense here, so rig that check to vacuously pass.
node.attestationPool.isValidAttestation(
attestation, slot, attestation.data.index, {})))
attestation, slot, attestation.data.index)))
waitFor allFutures(attestationSubscriptions)

View File

@ -5,4 +5,3 @@
-d:"chronicles_sinks=json"
-d:"withoutPrompt"
@end

View File

@ -5,7 +5,8 @@ import
stew/endians2,
spec/[datatypes, crypto, digest],
block_pools/block_pools_types,
block_pool # TODO: refactoring compat shim
block_pool, # TODO: refactoring compat shim
fork_choice/fork_choice_types
export block_pools_types
@ -39,7 +40,7 @@ type
## this seq and aggregate only when needed
## TODO there are obvious caching opportunities here..
SlotData* = object
AttestationsSeen* = object
attestations*: seq[AttestationEntry] ## \
## Depending on the world view of the various validators, they may have
## voted on different states - here we collect all the different
@ -59,7 +60,7 @@ type
## contains both votes that have been included in the chain and those that
## have not.
slots*: Deque[SlotData] ## \
mapSlotsToAttestations*: Deque[AttestationsSeen] ## \
## We keep one item per slot such that indexing matches slot number
## together with startingSlot

View File

@ -800,7 +800,7 @@ func latestJustifiedBlock*(dag: CandidateChains): BlockSlot =
## as the latest finalized block
doAssert dag.heads.len > 0,
"We should have at least the genesis block in heaads"
"We should have at least the genesis block in heads"
doAssert (not dag.head.blck.isNil()),
"Genesis block will be head, if nothing else"

View File

@ -114,6 +114,14 @@ proc add*(
## the state parameter may be updated to include the given block, if
## everything checks out
# TODO reevaluate passing the state in like this
# TODO: to facilitate adding the block to the attestation pool
# this should also return justified and finalized epoch corresponding
# to each block.
# This would be easy apart from the "Block already exists"
# early return.
let blck = signedBlock.message
doAssert blockRoot == hash_tree_root(blck)

View File

@ -9,9 +9,9 @@
import
# Standard library
std/tables, std/options, std/typetraits,
std/tables, std/typetraits,
# Status libraries
stew/results,
stew/results, chronicles,
# Internal
../spec/[datatypes, digest],
# Fork choice
@ -43,13 +43,17 @@ func compute_deltas(
# Fork choice routines
# ----------------------------------------------------------------------
logScope:
topics = "fork_choice"
cat = "fork_choice"
# API:
# - The private procs uses the ForkChoiceError error code
# - The public procs use Result
func initForkChoice*(
finalized_block_slot: Slot,
finalized_block_state_root: Eth2Digest,
finalized_block_slot: Slot, # This is unnecessary for fork choice but helps external components
finalized_block_state_root: Eth2Digest, # This is unnecessary for fork choice but helps external components
justified_epoch: Epoch,
finalized_epoch: Epoch,
finalized_root: Eth2Digest
@ -64,7 +68,8 @@ func initForkChoice*(
let err = proto_array.on_block(
finalized_block_slot,
finalized_root,
none(Eth2Digest),
hasParentInForkChoice = false,
default(Eth2Digest),
finalized_block_state_root,
justified_epoch,
finalized_epoch
@ -111,6 +116,31 @@ func process_attestation*(
vote.next_root = block_root
vote.next_epoch = target_epoch
{.noSideEffect.}:
info "Integrating vote in fork choice",
validator_index = $validator_index,
new_vote = shortlog(vote)
else:
{.noSideEffect.}:
if vote.next_epoch != target_epoch or vote.next_root != block_root:
warn "Change of vote ignored for fork choice. Potential for slashing.",
validator_index = $validator_index,
current_vote = shortlog(vote),
ignored_block_root = shortlog(block_root),
ignored_target_epoch = $target_epoch
else:
info "Ignoring double-vote for fork choice",
validator_index = $validator_index,
current_vote = shortlog(vote),
ignored_block_root = shortlog(block_root),
ignored_target_epoch = $target_epoch
func contains*(self: ForkChoice, block_root: Eth2Digest): bool =
## Returns `true` if a block is known to the fork choice
## and `false` otherwise.
##
## In particular, before adding a block, its parent must be known to the fork choice
self.proto_array.indices.contains(block_root)
func process_block*(
self: var ForkChoice,
@ -123,10 +153,18 @@ func process_block*(
): Result[void, string] =
## Add a block to the fork choice context
let err = self.proto_array.on_block(
slot, block_root, some(parent_root), state_root, justified_epoch, finalized_epoch
slot, block_root, hasParentInForkChoice = true, parent_root, state_root, justified_epoch, finalized_epoch
)
if err.kind != fcSuccess:
return err("process_block_error: " & $err)
{.noSideEffect.}:
info "Integrating block in fork choice",
block_root = $shortlog(block_root),
parent_root = $shortlog(parent_root),
justified_epoch = $justified_epoch,
finalized_epoch = $finalized_epoch
return ok()
@ -166,6 +204,13 @@ func find_head*(
if ghost_err.kind != fcSuccess:
return err("find_head failed: " & $ghost_err)
{.noSideEffect.}:
info "Fork choice requested",
justified_epoch = $justified_epoch,
justified_root = shortlog(justified_root),
finalized_epoch = $finalized_epoch,
fork_choice_head = shortlog(new_head)
return ok(new_head)
@ -252,7 +297,7 @@ func compute_deltas(
when isMainModule:
import stew/endians2
func fakeHash*(index: SomeInteger): Eth2Digest =
func fakeHash(index: SomeInteger): Eth2Digest =
## Create fake hashes
## Those are just the value serialized in big-endian
## We add 16x16 to avoid having a zero hash as those are special-cased

View File

@ -1,4 +1,3 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
@ -11,6 +10,8 @@
import
# Standard library
std/tables, std/options,
# Status
chronicles,
# Internal
../spec/[datatypes, digest]
@ -44,6 +45,9 @@ type
fcErrInvalidDeltaLen
fcErrRevertedFinalizedEpoch
fcErrInvalidBestNode
# -------------------------
# TODO: Extra error modes beyond Proto/Lighthouse to be reviewed
fcErrUnknownParent
FcUnderflowKind* = enum
## Fork Choice Overflow Kinds
@ -88,6 +92,9 @@ type
head_root*: Eth2Digest
head_justified_epoch*: Epoch
head_finalized_epoch*: Epoch
of fcErrUnknownParent:
child_root*: Eth2Digest
parent_root*: Eth2Digest
ProtoArray* = object
prune_threshold*: int
@ -127,3 +134,12 @@ type
proto_array*: ProtoArray
votes*: seq[VoteTracker]
balances*: seq[Gwei]
func shortlog*(vote: VoteTracker): auto =
(
current_root: vote.current_root,
next_root: vote.next_root,
next_epoch: vote.next_epoch
)
chronicles.formatIt VoteTracker: it.shortLog

View File

@ -153,28 +153,38 @@ func on_block*(
self: var ProtoArray,
slot: Slot,
root: Eth2Digest,
parent: Option[Eth2Digest],
hasParentInForkChoice: bool,
parent: Eth2Digest,
state_root: Eth2Digest,
justified_epoch: Epoch,
finalized_epoch: Epoch
): ForkChoiceError =
## Register a block with the fork choice
## A `none` parent is only valid for Genesis
## A block's `hasParentInForkChoice` may be false
## on fork choice initialization:
## - either from Genesis
## - or from a finalized state loaded from database
# Note: if parent is an "Option" type, we can run out of stack space.
# If the block is already known, ignore it
if root in self.indices:
return ForkChoiceSuccess
let node_index = self.nodes.len
var parent_index: Option[int]
if not hasParentInForkChoice:
# Genesis (but Genesis might not be default(Eth2Digest))
parent_index = none(int)
elif parent notin self.indices:
return ForkChoiceError(
kind: fcErrUnknownParent,
child_root: root,
parent_root: parent
)
else:
parent_index = some(self.indices.unsafeGet(parent))
let parent_index = block:
if parent.isNone:
none(int)
elif parent.unsafeGet() notin self.indices:
# Is this possible?
none(int)
else:
some(self.indices.unsafeGet(parent.unsafeGet()))
let node_index = self.nodes.len
let node = ProtoNode(
slot: slot,
@ -191,7 +201,7 @@ func on_block*(
self.indices[node.root] = node_index
self.nodes.add node # TODO: if this is costly, we can setLen + construct the node in-place
if parent_index.isSome():
if parent_index.isSome(): # parent_index is always valid except for Genesis
let err = self.maybe_update_best_child_and_descendant(parent_index.unsafeGet(), node_index)
if err.kind != fcSuccess:
return err

View File

@ -473,7 +473,78 @@ func get_indexed_attestation*(state: BeaconState, attestation: Attestation,
signature: attestation.signature
)
# Attestation validation
# ------------------------------------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#attestations
#
# TODO: Refactor, we need:
# - cheap validations that do not involve the fork state (to avoid unnecessary and costly rewinding which are probably a DOS vector)
# - check that must be done on the targeted state (for example beacon committee consistency)
# - check that requires easily computed data from the targeted state (slots, epoch, justified checkpoints)
proc isValidAttestationSlot*(attestationSlot, stateSlot: Slot): bool =
## Attestation check that does not depend on the whole block state
## for cheaper prefiltering of attestations that come from the network
## to limit DOS via state rewinding
if not (attestationSlot + MIN_ATTESTATION_INCLUSION_DELAY <= stateSlot):
warn("Attestation too new",
attestation_slot = shortLog(attestationSlot),
state_slot = shortLog(stateSlot))
return false
if not (stateSlot <= attestationSlot + SLOTS_PER_EPOCH):
warn("Attestation too old",
attestation_slot = shortLog(attestationSlot),
state_slot = shortLog(stateSlot))
return false
return true
# TODO remove/merge with p2p-interface validation
proc isValidAttestationTargetEpoch*(
state: BeaconState, attestation: Attestation): bool =
# TODO what constitutes a valid attestation when it's about to be added to
# the pool? we're interested in attestations that will become viable
# for inclusion in blocks in the future and on any fork, so we need to
# consider that validations might happen using the state of a different
# fork.
# Some things are always invalid (like out-of-bounds issues etc), but
# others are more subtle - how do we validate the signature for example?
# It might be valid on one fork but not another. One thing that helps
# is that committees are stable per epoch and that it should be OK to
# include an attestation in a block even if the corresponding validator
# was slashed in the same epoch - there's no penalty for doing this and
# the vote counting logic will take care of any ill effects (TODO verify)
let data = attestation.data
# TODO re-enable check
#if not (data.crosslink.shard < SHARD_COUNT):
# notice "Attestation shard too high",
# attestation_shard = data.crosslink.shard
# return
# Without this check, we can't get a slot number for the attestation as
# certain helpers will assert
# TODO this could probably be avoided by being smart about the specific state
# used to validate the attestation: most likely if we pick the state of
# the beacon block being voted for and a slot in the target epoch
# of the attestation, we'll be safe!
# TODO the above state selection logic should probably live here in the
# attestation pool
if not (data.target.epoch == get_previous_epoch(state) or
data.target.epoch == get_current_epoch(state)):
warn("Target epoch not current or previous epoch")
return
if not (data.target.epoch == compute_epoch_at_slot(data.slot)):
warn("Target epoch inconsistent with epoch of data slot",
target_epoch = data.target.epoch,
data_slot_epoch = compute_epoch_at_slot(data.slot))
return
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.2/specs/phase0/beacon-chain.md#attestations
proc check_attestation*(
state: BeaconState, attestation: Attestation, flags: UpdateFlags,
stateCache: var StateCache): bool =
@ -481,6 +552,12 @@ proc check_attestation*(
## at the current slot. When acting as a proposer, the same rules need to
## be followed!
# TODO: When checking attestation from the network, currently rewinding/advancing
# a temporary state is needed.
# If this is too costly we can be DOS-ed.
# We might want to split this into "state-dependent" checks and "state-independent" checks
# The latter one should be made prior to state-rewinding.
let
stateSlot = state.slot
data = attestation.data
@ -494,27 +571,12 @@ proc check_attestation*(
committee_count = get_committee_count_at_slot(state, data.slot))
return
if not (data.target.epoch == get_previous_epoch(state) or
data.target.epoch == get_current_epoch(state)):
warn("Target epoch not current or previous epoch")
if not isValidAttestationTargetEpoch(state, attestation):
# Logging in isValidAttestationTargetEpoch
return
if not (data.target.epoch == compute_epoch_at_slot(data.slot)):
warn("Target epoch inconsistent with epoch of data slot",
target_epoch = data.target.epoch,
data_slot_epoch = compute_epoch_at_slot(data.slot))
return
if not (data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= stateSlot):
warn("Attestation too new",
attestation_slot = shortLog(data.slot),
state_slot = shortLog(stateSlot))
return
if not (stateSlot <= data.slot + SLOTS_PER_EPOCH):
warn("Attestation too old",
attestation_slot = shortLog(data.slot),
state_slot = shortLog(stateSlot))
if not isValidAttestationSlot(data.slot, stateSlot):
# Logging in isValidAttestationSlot
return
let committee = get_beacon_committee(

View File

@ -173,7 +173,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
root: get_block_root(state, previous_epoch))
state.justification_bits.setBit 1
trace "Justified with previous epoch",
info "Justified with previous epoch",
current_epoch = current_epoch,
checkpoint = shortLog(state.current_justified_checkpoint),
cat = "justification"
@ -187,7 +187,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
root: get_block_root(state, current_epoch))
state.justification_bits.setBit 0
trace "Justified with current epoch",
info "Justified with current epoch",
current_epoch = current_epoch,
checkpoint = shortLog(state.current_justified_checkpoint),
cat = "justification"
@ -201,7 +201,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
old_previous_justified_checkpoint.epoch + 3 == current_epoch:
state.finalized_checkpoint = old_previous_justified_checkpoint
trace "Finalized with rule 234",
info "Finalized with rule 234",
current_epoch = current_epoch,
checkpoint = shortLog(state.finalized_checkpoint),
cat = "finalization"
@ -212,7 +212,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
old_previous_justified_checkpoint.epoch + 2 == current_epoch:
state.finalized_checkpoint = old_previous_justified_checkpoint
trace "Finalized with rule 23",
info "Finalized with rule 23",
current_epoch = current_epoch,
checkpoint = shortLog(state.finalized_checkpoint),
cat = "finalization"
@ -223,7 +223,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
old_current_justified_checkpoint.epoch + 2 == current_epoch:
state.finalized_checkpoint = old_current_justified_checkpoint
trace "Finalized with rule 123",
info "Finalized with rule 123",
current_epoch = current_epoch,
checkpoint = shortLog(state.finalized_checkpoint),
cat = "finalization"
@ -234,7 +234,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
old_current_justified_checkpoint.epoch + 1 == current_epoch:
state.finalized_checkpoint = old_current_justified_checkpoint
trace "Finalized with rule 12",
info "Finalized with rule 12",
current_epoch = current_epoch,
checkpoint = shortLog(state.finalized_checkpoint),
cat = "finalization"

View File

@ -398,19 +398,19 @@ proc broadcastAggregatedAttestations(
node.network.broadcast(node.topicAggregateAndProofs, signedAP)
proc handleValidatorDuties*(
node: BeaconNode, head: BlockRef, lastSlot, slot: Slot): Future[BlockRef] {.async.} =
node: BeaconNode, lastSlot, slot: Slot) {.async.} =
  ## Perform validator duties - create blocks, vote and aggregate existing votes
var head = node.updateHead()
if node.attachedValidators.count == 0:
# Nothing to do because we have no validator attached
return head
return
if not node.isSynced(head):
notice "Node out of sync, skipping validator duties",
slot, headSlot = head.slot
return head
return
var curSlot = lastSlot + 1
var head = head
# Start by checking if there's work we should have done in the past that we
# can still meaningfully do
@ -493,5 +493,3 @@ proc handleValidatorDuties*(
broadcastAggregatedAttestations(
node, aggregationHead, aggregationSlot, TRAILING_DISTANCE)
return head

View File

@ -7,13 +7,17 @@
{.used.}
import
../beacon_chain/spec/datatypes,
../beacon_chain/ssz
import
unittest,
chronicles,
stew/byteutils,
./testutil, ./testblockutil,
../beacon_chain/spec/[datatypes, digest, validator],
../beacon_chain/[beacon_node_types, attestation_pool, block_pool, state_transition, ssz]
../beacon_chain/spec/[digest, validator],
../beacon_chain/[beacon_node_types, attestation_pool, block_pool, state_transition]
suiteReport "Attestation pool processing" & preset():
## For now just test that we can compile and execute block processing with
@ -22,13 +26,15 @@ suiteReport "Attestation pool processing" & preset():
setup:
# Genesis state that results in 3 members per committee
var
blockPool = BlockPool.init(makeTestDB(SLOTS_PER_EPOCH * 3))
pool = AttestationPool.init(blockPool)
state = newClone(loadTailState(blockPool))
blockPool = newClone(BlockPool.init(makeTestDB(SLOTS_PER_EPOCH * 3)))
pool = newClone(AttestationPool.init(blockPool[]))
state = newClone(loadTailState(blockPool[]))
# Slot 0 is a finalized slot - won't be making attestations for it..
check:
process_slots(state.data, state.data.data.slot + 1)
# pool[].add(blockPool[].tail) # Make the tail known to fork choice
timedTest "Can add and retrieve simple attestation" & preset():
var cache = get_empty_per_epoch_cache()
let
@ -38,12 +44,12 @@ suiteReport "Attestation pool processing" & preset():
attestation = makeAttestation(
state.data.data, state.blck.root, beacon_committee[0], cache)
pool.add(attestation)
pool[].add(attestation)
check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
let attestations = pool.getAttestationsForBlock(state.data.data)
let attestations = pool[].getAttestationsForBlock(state.data.data)
check:
attestations.len == 1
@ -67,12 +73,12 @@ suiteReport "Attestation pool processing" & preset():
state.data.data, state.blck.root, bc1[0], cache)
# test reverse order
pool.add(attestation1)
pool.add(attestation0)
pool[].add(attestation1)
pool[].add(attestation0)
discard process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
let attestations = pool.getAttestationsForBlock(state.data.data)
let attestations = pool[].getAttestationsForBlock(state.data.data)
check:
attestations.len == 1
@ -88,13 +94,13 @@ suiteReport "Attestation pool processing" & preset():
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1], cache)
pool.add(attestation0)
pool.add(attestation1)
pool[].add(attestation0)
pool[].add(attestation1)
check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
let attestations = pool.getAttestationsForBlock(state.data.data)
let attestations = pool[].getAttestationsForBlock(state.data.data)
check:
attestations.len == 1
@ -113,13 +119,13 @@ suiteReport "Attestation pool processing" & preset():
attestation0.combine(attestation1, {})
pool.add(attestation0)
pool.add(attestation1)
pool[].add(attestation0)
pool[].add(attestation1)
check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
let attestations = pool.getAttestationsForBlock(state.data.data)
let attestations = pool[].getAttestationsForBlock(state.data.data)
check:
attestations.len == 1
@ -137,13 +143,13 @@ suiteReport "Attestation pool processing" & preset():
attestation0.combine(attestation1, {})
pool.add(attestation1)
pool.add(attestation0)
pool[].add(attestation1)
pool[].add(attestation0)
check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
let attestations = pool.getAttestationsForBlock(state.data.data)
let attestations = pool[].getAttestationsForBlock(state.data.data)
check:
attestations.len == 1
@ -151,10 +157,12 @@ suiteReport "Attestation pool processing" & preset():
timedTest "Fork choice returns latest block with no attestations":
var cache = get_empty_per_epoch_cache()
let
b1 = addTestBlock(state.data, blockPool.tail.root, cache)
b1 = addTestBlock(state.data, blockPool[].tail.root, cache)
b1Root = hash_tree_root(b1.message)
b1Add = blockPool.add(b1Root, b1)[]
head = pool.selectHead()
b1Add = blockpool[].add(b1Root, b1)[]
# pool[].add(b1Add) - make a block known to the future fork choice
let head = pool[].selectHead()
check:
head == b1Add
@ -162,8 +170,10 @@ suiteReport "Attestation pool processing" & preset():
let
b2 = addTestBlock(state.data, b1Root, cache)
b2Root = hash_tree_root(b2.message)
b2Add = blockPool.add(b2Root, b2)[]
head2 = pool.selectHead()
b2Add = blockpool[].add(b2Root, b2)[]
# pool[].add(b2Add) - make a block known to the future fork choice
let head2 = pool[].selectHead()
check:
head2 == b2Add
@ -171,28 +181,31 @@ suiteReport "Attestation pool processing" & preset():
timedTest "Fork choice returns block with attestation":
var cache = get_empty_per_epoch_cache()
let
b10 = makeTestBlock(state.data, blockPool.tail.root, cache)
b10 = makeTestBlock(state.data, blockPool[].tail.root, cache)
b10Root = hash_tree_root(b10.message)
b10Add = blockPool.add(b10Root, b10)[]
head = pool.selectHead()
b10Add = blockpool[].add(b10Root, b10)[]
# pool[].add(b10Add) - make a block known to the future fork choice
let head = pool[].selectHead()
check:
head == b10Add
let
b11 = makeTestBlock(state.data, blockPool.tail.root, cache,
b11 = makeTestBlock(state.data, blockPool[].tail.root, cache,
graffiti = Eth2Digest(data: [1'u8, 0, 0, 0 ,0 ,0 ,0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
)
b11Root = hash_tree_root(b11.message)
b11Add = blockPool.add(b11Root, b11)[]
b11Add = blockpool[].add(b11Root, b11)[]
bc1 = get_beacon_committee(
state.data.data, state.data.data.slot, 1.CommitteeIndex, cache)
attestation0 = makeAttestation(state.data.data, b10Root, bc1[0], cache)
pool.add(attestation0)
# pool[].add(b11Add) - make a block known to the future fork choice
pool[].add(attestation0)
let head2 = pool.selectHead()
let head2 = pool[].selectHead()
check:
# Single vote for b10 and no votes for b11
@ -201,18 +214,22 @@ suiteReport "Attestation pool processing" & preset():
let
attestation1 = makeAttestation(state.data.data, b11Root, bc1[1], cache)
attestation2 = makeAttestation(state.data.data, b11Root, bc1[2], cache)
pool.add(attestation1)
pool[].add(attestation1)
let head3 = pool.selectHead()
let head3 = pool[].selectHead()
    # Warning - the tiebreak rule is incorrect and would guarantee a consensus fork; it should prefer the bigger root
let smaller = if b10Root.data < b11Root.data: b10Add else: b11Add
check:
# Ties broken lexicographically
# Ties broken lexicographically in spec -> ?
# all implementations favor the biggest root
# TODO
# currently using smaller as we have used for over a year
head3 == smaller
pool.add(attestation2)
pool[].add(attestation2)
let head4 = pool.selectHead()
let head4 = pool[].selectHead()
check:
# Two votes for b11