Fork choice fixes 5 (#1381)
* limit attestations kept in attestation pool

  With fork choice updated, the attestation pool only needs to keep track of
  attestations that will eventually end up in blocks - we can thus limit the
  horizon of attestations that we keep more aggressively.

  To get here, we expose getEpochRef which gets metadata about a particular
  epochref, and make sure to populate it when a block is added - this ensures
  that state rewinds during block addition are minimized.

  In addition, we'll use the target root/epoch when validating attestations -
  this helps minimize the number of different states that we need to rewind
  to, in general.

* remove CandidateChains.justifiedState (unused)

* remove BlockPools.Head object

* avoid quadratic quarantine loop

* fix
This commit is contained in:
  parent 6ccfff7d58
  commit 157ddd2ac4
@@ -161,7 +161,7 @@ proc isValidAttestation*(
   # therefore propagate faster, thus reordering their arrival in some nodes
   let attestationBlck = pool.blockPool.getRef(attestation.data.beacon_block_root)
   if attestationBlck.isNil:
-    debug "block doesn't exist in block pool"
+    debug "Block not found"
     pool.blockPool.addMissing(attestation.data.beacon_block_root)
     return false
@@ -9,15 +9,15 @@

 import
   # Standard libraries
-  std/[algorithm, deques, sequtils, tables, options],
+  std/[algorithm, deques, sequtils, sets, tables, options],
   # Status libraries
-  chronicles, stew/[byteutils], json_serialization/std/sets,
+  chronicles, stew/[byteutils], json_serialization/std/sets as jsonSets,
   # Internal
   ./spec/[beaconstate, datatypes, crypto, digest, helpers],
   ./block_pool, ./block_pools/candidate_chains, ./beacon_node_types,
   ./fork_choice/fork_choice

-export beacon_node_types
+export beacon_node_types, sets

 logScope: topics = "attpool"

@@ -39,7 +39,7 @@ proc init*(T: type AttestationPool, blockPool: BlockPool): T =
   doAssert blockPool.heads.len == 1, "Init only supports a single history"

   var blocks: seq[BlockRef]
-  var cur = blockPool.head.blck
+  var cur = blockPool.head
   while cur != blockPool.finalizedHead.blck:
     blocks.add cur
     cur = cur.parent
@@ -65,65 +65,11 @@ proc init*(T: type AttestationPool, blockPool: BlockPool): T =
     finalized_root = shortlog(blockPool.finalizedHead.blck.root)

   T(
-    mapSlotsToAttestations: initDeque[AttestationsSeen](),
     blockPool: blockPool,
     unresolved: initTable[Eth2Digest, UnresolvedAttestation](),
     forkChoice: forkChoice
   )

-proc slotIndex(
-    pool: var AttestationPool, state: BeaconState, attestationSlot: Slot): int =
-  ## Grow and garbage collect pool, returning the deque index of the slot
-
-  # We keep a sliding window of attestations, roughly from the last finalized
-  # epoch to now, because these are the attestations that may affect the voting
-  # outcome. Some of these attestations will already have been added to blocks,
-  # while others are fresh off the network.
-  # TODO only the latest vote of each validator counts. Can we use that somehow?
-  logScope: pcs = "atp_slot_maintenance"
-
-  doAssert attestationSlot >= pool.startingSlot,
-    """
-    We should have checked in addResolved that attestation is newer than
-    finalized_slot and we never prune things before that, per below condition!
-    """ &
-    ", attestationSlot: " & $shortLog(attestationSlot) &
-    ", startingSlot: " & $shortLog(pool.startingSlot)
-
-  if pool.mapSlotsToAttestations.len == 0:
-    # Because the first attestations may arrive in any order, we'll make sure
-    # to start counting at the last finalized epoch start slot - anything
-    # earlier than that is thrown out by the above check
-    info "First attestation!",
-      attestationSlot = shortLog(attestationSlot)
-    pool.startingSlot =
-      state.finalized_checkpoint.epoch.compute_start_slot_at_epoch()
-
-  if pool.startingSlot + pool.mapSlotsToAttestations.lenu64 <= attestationSlot:
-    trace "Growing attestation pool",
-      attestationSlot = shortLog(attestationSlot),
-      startingSlot = shortLog(pool.startingSlot)
-
-    # Make sure there's a pool entry for every slot, even when there's a gap
-    while pool.startingSlot + pool.mapSlotsToAttestations.lenu64 <= attestationSlot:
-      pool.mapSlotsToAttestations.addLast(AttestationsSeen())
-
-  if pool.startingSlot <
-      state.finalized_checkpoint.epoch.compute_start_slot_at_epoch():
-    debug "Pruning attestation pool",
-      startingSlot = shortLog(pool.startingSlot),
-      finalizedSlot = shortLog(
-        state.finalized_checkpoint
-             .epoch.compute_start_slot_at_epoch())
-
-    # TODO there should be a better way to remove a whole epoch of stuff..
-    while pool.startingSlot <
-        state.finalized_checkpoint.epoch.compute_start_slot_at_epoch():
-      pool.mapSlotsToAttestations.popFirst()
-      pool.startingSlot += 1
-
-  int(attestationSlot - pool.startingSlot)
-
 func processAttestation(
     pool: var AttestationPool, participants: HashSet[ValidatorIndex],
     block_root: Eth2Digest, target_epoch: Epoch) =
@@ -137,61 +83,56 @@ func addUnresolved(pool: var AttestationPool, attestation: Attestation) =
     attestation: attestation,
   )

-proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attestation) =
+func candidateIdx(pool: AttestationPool, slot: Slot): Option[uint64] =
+  if slot >= pool.startingSlot and
+      slot < (pool.startingSlot + pool.candidates.lenu64):
+    some(slot mod pool.candidates.lenu64)
+  else:
+    none(uint64)
+
+proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) =
+  if wallSlot + 1 < pool.candidates.lenu64:
+    return
+
+  if pool.startingSlot + pool.candidates.lenu64 - 1 > wallSlot:
+    error "Current slot older than attestation pool view, clock reset?",
+      poolSlot = pool.startingSlot, wallSlot
+    return
+
+  # As time passes we'll clear out any old attestations as they are no longer
+  # viable to be included in blocks
+
+  let newWallSlot = wallSlot + 1 - pool.candidates.lenu64
+  for i in pool.startingSlot..newWallSlot:
+    pool.candidates[i.uint64 mod pool.candidates.lenu64] = AttestationsSeen()
+
+  pool.startingSlot = newWallSlot
+
+proc addResolved(
+    pool: var AttestationPool, blck: BlockRef, attestation: Attestation,
+    wallSlot: Slot) =
   # Add an attestation whose parent we know
   logScope:
     attestation = shortLog(attestation)

+  updateCurrent(pool, wallSlot)
+
   doAssert blck.root == attestation.data.beacon_block_root

   # TODO Which state should we use to validate the attestation? It seems
   # reasonable to involve the head being voted for as well as the intended
   # slot of the attestation - double-check this with spec

-  # TODO: filter valid attestation as much as possible before state rewind
-  # TODO: the below check does not respect the inclusion delay
-  #       we should use isValidAttestationSlot instead
-  if blck.slot > attestation.data.slot:
-    notice "Invalid attestation (too new!)",
-      blockSlot = shortLog(blck.slot)
-    return
-
-  if attestation.data.slot < pool.startingSlot:
-    # It can happen that attestations in blocks for example are included even
-    # though they no longer are relevant for finalization - let's clear
-    # these out
-    debug "Old attestation",
+  let candidateIdx = pool.candidateIdx(attestation.data.slot)
+  if candidateIdx.isNone:
+    debug "Attestation slot out of range",
       startingSlot = pool.startingSlot
     return

+  # if not isValidAttestationSlot(attestation.data.slot, blck.slot):
+  #   # Logging in isValidAttestationSlot
+  #   return
+
   # Check that the attestation is indeed valid
   if (let v = check_attestation_slot_target(attestation.data); v.isErr):
     debug "Invalid attestation", err = v.error
     return

   # Get a temporary state at the (block, slot) targeted by the attestation
   updateStateData(
     pool.blockPool, pool.blockPool.tmpState,
     BlockSlot(blck: blck, slot: attestation.data.slot),
     true)

   template state(): BeaconState = pool.blockPool.tmpState.data.data

   # TODO inefficient data structures..

-  var cache = getEpochCache(blck, state)
   let
-    attestationSlot = attestation.data.slot
-    idx = pool.slotIndex(state, attestationSlot)
-    attestationsSeen = addr pool.mapSlotsToAttestations[idx]
+    epochRef = pool.blockPool.dag.getEpochRef(blck, attestation.data.target.epoch)
+    attestationsSeen = addr pool.candidates[candidateIdx.get]
     validation = Validation(
       aggregation_bits: attestation.aggregation_bits,
       aggregate_signature: attestation.signature)
     participants = get_attesting_indices(
-      state, attestation.data, validation.aggregation_bits, cache)
+      epochRef, attestation.data, validation.aggregation_bits)

   var found = false
   for a in attestationsSeen.attestations.mitems():
@@ -226,7 +167,6 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
       info "Attestation resolved",
         attestation = shortLog(attestation),
         validations = a.validations.len(),
-        current_epoch = get_current_epoch(state),
         blockSlot = shortLog(blck.slot)

       found = true
@@ -244,11 +184,12 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta

     info "Attestation resolved",
       attestation = shortLog(attestation),
-      current_epoch = get_current_epoch(state),
       validations = 1,
       blockSlot = shortLog(blck.slot)

-proc addAttestation*(pool: var AttestationPool, attestation: Attestation) =
+proc addAttestation*(pool: var AttestationPool,
+                     attestation: Attestation,
+                     wallSlot: Slot) =
   ## Add a verified attestation to the fork choice context
   logScope: pcs = "atp_add_attestation"

@@ -261,7 +202,7 @@ proc addAttestation*(pool: var AttestationPool, attestation: Attestation) =
     pool.addUnresolved(attestation)
     return

-  pool.addResolved(blck, attestation)
+  pool.addResolved(blck, attestation, wallSlot)

 proc addForkChoice*(pool: var AttestationPool,
                     state: BeaconState,
@@ -269,7 +210,6 @@ proc addForkChoice*(pool: var AttestationPool,
                     blck: BeaconBlock,
                     wallSlot: Slot) =
   ## Add a verified block to the fork choice context
-  ## The current justifiedState of the block pool is used as reference
   let state = pool.forkChoice.process_block(
     pool.blockPool, state, blckRef, blck, wallSlot)

@@ -288,29 +228,17 @@ proc getAttestationsForSlot*(pool: AttestationPool, newBlockSlot: Slot):
       newBlockSlot = shortLog(newBlockSlot)
     return none(AttestationsSeen)

-  if pool.mapSlotsToAttestations.len == 0: # startingSlot not set yet!
-    info "No attestations found (pool empty)",
-      newBlockSlot = shortLog(newBlockSlot)
-    return none(AttestationsSeen)
-
   let
     # TODO in theory we could include attestations from other slots also, but
     # we're currently not tracking which attestations have already been included
     # in blocks on the fork we're aiming for.. this is a conservative approach
     # that's guaranteed to not include any duplicates, because it's the first
     # time the attestations are up for inclusion!
     attestationSlot = newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY
+    candidateIdx = pool.candidateIdx(attestationSlot)

-  if attestationSlot < pool.startingSlot or
-      attestationSlot >= pool.startingSlot + pool.mapSlotsToAttestations.lenu64:
+  if candidateIdx.isNone:
     info "No attestations matching the slot range",
       attestationSlot = shortLog(attestationSlot),
-      startingSlot = shortLog(pool.startingSlot),
-      endingSlot = shortLog(pool.startingSlot + pool.mapSlotsToAttestations.lenu64)
+      startingSlot = shortLog(pool.startingSlot)
     return none(AttestationsSeen)

-  let slotDequeIdx = int(attestationSlot - pool.startingSlot)
-  some(pool.mapSlotsToAttestations[slotDequeIdx])
+  some(pool.candidates[candidateIdx.get()])

@@ -337,8 +265,7 @@ proc getAttestationsForBlock*(pool: AttestationPool,
   # addResolved, too, the new attestations get added to the end, while in
   # these functions, it's reading from the beginning, et cetera. This all
   # needs a single unified strategy.
-  const LOOKBACK_WINDOW = 3
-  for i in max(1, newBlockSlot.int64 - LOOKBACK_WINDOW) .. newBlockSlot.int64:
+  for i in max(1, newBlockSlot.int64 - ATTESTATION_LOOKBACK.int64) .. newBlockSlot.int64:
     let maybeSlotData = getAttestationsForSlot(pool, i.Slot)
     if maybeSlotData.isSome:
       insert(attestations, maybeSlotData.get.attestations)
@@ -390,7 +317,7 @@ proc getAttestationsForBlock*(pool: AttestationPool,
       attestationSlot = newBlockSlot - 1
     return

-proc resolve*(pool: var AttestationPool) =
+proc resolve*(pool: var AttestationPool, wallSlot: Slot) =
   ## Check attestations in our unresolved deque
   ## if they can be integrated to the fork choice
   logScope: pcs = "atp_resolve"
@@ -412,7 +339,7 @@ proc resolve*(pool: var AttestationPool) =
     pool.unresolved.del(k)

   for a in resolved:
-    pool.addResolved(a.blck, a.attestation)
+    pool.addResolved(a.blck, a.attestation, wallSlot)

 proc selectHead*(pool: var AttestationPool, wallSlot: Slot): BlockRef =
   let newHead = pool.forkChoice.find_head(wallSlot, pool.blockPool)
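The candidateIdx/updateCurrent pair above turns the attestation pool into a fixed-size ring buffer over recent slots: a slot maps to bucket slot mod ATTESTATION_LOOKBACK while it is inside the window, and sliding the window forward resets exactly the buckets that now correspond to new slots. A minimal standalone sketch of the same indexing scheme (the SlotWindow type and WINDOW size are illustrative, not the pool's actual declarations):

import std/options

const WINDOW = 5'u64 # the real pool uses ATTESTATION_LOOKBACK

type SlotWindow = object
  startingSlot: uint64            # oldest slot still tracked
  candidates: array[WINDOW, int]  # one bucket per slot, indexed mod WINDOW

func candidateIdx(w: SlotWindow, slot: uint64): Option[uint64] =
  # A slot has a bucket only while it falls inside the window
  if slot >= w.startingSlot and slot < w.startingSlot + WINDOW:
    some(slot mod WINDOW)
  else:
    none(uint64)

proc updateCurrent(w: var SlotWindow, wallSlot: uint64) =
  # Slide the window forward, resetting buckets that now map to new slots
  if wallSlot + 1 < WINDOW:
    return
  let newStart = wallSlot + 1 - WINDOW
  for slot in w.startingSlot .. newStart:
    w.candidates[slot mod WINDOW] = 0
  w.startingSlot = newStart

var w = SlotWindow()
w.updateCurrent(10)
doAssert w.candidateIdx(7).isSome and w.candidateIdx(3).isNone

Because the array is indexed modulo its length, old entries are overwritten in place and the pool's memory use stays constant no matter how long the node runs.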
@@ -272,6 +272,7 @@ proc onAttestation(node: BeaconNode, attestation: Attestation) =
   # we're on, or that it follows the rules of the protocol
   logScope:
     attestation = shortLog(attestation)
+    head = shortLog(node.blockPool.head)
     pcs = "on_attestation"

   let
@@ -279,23 +280,20 @@ proc onAttestation(node: BeaconNode, attestation: Attestation) =
     head = node.blockPool.head

   debug "Attestation received",
-    head = shortLog(head.blck),
     wallSlot = shortLog(wallSlot.slot)

-  if not wallSlot.afterGenesis or wallSlot.slot < head.blck.slot:
+  if not wallSlot.afterGenesis or wallSlot.slot < head.slot:
     warn "Received attestation before genesis or head - clock is wrong?",
       afterGenesis = wallSlot.afterGenesis,
-      wallSlot = shortLog(wallSlot.slot),
-      head = shortLog(head.blck)
+      wallSlot = shortLog(wallSlot.slot)
     return

-  if attestation.data.slot > head.blck.slot and
-      (attestation.data.slot - head.blck.slot) > MaxEmptySlotCount:
-    warn "Ignoring attestation, head block too old (out of sync?)",
-      head = head.blck
+  if attestation.data.slot > head.slot and
+      (attestation.data.slot - head.slot) > MaxEmptySlotCount:
+    warn "Ignoring attestation, head block too old (out of sync?)"
     return

-  node.attestationPool.addAttestation(attestation)
+  node.attestationPool.addAttestation(attestation, wallSlot.slot)

 proc dumpBlock[T](
     node: BeaconNode, signedBlock: SignedBeaconBlock,
@@ -382,8 +380,8 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
     scheduledSlot = shortLog(scheduledSlot),
     beaconTime = shortLog(beaconTime),
     peers = node.network.peersCount,
-    head = shortLog(node.blockPool.head.blck),
-    headEpoch = shortLog(node.blockPool.head.blck.slot.compute_epoch_at_slot()),
+    head = shortLog(node.blockPool.head),
+    headEpoch = shortLog(node.blockPool.head.slot.compute_epoch_at_slot()),
     finalized = shortLog(node.blockPool.finalizedHead.blck),
     finalizedEpoch = shortLog(node.blockPool.finalizedHead.blck.slot.compute_epoch_at_slot())

@@ -466,7 +464,7 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
   #      disappear naturally - risky because user is not aware,
   #      and might lose stake on canonical chain but "just works"
   #      when reconnected..
-  var head = node.updateHead()
+  var head = node.updateHead(slot)

   # TODO is the slot of the clock or the head block more interesting? provide
   #      rationale in comment
@@ -481,12 +479,10 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
   info "Slot end",
     slot = shortLog(slot),
     nextSlot = shortLog(nextSlot),
-    headSlot = shortLog(node.blockPool.head.blck.slot),
-    headEpoch = shortLog(node.blockPool.head.blck.slot.compute_epoch_at_slot()),
-    headRoot = shortLog(node.blockPool.head.blck.root),
-    finalizedSlot = shortLog(node.blockPool.finalizedHead.blck.slot),
-    finalizedEpoch = shortLog(node.blockPool.finalizedHead.blck.slot.compute_epoch_at_slot()),
-    finalizedRoot = shortLog(node.blockPool.finalizedHead.blck.root)
+    head = shortLog(node.blockPool.head),
+    headEpoch = shortLog(node.blockPool.head.slot.compute_epoch_at_slot()),
+    finalizedHead = shortLog(node.blockPool.finalizedHead.blck),
+    finalizedEpoch = shortLog(node.blockPool.finalizedHead.blck.slot.compute_epoch_at_slot())

   when declared(GC_fullCollect):
     # The slots in the beacon node work as frames in a game: we want to make
@@ -528,7 +524,7 @@ proc runOnSecondLoop(node: BeaconNode) {.async.} =

 proc runForwardSyncLoop(node: BeaconNode) {.async.} =
   func getLocalHeadSlot(): Slot =
-    result = node.blockPool.head.blck.slot
+    result = node.blockPool.head.slot

   proc getLocalWallSlot(): Slot {.gcsafe.} =
     let epoch = node.beaconClock.now().slotOrZero.compute_epoch_at_slot() +
@@ -539,7 +535,7 @@ proc runForwardSyncLoop(node: BeaconNode) {.async.} =
     let fepoch = node.blockPool.headState.data.data.finalized_checkpoint.epoch
     compute_start_slot_at_epoch(fepoch)

-  proc updateLocalBlocks(list: openarray[SignedBeaconBlock]): Result[void, BlockError] =
+  proc updateLocalBlocks(list: openArray[SignedBeaconBlock]): Result[void, BlockError] =
     debug "Forward sync imported blocks", count = len(list),
           local_head_slot = getLocalHeadSlot()
     let sm = now(chronos.Moment)
@@ -553,7 +549,7 @@ proc runForwardSyncLoop(node: BeaconNode) {.async.} =
       # (and they may have no parents anymore in the fork choice if it was pruned)
       if res.isErr and res.error notin {BlockError.Unviable, BlockError.Old, BLockError.Duplicate}:
         return res
-    discard node.updateHead()
+    discard node.updateHead(node.beaconClock.now().slotOrZero)

     let dur = now(chronos.Moment) - sm
     let secs = float(chronos.seconds(1).nanoseconds)
@@ -602,7 +598,7 @@ proc connectedPeersCount(node: BeaconNode): int =

 proc installBeaconApiHandlers(rpcServer: RpcServer, node: BeaconNode) =
   rpcServer.rpc("getBeaconHead") do () -> Slot:
-    return node.blockPool.head.blck.slot
+    return node.blockPool.head.slot

   rpcServer.rpc("getChainHead") do () -> JsonNode:
     let
@@ -610,8 +606,8 @@ proc installBeaconApiHandlers(rpcServer: RpcServer, node: BeaconNode) =
       finalized = node.blockPool.headState.data.data.finalized_checkpoint
       justified = node.blockPool.headState.data.data.current_justified_checkpoint
     return %* {
-      "head_slot": head.blck.slot,
-      "head_block_root": head.blck.root.data.toHex(),
+      "head_slot": head.slot,
+      "head_block_root": head.root.data.toHex(),
       "finalized_slot": finalized.epoch * SLOTS_PER_EPOCH,
       "finalized_block_root": finalized.root.data.toHex(),
       "justified_slot": justified.epoch * SLOTS_PER_EPOCH,
@@ -622,7 +618,7 @@ proc installBeaconApiHandlers(rpcServer: RpcServer, node: BeaconNode) =
     let
       beaconTime = node.beaconClock.now()
       wallSlot = currentSlot(node)
-      headSlot = node.blockPool.head.blck.slot
+      headSlot = node.blockPool.head.slot
     # FIXME: temporary hack: If more than 1 block away from expected head, then we are "syncing"
     return (headSlot + 1) < wallSlot

@@ -658,7 +654,7 @@ proc installBeaconApiHandlers(rpcServer: RpcServer, node: BeaconNode) =
     requireOneOf(slot, root)
     if slot.isSome:
       # TODO sanity check slot so that it doesn't cause excessive rewinding
-      let blk = node.blockPool.head.blck.atSlot(slot.get)
+      let blk = node.blockPool.head.atSlot(slot.get)
       node.blockPool.withState(node.blockPool.tmpState, blk):
         return jsonResult(state)
     else:
@@ -858,7 +854,7 @@ proc start(node: BeaconNode) =
     timeSinceFinalization =
       int64(finalizedHead.slot.toBeaconTime()) -
      int64(node.beaconClock.now()),
-    head = shortLog(head.blck),
+    head = shortLog(head),
    finalizedHead = shortLog(finalizedHead),
    SLOTS_PER_EPOCH,
    SECONDS_PER_SLOT,
@@ -919,6 +915,9 @@ when hasPrompt:
   # p.useHistoryFile()

   proc dataResolver(expr: string): string =
+    template justified: untyped = node.blockPool.head.atSlot(
+      node.blockPool.headState.data.data.current_justified_checkpoint.epoch.
+        compute_start_slot_at_epoch)
     # TODO:
     # We should introduce a general API for resolving dot expressions
     # such as `db.latest_block.slot` or `metrics.connected_peers`.
@@ -931,22 +930,22 @@ when hasPrompt:
       $(node.connectedPeersCount)

     of "head_root":
-      shortLog(node.blockPool.head.blck.root)
+      shortLog(node.blockPool.head.root)
     of "head_epoch":
-      $(node.blockPool.head.blck.slot.epoch)
+      $(node.blockPool.head.slot.epoch)
     of "head_epoch_slot":
-      $(node.blockPool.head.blck.slot mod SLOTS_PER_EPOCH)
+      $(node.blockPool.head.slot mod SLOTS_PER_EPOCH)
     of "head_slot":
-      $(node.blockPool.head.blck.slot)
+      $(node.blockPool.head.slot)

     of "justifed_root":
-      shortLog(node.blockPool.head.justified.blck.root)
+      shortLog(justified.blck.root)
     of "justifed_epoch":
-      $(node.blockPool.head.justified.slot.epoch)
+      $(justified.slot.epoch)
     of "justifed_epoch_slot":
-      $(node.blockPool.head.justified.slot mod SLOTS_PER_EPOCH)
+      $(justified.slot mod SLOTS_PER_EPOCH)
     of "justifed_slot":
-      $(node.blockPool.head.justified.slot)
+      $(justified.slot)

     of "finalized_root":
       shortLog(node.blockPool.finalizedHead.blck.root)
@@ -59,12 +59,12 @@ const
 declareGauge beacon_head_root,
   "Root of the head block of the beacon chain"

-proc updateHead*(node: BeaconNode): BlockRef =
+proc updateHead*(node: BeaconNode, wallSlot: Slot): BlockRef =
   # Check pending attestations - maybe we found some blocks for them
-  node.attestationPool.resolve()
+  node.attestationPool.resolve(wallSlot)

   # Grab the new head according to our latest attestation data
-  let newHead = node.attestationPool.selectHead(node.beaconClock.now().slotOrZero())
+  let newHead = node.attestationPool.selectHead(wallSlot)

   # Store the new head in the block pool - this may cause epochs to be
   # justified and finalized
@@ -10,6 +10,12 @@ import

 export block_pools_types

+const
+  ATTESTATION_LOOKBACK* =
+    min(4'u64, SLOTS_PER_EPOCH) + MIN_ATTESTATION_INCLUSION_DELAY
+    ## The number of slots we'll keep track of in terms of "free" attestations
+    ## that potentially could be added to a newly created block
+
 type
   # #############################################
   #
@@ -50,9 +56,9 @@ type
     ## "free" attestations with those found in past blocks - these votes
     ## are tracked separately in the fork choice.

-    mapSlotsToAttestations*: Deque[AttestationsSeen] ## \
-      ## We keep one item per slot such that indexing matches slot number
-      ## together with startingSlot
+    candidates*: array[ATTESTATION_LOOKBACK, AttestationsSeen] ## \
+      ## We keep one item per slot such that indexing matches slot number
+      ## together with startingSlot

     startingSlot*: Slot ## \
       ## Generally, we keep attestations only until a slot has been finalized -
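With spec presets this window is tiny. A quick check of the arithmetic, assuming mainnet constants (SLOTS_PER_EPOCH = 32, MIN_ATTESTATION_INCLUSION_DELAY = 1; these values are assumptions, not taken from this diff):

# Assumed mainnet preset values - illustrative only
const
  SLOTS_PER_EPOCH = 32'u64
  MIN_ATTESTATION_INCLUSION_DELAY = 1'u64
  ATTESTATION_LOOKBACK =
    min(4'u64, SLOTS_PER_EPOCH) + MIN_ATTESTATION_INCLUSION_DELAY

static: doAssert ATTESTATION_LOOKBACK == 5

A five-element array of AttestationsSeen replaces the unbounded deque, which is what caps the pool's memory use regardless of uptime.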
@@ -6,12 +6,10 @@
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

 import
+  std/[algorithm, sequtils, sets],
   extras, beacon_chain_db,
   stew/results,
-  spec/[crypto, datatypes, digest, presets, validator]
-
-import
+  spec/[beaconstate, crypto, datatypes, digest, presets, validator],
   block_pools/[block_pools_types, clearance, candidate_chains, quarantine]

 export results, block_pools_types
@@ -23,13 +21,10 @@ export results, block_pools_types
 # during block_pool refactor

 type
-  BlockPools* = object
+  # TODO: Rename BlockPools
+  BlockPool* = object
     quarantine: Quarantine
     dag*: CandidateChains

-  BlockPool* = BlockPools
-
 {.push raises: [Defect], inline.}

 # Quarantine dispatch
@@ -44,10 +39,10 @@ func checkMissing*(pool: var BlockPool): seq[FetchRecord] =
 template tail*(pool: BlockPool): BlockRef =
   pool.dag.tail

-template heads*(pool: BlockPool): seq[Head] =
+template heads*(pool: BlockPool): seq[BlockRef] =
   pool.dag.heads

-template head*(pool: BlockPool): Head =
+template head*(pool: BlockPool): BlockRef =
   pool.dag.head

 template finalizedHead*(pool: BlockPool): BlockSlot =
@@ -73,10 +68,10 @@ export get_ancestor # func get_ancestor*(blck: BlockRef, slot: Slot): BlockRef
 export atSlot # func atSlot*(blck: BlockRef, slot: Slot): BlockSlot


-proc init*(T: type BlockPools,
+proc init*(T: type BlockPool,
            preset: RuntimePreset,
            db: BeaconChainDB,
-           updateFlags: UpdateFlags = {}): BlockPools =
+           updateFlags: UpdateFlags = {}): BlockPool =
   result.dag = init(CandidateChains, preset, db, updateFlags)

 func addFlags*(pool: BlockPool, flags: UpdateFlags) =
@@ -139,11 +134,6 @@ proc updateHead*(pool: BlockPool, newHead: BlockRef) =
   ## now fall from grace, or no longer be considered resolved.
   updateHead(pool.dag, newHead)

-proc latestJustifiedBlock*(pool: BlockPool): BlockSlot =
-  ## Return the most recent block that is justified and at least as recent
-  ## as the latest finalized block
-  latestJustifiedBlock(pool.dag)
-
 proc addMissing*(pool: var BlockPool, broot: Eth2Digest) {.inline.} =
   pool.quarantine.addMissing(broot)

@@ -171,9 +161,6 @@ template tmpState*(pool: BlockPool): StateData =
 template balanceState*(pool: BlockPool): StateData =
   pool.dag.balanceState

-template justifiedState*(pool: BlockPool): StateData =
-  pool.dag.justifiedState
-
 template withState*(
     pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped):
     untyped =
@@ -216,8 +203,46 @@ proc isValidBeaconBlock*(
   isValidBeaconBlock(
     pool.dag, pool.quarantine, signed_beacon_block, current_slot, flags)

+# Spec functions implemented based on cached values instead of the full state
+func count_active_validators*(epochInfo: EpochRef): uint64 =
+  epochInfo.shuffled_active_validator_indices.lenu64
+
+func get_committee_count_per_slot*(epochInfo: EpochRef): uint64 =
+  get_committee_count_per_slot(count_active_validators(epochInfo))
+
+func get_beacon_committee*(
+    epochRef: EpochRef, slot: Slot, index: CommitteeIndex): seq[ValidatorIndex] =
+  # Return the beacon committee at ``slot`` for ``index``.
+  let
+    committees_per_slot = get_committee_count_per_slot(epochRef)
+  compute_committee(
+    epochRef.shuffled_active_validator_indices,
+    (slot mod SLOTS_PER_EPOCH) * committees_per_slot +
+      index.uint64,
+    committees_per_slot * SLOTS_PER_EPOCH
+  )
+
+# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/beacon-chain.md#get_attesting_indices
+func get_attesting_indices*(epochRef: EpochRef,
+                            data: AttestationData,
+                            bits: CommitteeValidatorsBits):
+                            HashSet[ValidatorIndex] =
+  get_attesting_indices(
+    bits,
+    get_beacon_committee(epochRef, data.slot, data.index.CommitteeIndex))
+
+# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/beacon-chain.md#get_indexed_attestation
+func get_indexed_attestation*(epochRef: EpochRef, attestation: Attestation): IndexedAttestation =
+  # Return the indexed attestation corresponding to ``attestation``.
+  let
+    attesting_indices =
+      get_attesting_indices(
+        epochRef, attestation.data, attestation.aggregation_bits)
+
+  IndexedAttestation(
+    attesting_indices:
+      List[uint64, Limit MAX_VALIDATORS_PER_COMMITTEE].init(
+        sorted(mapIt(attesting_indices, it.uint64), system.cmp)),
+    data: attestation.data,
+    signature: attestation.signature
+  )
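These overloads answer committee and attester queries from the cached shuffling alone, so validating an attestation no longer forces a full state transition per query. A hedged usage sketch (pool, blck and att are assumed to be in scope; error handling elided):

# Hypothetical call site - a state rewind happens only on an EpochRef cache miss
let
  epochRef = pool.dag.getEpochRef(blck, att.data.target.epoch)
  committee = get_beacon_committee(
    epochRef, att.data.slot, att.data.index.CommitteeIndex)
  participants = get_attesting_indices(
    epochRef, att.data, att.aggregation_bits)
doAssert participants.len <= committee.len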
@@ -101,10 +101,10 @@ type
     tail*: BlockRef ##\
       ## The earliest finalized block we know about

-    heads*: seq[Head] ##\
+    heads*: seq[BlockRef] ##\
       ## Candidate heads of candidate chains

-    head*: Head ##\
+    head*: BlockRef ##\
       ## The latest block we know about, that's been chosen as a head by the fork
       ## choice rule
@@ -122,8 +122,6 @@ type
       ## State given by the head block; only update in `updateHead`, not anywhere
       ## else via `withState`

-    justifiedState*: StateData ## Latest justified state, as seen from the head
-
     tmpState*: StateData ## Scratchpad - may be any state

     clearanceState*: StateData ##\
@@ -185,10 +183,6 @@ type
       ## Slot time for this BlockSlot which may differ from blck.slot when time
       ## has advanced without blocks

-  Head* = object
-    blck*: BlockRef
-    justified*: BlockSlot
-
   OnBlockAdded* = proc(
     blckRef: BlockRef, blck: SignedBeaconBlock,
     state: HashedBeaconState) {.raises: [Defect], gcsafe.}
@@ -60,7 +60,7 @@ func parent*(bs: BlockSlot): BlockSlot =
     slot: bs.slot - 1
   )

-func populateEpochCache(state: BeaconState): EpochRef =
+func init*(T: type EpochRef, state: BeaconState): T =
   let epoch = state.get_current_epoch()
   EpochRef(
     epoch: epoch,
@@ -169,16 +169,16 @@ func getEpochInfo*(blck: BlockRef, state: BeaconState): EpochRef =
     matching_epochinfo = blck.epochsInfo.filterIt(it.epoch == state_epoch)

   if matching_epochinfo.len == 0:
-    let cache = populateEpochCache(state)
+    let epochInfo = EpochRef.init(state)

     # Don't use BlockRef caching as far as the epoch where the active
     # validator indices can diverge.
     if (compute_activation_exit_epoch(blck.slot.compute_epoch_at_slot) >
         state_epoch):
-      blck.epochsInfo.add(cache)
+      blck.epochsInfo.add(epochInfo)
     trace "candidate_chains.getEpochInfo: back-filling parent.epochInfo",
       state_slot = state.slot
-    cache
+    epochInfo
   elif matching_epochinfo.len == 1:
     matching_epochinfo[0]
   else:
@@ -282,23 +282,15 @@ proc init*(T: type CandidateChains,
     finalizedSlot =
       tmpState.data.data.finalized_checkpoint.epoch.compute_start_slot_at_epoch()
     finalizedHead = headRef.atSlot(finalizedSlot)
-    justifiedSlot =
-      tmpState.data.data.current_justified_checkpoint.epoch.compute_start_slot_at_epoch()
-    justifiedHead = headRef.atSlot(justifiedSlot)
-    head = Head(blck: headRef, justified: justifiedHead)
-
-  doAssert justifiedHead.slot >= finalizedHead.slot,
-    "justified head comes before finalized head - database corrupt?"

   let res = CandidateChains(
     blocks: blocks,
     tail: tailRef,
-    head: head,
+    head: headRef,
     finalizedHead: finalizedHead,
     db: db,
-    heads: @[head],
+    heads: @[headRef],
     headState: tmpState[],
-    justifiedState: tmpState[], # This is wrong but we'll update it below
     tmpState: tmpState[],
     clearanceState: tmpState[],
     balanceState: tmpState[],
@@ -311,17 +303,28 @@ proc init*(T: type CandidateChains,

   doAssert res.updateFlags in [{}, {verifyFinalization}]

-  res.updateStateData(res.justifiedState, justifiedHead)
   res.updateStateData(res.headState, headRef.atSlot(headRef.slot))
   res.clearanceState = res.headState
-  res.balanceState = res.justifiedState
+  res.balanceState = res.headState

   info "Block dag initialized",
-    head = head.blck, justifiedHead, finalizedHead, tail = tailRef,
+    head = shortLog(headRef),
+    finalizedHead = shortLog(finalizedHead),
+    tail = shortLog(tailRef),
     totalBlocks = blocks.len

   res

+proc getEpochRef*(pool: CandidateChains, blck: BlockRef, epoch: Epoch): EpochRef =
+  let bs = blck.atSlot(epoch.compute_start_slot_at_epoch)
+  for e in bs.blck.epochsInfo:
+    if e.epoch == epoch:
+      return e
+
+  # TODO use any state from epoch
+  pool.withState(pool.tmpState, bs):
+    getEpochInfo(blck, state)
+
 proc getState(
     dag: CandidateChains, db: BeaconChainDB, stateRoot: Eth2Digest, blck: BlockRef,
     output: var StateData): bool =
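getEpochRef is the cache-first lookup the commit message describes: the EpochRef stored on the BlockRef is consulted first, and tmpState is rewound only on a miss. Keying on the attestation target (root, epoch) means every attestation voting for the same target shares one entry. A standalone sketch of that memoization effect (the Table and string keys are stand-ins for epochsInfo and block roots, not the node's actual types):

import std/tables

var
  cache = initTable[(string, uint64), int]()
  rewinds = 0

proc computeExpensively(): int =
  inc rewinds # stands in for the state rewind inside withState
  42

proc getOrCompute(root: string, epoch: uint64): int =
  if (root, epoch) in cache:
    return cache[(root, epoch)]
  result = computeExpensively()
  cache[(root, epoch)] = result

for i in 0 ..< 100: # e.g. 100 attestations voting for the same target
  discard getOrCompute("0xabc", 5)
doAssert rewinds == 1 # the expensive path ran once, not 100 times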
@@ -426,14 +429,14 @@ func getBlockRange*(
   ## If there were no blocks in the range, `output.len` will be returned.
   let count = output.len
   trace "getBlockRange entered",
-    head = shortLog(dag.head.blck.root), count, startSlot, skipStep
+    head = shortLog(dag.head.root), count, startSlot, skipStep

   let
     skipStep = max(1, skipStep) # Treat 0 step as 1
     endSlot = startSlot + uint64(count * skipStep)

   var
-    b = dag.head.blck.atSlot(endSlot)
+    b = dag.head.atSlot(endSlot)
     o = count
   for i in 0..<count:
     for j in 0..<skipStep:
@@ -451,7 +454,7 @@ func getBlockRange*(
 func getBlockBySlot*(dag: CandidateChains, slot: Slot): BlockRef =
   ## Retrieves the first block in the current canonical chain
   ## with slot number less or equal to `slot`.
-  dag.head.blck.atSlot(slot).blck
+  dag.head.atSlot(slot).blck

 func getBlockByPreciseSlot*(dag: CandidateChains, slot: Slot): BlockRef =
   ## Retrieves a block from the canonical chain with a slot
@@ -626,7 +629,8 @@ proc getStateDataCached(
   false

 template withEpochState*(
-    dag: CandidateChains, cache: var StateData, blockSlot: BlockSlot, body: untyped): untyped =
+    dag: CandidateChains, cache: var StateData, blockSlot: BlockSlot,
+    body: untyped): untyped =
   ## Helper template that updates state to a particular BlockSlot - usage of
   ## cache is unsafe outside of block.
   ## TODO async transformations will lead to a race where cache gets updated
@@ -710,8 +714,8 @@ proc updateHead*(dag: CandidateChains, newHead: BlockRef) =
     newHead = shortLog(newHead)
     pcs = "fork_choice"

-  if dag.head.blck == newHead:
-    info "No head block update"
+  if dag.head == newHead:
+    debug "No head block update"

     return

@@ -723,26 +727,18 @@ proc updateHead*(dag: CandidateChains, newHead: BlockRef) =
   updateStateData(
     dag, dag.headState, BlockSlot(blck: newHead, slot: newHead.slot))

-  let
-    justifiedSlot = dag.headState.data.data
-                      .current_justified_checkpoint
-                      .epoch
-                      .compute_start_slot_at_epoch()
-    justifiedBS = newHead.atSlot(justifiedSlot)
-
-  dag.head = Head(blck: newHead, justified: justifiedBS)
-  updateStateData(dag, dag.justifiedState, justifiedBS)
+  dag.head = newHead

   # TODO isAncestorOf may be expensive - too expensive?
-  if not lastHead.blck.isAncestorOf(newHead):
-    info "Updated head block (new parent)",
-      lastHead = shortLog(lastHead.blck),
+  if not lastHead.isAncestorOf(newHead):
+    info "Updated head block with reorg",
+      lastHead = shortLog(lastHead),
       headParent = shortLog(newHead.parent),
       stateRoot = shortLog(dag.headState.data.root),
       headBlock = shortLog(dag.headState.blck),
       stateSlot = shortLog(dag.headState.data.data.slot),
-      justifiedEpoch = shortLog(dag.headState.data.data.current_justified_checkpoint.epoch),
-      finalizedEpoch = shortLog(dag.headState.data.data.finalized_checkpoint.epoch)
+      justified = shortLog(dag.headState.data.data.current_justified_checkpoint),
+      finalized = shortLog(dag.headState.data.data.finalized_checkpoint)

     # A reasonable criterion for "reorganizations of the chain"
     beacon_reorgs_total.inc()
@@ -751,13 +747,12 @@ proc updateHead*(dag: CandidateChains, newHead: BlockRef) =
       stateRoot = shortLog(dag.headState.data.root),
       headBlock = shortLog(dag.headState.blck),
       stateSlot = shortLog(dag.headState.data.data.slot),
-      justifiedEpoch = shortLog(dag.headState.data.data.current_justified_checkpoint.epoch),
-      finalizedEpoch = shortLog(dag.headState.data.data.finalized_checkpoint.epoch)
+      justified = shortLog(dag.headState.data.data.current_justified_checkpoint),
+      finalized = shortLog(dag.headState.data.data.finalized_checkpoint)
   let
     finalizedEpochStartSlot =
       dag.headState.data.data.finalized_checkpoint.epoch.
       compute_start_slot_at_epoch()
     # TODO there might not be a block at the epoch boundary - what then?
     finalizedHead = newHead.atSlot(finalizedEpochStartSlot)

   doAssert (not finalizedHead.blck.isNil),
@@ -787,20 +782,22 @@ proc updateHead*(dag: CandidateChains, newHead: BlockRef) =
     for i in 0..<hlen:
       let n = hlen - i - 1
       let head = dag.heads[n]
-      if finalizedHead.blck.isAncestorOf(head.blck):
+      if finalizedHead.blck.isAncestorOf(head):
         continue

-      var cur = head.blck
-      while not cur.isAncestorOf(finalizedHead.blck):
-        # TODO empty states need to be removed also!
-        let stateRoot = dag.db.getStateRoot(cur.root, cur.slot)
-        if stateRoot.issome():
-          dag.db.delState(stateRoot.get())
+      var cur = head.atSlot(head.slot)
+      while not cur.blck.isAncestorOf(finalizedHead.blck):
+        # TODO there may be more empty states here: those that have a slot
+        #      higher than head.slot and those near the branch point - one
+        #      needs to be careful though because those close to the branch
+        #      point should not necessarily be cleaned up
+        dag.delState(cur)

-        dag.blocks.del(cur.root)
-        dag.db.delBlock(cur.root)
+        if cur.blck.slot == cur.slot:
+          dag.blocks.del(cur.blck.root)
+          dag.db.delBlock(cur.blck.root)

-        if cur.parent.isNil:
+        if cur.blck.parent.isNil:
           break
         cur = cur.parent

@@ -808,27 +805,10 @@ proc updateHead*(dag: CandidateChains, newHead: BlockRef) =

     dag.finalizedHead = finalizedHead

-    info "Finalized block",
+    info "Reached new finalization checkpoint",
       finalizedHead = shortLog(finalizedHead),
       heads = dag.heads.len

   # TODO prune everything before weak subjectivity period

-func latestJustifiedBlock*(dag: CandidateChains): BlockSlot =
-  ## Return the most recent block that is justified and at least as recent
-  ## as the latest finalized block
-
-  doAssert dag.heads.len > 0,
-    "We should have at least the genesis block in heads"
-  doAssert (not dag.head.blck.isNil()),
-    "Genesis block will be head, if nothing else"
-
-  # Prefer stability: use justified block from current head to break ties!
-  result = dag.head.justified
-  for head in dag.heads[1 ..< ^0]:
-    if head.justified.slot > result.slot:
-      result = head.justified
-
 proc isInitialized*(T: type CandidateChains, db: BeaconChainDB): bool =
   let
     headBlockRoot = db.getHeadBlock()
@@ -8,11 +8,12 @@
 {.push raises: [Defect].}

 import
-  chronicles, sequtils, tables,
+  std/tables,
+  chronicles,
   metrics, stew/results,
   ../extras,
   ../spec/[crypto, datatypes, digest, helpers, signatures, state_transition],
-  block_pools_types, candidate_chains, quarantine
+  ./block_pools_types, ./candidate_chains, ./quarantine

 export results

@@ -41,7 +42,8 @@ proc addRawBlock*(

 proc addResolvedBlock(
        dag: var CandidateChains, quarantine: var Quarantine,
-       state: HashedBeaconState, signedBlock: SignedBeaconBlock, parent: BlockRef,
+       state: HashedBeaconState, signedBlock: SignedBeaconBlock,
+       parent: BlockRef, cache: StateCache,
        onBlockAdded: OnBlockAdded
      ): BlockRef =
   # TODO: `addResolvedBlock` is accumulating significant cruft
@@ -55,8 +57,11 @@ proc addResolvedBlock(
   let
     blockRoot = signedBlock.root
     blockRef = BlockRef.init(blockRoot, signedBlock.message)
-  blockRef.epochsInfo = filterIt(parent.epochsInfo,
-    it.epoch + 1 >= state.data.get_current_epoch())
+  if parent.slot.compute_epoch_at_slot() == blockRef.slot.compute_epoch_at_slot:
+    blockRef.epochsInfo = @[parent.epochsInfo[0]]
+  else:
+    discard getEpochInfo(blockRef, state.data)
+
   link(parent, blockRef)

   dag.blocks[blockRoot] = blockRef
@@ -65,32 +70,22 @@ proc addResolvedBlock(
   # Resolved blocks should be stored in database
   dag.putBlock(signedBlock)

-  # This block *might* have caused a justification - make sure we stow away
-  # that information:
-  let justifiedSlot =
-    state.data.current_justified_checkpoint.epoch.compute_start_slot_at_epoch()
-
-  var foundHead: Option[Head]
+  var foundHead: BlockRef
   for head in dag.heads.mitems():
-    if head.blck.isAncestorOf(blockRef):
-      if head.justified.slot != justifiedSlot:
-        head.justified = blockRef.atSlot(justifiedSlot)
-
-      head.blck = blockRef
-
-      foundHead = some(head)
+    if head.isAncestorOf(blockRef):
+      head = blockRef
+      foundHead = head
       break

-  if foundHead.isNone():
-    foundHead = some(Head(
-      blck: blockRef,
-      justified: blockRef.atSlot(justifiedSlot)))
-    dag.heads.add(foundHead.get())
+  if foundHead.isNil:
+    foundHead = blockRef
+    dag.heads.add(foundHead)

   info "Block resolved",
     blck = shortLog(signedBlock.message),
     blockRoot = shortLog(blockRoot),
-    justifiedHead = foundHead.get().justified,
     heads = dag.heads.len()

   # This MUST be added before the quarantine
@@ -107,14 +102,15 @@ proc addResolvedBlock(
   if not quarantine.inAdd:
     quarantine.inAdd = true
     defer: quarantine.inAdd = false
-    var keepGoing = true
-    while keepGoing:
-      let retries = quarantine.orphans
-      for _, v in retries:
-        if v.message.parent_root in dag.blocks:
-          discard addRawBlock(dag, quarantine, v, onBlockAdded)
-      # Keep going for as long as the pending dag is shrinking
-      # TODO inefficient! so what?
-      keepGoing = quarantine.orphans.len < retries.len
+    var entries = 0
+    while entries != quarantine.orphans.len:
+      entries = quarantine.orphans.len # keep going while quarantine is shrinking
+      var resolved: seq[SignedBeaconBlock]
+      for _, v in quarantine.orphans:
+        if v.message.parent_root in dag.blocks: resolved.add(v)
+
+      for v in resolved:
+        discard addRawBlock(dag, quarantine, v, onBlockAdded)

   blockRef

@@ -208,9 +204,9 @@ proc addRawBlock*(
     doAssert v.addr == addr poolPtr.clearanceState.data
     assign(poolPtr.clearanceState, poolPtr.headState)

-  var stateCache = getEpochCache(parent, dag.clearanceState.data.data)
+  var cache = getEpochCache(parent, dag.clearanceState.data.data)
   if not state_transition(dag.runtimePreset, dag.clearanceState.data, signedBlock,
-                          stateCache, dag.updateFlags, restore):
+                          cache, dag.updateFlags, restore):
     notice "Invalid block"

     return err Invalid
@@ -218,7 +214,7 @@ proc addRawBlock*(
   # Careful, clearanceState.data has been updated but not blck - we need to
   # create the BlockRef first!
   dag.clearanceState.blck = addResolvedBlock(
-    dag, quarantine, dag.clearanceState.data, signedBlock, parent,
+    dag, quarantine, dag.clearanceState.data, signedBlock, parent, cache,
     onBlockAdded
   )
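The quarantine change replaces "snapshot the whole orphan table and retry everything" with a loop that runs only while the table keeps shrinking, and that collects resolvable orphans before mutating the table. A standalone sketch of the pattern (Orphan and the string roots are hypothetical stand-ins for quarantined blocks and addRawBlock):

import std/tables

type Orphan = object
  root, parentRoot: string

proc drain(orphans: var Table[string, Orphan], known: var seq[string]) =
  var entries = 0
  while entries != orphans.len: # fixed point: stop when a pass resolves nothing
    entries = orphans.len
    var resolved: seq[Orphan]
    for _, o in orphans:
      if o.parentRoot in known: # parent is known now, so this orphan can be retried
        resolved.add(o)
    for o in resolved:          # mutate only after iteration is finished
      known.add(o.root)
      orphans.del(o.root)

var
  orphans = {"b": Orphan(root: "b", parentRoot: "a"),
             "c": Orphan(root: "c", parentRoot: "b")}.toTable
  known = @["a"]
drain(orphans, known)
doAssert orphans.len == 0 # "c" resolves on the second pass, after "b"

Each pass that resolves at least one orphan triggers another pass, so chains of orphans clear in order without copying the whole table on every retry.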
@@ -9,14 +9,16 @@

 import
   # Standard library
-  std/[sequtils, sets, tables, typetraits],
+  std/[sets, tables, typetraits],
   # Status libraries
   stew/results, chronicles,
   # Internal
   ../spec/[beaconstate, datatypes, digest, helpers],
   # Fork choice
   ./fork_choice_types, ./proto_array,
-  ../block_pool
+  ../block_pool, ../block_pools/candidate_chains
+
+export sets

 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md
 # This is a port of https://github.com/sigp/lighthouse/pull/804
@@ -280,12 +282,15 @@ proc process_block*(self: var ForkChoice,
   # TODO current time
   maybe_update(self.checkpoints, wallSlot, pool)

-  var cache = StateCache() # TODO reuse shuffling
-
   for attestation in blck.body.attestations:
+    let targetBlck = pool.dag.getRef(attestation.data.target.root)
+    if targetBlck.isNil:
+      continue
+    let epochRef =
+      pool.dag.getEpochRef(targetBlck, attestation.data.target.epoch)
     if attestation.data.beacon_block_root in self.backend:
-      let participants = toSeq(items(get_attesting_indices(
-        state, attestation.data, attestation.aggregation_bits, cache)))
+      let participants = get_attesting_indices(
+        epochRef, attestation.data, attestation.aggregation_bits)

       for validator in participants:
         self.process_attestation(
@@ -453,16 +453,9 @@ func is_valid_indexed_attestation*(
   true

 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/beacon-chain.md#get_attesting_indices
-func get_attesting_indices*(state: BeaconState,
-                            data: AttestationData,
-                            bits: CommitteeValidatorsBits,
-                            stateCache: var StateCache):
+func get_attesting_indices*(bits: CommitteeValidatorsBits,
+                            committee: openArray[ValidatorIndex]):
                             HashSet[ValidatorIndex] =
   # Return the set of attesting indices corresponding to ``data`` and ``bits``.
   result = initHashSet[ValidatorIndex]()
-  let committee = get_beacon_committee(
-    state, data.slot, data.index.CommitteeIndex, stateCache)
-
   # This shouldn't happen if one begins with a valid BeaconState and applies
   # valid updates, but one can construct a BeaconState where it does. Do not
   # do anything here since the PendingAttestation wouldn't have made it past
@@ -478,14 +471,25 @@ func get_attesting_indices*(state: BeaconState,
     if bits[i]:
       result.incl index

+# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/beacon-chain.md#get_attesting_indices
+func get_attesting_indices*(state: BeaconState,
+                            data: AttestationData,
+                            bits: CommitteeValidatorsBits,
+                            cache: var StateCache):
+                            HashSet[ValidatorIndex] =
+  # Return the set of attesting indices corresponding to ``data`` and ``bits``.
+  get_attesting_indices(
+    bits,
+    get_beacon_committee(state, data.slot, data.index.CommitteeIndex, cache))
+
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/beacon-chain.md#get_indexed_attestation
 func get_indexed_attestation*(state: BeaconState, attestation: Attestation,
-                              stateCache: var StateCache): IndexedAttestation =
+                              cache: var StateCache): IndexedAttestation =
   # Return the indexed attestation corresponding to ``attestation``.
   let
     attesting_indices =
       get_attesting_indices(
-        state, attestation.data, attestation.aggregation_bits, stateCache)
+        state, attestation.data, attestation.aggregation_bits, cache)

   IndexedAttestation(
     attesting_indices:
@@ -496,12 +500,12 @@ func get_indexed_attestation*(state: BeaconState, attestation: Attestation,
   )

 func get_indexed_attestation*(state: BeaconState, attestation: TrustedAttestation,
-                              stateCache: var StateCache): TrustedIndexedAttestation =
+                              cache: var StateCache): TrustedIndexedAttestation =
   # Return the indexed attestation corresponding to ``attestation``.
   let
     attesting_indices =
       get_attesting_indices(
-        state, attestation.data, attestation.aggregation_bits, stateCache)
+        state, attestation.data, attestation.aggregation_bits, cache)

   TrustedIndexedAttestation(
     attesting_indices:
@@ -608,16 +612,16 @@ proc process_attestation*(
     )

   if attestation.data.target.epoch == get_current_epoch(state):
-    trace "process_attestation: current_epoch_attestations.add",
+    trace "current_epoch_attestations.add",
       attestation = shortLog(attestation),
-      pending_attestation = pending_attestation,
+      pending_attestation = shortLog(pending_attestation),
       indices = get_attesting_indices(
         state, attestation.data, attestation.aggregation_bits, cache).len
     state.current_epoch_attestations.add(pending_attestation)
   else:
-    trace "process_attestation: previous_epoch_attestations.add",
+    trace "previous_epoch_attestations.add",
       attestation = shortLog(attestation),
-      pending_attestation = pending_attestation,
+      pending_attestation = shortLog(pending_attestation),
      indices = get_attesting_indices(
        state, attestation.data, attestation.aggregation_bits, cache).len
     state.previous_epoch_attestations.add(pending_attestation)
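Splitting get_attesting_indices this way isolates the pure bits-to-committee mapping, which the state-based wrapper above and the EpochRef-based version in block_pool can both share. A toy version of what the core overload computes (hypothetical data; the spec's validity checks are elided):

import std/sets

func attestingIndices(bits: seq[bool], committee: seq[int]): HashSet[int] =
  result = initHashSet[int]()
  for i, member in committee:
    if bits[i]: # validator committee[i] attested iff bit i is set
      result.incl member

doAssert attestingIndices(@[true, false, true], @[11, 22, 33]) == [11, 33].toHashSet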
@@ -670,6 +670,14 @@ func shortLog*(v: AttestationData): auto =
     target: shortLog(v.target),
   )

+func shortLog*(v: PendingAttestation): auto =
+  (
+    aggregation_bits: v.aggregation_bits,
+    data: shortLog(v.data),
+    inclusion_delay: v.inclusion_delay,
+    proposer_index: v.proposer_index
+  )
+
 func shortLog*(v: SomeAttestation): auto =
   (
     aggregation_bits: v.aggregation_bits,
@@ -149,7 +149,7 @@ func get_previous_epoch*(state: BeaconState): Epoch =
   get_previous_epoch(get_current_epoch(state))

 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/beacon-chain.md#compute_committee
-func compute_committee(shuffled_indices: seq[ValidatorIndex],
+func compute_committee*(shuffled_indices: seq[ValidatorIndex],
     index: uint64, count: uint64): seq[ValidatorIndex] =
   ## Return the committee corresponding to ``indices``, ``seed``, ``index``,
   ## and committee ``count``.
@@ -68,7 +68,7 @@ func disconnectReasonName(reason: uint64): string =
 proc getCurrentStatus*(state: BeaconSyncNetworkState): StatusMsg {.gcsafe.} =
   let
     blockPool = state.blockPool
-    headBlock = blockPool.head.blck
+    headBlock = blockPool.head

   StatusMsg(
     forkDigest: state.forkDigest,
@@ -27,7 +27,7 @@ type

 logScope: topics = "valapi"

 proc toBlockSlot(blckRef: BlockRef): BlockSlot =
   blckRef.atSlot(blckRef.slot)

 proc parseRoot(str: string): Eth2Digest =
@@ -40,7 +40,7 @@ proc parsePubkey(str: string): ValidatorPubKey =
   return pubkeyRes[]

 proc doChecksAndGetCurrentHead(node: BeaconNode, slot: Slot): BlockRef =
-  result = node.blockPool.head.blck
+  result = node.blockPool.head
   if not node.isSynced(result):
     raise newException(CatchableError, "Cannot fulfill request until ndoe is synced")
   # TODO for now we limit the requests arbitrarily by up to 2 epochs into the future
@@ -133,7 +133,7 @@ proc getBlockSlotFromString(node: BeaconNode, slot: string): BlockSlot =
 proc getBlockDataFromBlockId(node: BeaconNode, blockId: string): BlockData =
   result = case blockId:
     of "head":
-      node.blockPool.get(node.blockPool.head.blck)
+      node.blockPool.get(node.blockPool.head)
     of "genesis":
       node.blockPool.get(node.blockPool.tail)
     of "finalized":
@@ -154,14 +154,15 @@ proc getBlockDataFromBlockId(node: BeaconNode, blockId: string): BlockData =
 proc stateIdToBlockSlot(node: BeaconNode, stateId: string): BlockSlot =
   result = case stateId:
     of "head":
-      node.blockPool.head.blck.toBlockSlot()
+      node.blockPool.head.toBlockSlot()
     of "genesis":
       node.blockPool.tail.toBlockSlot()
     of "finalized":
       node.blockPool.finalizedHead
     of "justified":
-      node.blockPool.justifiedState.blck.atSlot(
-        node.blockPool.justifiedState.data.data.slot)
+      node.blockPool.head.atSlot(
+        node.blockPool.headState.data.data.current_justified_checkpoint.
+          epoch.compute_start_slot_at_epoch)
     else:
       if stateId.startsWith("0x"):
         let blckRoot = parseRoot(stateId)
@@ -273,7 +274,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
     result.header.message.state_root = tsbb.message.state_root
     result.header.message.body_root = tsbb.message.body.hash_tree_root()

-    result.canonical = bd.refs.isAncestorOf(node.blockPool.head.blck)
+    result.canonical = bd.refs.isAncestorOf(node.blockPool.head)

   rpcServer.rpc("get_v1_beacon_blocks_blockId") do (
     blockId: string) -> TrustedSignedBeaconBlock:
@@ -57,9 +57,8 @@ proc addLocalValidator*(node: BeaconNode,
 proc addLocalValidators*(node: BeaconNode) {.async.} =
   let
     head = node.blockPool.head
-    bs = BlockSlot(blck: head.blck, slot: head.blck.slot)
 
-  node.blockPool.withState(node.blockPool.tmpState, bs):
+  node.blockPool.withState(node.blockPool.tmpState, head.atSlot(head.slot)):
     for validatorKey in node.config.validatorKeys:
       node.addLocalValidator state, validatorKey
       # Allow some network events to be processed:

@@ -429,7 +428,7 @@ proc broadcastAggregatedAttestations(
 proc handleValidatorDuties*(
     node: BeaconNode, lastSlot, slot: Slot) {.async.} =
   ## Perform validator duties - create blocks, vote and aggregate existing votes
-  var head = node.updateHead()
+  var head = node.updateHead(slot)
   if node.attachedValidators.count == 0:
     # Nothing to do because we have no validator attached
     return

@@ -486,7 +485,7 @@ proc handleValidatorDuties*(
   template sleepToSlotOffsetWithHeadUpdate(extra: chronos.Duration, msg: static string) =
     if await node.beaconClock.sleepToSlotOffset(extra, slot, msg):
       # Time passed - we might need to select a new head in that case
-      head = node.updateHead()
+      head = node.updateHead(slot)
 
   sleepToSlotOffsetWithHeadUpdate(
     seconds(int64(SECONDS_PER_SLOT)) div 3, "Waiting to send attestations")
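updateHead now takes the slot being processed, so head selection is bounded by wall-clock time rather than by whatever the node last happened to see, and the head is re-selected with the then-current slot after every sleep. A call-shape sketch with hypothetical minimal types (selectHead stands in for the actual fork choice run):

type
  Slot = uint64
  BlockRef = ref object
    slot: Slot

proc selectHead(wallSlot: Slot): BlockRef =
  # placeholder: the real call runs fork choice over the votes known so
  # far, considering only blocks at or before wallSlot
  BlockRef(slot: wallSlot)

proc updateHead(wallSlot: Slot): BlockRef =
  selectHead(wallSlot)

when isMainModule:
  var head = updateHead(Slot(100))
  # ... sleepToSlotOffset waits towards the attestation deadline ...
  head = updateHead(Slot(100))   # re-select: a better block may have arrived
  doAssert head.slot == 100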
@@ -3,7 +3,7 @@
   stew/byteutils,
   ../beacon_chain/[beacon_chain_db, block_pool, extras],
   ../beacon_chain/spec/[crypto, datatypes, digest, helpers,
-                        state_transition, validator, presets],
+                        state_transition, presets],
   ../beacon_chain/sszdump, ../research/simutils,
   eth/db/[kvstore, kvstore_sqlite3]
 
@@ -74,12 +74,12 @@ proc cmdBench(conf: DbConf) =
   let pool = withTimerRet(timers[tInit]):
     CandidateChains.init(defaultRuntimePreset, db, {})
 
-  echo &"Loaded {pool.blocks.len} blocks, head slot {pool.head.blck.slot}"
+  echo &"Loaded {pool.blocks.len} blocks, head slot {pool.head.slot}"
 
   var
     blockRefs: seq[BlockRef]
     blocks: seq[TrustedSignedBeaconBlock]
-    cur = pool.head.blck
+    cur = pool.head
 
   while cur != nil:
     blockRefs.add cur
@@ -65,7 +65,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
 
   proc handleAttestations(slot: Slot) =
     let
-      attestationHead = blockPool.head.blck.atSlot(slot)
+      attestationHead = blockPool.head.atSlot(slot)
 
     blockPool.withState(blockPool.tmpState, attestationHead):
       var cache = getEpochCache(attestationHead.blck, state)
@@ -92,14 +92,14 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
             data: data,
             aggregation_bits: aggregation_bits,
             signature: sig
-          ))
+          ), data.slot)
 
   proc proposeBlock(slot: Slot) =
     if rand(r, 1.0) > blockRatio:
       return
 
     let
-      head = blockPool.head.blck
+      head = blockPool.head
 
     blockPool.withState(blockPool.tmpState, head.atSlot(slot)):
       var cache = StateCache()

@@ -173,7 +173,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
   if replay:
     withTimer(timers[tReplay]):
       blockPool.updateStateData(
-        replayState[], blockPool.head.blck.atSlot(Slot(slots)))
+        replayState[], blockPool.head.atSlot(Slot(slots)))
 
   echo "Done!"
 
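Both the simulator and the tests below now pass a slot to addAttestation. As the commit message notes, the pool only needs attestations that could still end up in a block, so the extra parameter lets it enforce a sliding window and discard anything older. A minimal sketch of that idea (the WINDOW constant, field names and eviction policy are assumptions for illustration, not the actual nimbus-eth2 code):

import std/tables

type
  Slot = uint64
  Attestation = object
    slot: Slot

  AttestationPool = object
    horizon: Slot                         # drop attestations below this
    bySlot: Table[Slot, seq[Attestation]]

proc addAttestation(pool: var AttestationPool,
                    attestation: Attestation, wallSlot: Slot) =
  const WINDOW = 32'u64                   # assumed: roughly one epoch
  if wallSlot > WINDOW:
    pool.horizon = max(pool.horizon, wallSlot - WINDOW)
  if attestation.slot < pool.horizon:
    return                                # too old to end up in a block
  pool.bySlot.mgetOrPut(attestation.slot, @[]).add attestation

when isMainModule:
  var pool = AttestationPool(bySlot: initTable[Slot, seq[Attestation]]())
  pool.addAttestation(Attestation(slot: 90), wallSlot = 100)
  pool.addAttestation(Attestation(slot: 10), wallSlot = 100)  # dropped
  doAssert 90'u64 in pool.bySlot and 10'u64 notin pool.bySlot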
@@ -48,7 +48,7 @@ suiteReport "Attestation pool processing" & preset():
       attestation = makeAttestation(
         state.data.data, state.blck.root, beacon_committee[0], cache)
 
-    pool[].addAttestation(attestation)
+    pool[].addAttestation(attestation, attestation.data.slot)
 
     check:
       process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)

@@ -77,8 +77,8 @@ suiteReport "Attestation pool processing" & preset():
         state.data.data, state.blck.root, bc1[0], cache)
 
     # test reverse order
-    pool[].addAttestation(attestation1)
-    pool[].addAttestation(attestation0)
+    pool[].addAttestation(attestation1, attestation1.data.slot)
+    pool[].addAttestation(attestation0, attestation1.data.slot)
 
     discard process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
 
@@ -98,8 +98,8 @@ suiteReport "Attestation pool processing" & preset():
       attestation1 = makeAttestation(
         state.data.data, state.blck.root, bc0[1], cache)
 
-    pool[].addAttestation(attestation0)
-    pool[].addAttestation(attestation1)
+    pool[].addAttestation(attestation0, attestation0.data.slot)
+    pool[].addAttestation(attestation1, attestation1.data.slot)
 
     check:
       process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)

@@ -123,8 +123,8 @@ suiteReport "Attestation pool processing" & preset():
 
     attestation0.combine(attestation1, {})
 
-    pool[].addAttestation(attestation0)
-    pool[].addAttestation(attestation1)
+    pool[].addAttestation(attestation0, attestation0.data.slot)
+    pool[].addAttestation(attestation1, attestation1.data.slot)
 
     check:
       process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
@@ -147,8 +147,8 @@ suiteReport "Attestation pool processing" & preset():
 
     attestation0.combine(attestation1, {})
 
-    pool[].addAttestation(attestation1)
-    pool[].addAttestation(attestation0)
+    pool[].addAttestation(attestation1, attestation1.data.slot)
+    pool[].addAttestation(attestation0, attestation0.data.slot)
 
     check:
       process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)

@@ -168,7 +168,6 @@ suiteReport "Attestation pool processing" & preset():
       # Callback add to fork choice if valid
       pool[].addForkChoice(state.data, blckRef, signedBlock.message, blckRef.slot)
 
-
     let head = pool[].selectHead(b1Add[].slot)
 
     check:
@@ -216,7 +215,7 @@ suiteReport "Attestation pool processing" & preset():
         state.data.data, state.data.data.slot, 1.CommitteeIndex, cache)
       attestation0 = makeAttestation(state.data.data, b10.root, bc1[0], cache)
 
-    pool[].addAttestation(attestation0)
+    pool[].addAttestation(attestation0, attestation0.data.slot)
 
     let head2 = pool[].selectHead(b10Add[].slot)
 
@@ -227,7 +226,7 @@ suiteReport "Attestation pool processing" & preset():
     let
       attestation1 = makeAttestation(state.data.data, b11.root, bc1[1], cache)
       attestation2 = makeAttestation(state.data.data, b11.root, bc1[2], cache)
-    pool[].addAttestation(attestation1)
+    pool[].addAttestation(attestation1, attestation1.data.slot)
 
     let head3 = pool[].selectHead(b10Add[].slot)
     let bigger = if b11.root.data < b10.root.data: b10Add else: b11Add

@@ -236,7 +235,7 @@ suiteReport "Attestation pool processing" & preset():
       # Ties broken lexicographically in spec -> ?
       head3 == bigger[]
 
-    pool[].addAttestation(attestation2)
+    pool[].addAttestation(attestation2, attestation2.data.slot)
 
     let head4 = pool[].selectHead(b11Add[].slot)
 
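The bigger comparison above encodes the spec's tie-break: when two candidate heads carry equal attestation weight, get_head picks the one with the greater root, and Nim's < on byte arrays is the lexicographic comparison the test leans on. A tiny standalone illustration (shortened 4-byte roots instead of 32):

type RootData = array[4, byte]   # stand-in for a 32-byte block root

proc tieBreak(a, b: RootData): RootData =
  # equal fork-choice weight: prefer the lexicographically larger root
  if a < b: b else: a

when isMainModule:
  let
    x: RootData = [byte 1, 0, 0, 0]
    y: RootData = [byte 2, 0, 0, 0]
  doAssert tieBreak(x, y) == y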
@@ -118,7 +118,7 @@ suiteReport "Block pool processing" & preset():
       b1Get.get().refs.root == b1Root
       b1Add[].root == b1Get.get().refs.root
       pool.heads.len == 1
-      pool.heads[0].blck == b1Add[]
+      pool.heads[0] == b1Add[]
 
     let
       b2Add = pool.addRawBlock(b2, nil)

@@ -129,7 +129,7 @@ suiteReport "Block pool processing" & preset():
       b2Get.get().refs.root == b2.root
       b2Add[].root == b2Get.get().refs.root
       pool.heads.len == 1
-      pool.heads[0].blck == b2Add[]
+      pool.heads[0] == b2Add[]
 
     # Skip one slot to get a gap
     check:

@@ -199,7 +199,7 @@ suiteReport "Block pool processing" & preset():
     # The heads structure should have been updated to contain only the new
     # b2 head
     check:
-      pool.heads.mapIt(it.blck) == @[b2Get.get().refs]
+      pool.heads.mapIt(it) == @[b2Get.get().refs]
 
     # check that init also reloads block graph
     var
@@ -207,12 +207,12 @@ suiteReport "Block pool processing" & preset():
 
     check:
       # ensure we loaded the correct head state
-      pool2.head.blck.root == b2Root
+      pool2.head.root == b2Root
       hash_tree_root(pool2.headState.data.data) == b2.message.state_root
       pool2.get(b1Root).isSome()
       pool2.get(b2Root).isSome()
       pool2.heads.len == 1
-      pool2.heads[0].blck.root == b2Root
+      pool2.heads[0].root == b2Root
 
   timedTest "Adding the same block twice returns a Duplicate error" & preset():
     let

@@ -230,7 +230,7 @@ suiteReport "Block pool processing" & preset():
     pool.updateHead(b1Add[])
 
     check:
-      pool.head.blck == b1Add[]
+      pool.head == b1Add[]
       pool.headState.data.data.slot == b1Add[].slot
 
   timedTest "updateStateData sanity" & preset():
@@ -291,18 +291,17 @@ suiteReport "BlockPool finalization tests" & preset():
   timedTest "prune heads on finalization" & preset():
     # Create a fork that will not be taken
     var
-      blck = makeTestBlock(pool.headState.data, pool.head.blck.root, cache)
+      blck = makeTestBlock(pool.headState.data, pool.head.root, cache)
       tmpState = assignClone(pool.headState.data)
     check:
       process_slots(
         tmpState[], tmpState.data.slot + (5 * SLOTS_PER_EPOCH).uint64)
 
-    let lateBlock = makeTestBlock(tmpState[], pool.head.blck.root, cache)
+    let lateBlock = makeTestBlock(tmpState[], pool.head.root, cache)
     block:
       let status = pool.addRawBlock(blck, nil)
       check: status.isOk()
 
-
     for i in 0 ..< (SLOTS_PER_EPOCH * 6):
       if i == 1:
         # There are 2 heads now because of the fork at slot 1

@@ -310,9 +309,9 @@ suiteReport "BlockPool finalization tests" & preset():
           pool.heads.len == 2
 
       blck = makeTestBlock(
-        pool.headState.data, pool.head.blck.root, cache,
+        pool.headState.data, pool.head.root, cache,
         attestations = makeFullAttestations(
-          pool.headState.data.data, pool.head.blck.root,
+          pool.headState.data.data, pool.head.root,
           pool.headState.data.data.slot, cache, {}))
       let added = pool.addRawBlock(blck, nil)
       check: added.isOk()
@@ -320,7 +319,6 @@ suiteReport "BlockPool finalization tests" & preset():
 
     check:
       pool.heads.len() == 1
-      pool.head.justified.slot.compute_epoch_at_slot() == 5
 
     block:
       # The late block is a block whose parent was finalized long ago and thus

@@ -334,13 +332,11 @@ suiteReport "BlockPool finalization tests" & preset():
     # check that the state reloaded from database resembles what we had before
     check:
       pool2.tail.root == pool.tail.root
-      pool2.head.blck.root == pool.head.blck.root
+      pool2.head.root == pool.head.root
       pool2.finalizedHead.blck.root == pool.finalizedHead.blck.root
       pool2.finalizedHead.slot == pool.finalizedHead.slot
       hash_tree_root(pool2.headState.data.data) ==
         hash_tree_root(pool.headState.data.data)
-      hash_tree_root(pool2.justifiedState.data.data) ==
-        hash_tree_root(pool.justifiedState.data.data)
 
   # timedTest "init with gaps" & preset():
   #   var cache = StateCache()

@@ -384,5 +380,3 @@ suiteReport "BlockPool finalization tests" & preset():
   #     pool2.finalizedHead.slot == pool.finalizedHead.slot
   #     hash_tree_root(pool2.headState.data.data) ==
   #       hash_tree_root(pool.headState.data.data)
-  #     hash_tree_root(pool2.justifiedState.data.data) ==
-  #       hash_tree_root(pool.justifiedState.data.data)