incorporate proposals into nextActionWait; switch some proc to func (#2438)
parent a727fc8e52
commit 36311bfc05

@@ -161,9 +161,10 @@ type
     subscribeSlot*: array[ATTESTATION_SUBNET_COUNT, Slot]
     unsubscribeSlot*: array[ATTESTATION_SUBNET_COUNT, Slot]

-    # Used to track the next attestation slots, using an epoch-relative
-    # coordinate system. Defaults don't need initialization.
+    # Used to track the next attestation and proposal slots using an
+    # epoch-relative coordinate system. Doesn't need initialization.
     attestingSlots*: array[2, uint32]
-    lastCalculatedAttestationEpoch*: Epoch
+    proposingSlots*: array[2, uint32]
+    lastCalculatedEpoch*: Epoch

 func shortLog*(v: AttachedValidator): string = shortLog(v.pubKey)

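Note: the two array[2, uint32] fields are per-epoch slot bitmaps indexed by `epoch mod 2`, one bit per slot of the epoch. The standalone Nim sketch below illustrates that encoding; the names SlotBitmaps, markSlot and actsAt are local to the sketch, not part of the codebase, and it assumes 32 slots per epoch as in the mainnet preset.

const SLOTS_PER_EPOCH = 32

type SlotBitmaps = array[2, uint32]   # one 32-bit word per tracked epoch

func markSlot(bitmaps: var SlotBitmaps, epoch, slotInEpoch: int) =
  ## Record that the validator acts (attests or proposes) in this slot.
  doAssert slotInEpoch < SLOTS_PER_EPOCH
  bitmaps[epoch mod 2] = bitmaps[epoch mod 2] or (1'u32 shl slotInEpoch)

func actsAt(bitmaps: SlotBitmaps, epoch, slotInEpoch: int): bool =
  ## True if the corresponding bit was set for that epoch's word.
  (bitmaps[epoch mod 2] and (1'u32 shl slotInEpoch)) != 0

when isMainModule:
  var proposing: SlotBitmaps            # zero-initialized by default
  proposing.markSlot(epoch = 10, slotInEpoch = 7)
  doAssert proposing.actsAt(10, 7)
  doAssert not proposing.actsAt(10, 8)
  # A later even epoch (e.g. 12) reuses the same word, so it is cleared first,
  # mirroring the `proposingSlots[epoch mod 2] = 0` reset in the diff below.
  proposing[12 mod 2] = 0

Because only the current and next epoch are ever tracked, indexing by `epoch mod 2` lets one epoch's word be rebuilt while the other remains valid.
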
@@ -37,8 +37,10 @@ import
     validator_api],
   ./spec/[
     datatypes, digest, crypto, beaconstate, eth2_apis/beacon_rpc_client,
-    helpers, network, presets, validator, weak_subjectivity, signatures],
-  ./consensus_object_pools/[blockchain_dag, block_quarantine, block_clearance, block_pools_types, attestation_pool, exit_pool],
+    helpers, network, presets, weak_subjectivity, signatures],
+  ./consensus_object_pools/[
+    blockchain_dag, block_quarantine, block_clearance, block_pools_types,
+    attestation_pool, exit_pool, spec_cache],
   ./eth1/eth1_monitor

 from eth/common/eth_types import BlockHashOrNumber

@@ -456,32 +458,17 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
   # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#lookahead
   # Only subscribe when this node should aggregate; libp2p broadcasting works
   # on subnet topics regardless.
-  #
-  # Committee sizes in any given epoch vary by 1, i.e. committee sizes $n$
-  # $n+1$ can exist. Furthermore, according to
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#aggregation-selection
-  # is_aggregator uses `len(committee) div TARGET_AGGREGATORS_PER_COMMITTEE`
-  # to determine whether committee length/slot signature pairs aggregate the
-  # attestations in a slot/committee, where TARGET_AGGREGATORS_PER_COMMITTEE
-  # is currently 16 in all defined presets. Therefore, probe a committee len
-  # to determine whether it's possible that it's within a boundary such that
-  # either that length or other possible committee lengths don't cross those
-  # div/mod 16 boundaries which would change is_aggregator results.
-  static: doAssert TARGET_AGGREGATORS_PER_COMMITTEE == 16 # mainnet, minimal
+  let epochRef = node.chainDag.getEpochRef(node.chainDag.head, epoch)

-  let
-    probeCommitteeLen = get_beacon_committee_len(
-      node.chainDag.headState.data.data, compute_start_slot_at_epoch(epoch),
-      0.CommitteeIndex, cache)
-
-  # Without knowing whether probeCommitteeLen is the higher or lower, if it's
-  # [-1, 1] mod TARGET_AGGREGATORS_PER_COMMITTEE it might cross boundaries in
-  # is_aggregator, such that one can't hoist committee length calculation out
-  # of the anyIt(...) loop.
-    isConstAggregationLen =
-      (probeCommitteeLen mod TARGET_AGGREGATORS_PER_COMMITTEE) notin
-      [0'u64, 1'u64, TARGET_AGGREGATORS_PER_COMMITTEE - 1]
+  # Update proposals
+  node.attestationSubnets.proposingSlots[epoch mod 2] = 0
+  for i in 0 ..< SLOTS_PER_EPOCH:
+    let beaconProposer = epochRef.beacon_proposers[i]
+    if beaconProposer.isSome and beaconProposer.get()[0] in attachedValidators:
+      node.attestationsubnets.proposingSlots[epoch mod 2] =
+        node.attestationsubnets.proposingSlots[epoch mod 2] or (1'u32 shl i)

+  # Update attestations
   template isAnyCommitteeValidatorAggregating(
       validatorIndices, committeeLen: untyped, slot: Slot): bool =
     anyIt(

@@ -492,22 +479,17 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
         node.chainDag.headState.data.data.fork,
         node.chainDag.headState.data.data.genesis_validators_root, slot)))

-  # The relevant bitmap are 32 bits each.
-  static: doAssert SLOTS_PER_EPOCH <= 32
-  node.attestationSubnets.lastCalculatedAttestationEpoch = epoch
+  node.attestationSubnets.lastCalculatedEpoch = epoch
   node.attestationSubnets.attestingSlots[epoch mod 2] = 0

+  # The relevant bitmaps are 32 bits each.
+  static: doAssert SLOTS_PER_EPOCH <= 32
+
   for (validatorIndices, committeeIndex, subnetIndex, slot) in
       get_committee_assignments(
         node.chainDag.headState.data.data, epoch, validatorIndices, cache):

     doAssert compute_epoch_at_slot(slot) == epoch
-    let committeeLen =
-      if isConstAggregationLen:
-        probeCommitteeLen
-      else:
-        get_beacon_committee_len(
-          node.chainDag.headState.data.data, slot, committeeIndex, cache)

     # Each get_committee_assignments() call here is on the next epoch. At any
     # given time, only care about two epochs, the current and next epoch. So,

@@ -527,7 +509,8 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
       (1'u32 shl (slot mod SLOTS_PER_EPOCH))

     if not isAnyCommitteeValidatorAggregating(
-        validatorIndices, committeeLen, slot):
+        validatorIndices,
+        get_beacon_committee_len(epochRef, slot, committeeIndex), slot):
       continue

     node.attestationSubnets.unsubscribeSlot[subnetIndex] =

@@ -544,11 +527,11 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
       node.attestationSubnets.subscribeSlot[subnetIndex])

 # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
-proc getStabilitySubnetLength(node: BeaconNode): uint64 =
+func getStabilitySubnetLength(node: BeaconNode): uint64 =
   EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION +
     node.network.rng[].rand(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION.int).uint64

-proc updateStabilitySubnets(node: BeaconNode, slot: Slot): set[uint8] =
+func updateStabilitySubnets(node: BeaconNode, slot: Slot): set[uint8] =
   # Equivalent to wallSlot by cycleAttestationSubnets(), especially
   # since it'll try to run early in epochs, avoiding race conditions.
   static: doAssert ATTESTATION_SUBNET_COUNT <= high(uint8)

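Note on the "switch some proc to func" part of this commit: in Nim, `func` is shorthand for a proc with the {.noSideEffect.} pragma, so the compiler now verifies that these routines don't touch mutable global state or call side-effecting routines. A minimal standalone illustration, not taken from the codebase:

# `func` is just `proc` + {.noSideEffect.}; the compiler enforces it.
var counter = 0

proc bumpCounter(): int =
  counter.inc          # mutating a global is fine in a proc
  counter

func double(x: int): int =
  # counter.inc        # would not compile: "'double' can have side effects"
  2 * x

when isMainModule:
  doAssert bumpCounter() == 1
  doAssert double(21) == 42

Mutation reached only through ref/ptr indirection is not counted by Nim's side-effect analysis, which presumably is what lets getStabilitySubnetLength, which advances the RNG behind node.network.rng[], become a func here.
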
@@ -881,13 +864,14 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) =
   # This exits early all but one call each epoch.
   traceAsyncErrors node.cycleAttestationSubnets(slot)

-func getNextAttestation(node: BeaconNode, slot: Slot): Slot =
-  # The relevant attestations are in, depending on calculated bounds:
+func getNextValidatorAction(
+    actionSlotSource: auto, lastCalculatedEpoch: Epoch, slot: Slot): Slot =
+  # The relevant actions are in, depending on calculated bounds:
   # [aS[epoch mod 2], aS[1 - (epoch mod 2)]]
   #      current epoch          next epoch
-  let orderedAttestingSlots = [
-    node.attestationSubnets.attestingSlots[ slot.epoch mod 2'u64],
-    node.attestationSubnets.attestingSlots[1 - (slot.epoch mod 2'u64)]]
+  let orderedActionSlots = [
+    actionSlotSource[ slot.epoch mod 2'u64],
+    actionSlotSource[1 - (slot.epoch mod 2'u64)]]

   static: doAssert MIN_ATTESTATION_INCLUSION_DELAY == 1

@@ -897,15 +881,15 @@ func getNextAttestation(node: BeaconNode, slot: Slot): Slot =
   for i in [0'u64, 1'u64]:
     let bitmapEpoch = slot.epoch + i

-    if bitmapEpoch > node.attestationSubnets.lastCalculatedAttestationEpoch:
+    if bitmapEpoch > lastCalculatedEpoch:
       return FAR_FUTURE_SLOT

     for slotOffset in 0 ..< SLOTS_PER_EPOCH:
-      let nextAttestationSlot =
+      let nextActionSlot =
         compute_start_slot_at_epoch(bitmapEpoch) + slotOffset
-      if ((orderedAttestingSlots[i] and (1'u32 shl slotOffset)) != 0) and
-          nextAttestationSlot > slot:
-        return nextAttestationSlot
+      if ((orderedActionSlots[i] and (1'u32 shl slotOffset)) != 0) and
+          nextActionSlot > slot:
+        return nextActionSlot

   FAR_FUTURE_SLOT

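The generalized scan above walks at most two 32-bit bitmaps (current and next epoch) and returns the first marked slot strictly after `slot`, or FAR_FUTURE_SLOT when nothing is scheduled. A toy single-epoch version of the same bit scan, with all names local to this sketch:

const SLOTS_PER_EPOCH = 32'u64

func nextSetSlot(bitmap: uint32, epochStartSlot, afterSlot: uint64): uint64 =
  ## First slot at or after epochStartSlot whose bit is set in `bitmap`
  ## and which is strictly later than `afterSlot`; high(uint64) if none.
  for slotOffset in 0'u64 ..< SLOTS_PER_EPOCH:
    let candidate = epochStartSlot + slotOffset
    if ((bitmap and (1'u32 shl slotOffset)) != 0) and candidate > afterSlot:
      return candidate
  high(uint64)

when isMainModule:
  # Bits 3 and 9 set: the validator acts at epochStart+3 and epochStart+9.
  let bitmap = (1'u32 shl 3) or (1'u32 shl 9)
  doAssert nextSetSlot(bitmap, 3200, 3203) == 3209
  doAssert nextSetSlot(bitmap, 3200, 3209) == high(uint64)

Passing the bitmap array and lastCalculatedEpoch as parameters (instead of reading node.attestationSubnets directly) is what lets the same function serve both attestingSlots and proposingSlots in onSlotEnd below.
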
@@ -931,10 +915,23 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
   # the database are synced with the filesystem.
   node.db.checkpoint()

+  # -1 is a more useful output than 18446744073709551615 as an indicator of
+  # no future attestation/proposal known.
+  template displayInt64(x: Slot): int64 =
+    if x == high(uint64).Slot:
+      -1'i64
+    else:
+      toGaugeValue(x)
+
   let
-    nextAttestationSlot = node.getNextAttestation(slot)
-    nextActionWaitTime =
-      saturate(fromNow(node.beaconClock, nextAttestationSlot))
+    nextAttestationSlot = getNextValidatorAction(
+      node.attestationSubnets.attestingSlots,
+      node.attestationSubnets.lastCalculatedEpoch, slot)
+    nextProposalSlot = getNextValidatorAction(
+      node.attestationSubnets.proposingSlots,
+      node.attestationSubnets.lastCalculatedEpoch, slot)
+    nextActionWaitTime = saturate(fromNow(
+      node.beaconClock, min(nextAttestationSlot, nextProposalSlot)))

   info "Slot end",
     slot = shortLog(slot),

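For logging, FAR_FUTURE_SLOT (high(uint64)) would otherwise print as 18446744073709551615, hence the displayInt64 template, and the wait time is driven by whichever duty comes first. A rough standalone illustration; the constant and helper names are local to this sketch, and the real code uses toGaugeValue for the in-range case:

const FAR_FUTURE = high(uint64)

func displaySlot(x: uint64): int64 =
  ## -1 means "no known upcoming duty"; otherwise the slot number
  ## (assumed small enough to fit an int64, as real slot numbers are).
  if x == FAR_FUTURE: -1'i64 else: int64(x)

when isMainModule:
  let
    nextAttestationSlot = 1025'u64
    nextProposalSlot = FAR_FUTURE            # no proposal scheduled
    nextActionSlot = min(nextAttestationSlot, nextProposalSlot)
  doAssert displaySlot(nextProposalSlot) == -1
  doAssert nextActionSlot == 1025'u64        # sleep until the attestation slot
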
@@ -944,7 +941,8 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
     finalizedHead = shortLog(node.chainDag.finalizedHead.blck),
     finalizedEpoch =
       shortLog(node.chainDag.finalizedHead.blck.slot.compute_epoch_at_slot()),
-    nextAttestationSlot,
+    nextAttestationSlot = displayInt64(nextAttestationSlot),
+    nextProposalSlot = displayInt64(nextProposalSlot),
     nextActionWait =
       if nextAttestationSlot == FAR_FUTURE_SLOT:
         "n/a"

@@ -1138,7 +1136,7 @@ proc startSyncManager(node: BeaconNode) =
   )
   node.syncManager.start()

-proc connectedPeersCount(node: BeaconNode): int =
+func connectedPeersCount(node: BeaconNode): int =
   len(node.network.peerPool)

 proc installRpcHandlers(rpcServer: RpcServer, node: BeaconNode) =

@@ -124,6 +124,8 @@ proc process_deposit*(preset: RuntimePreset,
     # New validator! Add validator and balance entries
     state.validators.add(get_validator_from_deposit(deposit.data))
     state.balances.add(amount)
+
+    doAssert state.validators.len == state.balances.len
   else:
     # Deposits may come with invalid signatures - in that case, they are not
     # turned into a validator but still get processed to keep the deposit

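The added assertion documents the invariant that the validator and balance lists are parallel and grow in lockstep. A trivial standalone illustration of the pattern, using plain seqs in place of the SSZ list types:

var
  validators: seq[string]   # stands in for the SSZ validator list
  balances: seq[uint64]     # stands in for the SSZ balance list

proc addValidator(pubkey: string, amount: uint64) =
  validators.add pubkey
  balances.add amount
  doAssert validators.len == balances.len   # parallel lists stay in sync

when isMainModule:
  addValidator("0xabc", 32_000_000_000'u64)
  doAssert validators.len == 1
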