2020-05-06 13:23:45 +00:00
|
|
|
# beacon_chain
|
2021-01-29 12:38:52 +00:00
|
|
|
# Copyright (c) 2018-2021 Status Research & Development GmbH
|
2020-05-06 13:23:45 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
|
|
|
import
|
|
|
|
# Standard library
|
2020-11-16 09:44:18 +00:00
|
|
|
std/[os, osproc, random, sequtils, streams, tables],
|
2020-05-06 13:23:45 +00:00
|
|
|
|
|
|
|
# Nimble packages
|
2020-12-16 08:37:22 +00:00
|
|
|
stew/[assign2, objects, shims/macros],
|
2020-05-09 12:43:15 +00:00
|
|
|
chronos, metrics, json_rpc/[rpcserver, jsonmarshal],
|
|
|
|
chronicles,
|
2020-05-06 13:23:45 +00:00
|
|
|
json_serialization/std/[options, sets, net], serialization/errors,
|
2020-05-14 11:19:10 +00:00
|
|
|
eth/db/kvstore,
|
2020-05-09 12:43:15 +00:00
|
|
|
eth/[keys, async_utils], eth/p2p/discoveryv5/[protocol, enr],
|
2020-05-06 13:23:45 +00:00
|
|
|
|
|
|
|
# Local modules
|
2021-03-02 10:27:45 +00:00
|
|
|
../spec/[
|
2020-12-23 12:59:04 +00:00
|
|
|
datatypes, digest, crypto, helpers, network, signatures, state_transition,
|
|
|
|
validator],
|
2021-03-02 10:27:45 +00:00
|
|
|
../conf, ../time,
|
|
|
|
../attestation_pool, ../exit_pool,
|
|
|
|
../block_pools/[spec_cache, chain_dag, clearance],
|
|
|
|
../eth2_network, ../beacon_node_common,
|
|
|
|
../beacon_node_types, ../nimbus_binary_common, ../eth1_monitor, ../version,
|
|
|
|
../ssz/merkleization, ../attestation_aggregation, ../sync/sync_manager, ../sszdump,
|
|
|
|
./slashing_protection,
|
|
|
|
./validator_pool, ./keystore_management
|
2020-05-06 13:23:45 +00:00
|
|
|
|
|
|
|
# Metrics for tracking attestation and beacon block loss
|
2020-11-11 13:39:36 +00:00
|
|
|
# Histogram buckets, in seconds, for the attestation send delay relative to
# the deadline: negative values mean the attestation went out early, positive
# values mean it went out late. Open-ended at both extremes.
const delayBuckets = [-Inf, -4.0, -2.0, -1.0, -0.5, -0.1, -0.05,
                      0.05, 0.1, 0.5, 1.0, 2.0, 4.0, 8.0, Inf]
|
|
|
|
|
2020-05-06 13:23:45 +00:00
|
|
|
# Counter: total attestations this node has broadcast.
declareCounter beacon_attestations_sent,
  "Number of beacon chain attestations sent by this peer"
# Histogram: how early/late (in seconds) attestations are sent relative to
# the slot deadline; buckets defined by `delayBuckets` above.
declareHistogram beacon_attestation_sent_delay,
  "Time(s) between slot start and attestation sent moment",
  buckets = delayBuckets
# Counter: total blocks this node has proposed and broadcast.
declareCounter beacon_blocks_proposed,
  "Number of beacon chain blocks sent by this peer"

# Per-validator balance gauge, labelled by public key. Capped to the first 64
# validators per the help string to bound metric cardinality.
declareGauge(attached_validator_balance,
  "Validator balance at slot end of the first 64 validators, in Gwei",
  labels = ["pubkey"])
# Aggregate balance over all attached validators; public (exported) gauge.
declarePublicGauge(attached_validator_balance_total,
  "Validator balance of all attached validators, in Gwei")
|
|
|
|
|
2020-05-06 13:23:45 +00:00
|
|
|
# All chronicles log statements in this module carry the "beacval" topic.
logScope: topics = "beacval"
|
|
|
|
|
2020-11-27 23:34:25 +00:00
|
|
|
proc findValidator(state: BeaconState, pubKey: ValidatorPubKey):
    Option[ValidatorIndex] =
  ## Look up `pubKey` in the state's validator registry, returning its index
  ## when present and `none` when the key is not (yet) registered.
  let position = state.validators.asSeq.findIt(it.pubKey == pubKey)
  if position >= 0:
    some(position.ValidatorIndex)
  else:
    # A key missing from the registry is still accepted for monitoring: the
    # deposit that creates this validator may simply not have been processed
    notice "Validator deposit not yet processed, monitoring", pubKey
    none(ValidatorIndex)
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2020-09-01 13:44:40 +00:00
|
|
|
proc addLocalValidator*(node: BeaconNode,
                        state: BeaconState,
                        privKey: ValidatorPrivKey) =
  ## Attach a locally-held validator key to this node, resolving its
  ## registry index (if any) against `state`.
  let
    pubKey = privKey.toPubKey()
    index = findValidator(state, pubKey)
  node.attachedValidators[].addLocalValidator(pubKey, privKey, index)
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2020-07-30 15:48:25 +00:00
|
|
|
proc addLocalValidators*(node: BeaconNode) =
  ## Attach every validator key listed in the node's configuration, resolving
  ## indices against the current head state.
  for key in node.config.validatorKeys:
    addLocalValidator(node, node.chainDag.headState.data.data, key)
|
2020-06-11 12:13:12 +00:00
|
|
|
|
2020-09-01 13:44:40 +00:00
|
|
|
proc addRemoteValidators*(node: BeaconNode) =
  ## Read validator public keys, one hex key per line, from the attached
  ## validator-client child process's stdout and register each as a remote
  ## validator. Stops at the `end` sentinel line or when the child exits.
  # load all the validators from the child process - loop until `end`
  var line = newStringOfCap(120).TaintedString
  while line != "end" and running(node.vcProcess):
    if node.vcProcess.outputStream.readLine(line) and line != "end":
      let
        # NOTE(review): `.get()` raises if the child emits a malformed hex
        # line - the child process output is trusted here; confirm intended.
        key = ValidatorPubKey.fromHex(line).get()
        # Index may be none if the deposit hasn't been processed yet.
        index = findValidator(node.chainDag.headState.data.data, key)

      # Remote validators sign via the child's stdin/stdout streams rather
      # than a local private key.
      let v = AttachedValidator(pubKey: key,
                                index: index,
                                kind: ValidatorKind.remote,
                                connection: ValidatorConnection(
                                  inStream: node.vcProcess.inputStream,
                                  outStream: node.vcProcess.outputStream,
                                  pubKeyStr: $key))
      node.attachedValidators[].addRemoteValidator(key, v)
|
2020-09-01 13:44:40 +00:00
|
|
|
|
2020-08-10 13:21:31 +00:00
|
|
|
proc getAttachedValidator*(node: BeaconNode,
                           pubkey: ValidatorPubKey): AttachedValidator =
  ## Return the attached validator matching `pubkey`; nil when not attached.
  getValidator(node.attachedValidators[], pubkey)
|
2020-08-10 13:21:31 +00:00
|
|
|
|
2020-07-23 18:24:38 +00:00
|
|
|
proc getAttachedValidator*(node: BeaconNode,
                           state: BeaconState,
                           idx: ValidatorIndex): AttachedValidator =
  ## Resolve a registry index within `state` to an attached validator.
  ## Returns nil when the index is out of range or the key is not attached.
  if idx >= state.validators.len.ValidatorIndex:
    warn "Validator index out of bounds",
      idx, stateSlot = state.slot, validators = state.validators.len
    return nil

  let attached = node.getAttachedValidator(state.validators[idx].pubkey)
  if attached != nil and attached.index != some(idx.ValidatorIndex):
    # The cached index may be stale or unset - e.g. the validator's deposit
    # was processed after it was attached - so refresh it here.
    notice "Validator activated", pubkey = attached.pubkey, index = idx
    attached.index = some(idx.ValidatorIndex)
  return attached
|
|
|
|
|
|
|
|
proc getAttachedValidator*(node: BeaconNode,
                           epochRef: EpochRef,
                           idx: ValidatorIndex): AttachedValidator =
  ## Resolve a validator index against `epochRef`'s key table to an attached
  ## validator. Returns nil for out-of-range or unattached keys.
  if idx >= epochRef.validator_keys.len.ValidatorIndex:
    warn "Validator index out of bounds",
      idx, epoch = epochRef.epoch, validators = epochRef.validator_keys.len
    return nil

  let attached = node.getAttachedValidator(epochRef.validator_keys[idx])
  if attached != nil and attached.index != some(idx.ValidatorIndex):
    # Refresh a stale/unset cached index, e.g. after the validator's deposit
    # was processed and it entered the registry.
    notice "Validator activated", pubkey = attached.pubkey, index = idx
    attached.index = some(idx.ValidatorIndex)
  return attached
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2020-07-08 10:11:22 +00:00
|
|
|
proc isSynced*(node: BeaconNode, head: BlockRef): bool =
  ## Heuristic: are we close enough to the wall clock to be producing blocks
  ## and attestations?
  ##
  ## TODO This is a placeholder for better heuristics. Slot time keeps
  ## advancing even when no blocks are produced, so there is no way to
  ## distinguish validators genuinely going missing from this node being
  ## poorly connected (network split, internet outage). Simply carrying on
  ## as if we were the only legit node left alive would be "correct", but
  ## with enough empty slots the validator pool empties out, leading to
  ## empty committees and lots of empty-slot processing that is thrown away
  ## as soon as we're synced again.
  let
    # The slot we should be at, according to the clock
    beaconTime = node.beaconClock.now()
    wallSlot = beaconTime.toSlot()

  # TODO: MaxEmptySlotCount should likely involve the weak subjectivity period.
  # TODO if everyone follows this logic, the network will not recover from a
  #      halt: nobody will produce blocks because everyone expects someone
  #      else to do it
  not (wallSlot.afterGenesis and head.slot + MaxEmptySlotCount < wallSlot.slot)
|
|
|
|
|
2020-06-23 10:38:59 +00:00
|
|
|
proc sendAttestation*(
    node: BeaconNode, attestation: Attestation, num_active_validators: uint64) =
  ## Broadcast `attestation` on its computed subnet, then feed it back
  ## through the local validation pipeline so the node's own attestations
  ## also land in its attestation pool.
  let
    committeesPerSlot = get_committee_count_per_slot(num_active_validators)
    subnet_index = compute_subnet_for_attestation(
      committeesPerSlot, attestation.data.slot,
      attestation.data.index.CommitteeIndex)
    topic = getAttestationTopic(node.forkDigest, subnet_index)

  node.network.broadcast(topic, attestation)

  # Ensure node's own broadcast attestations end up in its attestation pool
  discard node.processor[].attestationValidator(
    attestation, subnet_index, false)

  beacon_attestations_sent.inc()
|
|
|
|
|
2020-11-27 19:48:33 +00:00
|
|
|
proc sendVoluntaryExit*(node: BeaconNode, exit: SignedVoluntaryExit) =
  ## Gossip a signed voluntary exit on the exits topic.
  let topic = getVoluntaryExitsTopic(node.forkDigest)
  node.network.broadcast(topic, exit)
|
|
|
|
|
|
|
|
proc sendAttesterSlashing*(node: BeaconNode, slashing: AttesterSlashing) =
  ## Gossip an attester slashing on its dedicated topic.
  let topic = getAttesterSlashingsTopic(node.forkDigest)
  node.network.broadcast(topic, slashing)
|
|
|
|
|
|
|
|
proc sendProposerSlashing*(node: BeaconNode, slashing: ProposerSlashing) =
  ## Gossip a proposer slashing on its dedicated topic.
  let topic = getProposerSlashingsTopic(node.forkDigest)
  node.network.broadcast(topic, slashing)
|
2020-11-27 19:48:33 +00:00
|
|
|
|
2020-06-23 10:38:59 +00:00
|
|
|
proc sendAttestation*(node: BeaconNode, attestation: Attestation) =
  ## Convenience overload for the validator API, which does not supply
  ## `num_active_validators` - it is derived here from the epoch of the
  ## attested block.
  let blck = node.chainDag.getRef(attestation.data.beacon_block_root)
  if blck.isNil:
    debug "Attempt to send attestation without corresponding block"
    return

  let
    epochRef = node.chainDag.getEpochRef(blck, attestation.data.target.epoch)
    activeValidators = count_active_validators(epochRef)
  node.sendAttestation(attestation, activeValidators)
|
2020-06-23 10:38:59 +00:00
|
|
|
|
2020-06-05 09:57:40 +00:00
|
|
|
proc createAndSendAttestation(node: BeaconNode,
                              fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              validator: AttachedValidator,
                              attestationData: AttestationData,
                              committeeLen: int,
                              indexInCommittee: int,
                              num_active_validators: uint64) {.async.} =
  ## Sign `attestationData` with `validator`, broadcast the result, optionally
  ## dump it to disk, and record how far from the deadline it was sent.
  var attestation = await validator.produceAndSignAttestation(
    attestationData, committeeLen, indexInCommittee, fork,
    genesis_validators_root)

  node.sendAttestation(attestation, num_active_validators)

  if node.config.dumpEnabled:
    dump(node.config.dumpDirOutgoing, attestation.data, validator.pubKey)

  # Attestations are due a third of the way into the slot
  let wallTime = node.beaconClock.now()
  let deadline = attestationData.slot.toBeaconTime() +
                 seconds(int(SECONDS_PER_SLOT div 3))

  # Fix: this value is in *seconds* (`toFloatSeconds`), matching the
  # second-denominated `delayBuckets` of the histogram below - the previous
  # local name `delayMillis` was misleading. Negative = sent early.
  let (delayStr, delaySecs) =
    if wallTime < deadline:
      ("-" & $(deadline - wallTime), -toFloatSeconds(deadline - wallTime))
    else:
      ($(wallTime - deadline), toFloatSeconds(wallTime - deadline))

  notice "Attestation sent", attestation = shortLog(attestation),
         validator = shortLog(validator), delay = delayStr,
         indexInCommittee = indexInCommittee

  beacon_attestation_sent_delay.observe(delaySecs)
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2020-11-19 17:19:03 +00:00
|
|
|
proc getBlockProposalEth1Data*(node: BeaconNode,
                               state: BeaconState): BlockProposalEth1Data =
  ## Compute the eth1 vote and deposits to include in a block proposal.
  ##
  ## Without an eth1 monitor we can only repeat the state's current eth1
  ## vote; if that vote implies deposits we cannot supply, the result is
  ## flagged via `hasMissingDeposits` so the proposal can be skipped.
  if node.eth1Monitor.isNil:
    # Fix: was declared `var` but never reassigned - `let` expresses intent
    let pendingDepositsCount = state.eth1_data.deposit_count -
                               state.eth1_deposit_index
    if pendingDepositsCount > 0:
      result.hasMissingDeposits = true
    else:
      result.vote = state.eth1_data
  else:
    # Delegate to the eth1 monitor, voting relative to finalized eth1 data.
    let finalizedEpochRef = node.chainDag.getFinalizedEpochRef()
    result = node.eth1Monitor.getBlockProposalData(
      state, finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_deposit_index)
|
2020-11-19 17:19:03 +00:00
|
|
|
|
2020-05-22 17:04:52 +00:00
|
|
|
proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
                                    randao_reveal: ValidatorSig,
                                    validator_index: ValidatorIndex,
                                    graffiti: GraffitiBytes,
                                    head: BlockRef,
                                    slot: Slot): Option[BeaconBlock] =
  ## Assemble an unsigned block proposal on top of `head` for `slot`,
  ## collecting attestations, deposits and exits from the node's pools.
  ## Returns `none` when eth1 deposit data needed for the proposal is
  ## unavailable (already logged).
  # Advance state to the slot that we're proposing for
  node.chainDag.withState(node.chainDag.tmpState, head.atSlot(slot)):
    let
      eth1Proposal = node.getBlockProposalEth1Data(state)
      poolPtr = unsafeAddr node.chainDag # safe because restore is short-lived

    if eth1Proposal.hasMissingDeposits:
      error "Eth1 deposits not available. Skipping block proposal", slot
      return none(BeaconBlock)

    # Rollback used by makeBeaconBlock if the state transition fails:
    # restores tmpState from headState.
    func restore(v: var HashedBeaconState) =
      # TODO address this ugly workaround - there should probably be a
      # `state_transition` that takes a `StateData` instead and updates
      # the block as well
      doAssert v.addr == addr poolPtr.tmpState.data
      assign(poolPtr.tmpState, poolPtr.headState)

    # `hashedState` and `cache` are injected by the `withState` template.
    makeBeaconBlock(
      node.runtimePreset,
      hashedState,
      validator_index,
      head.root,
      randao_reveal,
      eth1Proposal.vote,
      graffiti,
      node.attestationPool[].getAttestationsForBlock(state, cache),
      eth1Proposal.deposits,
      node.exitPool[].getProposerSlashingsForBlock(),
      node.exitPool[].getAttesterSlashingsForBlock(),
      node.exitPool[].getVoluntaryExitsForBlock(),
      restore,
      cache)
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2020-06-05 09:57:40 +00:00
|
|
|
proc proposeSignedBlock*(node: BeaconNode,
                         head: BlockRef,
                         validator: AttachedValidator,
                         newBlock: SignedBeaconBlock): BlockRef =
  ## Store a freshly signed block in the chain DAG, broadcast it, and return
  ## the new head; on failure to store, returns the unchanged `head`.
  let newBlockRef = node.chainDag.addRawBlock(node.quarantine, newBlock) do (
      blckRef: BlockRef, trustedBlock: TrustedSignedBeaconBlock,
      epochRef: EpochRef, state: HashedBeaconState):
    # Callback add to fork choice if signed block valid (and becomes trusted)
    node.attestationPool[].addForkChoice(
      epochRef, blckRef, trustedBlock.message,
      node.beaconClock.now().slotOrZero())

  if newBlockRef.isErr:
    warn "Unable to add proposed block to block pool",
      newBlock = shortLog(newBlock.message),
      blockRoot = shortLog(newBlock.root)

    return head

  notice "Block proposed",
    blck = shortLog(newBlock.message),
    blockRoot = shortLog(newBlockRef[].root),
    validator = shortLog(validator)

  if node.config.dumpEnabled:
    dump(node.config.dumpDirOutgoing, newBlock)

  # Gossip the block only after it has been accepted locally.
  node.network.broadcast(node.topicBeaconBlocks, newBlock)

  beacon_blocks_proposed.inc()

  return newBlockRef[]
|
|
|
|
|
2020-05-22 17:04:52 +00:00
|
|
|
proc proposeBlock(node: BeaconNode,
                  validator: AttachedValidator,
                  validator_index: ValidatorIndex,
                  head: BlockRef,
                  slot: Slot): Future[BlockRef] {.async.} =
  ## Build, sign and publish a block proposal for `slot` on top of `head`,
  ## returning the resulting head (or the unchanged `head` when the proposal
  ## is skipped or fails). The proposal is checked against and registered
  ## with slashing protection before signing.
  if head.slot >= slot:
    # We should normally not have a head newer than the slot we're proposing for
    # but this can happen if block proposal is delayed
    warn "Skipping proposal, have newer head already",
      headSlot = shortLog(head.slot),
      headBlockRoot = shortLog(head.root),
      slot = shortLog(slot)
    return head

  # Never double-propose: bail out if we already signed a block at this slot.
  let notSlashable = node.attachedValidators
    .slashingProtection
    .checkSlashableBlockProposal(validator.pubkey, slot)
  if notSlashable.isErr:
    warn "Slashing protection activated",
      validator = validator.pubkey,
      slot = slot,
      existingProposal = notSlashable.error
    return head

  let
    fork = node.chainDag.headState.data.data.fork
    genesis_validators_root =
      node.chainDag.headState.data.data.genesis_validators_root
  let
    randao = await validator.genRandaoReveal(
      fork, genesis_validators_root, slot)
    message = makeBeaconBlockForHeadAndSlot(
      node, randao, validator_index, node.graffitiBytes, head, slot)
  if not message.isSome():
    return head # already logged elsewhere!
  var
    newBlock = SignedBeaconBlock(
      message: message.get()
    )

  newBlock.root = hash_tree_root(newBlock.message)

  # Register with slashing protection BEFORE producing the signature, so a
  # crash between the two cannot leave an unrecorded signed block.
  # TODO: recomputed in block proposal
  let signing_root = compute_block_root(
    fork, genesis_validators_root, slot, newBlock.root)
  node.attachedValidators
    .slashingProtection
    .registerBlock(validator.pubkey, slot, signing_root)

  newBlock.signature = await validator.signBlockProposal(
    fork, genesis_validators_root, slot, newBlock.root)

  return node.proposeSignedBlock(head, validator, newBlock)
|
2020-05-06 13:23:45 +00:00
|
|
|
|
|
|
|
proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
  ## Perform all attestations that the validators attached to this node should
  ## perform during the given slot
  if slot + SLOTS_PER_EPOCH < head.slot:
    # The latest block we know about is a lot newer than the slot we're being
    # asked to attest to - this makes it unlikely that it will be included
    # at all.
    # TODO the oldest attestations allowed are those that are older than the
    #      finalized epoch.. also, it seems that posting very old attestations
    #      is risky from a slashing perspective. More work is needed here.
    warn "Skipping attestation, head is too recent",
      headSlot = shortLog(head.slot),
      slot = shortLog(slot)
    return

  let attestationHead = head.atSlot(slot)
  if head != attestationHead.blck:
    # In rare cases, such as when we're busy syncing or just slow, we'll be
    # attesting to a past state - we must then recreate the world as it looked
    # like back then
    notice "Attesting to a state in the past, falling behind?",
      headSlot = shortLog(head.slot),
      attestationHeadSlot = shortLog(attestationHead.slot),
      attestationSlot = shortLog(slot)

  trace "Checking attestations",
    attestationHeadRoot = shortLog(attestationHead.blck.root),
    attestationSlot = shortLog(slot)

  # Collected first, then sent after passing slashing-protection checks.
  var attestations: seq[tuple[
    data: AttestationData, committeeLen, indexInCommittee: int,
    validator: AttachedValidator]]

  # We need to run attestations exactly for the slot that we're attesting to.
  # In case blocks went missing, this means advancing past the latest block
  # using empty slots as fillers.
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#validator-assignments
  let
    epochRef = node.chainDag.getEpochRef(
      attestationHead.blck, slot.compute_epoch_at_slot())
    committees_per_slot =
      get_committee_count_per_slot(epochRef)
    num_active_validators = count_active_validators(epochRef)
    fork = node.chainDag.headState.data.data.fork
    genesis_validators_root =
      node.chainDag.headState.data.data.genesis_validators_root

  # Gather one attestation duty per attached validator found in this slot's
  # committees.
  for committee_index in 0'u64..<committees_per_slot:
    let committee = get_beacon_committee(
      epochRef, slot, committee_index.CommitteeIndex)

    for index_in_committee, validatorIdx in committee:
      let validator = node.getAttachedValidator(epochRef, validatorIdx)
      if validator != nil:
        let ad = makeAttestationData(
          epochRef, attestationHead, committee_index.CommitteeIndex)
        attestations.add((ad, committee.len, index_in_committee, validator))

  for a in attestations:
    # Surround/double-vote check against the slashing-protection database.
    let notSlashable = node.attachedValidators
      .slashingProtection
      .checkSlashableAttestation(
        a.validator.pubkey,
        a.data.source.epoch,
        a.data.target.epoch)

    if notSlashable.isOk():
      # Register with slashing protection BEFORE signing/sending, so a crash
      # in between cannot leave an unrecorded signed attestation.
      # TODO signing_root is recomputed in produceAndSignAttestation/signAttestation just after
      let signing_root = compute_attestation_root(
        fork, genesis_validators_root, a.data)
      node.attachedValidators
        .slashingProtection
        .registerAttestation(
          a.validator.pubkey,
          a.data.source.epoch,
          a.data.target.epoch,
          signing_root
        )

      traceAsyncErrors createAndSendAttestation(
        node, fork, genesis_validators_root, a.validator, a.data,
        a.committeeLen, a.indexInCommittee, num_active_validators)
    else:
      warn "Slashing protection activated for attestation",
        validator = a.validator.pubkey,
        badVoteDetails = $notSlashable.error
|
2020-05-06 13:23:45 +00:00
|
|
|
|
|
|
|
proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
    Future[BlockRef] {.async.} =
  ## Perform the proposal for the given slot, iff we have a validator attached
  ## that is supposed to do so, given the shuffling at that slot for the given
  ## head - to compute the proposer, we need to advance a state to the given
  ## slot
  ##
  ## Returns the (possibly new) head after the proposal: if we proposed, the
  ## newly proposed block becomes the head; otherwise the given `head` is
  ## returned unchanged.

  # getProposer returns (index, pubkey); isNone means the proposer could not
  # be computed for this head/slot combination - nothing for us to do then.
  let proposer = node.chainDag.getProposer(head, slot)
  if proposer.isNone():
    return head

  # Look up whether the scheduled proposer is one of our attached validators
  # (keyed by its public key, proposer.get()[1]).
  let validator =
    node.attachedValidators[].getValidator(proposer.get()[1])

  if validator != nil:
    # It's our turn: propose, passing the proposer's validator index.
    return await proposeBlock(node, validator, proposer.get()[0], head, slot)

  # Not our validator - just log who we expect the block from.
  debug "Expecting block proposal",
    headRoot = shortLog(head.root),
    slot = shortLog(slot),
    proposer_index = proposer.get()[0],
    proposer = shortLog(proposer.get()[1])

  return head
|
|
|
|
|
|
|
|
proc broadcastAggregatedAttestations(
    node: BeaconNode, aggregationHead: BlockRef, aggregationSlot: Slot) {.async.} =
  ## Collect slot signatures for all locally attached validators in the
  ## slot's committees, then - for each validator selected as aggregator -
  ## build, sign and broadcast a SignedAggregateAndProof.
  # The index is via a
  # locally attested validator. Unlike in handleAttestations(...) there's a
  # single one at most per slot (because that's how aggregation attestation
  # works), so the machinery that has to handle looping across, basically a
  # set of locally attached validators is in principle not necessary, but a
  # way to organize this. Then the private key for that validator should be
  # the corresponding one -- whatever they are, they match.

  let
    epochRef = node.chainDag.getEpochRef(aggregationHead, aggregationSlot.epoch)
    fork = node.chainDag.headState.data.data.fork
    genesis_validators_root =
      node.chainDag.headState.data.data.genesis_validators_root
    committees_per_slot = get_committee_count_per_slot(epochRef)

  # Slot signatures are requested in parallel (futures collected first, then
  # awaited together below); slotSigsData keeps the matching metadata in the
  # same order so the two sequences can be zipped afterwards.
  var
    slotSigs: seq[Future[ValidatorSig]] = @[]
    slotSigsData: seq[tuple[committee_index: uint64,
                            validator_idx: ValidatorIndex,
                            v: AttachedValidator]] = @[]

  for committee_index in 0'u64..<committees_per_slot:
    let committee = get_beacon_committee(
      epochRef, aggregationSlot, committee_index.CommitteeIndex)

    for index_in_committee, validatorIdx in committee:
      let validator = node.getAttachedValidator(epochRef, validatorIdx)
      if validator != nil:
        # the validator index and private key pair.
        slotSigs.add getSlotSig(validator, fork,
          genesis_validators_root, aggregationSlot)
        slotSigsData.add (committee_index, validatorIdx, validator)

  # Wait for all slot signatures at once - they were kicked off above.
  await allFutures(slotSigs)

  for curr in zip(slotSigsData, slotSigs):
    # curr[0] is the metadata tuple, curr[1] the completed signature future;
    # .read is safe here because allFutures has completed.
    let aggregateAndProof =
      aggregate_attestations(node.attestationPool[], epochRef, aggregationSlot,
                             curr[0].committee_index.CommitteeIndex,
                             curr[0].validator_idx,
                             curr[1].read)

    # Don't broadcast when, e.g., this node isn't aggregator
    if aggregateAndProof.isSome:
      let sig = await signAggregateAndProof(curr[0].v,
        aggregateAndProof.get, fork, genesis_validators_root)
      var signedAP = SignedAggregateAndProof(
        message: aggregateAndProof.get,
        signature: sig)
      node.network.broadcast(node.topicAggregateAndProofs, signedAP)
      notice "Aggregated attestation sent",
        attestation = shortLog(signedAP.message.aggregate),
        validator = shortLog(curr[0].v),
        aggregationSlot
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2020-11-16 09:44:18 +00:00
|
|
|
proc getSlotTimingEntropy(): int64 =
  ## Draw a uniformly random per-slot attestation timing offset in
  ## milliseconds, strictly inside the open interval
  ## (-SECONDS_PER_SLOT / ATTESTATION_ENTROPY_DIVISOR,
  ##  +SECONDS_PER_SLOT / ATTESTATION_ENTROPY_DIVISOR).
  # Ensure SECONDS_PER_SLOT / ATTESTATION_PRODUCTION_DIVISOR >
  # SECONDS_PER_SLOT / ATTESTATION_ENTROPY_DIVISOR, which will
  # ensure that the second condition can't go negative.
  static: doAssert ATTESTATION_ENTROPY_DIVISOR > ATTESTATION_PRODUCTION_DIVISOR

  # For each `slot`, a validator must generate a uniform random variable
  # `slot_timing_entropy` between `(-SECONDS_PER_SLOT /
  # ATTESTATION_ENTROPY_DIVISOR, SECONDS_PER_SLOT /
  # ATTESTATION_ENTROPY_DIVISOR)` with millisecond resolution and using local
  # entropy.
  #
  # Per issue discussion "validators served by the same beacon node can have
  # the same attestation production time, i.e., they can share the source of
  # the entropy and the actual slot_timing_entropy value."
  const
    upperBound = SECONDS_PER_SLOT.int64 * 1000 div ATTESTATION_ENTROPY_DIVISOR
    lowerBound = 0 - upperBound

  # Sampling over a range type excludes both endpoints, keeping the value
  # strictly inside the interval.
  rand(range[(lowerBound + 1) .. (upperBound - 1)])
|
|
|
|
|
2020-12-16 13:03:04 +00:00
|
|
|
proc updateValidatorMetrics*(node: BeaconNode) =
  ## Update balance metrics for the attached validators: a labelled gauge per
  ## validator (capped at the first 64 to avoid overloading Prometheus) and a
  ## total across all of them. No-op when compiled without metrics support.
  when defined(metrics):
    # Technically, this only needs to be done on epoch transitions and if there's
    # a reorg that spans an epoch transition, but it's easier to implement this
    # way for now..

    # We'll limit labelled metrics to the first 64, so that we don't overload
    # prom
    template state: untyped = node.chainDag.headState.data.data

    var total: Gwei
    var i = 0
    for _, v in node.attachedValidators[].validators:
      let balance =
        if v.index.isNone():
          # Validator not (yet) known to the state - no balance to report.
          0.Gwei
        elif v.index.get().uint64 >= state.balances.lenu64:
          debug "Cannot get validator balance, index out of bounds",
            pubkey = shortLog(v.pubkey), index = v.index.get(),
            balances = state.balances.len,
            stateRoot = node.chainDag.headState.data.root
          0.Gwei
        else:
          state.balances[v.index.get()]

      if i < 64:
        # Clamp to int64 range since the gauge takes a signed value.
        attached_validator_balance.set(
          min(balance, int64.high.uint64).int64,
          labelValues = [shortLog(v.pubkey)])
      # Bug fix: `inc i` was previously in an `else:` branch, so `i` never
      # advanced past 0 and the 64-label cap above was never enforced; the
      # counter must advance for every validator.
      inc i
      total += balance

    node.attachedValidatorBalanceTotal = total
    attached_validator_balance_total.set(total.toGaugeValue)
  else:
    discard
|
|
|
|
|
2020-10-28 07:55:36 +00:00
|
|
|
proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
  ## Perform validator duties - create blocks, vote and aggregate existing votes
  ##
  ## Sequence per slot: catch up on missed slots, propose (if it's our turn),
  ## wait for the block/attestation cutoff, attest, then (two-thirds into the
  ## slot) broadcast aggregates; finally poke the eth1 monitor on epoch
  ## boundaries.
  if node.attachedValidators[].count == 0:
    # Nothing to do because we have no validator attached
    return

  # The chainDag head might be updated by sync while we're working due to the
  # await calls, thus we use a local variable to keep the logic straight here
  var head = node.chainDag.head
  if not node.isSynced(head):
    notice "Syncing in progress; skipping validator duties for now",
      slot, headSlot = head.slot

    # Rewards will be growing though, as we sync..
    updateValidatorMetrics(node)

    return

  var curSlot = lastSlot + 1

  # If broadcastStartEpoch is 0, it hasn't had time to initialize yet, which
  # means that it'd be okay not to continue, but it won't gossip regardless.
  if curSlot.epoch <
      node.processor[].doppelgangerDetection.broadcastStartEpoch and
      node.config.doppelgangerDetection:
    debug "Waiting to gossip out to detect potential duplicate validators",
      broadcastStartEpoch =
        node.processor[].doppelgangerDetection.broadcastStartEpoch
    return

  # Start by checking if there's work we should have done in the past that we
  # can still meaningfully do
  while curSlot < slot:
    notice "Catching up on validator duties",
      curSlot = shortLog(curSlot),
      lastSlot = shortLog(lastSlot),
      slot = shortLog(slot)

    # For every slot we're catching up, we'll propose then send
    # attestations - head should normally be advancing along the same branch
    # in this case
    head = await handleProposal(node, head, curSlot)

    # For each slot we missed, we need to send out attestations - if we were
    # proposing during this time, we'll use the newly proposed head, else just
    # keep reusing the same - the attestation that goes out will actually
    # rewind the state to what it looked like at the time of that slot
    handleAttestations(node, head, curSlot)

    curSlot += 1

  head = await handleProposal(node, head, slot)

  # Fix timing attack: https://github.com/ethereum/eth2.0-specs/pull/2101
  # A validator must create and broadcast the `attestation` to the associated
  # attestation subnet when the earlier one of the following two events occurs:
  #
  # - The validator has received a valid block from the expected block
  # proposer for the assigned `slot`. In this case, the validator must set a
  # timer for `abs(slot_timing_entropy)`. The end of this timer will be the
  # trigger for attestation production.
  #
  # - `SECONDS_PER_SLOT / ATTESTATION_PRODUCTION_DIVISOR +
  # slot_timing_entropy` seconds have elapsed since the start of the `slot`
  # (using the `slot_timing_entropy` generated for this slot)

  # Milliseconds to wait from the start of the slot before sending out
  # attestations - base value
  const attestationOffset =
    SECONDS_PER_SLOT.int64 * 1000 div ATTESTATION_PRODUCTION_DIVISOR

  let
    slotTimingEntropy = getSlotTimingEntropy() # +/- 1s
    # The latest point in time when we'll be sending out attestations
    attestationCutoffTime = slot.toBeaconTime(
      millis(attestationOffset + slotTimingEntropy))
    attestationCutoff = node.beaconClock.fromNow(attestationCutoffTime)

  if attestationCutoff.inFuture:
    debug "Waiting to send attestations",
      head = shortLog(head),
      attestationCutoff = shortLog(attestationCutoff.offset)

    # Wait either for the block or the attestation cutoff time to arrive
    if await node.processor[].expectBlock(slot).withTimeout(attestationCutoff.offset):
      # The expected block arrived (or expectBlock was called again which
      # shouldn't happen as this is the only place we use it) - according to the
      # spec, we should now wait for abs(slotTimingEntropy) - in our async loop
      # however, we might have been doing other processing that caused delays
      # here so we'll cap the waiting to the time when we would have sent out
      # attestations had the block not arrived.
      # An opposite case is that we received (or produced) a block that has
      # not yet reached our neighbours. To protect against our attestations
      # being dropped (because the others have not yet seen the block), we'll
      # impose a minimum delay of 250ms. The delay is enforced only when we're
      # not hitting the "normal" cutoff time for sending out attestations.

      let
        afterBlockDelay = max(250, abs(slotTimingEntropy))
        afterBlockTime = node.beaconClock.now() + millis(afterBlockDelay)
        afterBlockCutoff = node.beaconClock.fromNow(
          min(afterBlockTime, attestationCutoffTime))

      if afterBlockCutoff.inFuture:
        debug "Got block, waiting to send attestations",
          head = shortLog(head),
          afterBlockCutoff = shortLog(afterBlockCutoff.offset)

        await sleepAsync(afterBlockCutoff.offset)

    # Time passed - we might need to select a new head in that case
    node.processor[].updateHead(slot)
    head = node.chainDag.head

  handleAttestations(node, head, slot)

  updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers

  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#broadcast-aggregate
  # If the validator is selected to aggregate (is_aggregator), then they
  # broadcast their best aggregate as a SignedAggregateAndProof to the global
  # aggregate channel (beacon_aggregate_and_proof) two-thirds of the way
  # through the slot-that is, SECONDS_PER_SLOT * 2 / 3 seconds after the start
  # of slot.
  if slot > 2:
    let
      aggregateWaitTime = node.beaconClock.fromNow(
        slot.toBeaconTime(seconds(int64(SECONDS_PER_SLOT * 2) div 3)))
    if aggregateWaitTime.inFuture:
      debug "Waiting to send aggregate attestations",
        aggregateWaitTime = shortLog(aggregateWaitTime.offset)
      await sleepAsync(aggregateWaitTime.offset)

    await broadcastAggregatedAttestations(node, head, slot)

  # On epoch boundaries, let the eth1 monitor prune/track deposit state up to
  # the finalized checkpoint.
  if node.eth1Monitor != nil and (slot mod SLOTS_PER_EPOCH) == 0:
    let finalizedEpochRef = node.chainDag.getFinalizedEpochRef()
    discard node.eth1Monitor.trackFinalizedState(
      finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_deposit_index)
|