Mirror of https://github.com/status-im/nimbus-eth2.git, synced 2025-01-10 22:36:01 +00:00
reformat long lines (#3539)

Shortens some long lines by introducing temp variables and line breaks.

parent 46e5175267
commit 2c5f725543
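The refactor is mechanical throughout: hoist a repeated dotted expression into a local binding (or a template alias) and break overlong calls and conditions onto continuation lines. A minimal before/after sketch of the pattern in Nim — the types and field names here are invented for illustration, not the actual nimbus-eth2 definitions:

# Hypothetical before/after sketch; Node/Dag/Cfg are illustrative only.
type
  Cfg = object
    depositChainId: uint64
    depositNetworkId: uint64
  Dag = object
    cfg: Cfg
  Node = object
    dag: Dag

proc describeBefore(node: Node): string =
  # Before: the full chain appears at every use site, overflowing the
  # line-length limit once a few of these stack up in one expression.
  "chain=" & $node.dag.cfg.depositChainId &
    " net=" & $node.dag.cfg.depositNetworkId

proc describeAfter(node: Node): string =
  # After: bind the chain once; every use is short and lines stay narrow.
  let cfg = node.dag.cfg
  "chain=" & $cfg.depositChainId & " net=" & $cfg.depositNetworkId

let n = Node(dag: Dag(cfg: Cfg(depositChainId: 1, depositNetworkId: 1)))
assert describeBefore(n) == describeAfter(n)
echo describeAfter(n)  # chain=1 net=1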
@@ -1174,9 +1174,12 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
   await node.updateGossipStatus(slot + 1)

 func syncStatus(node: BeaconNode): string =
-  if node.syncManager.inProgress: node.syncManager.syncStatus
-  elif node.backfiller.inProgress: "backfill: " & node.backfiller.syncStatus
-  else: "synced"
+  if node.syncManager.inProgress:
+    node.syncManager.syncStatus
+  elif node.backfiller.inProgress:
+    "backfill: " & node.backfiller.syncStatus
+  else:
+    "synced"

 proc onSlotStart(
     node: BeaconNode, wallTime: BeaconTime, lastSlot: Slot) {.async.} =
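A side note on why the syncStatus rewrite needs no `result` variable: Nim's `if`/`elif`/`else` is an expression, so each branch can move to its own line and still produce the function's return value. A standalone sketch with invented flags in place of the real BeaconNode fields:

# Hypothetical stand-in for syncStatus; the booleans replace the real
# syncManager/backfiller state for illustration.
func syncStatus(syncing, backfilling: bool): string =
  if syncing:
    "syncing"
  elif backfilling:
    "backfill: in progress"
  else:
    "synced"

assert syncStatus(true, false) == "syncing"
assert syncStatus(false, false) == "synced"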
@@ -1241,7 +1244,8 @@ proc onSecond(node: BeaconNode) =
   # Nim GC metrics (for the main thread)
   updateThreadMetrics()

-  if node.config.stopAtSyncedEpoch != 0 and node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch:
+  if node.config.stopAtSyncedEpoch != 0 and
+      node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch:
     notice "Shutting down after having reached the target synced epoch"
     bnStatus = BeaconNodeStatus.Stopping
@@ -1285,11 +1289,13 @@ proc installMessageValidators(node: BeaconNode) =
   # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/p2p-interface.md#attestations-and-aggregation
   # These validators stay around the whole time, regardless of which specific
   # subnets are subscribed to during any given epoch.
+  let forkDigests = node.dag.forkDigests
+
   func toValidationResult(res: ValidationRes): ValidationResult =
     if res.isOk(): ValidationResult.Accept else: res.error()[0]

   node.network.addValidator(
-    getBeaconBlocksTopic(node.dag.forkDigests.phase0),
+    getBeaconBlocksTopic(forkDigests.phase0),
     proc (signedBlock: phase0.SignedBeaconBlock): ValidationResult =
       toValidationResult(node.processor[].blockValidator(
         MsgSource.gossip, signedBlock)))
@@ -1335,21 +1341,21 @@ proc installMessageValidators(node: BeaconNode) =
           node.processor[].voluntaryExitValidator(
             MsgSource.gossip, signedVoluntaryExit)))

-  installPhase0Validators(node.dag.forkDigests.phase0)
+  installPhase0Validators(forkDigests.phase0)

   # Validators introduced in phase0 are also used in altair and merge, but with
   # different fork digest
-  installPhase0Validators(node.dag.forkDigests.altair)
-  installPhase0Validators(node.dag.forkDigests.bellatrix)
+  installPhase0Validators(forkDigests.altair)
+  installPhase0Validators(forkDigests.bellatrix)

   node.network.addValidator(
-    getBeaconBlocksTopic(node.dag.forkDigests.altair),
+    getBeaconBlocksTopic(forkDigests.altair),
     proc (signedBlock: altair.SignedBeaconBlock): ValidationResult =
       toValidationResult(node.processor[].blockValidator(
         MsgSource.gossip, signedBlock)))

   node.network.addValidator(
-    getBeaconBlocksTopic(node.dag.forkDigests.bellatrix),
+    getBeaconBlocksTopic(forkDigests.bellatrix),
     proc (signedBlock: bellatrix.SignedBeaconBlock): ValidationResult =
       toValidationResult(node.processor[].blockValidator(
         MsgSource.gossip, signedBlock)))
@@ -1370,10 +1376,11 @@ proc installMessageValidators(node: BeaconNode) =
       getSyncCommitteeContributionAndProofTopic(digest),
       proc(msg: SignedContributionAndProof): Future[ValidationResult] {.async.} =
         return toValidationResult(
-          await node.processor.contributionValidator(MsgSource.gossip, msg)))
+          await node.processor.contributionValidator(
+            MsgSource.gossip, msg)))

-  installSyncCommitteeeValidators(node.dag.forkDigests.altair)
-  installSyncCommitteeeValidators(node.dag.forkDigests.bellatrix)
+  installSyncCommitteeeValidators(forkDigests.altair)
+  installSyncCommitteeeValidators(forkDigests.bellatrix)

   template installOptimisticLightClientUpdateValidator(digest: auto) =
     node.network.addValidator(
@@ -1387,8 +1394,8 @@ proc installMessageValidators(node: BeaconNode) =
           debug "Ignoring optimistic light client update: Feature disabled"
           ValidationResult.Ignore)

-  installOptimisticLightClientUpdateValidator(node.dag.forkDigests.altair)
-  installOptimisticLightClientUpdateValidator(node.dag.forkDigests.bellatrix)
+  installOptimisticLightClientUpdateValidator(forkDigests.altair)
+  installOptimisticLightClientUpdateValidator(forkDigests.bellatrix)

 proc stop(node: BeaconNode) =
   bnStatus = BeaconNodeStatus.Stopping
@@ -756,7 +756,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
       bid = node.getBlockId(blockIdent).valueOr:
         return RestApiResponse.jsonError(Http404, BlockNotFoundError)

-    if node.dag.cfg.blockForkAtEpoch(bid.slot.epoch) != BeaconBlockFork.Phase0:
+    if node.dag.cfg.blockForkAtEpoch(bid.slot.epoch) !=
+        BeaconBlockFork.Phase0:
       return RestApiResponse.jsonError(
         Http404, BlockNotFoundError, "v1 API supports only phase 0 blocks")

@@ -15,14 +15,15 @@ export rest_utils
 logScope: topics = "rest_config"

 proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
+  template cfg(): auto = node.dag.cfg
   let
     cachedForkSchedule =
-      RestApiResponse.prepareJsonResponse(getForkSchedule(node.dag.cfg))
+      RestApiResponse.prepareJsonResponse(getForkSchedule(cfg))
     cachedConfigSpec =
       RestApiResponse.prepareJsonResponse(
         (
           # https://github.com/ethereum/consensus-specs/blob/v1.0.1/configs/mainnet/phase0.yaml
-          CONFIG_NAME: node.dag.cfg.name(),
+          CONFIG_NAME: cfg.name(),

           # https://github.com/ethereum/consensus-specs/blob/v1.1.3/presets/mainnet/phase0.yaml
           MAX_COMMITTEES_PER_SLOT:
@@ -108,57 +109,57 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =

           # https://github.com/ethereum/consensus-specs/blob/v1.1.3/configs/mainnet.yaml
           PRESET_BASE:
-            node.dag.cfg.PRESET_BASE,
+            cfg.PRESET_BASE,
           TERMINAL_TOTAL_DIFFICULTY:
-            toString(node.dag.cfg.TERMINAL_TOTAL_DIFFICULTY),
+            toString(cfg.TERMINAL_TOTAL_DIFFICULTY),
           TERMINAL_BLOCK_HASH:
-            $node.dag.cfg.TERMINAL_BLOCK_HASH,
+            $cfg.TERMINAL_BLOCK_HASH,
           MIN_GENESIS_ACTIVE_VALIDATOR_COUNT:
-            Base10.toString(node.dag.cfg.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT),
+            Base10.toString(cfg.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT),
           MIN_GENESIS_TIME:
-            Base10.toString(node.dag.cfg.MIN_GENESIS_TIME),
+            Base10.toString(cfg.MIN_GENESIS_TIME),
           GENESIS_FORK_VERSION:
-            "0x" & $node.dag.cfg.GENESIS_FORK_VERSION,
+            "0x" & $cfg.GENESIS_FORK_VERSION,
           GENESIS_DELAY:
-            Base10.toString(node.dag.cfg.GENESIS_DELAY),
+            Base10.toString(cfg.GENESIS_DELAY),
           ALTAIR_FORK_VERSION:
-            "0x" & $node.dag.cfg.ALTAIR_FORK_VERSION,
+            "0x" & $cfg.ALTAIR_FORK_VERSION,
           ALTAIR_FORK_EPOCH:
-            Base10.toString(uint64(node.dag.cfg.ALTAIR_FORK_EPOCH)),
+            Base10.toString(uint64(cfg.ALTAIR_FORK_EPOCH)),
           BELLATRIX_FORK_VERSION:
-            "0x" & $node.dag.cfg.BELLATRIX_FORK_VERSION,
+            "0x" & $cfg.BELLATRIX_FORK_VERSION,
           BELLATRIX_FORK_EPOCH:
-            Base10.toString(uint64(node.dag.cfg.BELLATRIX_FORK_EPOCH)),
+            Base10.toString(uint64(cfg.BELLATRIX_FORK_EPOCH)),
           SHARDING_FORK_VERSION:
-            "0x" & $node.dag.cfg.SHARDING_FORK_VERSION,
+            "0x" & $cfg.SHARDING_FORK_VERSION,
           SHARDING_FORK_EPOCH:
-            Base10.toString(uint64(node.dag.cfg.SHARDING_FORK_EPOCH)),
+            Base10.toString(uint64(cfg.SHARDING_FORK_EPOCH)),
           SECONDS_PER_SLOT:
             Base10.toString(SECONDS_PER_SLOT),
           SECONDS_PER_ETH1_BLOCK:
-            Base10.toString(node.dag.cfg.SECONDS_PER_ETH1_BLOCK),
+            Base10.toString(cfg.SECONDS_PER_ETH1_BLOCK),
           MIN_VALIDATOR_WITHDRAWABILITY_DELAY:
-            Base10.toString(node.dag.cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY),
+            Base10.toString(cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY),
           SHARD_COMMITTEE_PERIOD:
-            Base10.toString(node.dag.cfg.SHARD_COMMITTEE_PERIOD),
+            Base10.toString(cfg.SHARD_COMMITTEE_PERIOD),
           ETH1_FOLLOW_DISTANCE:
-            Base10.toString(node.dag.cfg.ETH1_FOLLOW_DISTANCE),
+            Base10.toString(cfg.ETH1_FOLLOW_DISTANCE),
           INACTIVITY_SCORE_BIAS:
-            Base10.toString(node.dag.cfg.INACTIVITY_SCORE_BIAS),
+            Base10.toString(cfg.INACTIVITY_SCORE_BIAS),
           INACTIVITY_SCORE_RECOVERY_RATE:
-            Base10.toString(node.dag.cfg.INACTIVITY_SCORE_RECOVERY_RATE),
+            Base10.toString(cfg.INACTIVITY_SCORE_RECOVERY_RATE),
           EJECTION_BALANCE:
-            Base10.toString(node.dag.cfg.EJECTION_BALANCE),
+            Base10.toString(cfg.EJECTION_BALANCE),
           MIN_PER_EPOCH_CHURN_LIMIT:
-            Base10.toString(node.dag.cfg.MIN_PER_EPOCH_CHURN_LIMIT),
+            Base10.toString(cfg.MIN_PER_EPOCH_CHURN_LIMIT),
           CHURN_LIMIT_QUOTIENT:
-            Base10.toString(node.dag.cfg.CHURN_LIMIT_QUOTIENT),
+            Base10.toString(cfg.CHURN_LIMIT_QUOTIENT),
           DEPOSIT_CHAIN_ID:
-            Base10.toString(node.dag.cfg.DEPOSIT_CHAIN_ID),
+            Base10.toString(cfg.DEPOSIT_CHAIN_ID),
           DEPOSIT_NETWORK_ID:
-            Base10.toString(node.dag.cfg.DEPOSIT_NETWORK_ID),
+            Base10.toString(cfg.DEPOSIT_NETWORK_ID),
           DEPOSIT_CONTRACT_ADDRESS:
-            $node.dag.cfg.DEPOSIT_CONTRACT_ADDRESS,
+            $cfg.DEPOSIT_CONTRACT_ADDRESS,

           # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/beacon-chain.md#constants
           # GENESIS_SLOT
@@ -238,8 +239,8 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
     cachedDepositContract =
       RestApiResponse.prepareJsonResponse(
         (
-          chain_id: $node.dag.cfg.DEPOSIT_CHAIN_ID,
-          address: $node.dag.cfg.DEPOSIT_CONTRACT_ADDRESS
+          chain_id: $cfg.DEPOSIT_CHAIN_ID,
+          address: $cfg.DEPOSIT_CONTRACT_ADDRESS
         )
       )

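One detail in the config handler changes above deserves a note: `template cfg(): auto = node.dag.cfg` is a compile-time alias, not a variable. Each mention of `cfg` expands to `node.dag.cfg` at the use site, so there is no copy and no stale snapshot. A self-contained sketch, with invented types:

# Hypothetical types; only the template-alias idiom matches the diff.
type
  Cfg = object
    minGenesisTime: uint64
  Node = object
    cfg: Cfg

proc report(node: Node) =
  template cfg(): auto = node.cfg  # expands to `node.cfg` at each use
  echo "MIN_GENESIS_TIME = ", cfg.minGenesisTime

report(Node(cfg: Cfg(minGenesisTime: 1606824000'u64)))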
@@ -230,7 +230,9 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
         return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
       res.get()
     let proposalState = assignClone(node.dag.headState)
-    node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot).toBlockSlotId().expect("not nil")) do:
+    node.dag.withUpdatedState(
+        proposalState[],
+        head.atSlot(wallSlot).toBlockSlotId().expect("not nil")):
       return RestApiResponse.jsonResponse(
         node.getBlockProposalEth1Data(state))
     do:
@@ -481,7 +481,8 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
         return RestApiResponse.jsonError(Http400,
           InvalidAttestationDataRootValueError, $res.error())
       res.get()
-    let res = node.attestationPool[].getAggregatedAttestation(qslot, qroot)
+    let res =
+      node.attestationPool[].getAggregatedAttestation(qslot, qroot)
     if res.isNone():
       return RestApiResponse.jsonError(Http400,
         UnableToGetAggregatedAttestationError)
@@ -585,8 +586,9 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
         request.slot, subnet_id, request.validator_index,
         request.is_aggregator)

-      let validator_pubkey = getStateField(
-        node.dag.headState, validators).asSeq()[request.validator_index].pubkey
+      let validator_pubkey =
+        getStateField(node.dag.headState, validators)
+          .asSeq()[request.validator_index].pubkey

       node.validatorMonitor[].addAutoMonitor(
         validator_pubkey, ValidatorIndex(request.validator_index))
@@ -617,11 +619,12 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
           lenu64(getStateField(node.dag.headState, validators)):
         return RestApiResponse.jsonError(Http400,
           InvalidValidatorIndexValueError)
-      let validator_pubkey = getStateField(
-        node.dag.headState, validators).asSeq()[item.validator_index].pubkey
+      let validator_pubkey =
+        getStateField(node.dag.headState, validators)
+          .asSeq()[item.validator_index].pubkey

-      node.syncCommitteeMsgPool.syncCommitteeSubscriptions[validator_pubkey] =
-        item.until_epoch
+      node.syncCommitteeMsgPool
+        .syncCommitteeSubscriptions[validator_pubkey] = item.until_epoch

       node.validatorMonitor[].addAutoMonitor(
         validator_pubkey, ValidatorIndex(item.validator_index))
@@ -289,22 +289,23 @@ proc sendSyncCommitteeMessages*(node: BeaconNode,
   let (pending, indices) = block:
     var resFutures: seq[Future[SendResult]]
     var resIndices: seq[int]
+    template headSyncCommittees(): auto = node.dag.headSyncCommittees
     for subcommitteeIdx in SyncSubcommitteeIndex:
       for valKey in syncSubcommittee(
-          node.dag.headSyncCommittees.current_sync_committee, subcommitteeIdx):
+          headSyncCommittees.current_sync_committee, subcommitteeIdx):
         let index = keysCur.getOrDefault(uint64(valKey), -1)
         if index >= 0:
           resIndices.add(index)
-          resFutures.add(node.sendSyncCommitteeMessage(msgs[index],
-                                                       subcommitteeIdx, true))
+          resFutures.add(node.sendSyncCommitteeMessage(
+            msgs[index], subcommitteeIdx, true))
     for subcommitteeIdx in SyncSubcommitteeIndex:
       for valKey in syncSubcommittee(
-          node.dag.headSyncCommittees.next_sync_committee, subcommitteeIdx):
+          headSyncCommittees.next_sync_committee, subcommitteeIdx):
         let index = keysNxt.getOrDefault(uint64(valKey), -1)
         if index >= 0:
           resIndices.add(index)
-          resFutures.add(node.sendSyncCommitteeMessage(msgs[index],
-                                                       subcommitteeIdx, true))
+          resFutures.add(node.sendSyncCommitteeMessage(
+            msgs[index], subcommitteeIdx, true))
     (resFutures, resIndices)

   await allFutures(pending)
@@ -447,7 +448,9 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
     proposalState = assignClone(node.dag.headState)

   # TODO fails at checkpoint synced head
-  node.dag.withUpdatedState(proposalState[], head.atSlot(slot - 1).toBlockSlotId().expect("not nil")) do:
+  node.dag.withUpdatedState(
+      proposalState[],
+      head.atSlot(slot - 1).toBlockSlotId().expect("not nil")):
     # Advance to the given slot without calculating state root - we'll only
     # need a state root _with_ the block applied
     var info: ForkedEpochInfo
@@ -849,7 +852,8 @@ proc handleSyncCommitteeContributions(node: BeaconNode,
       continue

     var contribution: SyncCommitteeContribution
-    let contributionWasProduced = node.syncCommitteeMsgPool[].produceContribution(
+    let contributionWasProduced =
+      node.syncCommitteeMsgPool[].produceContribution(
         slot,
         head.root,
         candidateAggregators[i].subcommitteeIdx,
@@ -1055,9 +1059,9 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =

   # If broadcastStartEpoch is 0, it hasn't had time to initialize yet, which
   # means that it'd be okay not to continue, but it won't gossip regardless.
-  if curSlot.epoch <
-      node.processor[].doppelgangerDetection.broadcastStartEpoch and
-      node.processor[].doppelgangerDetection.nodeLaunchSlot > GENESIS_SLOT and
+  let doppelgangerDetection = node.processor[].doppelgangerDetection
+  if curSlot.epoch < doppelgangerDetection.broadcastStartEpoch and
+      doppelgangerDetection.nodeLaunchSlot > GENESIS_SLOT and
       node.config.doppelgangerDetection:
     let
       nextAttestationSlot = node.actionTracker.getNextAttestationSlot(slot - 1)
@@ -1066,13 +1070,11 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
     if slot in [nextAttestationSlot, nextProposalSlot]:
       notice "Doppelganger detection active - skipping validator duties while observing activity on the network",
         slot, epoch = slot.epoch,
-        broadcastStartEpoch =
-          node.processor[].doppelgangerDetection.broadcastStartEpoch
+        broadcastStartEpoch = doppelgangerDetection.broadcastStartEpoch
     else:
       debug "Doppelganger detection active - skipping validator duties while observing activity on the network",
         slot, epoch = slot.epoch,
-        broadcastStartEpoch =
-          node.processor[].doppelgangerDetection.broadcastStartEpoch
+        broadcastStartEpoch = doppelgangerDetection.broadcastStartEpoch

     return

@@ -1112,7 +1114,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
     attestationCutoff = shortLog(attestationCutoff.offset)

   # Wait either for the block or the attestation cutoff time to arrive
-  if await node.consensusManager[].expectBlock(slot).withTimeout(attestationCutoff.offset):
+  if await node.consensusManager[].expectBlock(slot)
+      .withTimeout(attestationCutoff.offset):
     # The expected block arrived (or expectBlock was called again which
     # shouldn't happen as this is the only place we use it) - in our async
     # loop however, we might have been doing other processing that caused delays
@@ -1237,7 +1240,8 @@ proc sendAggregateAndProof*(node: BeaconNode,
                             proof: SignedAggregateAndProof): Future[SendResult] {.
     async.} =
   # REST/JSON-RPC API helper procedure.
-  let res = await node.processor.aggregateValidator(MsgSource.api, proof)
+  let res =
+    await node.processor.aggregateValidator(MsgSource.api, proof)
   return
     if res.isGoodForSending:
       node.network.broadcastAggregateAndProof(proof)
@@ -1257,7 +1261,8 @@ proc sendAggregateAndProof*(node: BeaconNode,
 proc sendVoluntaryExit*(node: BeaconNode,
                         exit: SignedVoluntaryExit): SendResult =
   # REST/JSON-RPC API helper procedure.
-  let res = node.processor[].voluntaryExitValidator(MsgSource.api, exit)
+  let res =
+    node.processor[].voluntaryExitValidator(MsgSource.api, exit)
   if res.isGoodForSending:
     node.network.broadcastVoluntaryExit(exit)
     ok()
@@ -1269,7 +1274,8 @@ proc sendVoluntaryExit*(node: BeaconNode,
 proc sendAttesterSlashing*(node: BeaconNode,
                            slashing: AttesterSlashing): SendResult =
   # REST/JSON-RPC API helper procedure.
-  let res = node.processor[].attesterSlashingValidator(MsgSource.api, slashing)
+  let res =
+    node.processor[].attesterSlashingValidator(MsgSource.api, slashing)
   if res.isGoodForSending:
     node.network.broadcastAttesterSlashing(slashing)
     ok()
@@ -1281,7 +1287,8 @@ proc sendAttesterSlashing*(node: BeaconNode,
 proc sendProposerSlashing*(node: BeaconNode,
                            slashing: ProposerSlashing): SendResult =
   # REST/JSON-RPC API helper procedure.
-  let res = node.processor[].proposerSlashingValidator(MsgSource.api, slashing)
+  let res =
+    node.processor[].proposerSlashingValidator(MsgSource.api, slashing)
   if res.isGoodForSending:
     node.network.broadcastProposerSlashing(slashing)
     ok()
@@ -1356,7 +1363,8 @@ proc registerDuty*(
 proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
   ## Register upcoming duties of attached validators with the duty tracker

-  if node.attachedValidators[].count() == 0 or not node.isSynced(node.dag.head):
+  if node.attachedValidators[].count() == 0 or
+      not node.isSynced(node.dag.head):
     # Nothing to do because we have no validator attached
     return

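Finally, where no binding helps, the commit simply breaks the line after a trailing operator (`and`, `or`, `!=`, or an open paren), indenting the continuation two extra spaces so it cannot be confused with the statement body. A runnable sketch of that convention, with invented names modeled on the doppelganger check:

const GENESIS_SLOT = 0'u64

type Detection = object
  broadcastStartEpoch: uint64
  nodeLaunchSlot: uint64

proc shouldSkipDuties(epoch: uint64, detection: Detection,
                      enabled: bool): bool =
  # The condition continues after `and` with two extra spaces of indent,
  # keeping it visually separate from any `if` body below it.
  epoch < detection.broadcastStartEpoch and
    detection.nodeLaunchSlot > GENESIS_SLOT and
    enabled

echo shouldSkipDuties(
  3, Detection(broadcastStartEpoch: 8, nodeLaunchSlot: 2), true)  # true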