reformat long lines (#3539)
Shortens some long lines by introducing temp variables and line breaks; the two patterns are sketched below, before the diff.
This commit is contained in:
parent 46e5175267
commit 2c5f725543
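The whole diff applies two recurring patterns: break a long condition or argument list after an operator or opening parenthesis, and shorten a repeated long access path with a local alias (`let`, or a `template` when the value must stay live). As a rough illustration only, here is a minimal, self-contained Nim sketch of both patterns; the `Config` and `Node` types and the proc names are hypothetical stand-ins, not code from this repository:

# Sketch of the two line-shortening patterns used in this commit.
# `Config` and `Node` are hypothetical stand-in types.
type
  Config = object
    stopAtSyncedEpoch: uint64
  Node = object
    config: Config
    headEpoch: uint64

proc onSecond(node: Node) =
  # Pattern 1: break the long `and` condition after the operator, with the
  # continuation indented one extra level.
  if node.config.stopAtSyncedEpoch != 0 and
      node.headEpoch >= node.config.stopAtSyncedEpoch:
    echo "reached the target synced epoch"

proc describe(node: Node) =
  # Pattern 2: alias a frequently repeated access path; the `template`
  # expands at each use site, so every use stays short.
  template cfg(): auto = node.config
  echo "stopAtSyncedEpoch: ", cfg.stopAtSyncedEpoch

when isMainModule:
  let node = Node(config: Config(stopAtSyncedEpoch: 10), headEpoch: 12)
  onSecond(node)
  node.describe()

Note that a `template` alias such as `cfg` here (or `cfg` and `headSyncCommittees` in the diff below) re-evaluates the aliased expression at each use site, so it tracks the current field value rather than a copy taken at declaration time.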
@@ -1174,9 +1174,12 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
   await node.updateGossipStatus(slot + 1)
 
 func syncStatus(node: BeaconNode): string =
-  if node.syncManager.inProgress: node.syncManager.syncStatus
-  elif node.backfiller.inProgress: "backfill: " & node.backfiller.syncStatus
-  else: "synced"
+  if node.syncManager.inProgress:
+    node.syncManager.syncStatus
+  elif node.backfiller.inProgress:
+    "backfill: " & node.backfiller.syncStatus
+  else:
+    "synced"
 
 proc onSlotStart(
     node: BeaconNode, wallTime: BeaconTime, lastSlot: Slot) {.async.} =
@@ -1241,7 +1244,8 @@ proc onSecond(node: BeaconNode) =
   # Nim GC metrics (for the main thread)
   updateThreadMetrics()
 
-  if node.config.stopAtSyncedEpoch != 0 and node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch:
+  if node.config.stopAtSyncedEpoch != 0 and
+      node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch:
     notice "Shutting down after having reached the target synced epoch"
     bnStatus = BeaconNodeStatus.Stopping
 
@@ -1285,11 +1289,13 @@ proc installMessageValidators(node: BeaconNode) =
   # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/p2p-interface.md#attestations-and-aggregation
   # These validators stay around the whole time, regardless of which specific
   # subnets are subscribed to during any given epoch.
+  let forkDigests = node.dag.forkDigests
+
   func toValidationResult(res: ValidationRes): ValidationResult =
     if res.isOk(): ValidationResult.Accept else: res.error()[0]
 
   node.network.addValidator(
-    getBeaconBlocksTopic(node.dag.forkDigests.phase0),
+    getBeaconBlocksTopic(forkDigests.phase0),
     proc (signedBlock: phase0.SignedBeaconBlock): ValidationResult =
       toValidationResult(node.processor[].blockValidator(
         MsgSource.gossip, signedBlock)))
@@ -1335,21 +1341,21 @@ proc installMessageValidators(node: BeaconNode) =
         node.processor[].voluntaryExitValidator(
           MsgSource.gossip, signedVoluntaryExit)))
 
-  installPhase0Validators(node.dag.forkDigests.phase0)
+  installPhase0Validators(forkDigests.phase0)
 
   # Validators introduced in phase0 are also used in altair and merge, but with
   # different fork digest
-  installPhase0Validators(node.dag.forkDigests.altair)
-  installPhase0Validators(node.dag.forkDigests.bellatrix)
+  installPhase0Validators(forkDigests.altair)
+  installPhase0Validators(forkDigests.bellatrix)
 
   node.network.addValidator(
-    getBeaconBlocksTopic(node.dag.forkDigests.altair),
+    getBeaconBlocksTopic(forkDigests.altair),
     proc (signedBlock: altair.SignedBeaconBlock): ValidationResult =
       toValidationResult(node.processor[].blockValidator(
         MsgSource.gossip, signedBlock)))
 
   node.network.addValidator(
-    getBeaconBlocksTopic(node.dag.forkDigests.bellatrix),
+    getBeaconBlocksTopic(forkDigests.bellatrix),
     proc (signedBlock: bellatrix.SignedBeaconBlock): ValidationResult =
       toValidationResult(node.processor[].blockValidator(
         MsgSource.gossip, signedBlock)))
@@ -1370,10 +1376,11 @@ proc installMessageValidators(node: BeaconNode) =
       getSyncCommitteeContributionAndProofTopic(digest),
       proc(msg: SignedContributionAndProof): Future[ValidationResult] {.async.} =
         return toValidationResult(
-          await node.processor.contributionValidator(MsgSource.gossip, msg)))
+          await node.processor.contributionValidator(
+            MsgSource.gossip, msg)))
 
-  installSyncCommitteeeValidators(node.dag.forkDigests.altair)
-  installSyncCommitteeeValidators(node.dag.forkDigests.bellatrix)
+  installSyncCommitteeeValidators(forkDigests.altair)
+  installSyncCommitteeeValidators(forkDigests.bellatrix)
 
   template installOptimisticLightClientUpdateValidator(digest: auto) =
     node.network.addValidator(
@@ -1387,8 +1394,8 @@ proc installMessageValidators(node: BeaconNode) =
         debug "Ignoring optimistic light client update: Feature disabled"
         ValidationResult.Ignore)
 
-  installOptimisticLightClientUpdateValidator(node.dag.forkDigests.altair)
-  installOptimisticLightClientUpdateValidator(node.dag.forkDigests.bellatrix)
+  installOptimisticLightClientUpdateValidator(forkDigests.altair)
+  installOptimisticLightClientUpdateValidator(forkDigests.bellatrix)
 
 proc stop(node: BeaconNode) =
   bnStatus = BeaconNodeStatus.Stopping
@@ -756,7 +756,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
       bid = node.getBlockId(blockIdent).valueOr:
         return RestApiResponse.jsonError(Http404, BlockNotFoundError)
 
-    if node.dag.cfg.blockForkAtEpoch(bid.slot.epoch) != BeaconBlockFork.Phase0:
+    if node.dag.cfg.blockForkAtEpoch(bid.slot.epoch) !=
+        BeaconBlockFork.Phase0:
       return RestApiResponse.jsonError(
         Http404, BlockNotFoundError, "v1 API supports only phase 0 blocks")
 
@@ -15,14 +15,15 @@ export rest_utils
 logScope: topics = "rest_config"
 
 proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
+  template cfg(): auto = node.dag.cfg
   let
     cachedForkSchedule =
-      RestApiResponse.prepareJsonResponse(getForkSchedule(node.dag.cfg))
+      RestApiResponse.prepareJsonResponse(getForkSchedule(cfg))
     cachedConfigSpec =
       RestApiResponse.prepareJsonResponse(
         (
          # https://github.com/ethereum/consensus-specs/blob/v1.0.1/configs/mainnet/phase0.yaml
-          CONFIG_NAME: node.dag.cfg.name(),
+          CONFIG_NAME: cfg.name(),
 
          # https://github.com/ethereum/consensus-specs/blob/v1.1.3/presets/mainnet/phase0.yaml
           MAX_COMMITTEES_PER_SLOT:
@@ -108,57 +109,57 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
 
          # https://github.com/ethereum/consensus-specs/blob/v1.1.3/configs/mainnet.yaml
           PRESET_BASE:
-            node.dag.cfg.PRESET_BASE,
+            cfg.PRESET_BASE,
           TERMINAL_TOTAL_DIFFICULTY:
-            toString(node.dag.cfg.TERMINAL_TOTAL_DIFFICULTY),
+            toString(cfg.TERMINAL_TOTAL_DIFFICULTY),
           TERMINAL_BLOCK_HASH:
-            $node.dag.cfg.TERMINAL_BLOCK_HASH,
+            $cfg.TERMINAL_BLOCK_HASH,
           MIN_GENESIS_ACTIVE_VALIDATOR_COUNT:
-            Base10.toString(node.dag.cfg.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT),
+            Base10.toString(cfg.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT),
           MIN_GENESIS_TIME:
-            Base10.toString(node.dag.cfg.MIN_GENESIS_TIME),
+            Base10.toString(cfg.MIN_GENESIS_TIME),
           GENESIS_FORK_VERSION:
-            "0x" & $node.dag.cfg.GENESIS_FORK_VERSION,
+            "0x" & $cfg.GENESIS_FORK_VERSION,
           GENESIS_DELAY:
-            Base10.toString(node.dag.cfg.GENESIS_DELAY),
+            Base10.toString(cfg.GENESIS_DELAY),
           ALTAIR_FORK_VERSION:
-            "0x" & $node.dag.cfg.ALTAIR_FORK_VERSION,
+            "0x" & $cfg.ALTAIR_FORK_VERSION,
           ALTAIR_FORK_EPOCH:
-            Base10.toString(uint64(node.dag.cfg.ALTAIR_FORK_EPOCH)),
+            Base10.toString(uint64(cfg.ALTAIR_FORK_EPOCH)),
           BELLATRIX_FORK_VERSION:
-            "0x" & $node.dag.cfg.BELLATRIX_FORK_VERSION,
+            "0x" & $cfg.BELLATRIX_FORK_VERSION,
           BELLATRIX_FORK_EPOCH:
-            Base10.toString(uint64(node.dag.cfg.BELLATRIX_FORK_EPOCH)),
+            Base10.toString(uint64(cfg.BELLATRIX_FORK_EPOCH)),
           SHARDING_FORK_VERSION:
-            "0x" & $node.dag.cfg.SHARDING_FORK_VERSION,
+            "0x" & $cfg.SHARDING_FORK_VERSION,
           SHARDING_FORK_EPOCH:
-            Base10.toString(uint64(node.dag.cfg.SHARDING_FORK_EPOCH)),
+            Base10.toString(uint64(cfg.SHARDING_FORK_EPOCH)),
           SECONDS_PER_SLOT:
             Base10.toString(SECONDS_PER_SLOT),
           SECONDS_PER_ETH1_BLOCK:
-            Base10.toString(node.dag.cfg.SECONDS_PER_ETH1_BLOCK),
+            Base10.toString(cfg.SECONDS_PER_ETH1_BLOCK),
           MIN_VALIDATOR_WITHDRAWABILITY_DELAY:
-            Base10.toString(node.dag.cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY),
+            Base10.toString(cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY),
           SHARD_COMMITTEE_PERIOD:
-            Base10.toString(node.dag.cfg.SHARD_COMMITTEE_PERIOD),
+            Base10.toString(cfg.SHARD_COMMITTEE_PERIOD),
           ETH1_FOLLOW_DISTANCE:
-            Base10.toString(node.dag.cfg.ETH1_FOLLOW_DISTANCE),
+            Base10.toString(cfg.ETH1_FOLLOW_DISTANCE),
           INACTIVITY_SCORE_BIAS:
-            Base10.toString(node.dag.cfg.INACTIVITY_SCORE_BIAS),
+            Base10.toString(cfg.INACTIVITY_SCORE_BIAS),
           INACTIVITY_SCORE_RECOVERY_RATE:
-            Base10.toString(node.dag.cfg.INACTIVITY_SCORE_RECOVERY_RATE),
+            Base10.toString(cfg.INACTIVITY_SCORE_RECOVERY_RATE),
           EJECTION_BALANCE:
-            Base10.toString(node.dag.cfg.EJECTION_BALANCE),
+            Base10.toString(cfg.EJECTION_BALANCE),
           MIN_PER_EPOCH_CHURN_LIMIT:
-            Base10.toString(node.dag.cfg.MIN_PER_EPOCH_CHURN_LIMIT),
+            Base10.toString(cfg.MIN_PER_EPOCH_CHURN_LIMIT),
           CHURN_LIMIT_QUOTIENT:
-            Base10.toString(node.dag.cfg.CHURN_LIMIT_QUOTIENT),
+            Base10.toString(cfg.CHURN_LIMIT_QUOTIENT),
           DEPOSIT_CHAIN_ID:
-            Base10.toString(node.dag.cfg.DEPOSIT_CHAIN_ID),
+            Base10.toString(cfg.DEPOSIT_CHAIN_ID),
           DEPOSIT_NETWORK_ID:
-            Base10.toString(node.dag.cfg.DEPOSIT_NETWORK_ID),
+            Base10.toString(cfg.DEPOSIT_NETWORK_ID),
           DEPOSIT_CONTRACT_ADDRESS:
-            $node.dag.cfg.DEPOSIT_CONTRACT_ADDRESS,
+            $cfg.DEPOSIT_CONTRACT_ADDRESS,
 
          # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/beacon-chain.md#constants
          # GENESIS_SLOT
@@ -238,8 +239,8 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
     cachedDepositContract =
       RestApiResponse.prepareJsonResponse(
         (
-          chain_id: $node.dag.cfg.DEPOSIT_CHAIN_ID,
-          address: $node.dag.cfg.DEPOSIT_CONTRACT_ADDRESS
+          chain_id: $cfg.DEPOSIT_CHAIN_ID,
+          address: $cfg.DEPOSIT_CONTRACT_ADDRESS
         )
       )
 
@@ -230,7 +230,9 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
         return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
       res.get()
     let proposalState = assignClone(node.dag.headState)
-    node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot).toBlockSlotId().expect("not nil")) do:
+    node.dag.withUpdatedState(
+        proposalState[],
+        head.atSlot(wallSlot).toBlockSlotId().expect("not nil")):
      return RestApiResponse.jsonResponse(
        node.getBlockProposalEth1Data(state))
    do:
@@ -481,7 +481,8 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
         return RestApiResponse.jsonError(Http400,
                                          InvalidAttestationDataRootValueError, $res.error())
       res.get()
-    let res = node.attestationPool[].getAggregatedAttestation(qslot, qroot)
+    let res =
+      node.attestationPool[].getAggregatedAttestation(qslot, qroot)
     if res.isNone():
       return RestApiResponse.jsonError(Http400,
                                        UnableToGetAggregatedAttestationError)
@@ -585,8 +586,9 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
       request.slot, subnet_id, request.validator_index,
       request.is_aggregator)
 
-    let validator_pubkey = getStateField(
-      node.dag.headState, validators).asSeq()[request.validator_index].pubkey
+    let validator_pubkey =
+      getStateField(node.dag.headState, validators)
+        .asSeq()[request.validator_index].pubkey
 
     node.validatorMonitor[].addAutoMonitor(
       validator_pubkey, ValidatorIndex(request.validator_index))
@@ -617,11 +619,12 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
         lenu64(getStateField(node.dag.headState, validators)):
       return RestApiResponse.jsonError(Http400,
                                        InvalidValidatorIndexValueError)
-    let validator_pubkey = getStateField(
-      node.dag.headState, validators).asSeq()[item.validator_index].pubkey
+    let validator_pubkey =
+      getStateField(node.dag.headState, validators)
+        .asSeq()[item.validator_index].pubkey
 
-    node.syncCommitteeMsgPool.syncCommitteeSubscriptions[validator_pubkey] =
-      item.until_epoch
+    node.syncCommitteeMsgPool
+      .syncCommitteeSubscriptions[validator_pubkey] = item.until_epoch
 
     node.validatorMonitor[].addAutoMonitor(
       validator_pubkey, ValidatorIndex(item.validator_index))
@@ -289,22 +289,23 @@ proc sendSyncCommitteeMessages*(node: BeaconNode,
   let (pending, indices) = block:
     var resFutures: seq[Future[SendResult]]
     var resIndices: seq[int]
+    template headSyncCommittees(): auto = node.dag.headSyncCommittees
     for subcommitteeIdx in SyncSubcommitteeIndex:
       for valKey in syncSubcommittee(
-          node.dag.headSyncCommittees.current_sync_committee, subcommitteeIdx):
+          headSyncCommittees.current_sync_committee, subcommitteeIdx):
         let index = keysCur.getOrDefault(uint64(valKey), -1)
         if index >= 0:
           resIndices.add(index)
-          resFutures.add(node.sendSyncCommitteeMessage(msgs[index],
-                                                       subcommitteeIdx, true))
+          resFutures.add(node.sendSyncCommitteeMessage(
+            msgs[index], subcommitteeIdx, true))
     for subcommitteeIdx in SyncSubcommitteeIndex:
       for valKey in syncSubcommittee(
-          node.dag.headSyncCommittees.next_sync_committee, subcommitteeIdx):
+          headSyncCommittees.next_sync_committee, subcommitteeIdx):
         let index = keysNxt.getOrDefault(uint64(valKey), -1)
         if index >= 0:
           resIndices.add(index)
-          resFutures.add(node.sendSyncCommitteeMessage(msgs[index],
-                                                       subcommitteeIdx, true))
+          resFutures.add(node.sendSyncCommitteeMessage(
+            msgs[index], subcommitteeIdx, true))
     (resFutures, resIndices)
 
   await allFutures(pending)
@@ -447,7 +448,9 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
     proposalState = assignClone(node.dag.headState)
 
   # TODO fails at checkpoint synced head
-  node.dag.withUpdatedState(proposalState[], head.atSlot(slot - 1).toBlockSlotId().expect("not nil")) do:
+  node.dag.withUpdatedState(
+      proposalState[],
+      head.atSlot(slot - 1).toBlockSlotId().expect("not nil")):
     # Advance to the given slot without calculating state root - we'll only
     # need a state root _with_ the block applied
     var info: ForkedEpochInfo
@@ -849,7 +852,8 @@ proc handleSyncCommitteeContributions(node: BeaconNode,
       continue
 
     var contribution: SyncCommitteeContribution
-    let contributionWasProduced = node.syncCommitteeMsgPool[].produceContribution(
+    let contributionWasProduced =
+      node.syncCommitteeMsgPool[].produceContribution(
       slot,
       head.root,
      candidateAggregators[i].subcommitteeIdx,
@@ -1055,9 +1059,9 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
 
   # If broadcastStartEpoch is 0, it hasn't had time to initialize yet, which
   # means that it'd be okay not to continue, but it won't gossip regardless.
-  if curSlot.epoch <
-      node.processor[].doppelgangerDetection.broadcastStartEpoch and
-     node.processor[].doppelgangerDetection.nodeLaunchSlot > GENESIS_SLOT and
+  let doppelgangerDetection = node.processor[].doppelgangerDetection
+  if curSlot.epoch < doppelgangerDetection.broadcastStartEpoch and
+     doppelgangerDetection.nodeLaunchSlot > GENESIS_SLOT and
      node.config.doppelgangerDetection:
     let
       nextAttestationSlot = node.actionTracker.getNextAttestationSlot(slot - 1)
@@ -1066,13 +1070,11 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
     if slot in [nextAttestationSlot, nextProposalSlot]:
       notice "Doppelganger detection active - skipping validator duties while observing activity on the network",
         slot, epoch = slot.epoch,
-        broadcastStartEpoch =
-          node.processor[].doppelgangerDetection.broadcastStartEpoch
+        broadcastStartEpoch = doppelgangerDetection.broadcastStartEpoch
     else:
       debug "Doppelganger detection active - skipping validator duties while observing activity on the network",
         slot, epoch = slot.epoch,
-        broadcastStartEpoch =
-          node.processor[].doppelgangerDetection.broadcastStartEpoch
+        broadcastStartEpoch = doppelgangerDetection.broadcastStartEpoch
 
     return
 
@@ -1112,7 +1114,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
     attestationCutoff = shortLog(attestationCutoff.offset)
 
   # Wait either for the block or the attestation cutoff time to arrive
-  if await node.consensusManager[].expectBlock(slot).withTimeout(attestationCutoff.offset):
+  if await node.consensusManager[].expectBlock(slot)
+      .withTimeout(attestationCutoff.offset):
     # The expected block arrived (or expectBlock was called again which
     # shouldn't happen as this is the only place we use it) - in our async
     # loop however, we might have been doing other processing that caused delays
@@ -1237,7 +1240,8 @@ proc sendAggregateAndProof*(node: BeaconNode,
                             proof: SignedAggregateAndProof): Future[SendResult] {.
     async.} =
   # REST/JSON-RPC API helper procedure.
-  let res = await node.processor.aggregateValidator(MsgSource.api, proof)
+  let res =
+    await node.processor.aggregateValidator(MsgSource.api, proof)
   return
     if res.isGoodForSending:
       node.network.broadcastAggregateAndProof(proof)
@@ -1257,7 +1261,8 @@ proc sendAggregateAndProof*(node: BeaconNode,
 proc sendVoluntaryExit*(node: BeaconNode,
                         exit: SignedVoluntaryExit): SendResult =
   # REST/JSON-RPC API helper procedure.
-  let res = node.processor[].voluntaryExitValidator(MsgSource.api, exit)
+  let res =
+    node.processor[].voluntaryExitValidator(MsgSource.api, exit)
   if res.isGoodForSending:
     node.network.broadcastVoluntaryExit(exit)
     ok()
@@ -1269,7 +1274,8 @@ proc sendVoluntaryExit*(node: BeaconNode,
 proc sendAttesterSlashing*(node: BeaconNode,
                            slashing: AttesterSlashing): SendResult =
   # REST/JSON-RPC API helper procedure.
-  let res = node.processor[].attesterSlashingValidator(MsgSource.api, slashing)
+  let res =
+    node.processor[].attesterSlashingValidator(MsgSource.api, slashing)
   if res.isGoodForSending:
     node.network.broadcastAttesterSlashing(slashing)
     ok()
@@ -1281,7 +1287,8 @@ proc sendAttesterSlashing*(node: BeaconNode,
 proc sendProposerSlashing*(node: BeaconNode,
                            slashing: ProposerSlashing): SendResult =
   # REST/JSON-RPC API helper procedure.
-  let res = node.processor[].proposerSlashingValidator(MsgSource.api, slashing)
+  let res =
+    node.processor[].proposerSlashingValidator(MsgSource.api, slashing)
   if res.isGoodForSending:
     node.network.broadcastProposerSlashing(slashing)
     ok()
@@ -1356,7 +1363,8 @@ proc registerDuty*(
 proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
   ## Register upcoming duties of attached validators with the duty tracker
 
-  if node.attachedValidators[].count() == 0 or not node.isSynced(node.dag.head):
+  if node.attachedValidators[].count() == 0 or
+      not node.isSynced(node.dag.head):
    # Nothing to do because we have no validator attached
    return
 