restore doppelganger check on connectivity loss (#4616)
* restore doppelganger check on connectivity loss

  https://github.com/status-im/nimbus-eth2/pull/4398 introduced a regression in functionality where doppelganger detection would not be rerun during connectivity loss. This PR reintroduces this check and makes some adjustments to the implementation to simplify the code flow for both BN and VC.

* track when check was last performed for each validator (to deal with late-added validators)
* track when we performed a doppel-detectable activity (attesting) so as to avoid false positives
* remove nodeStart special case (this should be treated the same as adding a validator dynamically just after startup)
* allow sync committee duties in doppelganger period
* don't trigger doppelganger when registering duties
* fix crash when expected index response is missing
* fix missing slashingSafe propagation
parent 97247ea6e7
commit 83f9745df1
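Taken together, the changes below replace the old `DoppelgangerStatus` state machine with two per-validator markers: `doppelCheck`, the last epoch for which the network was checked for activity, and `doppelActivity`, the last epoch in which this instance itself performed a duty. The sketch below illustrates that model in isolation; it is a simplified illustration rather than the repository's implementation — it uses plain `uint64` epochs and `std/options` in place of the codebase's `Epoch` and `Opt` types.

```nim
import std/options

type
  Epoch = uint64

  TrackedValidator = object
    ## Simplified stand-in for the AttachedValidator fields added in this PR
    doppelCheck: Option[Epoch]     # last epoch a liveness check covered
    doppelActivity: Option[Epoch]  # last epoch we performed a duty ourselves

proc doppelgangerChecked(v: var TrackedValidator, epoch: Epoch) =
  # Record that the network was queried for this validator's activity
  v.doppelCheck = some(epoch)

proc doppelgangerActivity(v: var TrackedValidator, epoch: Epoch) =
  # Record that this instance itself performed a doppelganger-detectable duty
  v.doppelActivity = some(epoch)

func triggersDoppelganger(v: TrackedValidator, epoch: Epoch): bool =
  # Observed activity is proof of a doppelganger only when the epoch was
  # actually checked and the activity could not have been our own
  if v.doppelActivity.isSome and v.doppelActivity.get() >= epoch:
    false  # our own activity
  elif v.doppelCheck.isNone:
    false  # no check covering this epoch
  else:
    v.doppelCheck.get() == epoch

when isMainModule:
  var v = TrackedValidator()
  doAssert not v.triggersDoppelganger(10)  # nothing checked yet
  v.doppelgangerChecked(10)
  doAssert v.triggersDoppelganger(10)      # checked, and not our activity
  v.doppelgangerActivity(10)
  doAssert not v.triggersDoppelganger(10)  # it was us after all
```

Because the markers are tracked per validator rather than per node, validators added at runtime and checks re-run after a connectivity loss follow the same code path.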
@@ -527,15 +527,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 OK: 6/6 Fail: 0/6 Skip: 0/6
 ## Validator pool
 ```diff
-+ Activation after check OK
-+ Doppelganger for already active validator OK
 + Doppelganger for genesis validator OK
-+ Doppelganger for validator that activates in future epoch OK
-+ Doppelganger for validator that activates in previous epoch OK
 + Doppelganger for validator that activates in same epoch as check OK
-+ Future activation after check OK
 ```
-OK: 7/7 Fail: 0/7 Skip: 0/7
+OK: 2/2 Fail: 0/2 Skip: 0/2
 ## Zero signature sanity checks
 ```diff
 + SSZ serialization roundtrip of SignedBeaconBlockHeader OK
@@ -630,4 +625,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 OK: 9/9 Fail: 0/9 Skip: 0/9

 ---TOTAL---
-OK: 351/356 Fail: 0/356 Skip: 5/356
+OK: 346/351 Fail: 0/351 Skip: 5/351
@@ -85,11 +85,6 @@ type
     ## of gossip interleaving between nodes so long as they don't gossip at
     ## the same time.

-    nodeLaunchSlot*: Slot ##\
-      ## Set once, at node launch. This functions as a basic protection against
-      ## false positives from attestations persisting within the gossip network
-      ## across quick restarts.
-
   Eth2Processor* = object
     ## The Eth2Processor is the entry point for untrusted message processing -
     ## when we receive messages from various sources, we pass them to the
@@ -164,7 +159,6 @@ proc new*(T: type Eth2Processor,
   (ref Eth2Processor)(
     doppelgangerDetectionEnabled: doppelgangerDetectionEnabled,
     doppelgangerDetection: DoppelgangerProtection(
-      nodeLaunchSlot: getBeaconTime().slotOrZero,
       broadcastStartEpoch: FAR_FUTURE_EPOCH),
     blockProcessor: blockProcessor,
     validatorMonitor: validatorMonitor,
@@ -263,8 +257,7 @@ proc setupDoppelgangerDetection*(self: var Eth2Processor, slot: Slot) =
   if self.doppelgangerDetectionEnabled:
     notice "Setting up doppelganger detection",
       epoch = slot.epoch,
-      broadcast_epoch = self.doppelgangerDetection.broadcastStartEpoch,
-      nodestart_epoch = self.doppelgangerDetection.nodeLaunchSlot.epoch()
+      broadcast_epoch = self.doppelgangerDetection.broadcastStartEpoch

 proc clearDoppelgangerProtection*(self: var Eth2Processor) =
   self.doppelgangerDetection.broadcastStartEpoch = FAR_FUTURE_EPOCH
@@ -278,25 +271,17 @@ proc checkForPotentialDoppelganger(
   if not self.doppelgangerDetectionEnabled:
     return

-  if attestation.data.slot <= self.doppelgangerDetection.nodeLaunchSlot + 1:
-    return
-
-  let broadcastStartEpoch = self.doppelgangerDetection.broadcastStartEpoch
-
   for validatorIndex in attesterIndices:
     let
-      validatorPubkey = self.dag.validatorKey(validatorIndex).get().toPubKey()
-      validator = self.validatorPool[].getValidator(validatorPubkey)
+      pubkey = self.dag.validatorKey(validatorIndex).get().toPubKey()

-    if not(isNil(validator)):
-      if validator.triggersDoppelganger(broadcastStartEpoch):
-        warn "Doppelganger attestation",
-          validator = shortLog(validator),
-          validator_index = validatorIndex,
-          activation_epoch = validator.activationEpoch,
-          broadcast_epoch = broadcastStartEpoch,
-          attestation = shortLog(attestation)
-        quitDoppelganger()
+    if self.validatorPool[].triggersDoppelganger(
+        pubkey, attestation.data.slot.epoch):
+      warn "Doppelganger attestation",
+        validator = shortLog(pubkey),
+        validator_index = validatorIndex,
+        attestation = shortLog(attestation)
+      quitDoppelganger()

 proc processAttestation*(
     self: ref Eth2Processor, src: MsgSource,
@@ -980,7 +980,7 @@ proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) =

   node.network.updateSyncnetsMetadata(syncnets)

-proc updateDoppelganger(node: BeaconNode, epoch: Epoch) =
+proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) =
   if not node.processor[].doppelgangerDetectionEnabled:
     return

@@ -989,7 +989,7 @@ proc updateDoppelganger(node: BeaconNode, epoch: Epoch) =
   # active
   if epoch > node.processor[].doppelgangerDetection.broadcastStartEpoch:
     for validator in node.attachedValidators[]:
-      validator.updateDoppelganger(epoch - 1)
+      validator.doppelgangerChecked(epoch - 1)

 proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
   ## Subscribe to subnets that we are providing stability for or aggregating
@@ -1104,7 +1104,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
       addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot)

   node.gossipState = targetGossipState
-  node.updateDoppelganger(slot.epoch)
+  node.doppelgangerChecked(slot.epoch)
   node.updateAttestationSubnetHandlers(slot)
   node.updateBlocksGossipStatus(slot, isBehind)
   node.updateLightClientGossipStatus(slot, isBehind)
@@ -222,8 +222,7 @@ proc installApiHandlers*(node: SigningNode) =
         if validator_key.isErr():
           return errorResponse(Http400, InvalidValidatorPublicKey)
         let key = validator_key.get()
-        let validator = node.attachedValidators.getValidator(key)
-        if isNil(validator):
+        let validator = node.attachedValidators.getValidator(key).valueOr:
           return errorResponse(Http404, ValidatorNotFoundError)
         validator

@@ -25,7 +25,8 @@ type
 proc serveAttestation(service: AttestationServiceRef, adata: AttestationData,
                       duty: DutyAndProof): Future[bool] {.async.} =
   let vc = service.client
-  let validator = vc.getValidatorForDuties(duty.data.pubkey, adata.slot).valueOr:
+  let validator = vc.getValidatorForDuties(
+      duty.data.pubkey, adata.slot, true).valueOr:
     return false
   let fork = vc.forkAtEpoch(adata.slot.epoch)

@@ -260,7 +261,7 @@ proc produceAndPublishAggregates(service: AttestationServiceRef,
     block:
       var res: seq[AggregateItem]
       for duty in duties:
-        let validator = vc.attachedValidators[].getValidatorForDuties(
+        let validator = vc.getValidatorForDuties(
           duty.data.pubkey, slot).valueOr:
           continue

@@ -484,8 +484,11 @@ proc getDelay*(vc: ValidatorClientRef, deadline: BeaconTime): TimeDiff =
   vc.beaconClock.now() - deadline

 proc getValidatorForDuties*(vc: ValidatorClientRef,
-                            key: ValidatorPubKey, slot: Slot): Opt[AttachedValidator] =
-  vc.attachedValidators[].getValidatorForDuties(key, slot)
+                            key: ValidatorPubKey, slot: Slot,
+                            doppelActivity = false,
+                            slashingSafe = false): Opt[AttachedValidator] =
+  vc.attachedValidators[].getValidatorForDuties(
+    key, slot, doppelActivity, slashingSafe)

 proc forkAtEpoch*(vc: ValidatorClientRef, epoch: Epoch): Fork =
   # If schedule is present, it MUST not be empty.
@@ -518,22 +521,23 @@ proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =

 proc removeValidator*(vc: ValidatorClientRef,
                       pubkey: ValidatorPubKey) {.async.} =
-  let validator = vc.attachedValidators[].getValidator(pubkey)
-  if not(isNil(validator)):
-    case validator.kind
-    of ValidatorKind.Local:
-      discard
-    of ValidatorKind.Remote:
-      # We must close all the REST clients running for the remote validator.
-      let pending =
-        block:
-          var res: seq[Future[void]]
-          for item in validator.clients:
-            res.add(item[0].closeWait())
-          res
-      await allFutures(pending)
-    # Remove validator from ValidatorPool.
-    vc.attachedValidators[].removeValidator(pubkey)
+  let validator = vc.attachedValidators[].getValidator(pubkey).valueOr:
+    return
+  # Remove validator from ValidatorPool.
+  vc.attachedValidators[].removeValidator(pubkey)
+
+  case validator.kind
+  of ValidatorKind.Local:
+    discard
+  of ValidatorKind.Remote:
+    # We must close all the REST clients running for the remote validator.
+    let pending =
+      block:
+        var res: seq[Future[void]]
+        for item in validator.clients:
+          res.add(item[0].closeWait())
+        res
+    await allFutures(pending)

 proc getFeeRecipient*(vc: ValidatorClientRef, pubkey: ValidatorPubKey,
                       validatorIdx: ValidatorIndex,
@@ -16,7 +16,8 @@ logScope: service = ServiceName
 proc getCheckingList*(vc: ValidatorClientRef, epoch: Epoch): seq[ValidatorIndex] =
   var res: seq[ValidatorIndex]
   for validator in vc.attachedValidators[]:
-    if validator.index.isSome and validator.triggersDoppelganger(epoch):
+    if validator.index.isSome and
+        (validator.doppelCheck.isNone or validator.doppelCheck.get() < epoch):
       res.add validator.index.get()
   res

@@ -36,12 +37,11 @@ proc processActivities(service: DoppelgangerServiceRef, epoch: Epoch,
       let vindex = item.index
       for validator in vc.attachedValidators[]:
         if validator.index == Opt.some(vindex):
-          if item.is_live:
-            if validator.triggersDoppelganger(epoch):
-              vc.doppelExit.fire()
-              return
-          else:
-            validator.updateDoppelganger(epoch)
+          validator.doppelgangerChecked(epoch)
+
+          if item.is_live and validator.triggersDoppelganger(epoch):
+            vc.doppelExit.fire()
+            return

 proc mainLoop(service: DoppelgangerServiceRef) {.async.} =
   let vc = service.client
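These two hunks are the core of the restored behaviour: every liveness poll now records a `doppelgangerChecked` for the polled epoch — whether or not the validator was seen live — and the client only exits when the reported activity cannot have been its own. A rough, self-contained sketch of that flow follows; `LivenessItem`, `Validator` and the boolean "should exit" return are hypothetical stand-ins for the validator client's actual types and its `doppelExit` event.

```nim
import std/options

type
  LivenessItem = object
    index: int     # validator index as reported by the beacon node
    isLive: bool   # whether activity was observed for the epoch

  Validator = ref object
    index: Option[int]
    doppelCheck: Option[uint64]
    doppelActivity: Option[uint64]

proc doppelgangerChecked(v: Validator, epoch: uint64) =
  v.doppelCheck = some(epoch)

func triggersDoppelganger(v: Validator, epoch: uint64): bool =
  # Triggered only when the epoch was checked and the activity was not ours
  (v.doppelActivity.isNone or v.doppelActivity.get() < epoch) and
    v.doppelCheck.isSome and v.doppelCheck.get() == epoch

proc processActivities(validators: seq[Validator], epoch: uint64,
                       items: seq[LivenessItem]): bool =
  ## Returns true when a doppelganger is detected and the client should exit
  for item in items:
    for validator in validators:
      if validator.index == some(item.index):
        validator.doppelgangerChecked(epoch)  # the epoch has been checked
        if item.isLive and validator.triggersDoppelganger(epoch):
          return true                         # activity was not ours
  false
```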
@@ -87,15 +87,15 @@ proc pollForValidatorIndices*(vc: ValidatorClientRef) {.async.} =
     list: seq[AttachedValidator]

   for item in validators:
-    var validator = vc.attachedValidators[].getValidator(item.validator.pubkey)
-    if isNil(validator):
+    let validator = vc.attachedValidators[].getValidator(item.validator.pubkey)
+    if validator.isNone():
       missing.add(validatorLog(item.validator.pubkey, item.index))
     else:
-      validator.updateValidator(Opt.some ValidatorAndIndex(
+      validator.get().updateValidator(Opt.some ValidatorAndIndex(
         index: item.index,
         validator: item.validator))
       updated.add(validatorLog(item.validator.pubkey, item.index))
-      list.add(validator)
+      list.add(validator.get())

   if len(updated) > 0:
     info "Validator indices updated",
@@ -198,7 +198,9 @@ proc pollForAttesterDuties*(vc: ValidatorClientRef,
     var pendingRequests: seq[Future[SignatureResult]]
     var validators: seq[AttachedValidator]
    for item in addOrReplaceItems:
-      let validator = vc.attachedValidators[].getValidator(item.duty.pubkey)
+      let validator =
+        vc.attachedValidators[].getValidator(item.duty.pubkey).valueOr:
+          continue
       let fork = vc.forkAtEpoch(item.duty.slot.epoch)
       let future = validator.getSlotSignature(
         fork, genesisRoot, item.duty.slot)
@@ -36,7 +36,8 @@ proc serveSyncCommitteeMessage*(service: SyncCommitteeServiceRef,
     vindex = duty.validator_index
     subcommitteeIdx = getSubcommitteeIndex(
       duty.validator_sync_committee_index)
-    validator = vc.getValidatorForDuties(duty.pubkey, slot).valueOr: return false
+    validator = vc.getValidatorForDuties(
+      duty.pubkey, slot, slashingSafe = true).valueOr: return false
     message =
       block:
         let res = await getSyncCommitteeMessage(validator, fork,
@@ -212,10 +213,9 @@ proc produceAndPublishContributions(service: SyncCommitteeServiceRef,
       var validators: seq[(AttachedValidator, SyncSubcommitteeIndex)]

       for duty in duties:
-        let validator = vc.attachedValidators[].getValidatorForDuties(
-          duty.pubkey, slot).valueOr:
-          continue
         let
+          validator = vc.getValidatorForDuties(duty.pubkey, slot).valueOr:
+            continue
           subCommitteeIdx =
             getSubcommitteeIndex(duty.validator_sync_committee_index)
           future = validator.getSyncCommitteeSelectionProof(
@@ -586,8 +586,7 @@ proc removeValidator*(pool: var ValidatorPool,
                       publicKey: ValidatorPubKey,
                       kind: KeystoreKind): KmResult[RemoveValidatorStatus] {.
     raises: [Defect].} =
-  let validator = pool.getValidator(publicKey)
-  if isNil(validator):
+  let validator = pool.getValidator(publicKey).valueOr:
     return ok(RemoveValidatorStatus.notFound)
   if validator.kind.toKeystoreKind() != kind:
     return ok(RemoveValidatorStatus.notFound)
@@ -125,12 +125,18 @@ proc addValidators*(node: BeaconNode) =
       v = node.attachedValidators[].addValidator(keystore, feeRecipient, gasLimit)
     v.updateValidator(data)

+proc getValidator*(node: BeaconNode, idx: ValidatorIndex): Opt[AttachedValidator] =
+  let key = ? node.dag.validatorKey(idx)
+  node.attachedValidators[].getValidator(key.toPubKey())
+
 proc getValidatorForDuties*(
     node: BeaconNode,
-    idx: ValidatorIndex, slot: Slot): Opt[AttachedValidator] =
+    idx: ValidatorIndex, slot: Slot,
+    doppelActivity = false, slashingSafe = false): Opt[AttachedValidator] =
   let key = ? node.dag.validatorKey(idx)

-  node.attachedValidators[].getValidatorForDuties(key.toPubKey(), slot)
+  node.attachedValidators[].getValidatorForDuties(
+    key.toPubKey(), slot, doppelActivity, slashingSafe)

 proc isSynced*(node: BeaconNode, head: BlockRef): SyncStatus =
   ## TODO This function is here as a placeholder for some better heurestics to
@@ -1036,7 +1042,8 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
         epochRef.shufflingRef, slot, committee_index)

     for index_in_committee, validator_index in committee:
-      let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
+      let validator = node.getValidatorForDuties(
+          validator_index, slot, true).valueOr:
         continue

       let
@@ -1107,7 +1114,8 @@ proc handleSyncCommitteeMessages(node: BeaconNode, head: BlockRef, slot: Slot) =

   for subcommitteeIdx in SyncSubcommitteeIndex:
     for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
-      let validator = node.getValidatorForDuties(valIdx, slot).valueOr:
+      let validator = node.getValidatorForDuties(
+          valIdx, slot, slashingSafe = true).valueOr:
         continue
       asyncSpawn createAndSendSyncCommitteeMessage(node, validator, slot,
                                                    subcommitteeIdx, head)
@@ -1174,7 +1182,8 @@ proc handleSyncCommitteeContributions(

   for subcommitteeIdx in SyncSubCommitteeIndex:
     for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
-      let validator = node.getValidatorForDuties(valIdx, slot).valueOr:
+      let validator = node.getValidatorForDuties(
+          valIdx, slot, slashingSafe = true).valueOr:
         continue

       asyncSpawn signAndSendContribution(
@@ -1475,9 +1484,10 @@ proc updateValidators(
   # checking all validators would significantly slow down this loop when there
   # are many inactive keys
   for i in node.dutyValidatorCount..validators.high:
-    let v = node.attachedValidators[].getValidator(validators[i].pubkey)
-    if v != nil:
-      v.index = Opt.some ValidatorIndex(i)
+    let
+      v = node.attachedValidators[].getValidator(validators[i].pubkey).valueOr:
+        continue
+    v.index = Opt.some ValidatorIndex(i)

   node.dutyValidatorCount = validators.len

@@ -1656,9 +1666,10 @@ proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
       let committee = get_beacon_committee(shufflingRef, slot, committee_index)

       for index_in_committee, validator_index in committee:
-        let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
-          continue
         let
+          validator = node.getValidator(validator_index).valueOr:
+            continue
+
           subnet_id = compute_subnet_for_attestation(
             committees_per_slot, slot, committee_index)
           slotSigRes = await validator.getSlotSignature(
@@ -27,13 +27,12 @@ export

 const
   WEB3_SIGNER_DELAY_TOLERANCE = 3.seconds
-  DOPPELGANGER_EPOCHS_COUNT = 1
-    ## The number of full epochs that we monitor validators for doppelganger
-    ## protection

 declareGauge validators,
   "Number of validators attached to the beacon node"

+logScope: topics = "val_pool"
+
 type
   ValidatorKind* {.pure.} = enum
     Local, Remote
@@ -44,9 +43,6 @@ type
     index*: ValidatorIndex
     validator*: Validator

-  DoppelgangerStatus {.pure.} = enum
-    Unknown, Checking, Checked
-
   AttachedValidator* = ref object
     data*: KeystoreData
     case kind*: ValidatorKind
@@ -76,9 +72,10 @@ type
     # builder should be informed of current validators
     externalBuilderRegistration*: Opt[SignedValidatorRegistrationV1]

-    doppelStatus: DoppelgangerStatus
-    doppelEpoch*: Opt[Epoch]
-      ## The epoch where doppelganger detection started doing its monitoring
+    doppelCheck*: Opt[Epoch]
+      ## The epoch where doppelganger detection last performed a check
+    doppelActivity*: Opt[Epoch]
+      ## The last time we attempted to perform a duty with this validator

     lastWarning*: Opt[Slot]

@@ -203,8 +200,9 @@ proc addValidator*(pool: var ValidatorPool,
     pool.addRemoteValidator(keystore, feeRecipient, gasLimit)

 proc getValidator*(pool: ValidatorPool,
-                   validatorKey: ValidatorPubKey): AttachedValidator =
-  pool.validators.getOrDefault(validatorKey)
+                   validatorKey: ValidatorPubKey): Opt[AttachedValidator] =
+  let v = pool.validators.getOrDefault(validatorKey)
+  if v == nil: Opt.none(AttachedValidator) else: Opt.some(v)

 proc contains*(pool: ValidatorPool, pubkey: ValidatorPubKey): bool =
   ## Returns ``true`` if validator with key ``pubkey`` present in ``pool``.
@@ -253,14 +251,6 @@ proc updateValidator*(

   validator.activationEpoch = activationEpoch

-  if validator.doppelStatus == DoppelgangerStatus.Unknown:
-    if validator.doppelEpoch.isSome() and activationEpoch != FAR_FUTURE_EPOCH:
-      let doppelEpoch = validator.doppelEpoch.get()
-      if doppelEpoch >= validator.activationEpoch + DOPPELGANGER_EPOCHS_COUNT:
-        validator.doppelStatus = DoppelgangerStatus.Checking
-      else:
-        validator.doppelStatus = DoppelgangerStatus.Checked
-
 proc close*(pool: var ValidatorPool) =
   ## Unlock and close all validator keystore's files managed by ``pool``.
   for validator in pool.validators.values():
@@ -282,70 +272,94 @@ iterator items*(pool: ValidatorPool): AttachedValidator =
   for item in pool.validators.values():
     yield item

-proc triggersDoppelganger*(v: AttachedValidator, epoch: Epoch): bool =
-  ## Returns true iff detected activity in the given epoch would trigger
-  ## doppelganger detection
-  if v.doppelStatus != DoppelgangerStatus.Checked:
-    if v.activationEpoch == FAR_FUTURE_EPOCH:
-      false
-    elif epoch < v.activationEpoch + DOPPELGANGER_EPOCHS_COUNT:
-      v.doppelStatus = DoppelgangerStatus.Checked
-      false
-    else:
-      true
-  else:
-    false
-
-proc updateDoppelganger*(validator: AttachedValidator, epoch: Epoch) =
-  ## Called when the validator has proven to be inactive in the given epoch -
-  ## this call should be made after the end of `epoch` before acting on duties
-  ## in `epoch + 1`.
-
-  if validator.doppelStatus == DoppelgangerStatus.Checked:
-    return
-
-  if validator.doppelEpoch.isNone():
-    validator.doppelEpoch = Opt.some epoch
-
-  let doppelEpoch = validator.doppelEpoch.get()
-
-  if validator.doppelStatus == DoppelgangerStatus.Unknown:
-    if validator.activationEpoch == FAR_FUTURE_EPOCH:
-      return
-
-    # We don't do doppelganger checking for validators that are about to be
-    # activated since both clients would be waiting for the other to start
-    # performing duties - this accounts for genesis as well
-    # The slot is rounded up to ensure we cover all slots
-    if doppelEpoch + 1 <= validator.activationEpoch + DOPPELGANGER_EPOCHS_COUNT:
-      validator.doppelStatus = DoppelgangerStatus.Checked
-      return
-
-  validator.doppelStatus = DoppelgangerStatus.Checking
-
-  if epoch + 1 >= doppelEpoch + DOPPELGANGER_EPOCHS_COUNT:
-    validator.doppelStatus = DoppelgangerStatus.Checked
+proc doppelgangerChecked*(validator: AttachedValidator, epoch: Epoch) =
+  ## Call when the validator was checked for activity in the given epoch
+  if validator.doppelCheck.isNone():
+    debug "Doppelganger first check",
+      validator = shortLog(validator), epoch
+  elif validator.doppelCheck.get() + 1 notin [epoch, epoch + 1]:
+    debug "Doppelganger stale check",
+      validator = shortLog(validator),
+      checked = validator.doppelCheck.get(), epoch
+
+  validator.doppelCheck = Opt.some epoch
+
+proc doppelgangerActivity*(validator: AttachedValidator, epoch: Epoch) =
+  ## Call when we performed a doppelganger-monitored activity in the epoch
+  if validator.doppelActivity.isNone():
+    debug "Doppelganger first activity",
+      validator = shortLog(validator), epoch
+  elif validator.doppelActivity.get() + 1 notin [epoch, epoch + 1]:
+    debug "Doppelganger stale activity",
+      validator = shortLog(validator),
+      checked = validator.doppelActivity.get(), epoch
+
+  validator.doppelActivity = Opt.some epoch
+
+func triggersDoppelganger*(v: AttachedValidator, epoch: Epoch): bool =
+  ## Returns true iff we have proof that an activity in the given epoch
+  ## triggers doppelganger detection: this means the network was active for this
+  ## validator during the given epoch (via doppelgangerChecked) but the activity
+  ## did not originate from this instance.
+
+  if v.doppelActivity.isSome() and v.doppelActivity.get() >= epoch:
+    false # This was our own activity
+  elif v.doppelCheck.isNone():
+    false # Can't prove that the activity triggers the check
+  else:
+    v.doppelCheck.get() == epoch
+
+proc doppelgangerReady*(validator: AttachedValidator, slot: Slot): bool =
+  ## Returns true iff the validator has passed doppelganger detection by being
+  ## monitored in the previous epoch (or the given epoch is the activation
+  ## epoch, in which case we always consider it ready)
+  ##
+  ## If we checked doppelganger, we allow the check to lag by one slot to avoid
+  ## a race condition where the check for epoch N is ongoing and block
+  ## block production for slot_start(N+1) is about to happen
+  let epoch = slot.epoch
+  epoch == validator.activationEpoch or
+    (validator.doppelCheck.isSome and
+      (((validator.doppelCheck.get() + 1) == epoch) or
+        (((validator.doppelCheck.get() + 2).start_slot) == slot)))

 proc getValidatorForDuties*(
-    pool: ValidatorPool, key: ValidatorPubKey, slot: Slot):
+    pool: ValidatorPool, key: ValidatorPubKey, slot: Slot,
+    doppelActivity: bool, slashingSafe: bool):
     Opt[AttachedValidator] =
   ## Return validator only if it is ready for duties (has index and has passed
   ## doppelganger check where applicable)
-  let validator = pool.getValidator(key)
-  if isNil(validator) or validator.index.isNone():
+  let validator = ? pool.getValidator(key)
+  if validator.index.isNone():
     return Opt.none(AttachedValidator)

+  # Sync committee duties are not slashable, so we perform them even during
+  # doppelganger detection
   if pool.doppelgangerDetectionEnabled and
-      validator.triggersDoppelganger(slot.epoch):
-    # If the validator would trigger for an activity in the given slot, we don't
-    # return it for duties
+      not validator.doppelgangerReady(slot) and
+      not slashingSafe:
     notice "Doppelganger detection active - " &
       "skipping validator duties while observing the network",
-      validator = shortLog(validator)
+      validator = shortLog(validator),
+      slot,
+      doppelCheck = validator.doppelCheck,
+      activationEpoch = shortLog(validator.activationEpoch)

     return Opt.none(AttachedValidator)

+  if doppelActivity:
+    # Record the activity
+    # TODO consider moving to the the "registration point"
+    validator.doppelgangerActivity(slot.epoch)
+
   return Opt.some(validator)

+func triggersDoppelganger*(
+    pool: ValidatorPool, pubkey: ValidatorPubKey, epoch: Epoch): bool =
+  let v = pool.getValidator(pubkey)
+  v.isSome() and v[].triggersDoppelganger(epoch)
+
 proc signWithDistributedKey(v: AttachedValidator,
                             request: Web3SignerRequest): Future[SignatureResult]
                             {.async.} =
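The new `doppelgangerReady` gate lets duties resume in the epoch right after a recorded check, with a one-slot grace period for a check that may still be in flight (and the activation epoch is always allowed). A minimal sketch of that slot arithmetic, assuming 32-slot epochs and plain integers instead of the `Slot`/`Epoch` helper types, and omitting the activation-epoch shortcut:

```nim
const SLOTS_PER_EPOCH = 32'u64  # mainnet value; an assumption of this sketch

func epochOf(slot: uint64): uint64 = slot div SLOTS_PER_EPOCH
func startSlot(epoch: uint64): uint64 = epoch * SLOTS_PER_EPOCH

func readyAt(doppelCheck: uint64, slot: uint64): bool =
  # A check recorded for epoch N clears duties for all of epoch N + 1 and,
  # to tolerate a check that is still running, for the first slot of N + 2
  let epoch = epochOf(slot)
  doppelCheck + 1 == epoch or startSlot(doppelCheck + 2) == slot

when isMainModule:
  doAssert readyAt(10, startSlot(11))          # anywhere in epoch 11
  doAssert readyAt(10, startSlot(12))          # one-slot lag allowance
  doAssert not readyAt(10, startSlot(12) + 1)  # the check has gone stale
```

This mirrors the expectations encoded in the updated unit tests below: a check at epoch N keeps the validator eligible through epoch N + 1 and the first slot of N + 2.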
@@ -24,12 +24,23 @@ suite "Validator pool":
       v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)

     check:
-      not v.triggersDoppelganger(GENESIS_EPOCH)
+      not v.triggersDoppelganger(GENESIS_EPOCH) # no check
+      not v.doppelgangerReady(GENESIS_EPOCH.start_slot) # no activation

     v.updateValidator(makeValidatorAndIndex(ValidatorIndex(1), GENESIS_EPOCH))

     check:
-      not v.triggersDoppelganger(GENESIS_EPOCH)
+      not v.triggersDoppelganger(GENESIS_EPOCH) # no check
+      v.doppelgangerReady(GENESIS_EPOCH.start_slot) # ready in activation epoch
+      not v.doppelgangerReady((GENESIS_EPOCH + 1).start_slot) # old check
+
+    v.doppelgangerChecked(GENESIS_EPOCH)
+
+    check:
+      v.triggersDoppelganger(GENESIS_EPOCH) # checked, triggered
+      v.doppelgangerReady((GENESIS_EPOCH + 1).start_slot) # checked
+      v.doppelgangerReady((GENESIS_EPOCH + 2).start_slot) # 1 slot lag allowance
+      not v.doppelgangerReady((GENESIS_EPOCH + 2).start_slot + 1) # old check

   test "Doppelganger for validator that activates in same epoch as check":
     let
@@ -40,77 +51,32 @@ suite "Validator pool":
       not v.triggersDoppelganger(GENESIS_EPOCH)
       not v.triggersDoppelganger(now.epoch())

+      not v.doppelgangerReady(GENESIS_EPOCH.start_slot)
+      not v.doppelgangerReady(now)
+
     v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), FAR_FUTURE_EPOCH))

     check: # We still don't know when validator activates so we wouldn't trigger
       not v.triggersDoppelganger(GENESIS_EPOCH)
       not v.triggersDoppelganger(now.epoch())

-    v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch()))
-
-    check:
-      # Activates in current epoch, shouldn't trigger
-      not v.triggersDoppelganger(now.epoch())
-
-  test "Doppelganger for validator that activates in previous epoch":
-    let
-      v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
-      now = Epoch(10).start_slot()
-
-    v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch() - 1))
-
-    check:
-      # Already activated, should trigger
-      v.triggersDoppelganger(now.epoch())
-
-  test "Doppelganger for validator that activates in future epoch":
-    let
-      v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
-      now = Epoch(10).start_slot()
-
-    v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch() + 1))
-
-    check:
-      # Activates in the future, should not be checked
-      not v.triggersDoppelganger(now.epoch())
-
-  test "Doppelganger for already active validator":
-    let
-      v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
-      now = Epoch(10).start_slot()
-
-    v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch() - 4))
-
-    check:
-      v.triggersDoppelganger(now.epoch)
-
-    v.updateDoppelganger(now.epoch())
-
-    check:
-      not v.triggersDoppelganger(now.epoch + 1)
-
-  test "Activation after check":
-    let
-      v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
-      now = Epoch(10).start_slot()
-
-    v.updateDoppelganger(now.epoch())
-
-    check:
-      not v.triggersDoppelganger(now.epoch)
+      not v.doppelgangerReady(GENESIS_EPOCH.start_slot)
+      not v.doppelgangerReady(now)

     v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch()))

-    check: # already proven not to validate
-      not v.triggersDoppelganger(now.epoch)
-
-  test "Future activation after check":
-    let
-      v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
-      now = Epoch(10).start_slot()
-
-    v.updateDoppelganger(now.epoch())
-    v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch() + 1))
-
-    check: # doesn't trigger check just after activation
-      not v.triggersDoppelganger(now.epoch() + 1)
+    check: # No check done yet
+      not v.triggersDoppelganger(GENESIS_EPOCH)
+      not v.triggersDoppelganger(now.epoch())
+
+      not v.doppelgangerReady(GENESIS_EPOCH.start_slot)
+      v.doppelgangerReady(now)
+
+    v.doppelgangerChecked(GENESIS_EPOCH)
+
+    check:
+      v.triggersDoppelganger(GENESIS_EPOCH)
+      not v.triggersDoppelganger(now.epoch())
+
+      not v.doppelgangerReady(GENESIS_EPOCH.start_slot)
+      v.doppelgangerReady(now)