restore doppelganger check on connectivity loss (#4616)

* restore doppelganger check on connectivity loss

https://github.com/status-im/nimbus-eth2/pull/4398 introduced a
regression: doppelganger detection was no longer rerun after a loss of
connectivity. This PR reintroduces the check and adjusts the
implementation to simplify the code flow for both the beacon node (BN)
and the validator client (VC).

* track when the check was last performed for each validator (to handle
validators added after startup), as sketched below
* track when we last performed a doppelganger-detectable activity
(attesting) so as to avoid false positives
* remove the nodeStart special case (it should be treated the same as
dynamically adding a validator just after startup)
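
A minimal sketch of this tracking model, using hypothetical simplified
stand-ins (`Epoch` as a plain integer, `Opt` from the nim-results package)
rather than the actual spec types; the real fields and procs are in the
`validator_pool.nim` diff further down:

```nim
import results  # assumed: provides Opt (nim-results package)

type
  Epoch = uint64  # simplified stand-in for the spec type
  AttachedValidator = ref object
    doppelCheck: Opt[Epoch]     # last epoch the network was checked for this validator
    doppelActivity: Opt[Epoch]  # last epoch this instance itself performed a duty

proc doppelgangerChecked(v: AttachedValidator, epoch: Epoch) =
  ## Record that liveness was checked for this validator in `epoch`
  v.doppelCheck = Opt.some(epoch)

proc doppelgangerActivity(v: AttachedValidator, epoch: Epoch) =
  ## Record that we attested (or similar) with this validator in `epoch`
  v.doppelActivity = Opt.some(epoch)

func triggersDoppelganger(v: AttachedValidator, epoch: Epoch): bool =
  ## Activity observed in `epoch` proves a doppelganger only if the epoch was
  ## checked and the activity did not originate from this instance
  if v.doppelActivity.isSome() and v.doppelActivity.get() >= epoch:
    false # our own activity - not a doppelganger
  elif v.doppelCheck.isNone():
    false # never checked - cannot prove anything
  else:
    v.doppelCheck.get() == epoch

when isMainModule:
  let v = AttachedValidator()
  v.doppelgangerChecked(Epoch(10))
  doAssert v.triggersDoppelganger(Epoch(10))     # checked epoch, foreign activity
  v.doppelgangerActivity(Epoch(10))
  doAssert not v.triggersDoppelganger(Epoch(10)) # the activity was ours
```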

* allow sync committee duties in doppelganger period

* don't trigger doppelganger when registering duties

* fix crash when expected index response is missing

* fix missing slashingSafe propagation
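
The remaining bullets come together in the duty-gating path. The following
is a hedged sketch of how `getValidatorForDuties` gates duties on that
state, again with hypothetical simplified types; the actual signatures, the
one-slot lag allowance for the check, and the logging are in the diffs
below:

```nim
# Illustrative stand-ins only; the real pool keys validators by BLS pubkey
# and uses the spec's Slot/Epoch types.
import results, std/tables

type
  Epoch = uint64
  Slot = uint64
  AttachedValidator = ref object
    index: Opt[int]
    activationEpoch: Epoch
    doppelCheck, doppelActivity: Opt[Epoch]
  ValidatorPool = object
    doppelgangerDetectionEnabled: bool
    validators: Table[string, AttachedValidator]

func epoch(s: Slot): Epoch = Epoch(s div 32)  # SLOTS_PER_EPOCH = 32 on mainnet

func doppelgangerReady(v: AttachedValidator, epoch: Epoch): bool =
  ## Ready once the previous epoch has been checked, or in the activation
  ## epoch itself (the real code additionally lets the check lag by one slot)
  epoch == v.activationEpoch or
    (v.doppelCheck.isSome() and v.doppelCheck.get() + 1 == epoch)

proc getValidatorForDuties(
    pool: ValidatorPool, key: string, slot: Slot,
    doppelActivity = false, slashingSafe = false): Opt[AttachedValidator] =
  ## Return the validator only if it has an index and either passed
  ## doppelganger detection or the duty is not slashable
  let validator = pool.validators.getOrDefault(key)
  if validator.isNil() or validator.index.isNone():
    return Opt.none(AttachedValidator)
  if pool.doppelgangerDetectionEnabled and
      not validator.doppelgangerReady(slot.epoch) and
      not slashingSafe:
    # Sync committee duties pass slashingSafe = true and may run during the
    # observation window; attestations and blocks may not
    return Opt.none(AttachedValidator)
  if doppelActivity:
    # Attestation duties record their own activity so the next liveness
    # check does not mistake it for a doppelganger
    validator.doppelActivity = Opt.some(slot.epoch)
  Opt.some(validator)
```

In the diff, attestation duties pass `doppelActivity = true` (positionally,
as `true`), sync committee message duties pass `slashingSafe = true`, and
`registerDuties` switches to the plain `getValidator` lookup so that merely
registering duties does not count as doppelganger-detectable activity.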
Jacek Sieka 2023-02-20 12:28:56 +01:00 committed by GitHub
parent 97247ea6e7
commit 83f9745df1
13 changed files with 192 additions and 216 deletions

View File

@@ -527,15 +527,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 6/6 Fail: 0/6 Skip: 0/6
## Validator pool
```diff
+ Activation after check OK
+ Doppelganger for already active validator OK
+ Doppelganger for genesis validator OK
+ Doppelganger for validator that activates in future epoch OK
+ Doppelganger for validator that activates in previous epoch OK
+ Doppelganger for validator that activates in same epoch as check OK
+ Future activation after check OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
OK: 2/2 Fail: 0/2 Skip: 0/2
## Zero signature sanity checks
```diff
+ SSZ serialization roundtrip of SignedBeaconBlockHeader OK
@@ -630,4 +625,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9
---TOTAL---
OK: 351/356 Fail: 0/356 Skip: 5/356
OK: 346/351 Fail: 0/351 Skip: 5/351

View File

@@ -85,11 +85,6 @@ type
## of gossip interleaving between nodes so long as they don't gossip at
## the same time.
nodeLaunchSlot*: Slot ##\
## Set once, at node launch. This functions as a basic protection against
## false positives from attestations persisting within the gossip network
## across quick restarts.
Eth2Processor* = object
## The Eth2Processor is the entry point for untrusted message processing -
## when we receive messages from various sources, we pass them to the
@@ -164,7 +159,6 @@ proc new*(T: type Eth2Processor,
(ref Eth2Processor)(
doppelgangerDetectionEnabled: doppelgangerDetectionEnabled,
doppelgangerDetection: DoppelgangerProtection(
nodeLaunchSlot: getBeaconTime().slotOrZero,
broadcastStartEpoch: FAR_FUTURE_EPOCH),
blockProcessor: blockProcessor,
validatorMonitor: validatorMonitor,
@@ -263,8 +257,7 @@ proc setupDoppelgangerDetection*(self: var Eth2Processor, slot: Slot) =
if self.doppelgangerDetectionEnabled:
notice "Setting up doppelganger detection",
epoch = slot.epoch,
broadcast_epoch = self.doppelgangerDetection.broadcastStartEpoch,
nodestart_epoch = self.doppelgangerDetection.nodeLaunchSlot.epoch()
broadcast_epoch = self.doppelgangerDetection.broadcastStartEpoch
proc clearDoppelgangerProtection*(self: var Eth2Processor) =
self.doppelgangerDetection.broadcastStartEpoch = FAR_FUTURE_EPOCH
@@ -278,25 +271,17 @@ proc checkForPotentialDoppelganger(
if not self.doppelgangerDetectionEnabled:
return
if attestation.data.slot <= self.doppelgangerDetection.nodeLaunchSlot + 1:
return
let broadcastStartEpoch = self.doppelgangerDetection.broadcastStartEpoch
for validatorIndex in attesterIndices:
let
validatorPubkey = self.dag.validatorKey(validatorIndex).get().toPubKey()
validator = self.validatorPool[].getValidator(validatorPubkey)
pubkey = self.dag.validatorKey(validatorIndex).get().toPubKey()
if not(isNil(validator)):
if validator.triggersDoppelganger(broadcastStartEpoch):
warn "Doppelganger attestation",
validator = shortLog(validator),
validator_index = validatorIndex,
activation_epoch = validator.activationEpoch,
broadcast_epoch = broadcastStartEpoch,
attestation = shortLog(attestation)
quitDoppelganger()
if self.validatorPool[].triggersDoppelganger(
pubkey, attestation.data.slot.epoch):
warn "Doppelganger attestation",
validator = shortLog(pubkey),
validator_index = validatorIndex,
attestation = shortLog(attestation)
quitDoppelganger()
proc processAttestation*(
self: ref Eth2Processor, src: MsgSource,

View File

@@ -980,7 +980,7 @@ proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) =
node.network.updateSyncnetsMetadata(syncnets)
proc updateDoppelganger(node: BeaconNode, epoch: Epoch) =
proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) =
if not node.processor[].doppelgangerDetectionEnabled:
return
@@ -989,7 +989,7 @@ proc updateDoppelganger(node: BeaconNode, epoch: Epoch) =
# active
if epoch > node.processor[].doppelgangerDetection.broadcastStartEpoch:
for validator in node.attachedValidators[]:
validator.updateDoppelganger(epoch - 1)
validator.doppelgangerChecked(epoch - 1)
proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
## Subscribe to subnets that we are providing stability for or aggregating
@@ -1104,7 +1104,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot)
node.gossipState = targetGossipState
node.updateDoppelganger(slot.epoch)
node.doppelgangerChecked(slot.epoch)
node.updateAttestationSubnetHandlers(slot)
node.updateBlocksGossipStatus(slot, isBehind)
node.updateLightClientGossipStatus(slot, isBehind)

View File

@@ -222,8 +222,7 @@ proc installApiHandlers*(node: SigningNode) =
if validator_key.isErr():
return errorResponse(Http400, InvalidValidatorPublicKey)
let key = validator_key.get()
let validator = node.attachedValidators.getValidator(key)
if isNil(validator):
let validator = node.attachedValidators.getValidator(key).valueOr:
return errorResponse(Http404, ValidatorNotFoundError)
validator

View File

@@ -25,7 +25,8 @@ type
proc serveAttestation(service: AttestationServiceRef, adata: AttestationData,
duty: DutyAndProof): Future[bool] {.async.} =
let vc = service.client
let validator = vc.getValidatorForDuties(duty.data.pubkey, adata.slot).valueOr:
let validator = vc.getValidatorForDuties(
duty.data.pubkey, adata.slot, true).valueOr:
return false
let fork = vc.forkAtEpoch(adata.slot.epoch)
@@ -260,7 +261,7 @@ proc produceAndPublishAggregates(service: AttestationServiceRef,
block:
var res: seq[AggregateItem]
for duty in duties:
let validator = vc.attachedValidators[].getValidatorForDuties(
let validator = vc.getValidatorForDuties(
duty.data.pubkey, slot).valueOr:
continue

View File

@@ -484,8 +484,11 @@ proc getDelay*(vc: ValidatorClientRef, deadline: BeaconTime): TimeDiff =
vc.beaconClock.now() - deadline
proc getValidatorForDuties*(vc: ValidatorClientRef,
key: ValidatorPubKey, slot: Slot): Opt[AttachedValidator] =
vc.attachedValidators[].getValidatorForDuties(key, slot)
key: ValidatorPubKey, slot: Slot,
doppelActivity = false,
slashingSafe = false): Opt[AttachedValidator] =
vc.attachedValidators[].getValidatorForDuties(
key, slot, doppelActivity, slashingSafe)
proc forkAtEpoch*(vc: ValidatorClientRef, epoch: Epoch): Fork =
# If schedule is present, it MUST not be empty.
@@ -518,22 +521,23 @@ proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
proc removeValidator*(vc: ValidatorClientRef,
pubkey: ValidatorPubKey) {.async.} =
let validator = vc.attachedValidators[].getValidator(pubkey)
if not(isNil(validator)):
case validator.kind
of ValidatorKind.Local:
discard
of ValidatorKind.Remote:
# We must close all the REST clients running for the remote validator.
let pending =
block:
var res: seq[Future[void]]
for item in validator.clients:
res.add(item[0].closeWait())
res
await allFutures(pending)
# Remove validator from ValidatorPool.
vc.attachedValidators[].removeValidator(pubkey)
let validator = vc.attachedValidators[].getValidator(pubkey).valueOr:
return
# Remove validator from ValidatorPool.
vc.attachedValidators[].removeValidator(pubkey)
case validator.kind
of ValidatorKind.Local:
discard
of ValidatorKind.Remote:
# We must close all the REST clients running for the remote validator.
let pending =
block:
var res: seq[Future[void]]
for item in validator.clients:
res.add(item[0].closeWait())
res
await allFutures(pending)
proc getFeeRecipient*(vc: ValidatorClientRef, pubkey: ValidatorPubKey,
validatorIdx: ValidatorIndex,

View File

@@ -16,7 +16,8 @@ logScope: service = ServiceName
proc getCheckingList*(vc: ValidatorClientRef, epoch: Epoch): seq[ValidatorIndex] =
var res: seq[ValidatorIndex]
for validator in vc.attachedValidators[]:
if validator.index.isSome and validator.triggersDoppelganger(epoch):
if validator.index.isSome and
(validator.doppelCheck.isNone or validator.doppelCheck.get() < epoch):
res.add validator.index.get()
res
@@ -36,12 +37,11 @@ proc processActivities(service: DoppelgangerServiceRef, epoch: Epoch,
let vindex = item.index
for validator in vc.attachedValidators[]:
if validator.index == Opt.some(vindex):
if item.is_live:
if validator.triggersDoppelganger(epoch):
vc.doppelExit.fire()
return
else:
validator.updateDoppelganger(epoch)
validator.doppelgangerChecked(epoch)
if item.is_live and validator.triggersDoppelganger(epoch):
vc.doppelExit.fire()
return
proc mainLoop(service: DoppelgangerServiceRef) {.async.} =
let vc = service.client

View File

@@ -87,15 +87,15 @@ proc pollForValidatorIndices*(vc: ValidatorClientRef) {.async.} =
list: seq[AttachedValidator]
for item in validators:
var validator = vc.attachedValidators[].getValidator(item.validator.pubkey)
if isNil(validator):
let validator = vc.attachedValidators[].getValidator(item.validator.pubkey)
if validator.isNone():
missing.add(validatorLog(item.validator.pubkey, item.index))
else:
validator.updateValidator(Opt.some ValidatorAndIndex(
validator.get().updateValidator(Opt.some ValidatorAndIndex(
index: item.index,
validator: item.validator))
updated.add(validatorLog(item.validator.pubkey, item.index))
list.add(validator)
list.add(validator.get())
if len(updated) > 0:
info "Validator indices updated",
@@ -198,7 +198,9 @@ proc pollForAttesterDuties*(vc: ValidatorClientRef,
var pendingRequests: seq[Future[SignatureResult]]
var validators: seq[AttachedValidator]
for item in addOrReplaceItems:
let validator = vc.attachedValidators[].getValidator(item.duty.pubkey)
let validator =
vc.attachedValidators[].getValidator(item.duty.pubkey).valueOr:
continue
let fork = vc.forkAtEpoch(item.duty.slot.epoch)
let future = validator.getSlotSignature(
fork, genesisRoot, item.duty.slot)

View File

@@ -36,7 +36,8 @@ proc serveSyncCommitteeMessage*(service: SyncCommitteeServiceRef,
vindex = duty.validator_index
subcommitteeIdx = getSubcommitteeIndex(
duty.validator_sync_committee_index)
validator = vc.getValidatorForDuties(duty.pubkey, slot).valueOr: return false
validator = vc.getValidatorForDuties(
duty.pubkey, slot, slashingSafe = true).valueOr: return false
message =
block:
let res = await getSyncCommitteeMessage(validator, fork,
@@ -212,10 +213,9 @@ proc produceAndPublishContributions(service: SyncCommitteeServiceRef,
var validators: seq[(AttachedValidator, SyncSubcommitteeIndex)]
for duty in duties:
let validator = vc.attachedValidators[].getValidatorForDuties(
duty.pubkey, slot).valueOr:
continue
let
validator = vc.getValidatorForDuties(duty.pubkey, slot).valueOr:
continue
subCommitteeIdx =
getSubcommitteeIndex(duty.validator_sync_committee_index)
future = validator.getSyncCommitteeSelectionProof(

View File

@@ -586,8 +586,7 @@ proc removeValidator*(pool: var ValidatorPool,
publicKey: ValidatorPubKey,
kind: KeystoreKind): KmResult[RemoveValidatorStatus] {.
raises: [Defect].} =
let validator = pool.getValidator(publicKey)
if isNil(validator):
let validator = pool.getValidator(publicKey).valueOr:
return ok(RemoveValidatorStatus.notFound)
if validator.kind.toKeystoreKind() != kind:
return ok(RemoveValidatorStatus.notFound)

View File

@@ -125,12 +125,18 @@ proc addValidators*(node: BeaconNode) =
v = node.attachedValidators[].addValidator(keystore, feeRecipient, gasLimit)
v.updateValidator(data)
proc getValidator*(node: BeaconNode, idx: ValidatorIndex): Opt[AttachedValidator] =
let key = ? node.dag.validatorKey(idx)
node.attachedValidators[].getValidator(key.toPubKey())
proc getValidatorForDuties*(
node: BeaconNode,
idx: ValidatorIndex, slot: Slot): Opt[AttachedValidator] =
idx: ValidatorIndex, slot: Slot,
doppelActivity = false, slashingSafe = false): Opt[AttachedValidator] =
let key = ? node.dag.validatorKey(idx)
node.attachedValidators[].getValidatorForDuties(key.toPubKey(), slot)
node.attachedValidators[].getValidatorForDuties(
key.toPubKey(), slot, doppelActivity, slashingSafe)
proc isSynced*(node: BeaconNode, head: BlockRef): SyncStatus =
## TODO This function is here as a placeholder for some better heuristics to
@@ -1036,7 +1042,8 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
epochRef.shufflingRef, slot, committee_index)
for index_in_committee, validator_index in committee:
let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
let validator = node.getValidatorForDuties(
validator_index, slot, true).valueOr:
continue
let
@@ -1107,7 +1114,8 @@ proc handleSyncCommitteeMessages(node: BeaconNode, head: BlockRef, slot: Slot) =
for subcommitteeIdx in SyncSubcommitteeIndex:
for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
let validator = node.getValidatorForDuties(valIdx, slot).valueOr:
let validator = node.getValidatorForDuties(
valIdx, slot, slashingSafe = true).valueOr:
continue
asyncSpawn createAndSendSyncCommitteeMessage(node, validator, slot,
subcommitteeIdx, head)
@@ -1174,7 +1182,8 @@ proc handleSyncCommitteeContributions(
for subcommitteeIdx in SyncSubCommitteeIndex:
for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
let validator = node.getValidatorForDuties(valIdx, slot).valueOr:
let validator = node.getValidatorForDuties(
valIdx, slot, slashingSafe = true).valueOr:
continue
asyncSpawn signAndSendContribution(
@@ -1475,9 +1484,10 @@ proc updateValidators(
# checking all validators would significantly slow down this loop when there
# are many inactive keys
for i in node.dutyValidatorCount..validators.high:
let v = node.attachedValidators[].getValidator(validators[i].pubkey)
if v != nil:
v.index = Opt.some ValidatorIndex(i)
let
v = node.attachedValidators[].getValidator(validators[i].pubkey).valueOr:
continue
v.index = Opt.some ValidatorIndex(i)
node.dutyValidatorCount = validators.len
@@ -1656,9 +1666,10 @@ proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
let committee = get_beacon_committee(shufflingRef, slot, committee_index)
for index_in_committee, validator_index in committee:
let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
continue
let
validator = node.getValidator(validator_index).valueOr:
continue
subnet_id = compute_subnet_for_attestation(
committees_per_slot, slot, committee_index)
slotSigRes = await validator.getSlotSignature(

View File

@@ -27,13 +27,12 @@ export
const
WEB3_SIGNER_DELAY_TOLERANCE = 3.seconds
DOPPELGANGER_EPOCHS_COUNT = 1
## The number of full epochs that we monitor validators for doppelganger
## protection
declareGauge validators,
"Number of validators attached to the beacon node"
logScope: topics = "val_pool"
type
ValidatorKind* {.pure.} = enum
Local, Remote
@@ -44,9 +43,6 @@ type
index*: ValidatorIndex
validator*: Validator
DoppelgangerStatus {.pure.} = enum
Unknown, Checking, Checked
AttachedValidator* = ref object
data*: KeystoreData
case kind*: ValidatorKind
@@ -76,9 +72,10 @@ type
# builder should be informed of current validators
externalBuilderRegistration*: Opt[SignedValidatorRegistrationV1]
doppelStatus: DoppelgangerStatus
doppelEpoch*: Opt[Epoch]
## The epoch where doppelganger detection started doing its monitoring
doppelCheck*: Opt[Epoch]
## The epoch where doppelganger detection last performed a check
doppelActivity*: Opt[Epoch]
## The last time we attempted to perform a duty with this validator
lastWarning*: Opt[Slot]
@@ -203,8 +200,9 @@ proc addValidator*(pool: var ValidatorPool,
pool.addRemoteValidator(keystore, feeRecipient, gasLimit)
proc getValidator*(pool: ValidatorPool,
validatorKey: ValidatorPubKey): AttachedValidator =
pool.validators.getOrDefault(validatorKey)
validatorKey: ValidatorPubKey): Opt[AttachedValidator] =
let v = pool.validators.getOrDefault(validatorKey)
if v == nil: Opt.none(AttachedValidator) else: Opt.some(v)
proc contains*(pool: ValidatorPool, pubkey: ValidatorPubKey): bool =
## Returns ``true`` if validator with key ``pubkey`` present in ``pool``.
@@ -253,14 +251,6 @@ proc updateValidator*(
validator.activationEpoch = activationEpoch
if validator.doppelStatus == DoppelgangerStatus.Unknown:
if validator.doppelEpoch.isSome() and activationEpoch != FAR_FUTURE_EPOCH:
let doppelEpoch = validator.doppelEpoch.get()
if doppelEpoch >= validator.activationEpoch + DOPPELGANGER_EPOCHS_COUNT:
validator.doppelStatus = DoppelgangerStatus.Checking
else:
validator.doppelStatus = DoppelgangerStatus.Checked
proc close*(pool: var ValidatorPool) =
## Unlock and close all validator keystore's files managed by ``pool``.
for validator in pool.validators.values():
@@ -282,70 +272,94 @@ iterator items*(pool: ValidatorPool): AttachedValidator =
for item in pool.validators.values():
yield item
proc triggersDoppelganger*(v: AttachedValidator, epoch: Epoch): bool =
## Returns true iff detected activity in the given epoch would trigger
## doppelganger detection
if v.doppelStatus != DoppelgangerStatus.Checked:
if v.activationEpoch == FAR_FUTURE_EPOCH:
false
elif epoch < v.activationEpoch + DOPPELGANGER_EPOCHS_COUNT:
v.doppelStatus = DoppelgangerStatus.Checked
false
else:
true
proc doppelgangerChecked*(validator: AttachedValidator, epoch: Epoch) =
## Call when the validator was checked for activity in the given epoch
if validator.doppelCheck.isNone():
debug "Doppelganger first check",
validator = shortLog(validator), epoch
elif validator.doppelCheck.get() + 1 notin [epoch, epoch + 1]:
debug "Doppelganger stale check",
validator = shortLog(validator),
checked = validator.doppelCheck.get(), epoch
validator.doppelCheck = Opt.some epoch
proc doppelgangerActivity*(validator: AttachedValidator, epoch: Epoch) =
## Call when we performed a doppelganger-monitored activity in the epoch
if validator.doppelActivity.isNone():
debug "Doppelganger first activity",
validator = shortLog(validator), epoch
elif validator.doppelActivity.get() + 1 notin [epoch, epoch + 1]:
debug "Doppelganger stale activity",
validator = shortLog(validator),
checked = validator.doppelActivity.get(), epoch
validator.doppelActivity = Opt.some epoch
func triggersDoppelganger*(v: AttachedValidator, epoch: Epoch): bool =
## Returns true iff we have proof that an activity in the given epoch
## triggers doppelganger detection: this means the network was active for this
## validator during the given epoch (via doppelgangerChecked) but the activity
## did not originate from this instance.
if v.doppelActivity.isSome() and v.doppelActivity.get() >= epoch:
false # This was our own activity
elif v.doppelCheck.isNone():
false # Can't prove that the activity triggers the check
else:
false
v.doppelCheck.get() == epoch
proc updateDoppelganger*(validator: AttachedValidator, epoch: Epoch) =
## Called when the validator has proven to be inactive in the given epoch -
## this call should be made after the end of `epoch` before acting on duties
## in `epoch + 1`.
if validator.doppelStatus == DoppelgangerStatus.Checked:
return
if validator.doppelEpoch.isNone():
validator.doppelEpoch = Opt.some epoch
let doppelEpoch = validator.doppelEpoch.get()
if validator.doppelStatus == DoppelgangerStatus.Unknown:
if validator.activationEpoch == FAR_FUTURE_EPOCH:
return
# We don't do doppelganger checking for validators that are about to be
# activated since both clients would be waiting for the other to start
# performing duties - this accounts for genesis as well
# The slot is rounded up to ensure we cover all slots
if doppelEpoch + 1 <= validator.activationEpoch + DOPPELGANGER_EPOCHS_COUNT:
validator.doppelStatus = DoppelgangerStatus.Checked
return
validator.doppelStatus = DoppelgangerStatus.Checking
if epoch + 1 >= doppelEpoch + DOPPELGANGER_EPOCHS_COUNT:
validator.doppelStatus = DoppelgangerStatus.Checked
proc doppelgangerReady*(validator: AttachedValidator, slot: Slot): bool =
## Returns true iff the validator has passed doppelganger detection by being
## monitored in the previous epoch (or the given epoch is the activation
## epoch, in which case we always consider it ready)
##
## If we checked doppelganger, we allow the check to lag by one slot to avoid
## a race condition where the check for epoch N is ongoing and block
## production for slot_start(N+1) is about to happen
let epoch = slot.epoch
epoch == validator.activationEpoch or
(validator.doppelCheck.isSome and
(((validator.doppelCheck.get() + 1) == epoch) or
(((validator.doppelCheck.get() + 2).start_slot) == slot)))
proc getValidatorForDuties*(
pool: ValidatorPool, key: ValidatorPubKey, slot: Slot):
pool: ValidatorPool, key: ValidatorPubKey, slot: Slot,
doppelActivity: bool, slashingSafe: bool):
Opt[AttachedValidator] =
## Return validator only if it is ready for duties (has index and has passed
## doppelganger check where applicable)
let validator = pool.getValidator(key)
if isNil(validator) or validator.index.isNone():
let validator = ? pool.getValidator(key)
if validator.index.isNone():
return Opt.none(AttachedValidator)
# Sync committee duties are not slashable, so we perform them even during
# doppelganger detection
if pool.doppelgangerDetectionEnabled and
validator.triggersDoppelganger(slot.epoch):
# If the validator would trigger for an activity in the given slot, we don't
# return it for duties
not validator.doppelgangerReady(slot) and
not slashingSafe:
notice "Doppelganger detection active - " &
"skipping validator duties while observing the network",
validator = shortLog(validator)
"skipping validator duties while observing the network",
validator = shortLog(validator),
slot,
doppelCheck = validator.doppelCheck,
activationEpoch = shortLog(validator.activationEpoch)
return Opt.none(AttachedValidator)
if doppelActivity:
# Record the activity
# TODO consider moving to the "registration point"
validator.doppelgangerActivity(slot.epoch)
return Opt.some(validator)
func triggersDoppelganger*(
pool: ValidatorPool, pubkey: ValidatorPubKey, epoch: Epoch): bool =
let v = pool.getValidator(pubkey)
v.isSome() and v[].triggersDoppelganger(epoch)
proc signWithDistributedKey(v: AttachedValidator,
request: Web3SignerRequest): Future[SignatureResult]
{.async.} =

View File

@@ -24,12 +24,23 @@ suite "Validator pool":
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
check:
not v.triggersDoppelganger(GENESIS_EPOCH)
not v.triggersDoppelganger(GENESIS_EPOCH) # no check
not v.doppelgangerReady(GENESIS_EPOCH.start_slot) # no activation
v.updateValidator(makeValidatorAndIndex(ValidatorIndex(1), GENESIS_EPOCH))
check:
not v.triggersDoppelganger(GENESIS_EPOCH)
not v.triggersDoppelganger(GENESIS_EPOCH) # no check
v.doppelgangerReady(GENESIS_EPOCH.start_slot) # ready in activation epoch
not v.doppelgangerReady((GENESIS_EPOCH + 1).start_slot) # old check
v.doppelgangerChecked(GENESIS_EPOCH)
check:
v.triggersDoppelganger(GENESIS_EPOCH) # checked, triggered
v.doppelgangerReady((GENESIS_EPOCH + 1).start_slot) # checked
v.doppelgangerReady((GENESIS_EPOCH + 2).start_slot) # 1 slot lag allowance
not v.doppelgangerReady((GENESIS_EPOCH + 2).start_slot + 1) # old check
test "Doppelganger for validator that activates in same epoch as check":
let
@@ -40,77 +51,32 @@ suite "Validator pool":
not v.triggersDoppelganger(GENESIS_EPOCH)
not v.triggersDoppelganger(now.epoch())
not v.doppelgangerReady(GENESIS_EPOCH.start_slot)
not v.doppelgangerReady(now)
v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), FAR_FUTURE_EPOCH))
check: # We still don't know when validator activates so we wouldn't trigger
not v.triggersDoppelganger(GENESIS_EPOCH)
not v.triggersDoppelganger(now.epoch())
v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch()))
check:
# Activates in current epoch, shouldn't trigger
not v.triggersDoppelganger(now.epoch())
test "Doppelganger for validator that activates in previous epoch":
let
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
now = Epoch(10).start_slot()
v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch() - 1))
check:
# Already activated, should trigger
v.triggersDoppelganger(now.epoch())
test "Doppelganger for validator that activates in future epoch":
let
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
now = Epoch(10).start_slot()
v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch() + 1))
check:
# Activates in the future, should not be checked
not v.triggersDoppelganger(now.epoch())
test "Doppelganger for already active validator":
let
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
now = Epoch(10).start_slot()
v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch() - 4))
check:
v.triggersDoppelganger(now.epoch)
v.updateDoppelganger(now.epoch())
check:
not v.triggersDoppelganger(now.epoch + 1)
test "Activation after check":
let
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
now = Epoch(10).start_slot()
v.updateDoppelganger(now.epoch())
check:
not v.triggersDoppelganger(now.epoch)
not v.doppelgangerReady(GENESIS_EPOCH.start_slot)
not v.doppelgangerReady(now)
v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch()))
check: # already proven not to validate
not v.triggersDoppelganger(now.epoch)
check: # No check done yet
not v.triggersDoppelganger(GENESIS_EPOCH)
not v.triggersDoppelganger(now.epoch())
test "Future activation after check":
let
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
now = Epoch(10).start_slot()
not v.doppelgangerReady(GENESIS_EPOCH.start_slot)
v.doppelgangerReady(now)
v.updateDoppelganger(now.epoch())
v.updateValidator(makeValidatorAndIndex(ValidatorIndex(5), now.epoch() + 1))
v.doppelgangerChecked(GENESIS_EPOCH)
check: # doesn't trigger check just after activation
not v.triggersDoppelganger(now.epoch() + 1)
check:
v.triggersDoppelganger(GENESIS_EPOCH)
not v.triggersDoppelganger(now.epoch())
not v.doppelgangerReady(GENESIS_EPOCH.start_slot)
v.doppelgangerReady(now)