unify bn/vc doppelganger detection (#4398)
* fix REST liveness endpoint responding even when gossip is not enabled
* fix VC exit code on doppelganger hit
* fix activation epoch not being updated correctly on long deposit queues
* fix activation epoch being set incorrectly when updating validator
* move most implementation logic to `validator_pool`, add tests
* ensure consistent logging between VC and BN
* add docs
Parent: 9df19f68fe
Commit: 6e2a02466e
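The unified flow this commit introduces is easiest to see before diving into the per-file hunks. The sketch below is abridged from the diff that follows (same names, shortened bodies) - it is an overview, not a verbatim copy:

```nim
# Shared state machine in validator_pool: a validator is Unknown until its
# index and activation epoch are known, Checking while the network is being
# observed, and Checked once enough quiet epochs have been recorded.
proc triggersDoppelganger(v: AttachedValidator, epoch: Epoch): bool =
  if v.doppelStatus == DoppelgangerStatus.Checked: return false
  if v.activationEpoch == FAR_FUTURE_EPOCH: return false  # index unknown
  if epoch < v.activationEpoch + DOPPELGANGER_EPOCHS_COUNT:
    v.doppelStatus = DoppelgangerStatus.Checked  # activation grace period
    return false
  true

# Beacon node: a gossip attestation signed by one of our own validators while
# it is still checking ends the process:
#   if validator.triggersDoppelganger(broadcastStartEpoch): quitDoppelganger()
# Validator client: the BN liveness API is polled once per epoch; a live hit
# fires doppelExit (leading to the same exit code 129), while a quiet epoch is
# recorded via validator.updateDoppelganger(epoch).
# Duty scheduling in both clients goes through getValidatorForDuties, which
# returns `none` while the check is pending.
```
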
@@ -139,11 +139,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 + Tail block only in common                                  OK
 ```
 OK: 2/2 Fail: 0/2 Skip: 0/2
 
-## Doppelganger protection test suite
-```diff
-+ doppelgangerCheck() test                                   OK
-```
-OK: 1/1 Fail: 0/1 Skip: 0/1
-
 ## EF - SSZ generic types
 ```diff
   Testing basic_vector inputs - invalid                      Skip

@@ -508,6 +503,17 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + [SyncQueue] hasEndGap() test                               OK
 ```
 OK: 23/23 Fail: 0/23 Skip: 0/23
 
+## Validator pool
+```diff
++ Activation after check                                     OK
++ Doppelganger for already active validator                  OK
++ Doppelganger for genesis validator                         OK
++ Doppelganger for validator that activates in future epoch  OK
++ Doppelganger for validator that activates in previous epoch OK
++ Doppelganger for validator that activates in same epoch as check OK
++ Future activation after check                              OK
+```
+OK: 7/7 Fail: 0/7 Skip: 0/7
+
 ## Zero signature sanity checks
 ```diff
 + SSZ serialization roundtrip of SignedBeaconBlockHeader     OK

@@ -614,4 +620,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 OK: 9/9 Fail: 0/9 Skip: 0/9
 
 ---TOTAL---
-OK: 339/344 Fail: 0/344 Skip: 5/344
+OK: 345/350 Fail: 0/350 Skip: 5/350

@@ -21,7 +21,8 @@ import
     light_client_pool, sync_committee_msg_pool],
   ../validators/validator_pool,
   ../beacon_clock,
-  "."/[gossip_validation, block_processor, batch_validation]
+  "."/[gossip_validation, block_processor, batch_validation],
+  ../nimbus_binary_common
 
 export
   results, taskpools, block_clearance, blockchain_dag, exit_pool, attestation_pool,

@@ -247,14 +248,20 @@ proc setupDoppelgangerDetection*(self: var Eth2Processor, slot: Slot) =
   # can be up to around 10,000 Wei. Thus, skipping attestations isn't cheap
   # and one should gauge the likelihood of this simultaneous launch to tune
   # the epoch delay to one's perceived risk.
-  if self.doppelgangerDetectionEnabled:
-    self.doppelgangerDetection.broadcastStartEpoch = slot.epoch
+  # Round up to ensure that we cover the entire epoch - used by rest api also
+  self.doppelgangerDetection.broadcastStartEpoch =
+    (slot + SLOTS_PER_EPOCH - 1).epoch
+
+  if self.doppelgangerDetectionEnabled:
     notice "Setting up doppelganger detection",
       epoch = slot.epoch,
      broadcast_epoch = self.doppelgangerDetection.broadcastStartEpoch,
      nodestart_epoch = self.doppelgangerDetection.nodeLaunchSlot.epoch()
 
+proc clearDoppelgangerProtection*(self: var Eth2Processor) =
+  self.doppelgangerDetection.broadcastStartEpoch = FAR_FUTURE_EPOCH
+
 proc checkForPotentialDoppelganger(
     self: var Eth2Processor, attestation: Attestation,
     attesterIndices: openArray[ValidatorIndex]) =

@@ -275,28 +282,14 @@ proc checkForPotentialDoppelganger(
     validator = self.validatorPool[].getValidator(validatorPubkey)
 
-  if not(isNil(validator)):
-    let res = validator.doppelgangerCheck(attestation.data.slot.epoch(),
-                                          broadcastStartEpoch)
-    if res.isOk() and not(res.get()):
-      warn "We believe you are currently running another instance of the " &
-           "same validator. We've disconnected you from the network as " &
-           "this presents a significant slashing risk. Possible next steps "&
-           "are (a) making sure you've disconnected your validator from " &
-           "your old machine before restarting the client; and (b) running " &
-           "the client again with the gossip-slashing-protection option " &
-           "disabled, only if you are absolutely sure this is the only " &
-           "instance of your validator running, and reporting the issue " &
-           "at https://github.com/status-im/nimbus-eth2/issues.",
-           validator = shortLog(validator),
-           start_slot = validator.startSlot,
-           validator_index = validatorIndex,
-           activation_epoch = validator.activationEpoch.get(),
-           broadcast_epoch = broadcastStartEpoch,
-           attestation = shortLog(attestation)
-      # Avoid colliding with
-      # https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Process%20Exit%20Codes
-      const QuitDoppelganger = 129
-      quit QuitDoppelganger
+  if validator.triggersDoppelganger(broadcastStartEpoch):
+    warn "Doppelganger attestation",
+      validator = shortLog(validator),
+      validator_index = validatorIndex,
+      activation_epoch = validator.activationEpoch,
+      broadcast_epoch = broadcastStartEpoch,
+      attestation = shortLog(attestation)
+    quitDoppelganger()
 
 proc processAttestation*(
     self: ref Eth2Processor, src: MsgSource,

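The epoch rounding introduced in `setupDoppelgangerDetection` above is worth a worked example: `broadcastStartEpoch` must point at the first epoch that is monitored in full. Assuming mainnet's `SLOTS_PER_EPOCH = 32` and the spec's `Slot`/`Epoch` helpers:

```nim
# (slot + SLOTS_PER_EPOCH - 1).epoch rounds up to the next epoch boundary
doAssert (Slot(65) + SLOTS_PER_EPOCH - 1).epoch == Epoch(3)
  # gossip started mid-epoch 2 (slots 64..95): epoch 2 is only partially
  # covered, so monitoring starts at epoch 3
doAssert (Slot(64) + SLOTS_PER_EPOCH - 1).epoch == Epoch(2)
  # gossip started exactly on the boundary: epoch 2 is fully covered
```
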
@@ -622,7 +622,8 @@ proc init*(T: type BeaconNode,
      SlashingProtectionDB.init(
        getStateField(dag.headState, genesis_validators_root),
        config.validatorsDir(), SlashingDbName)
-    validatorPool = newClone(ValidatorPool.init(slashingProtectionDB))
+    validatorPool = newClone(ValidatorPool.init(
+      slashingProtectionDB, config.doppelgangerDetection))
 
    keymanagerInitResult = initKeymanagerServer(config, restServer)
    keymanagerHost = if keymanagerInitResult.server != nil:

@@ -963,6 +964,17 @@ proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) =
 
  node.network.updateSyncnetsMetadata(syncnets)
 
+proc updateDoppelganger(node: BeaconNode, epoch: Epoch) =
+  if not node.processor[].doppelgangerDetectionEnabled:
+    return
+
+  # broadcastStartEpoch is set to FAR_FUTURE_EPOCH when we're not monitoring
+  # gossip - it is only viable to assert liveness in epochs where gossip is
+  # active
+  if epoch > node.processor[].doppelgangerDetection.broadcastStartEpoch:
+    for validator in node.attachedValidators[]:
+      validator.updateDoppelganger(epoch - 1)
+
 proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
  ## Subscribe to subnets that we are providing stability for or aggregating
  ## and unsubscribe from the ones that are no longer relevant.

@@ -1049,6 +1061,8 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
      headSlot = head.slot,
      headDistance
 
+    node.processor[].clearDoppelgangerProtection()
+
  let forkDigests = node.forkDigests()
 
  discard $eip4844ImplementationMissing & "nimbus_beacon_node.nim:updateGossipStatus check EIP4844 removeMessageHandlers"

@@ -1076,6 +1090,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
      addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot)
 
  node.gossipState = targetGossipState
+  node.updateDoppelganger(slot.epoch)
  node.updateAttestationSubnetHandlers(slot)
  node.updateBlocksGossipStatus(slot, isBehind)
  node.updateLightClientGossipStatus(slot, isBehind)

@@ -20,7 +20,6 @@ import
  chronos, confutils, presto, toml_serialization, metrics,
  chronicles, chronicles/helpers as chroniclesHelpers, chronicles/topics_registry,
  stew/io2,
-  presto,
 
  # Local modules
  ./spec/[helpers],

@@ -413,3 +412,15 @@ proc initKeymanagerServer*(
      nil
 
  KeymanagerInitResult(server: keymanagerServer, token: token)
+
+proc quitDoppelganger*() =
+  # Avoid colliding with
+  # https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Process%20Exit%20Codes
+  # This error code is used to permanently shut down validators
+  fatal "Doppelganger detection triggered! It appears a validator loaded into " &
+    "this process is already live on the network - the validator is at high " &
+    "risk of being slashed due to the same keys being used in two setups. " &
+    "See https://nimbus.guide/doppelganger-detection.html for more information!"
+
+  const QuitDoppelganger = 129
+  quit QuitDoppelganger

@@ -103,10 +103,7 @@ proc initValidators(sn: var SigningNode): bool =
      let feeRecipient = default(Eth1Address)
      case keystore.kind
      of KeystoreKind.Local:
-        # Signing node is not supposed to know genesis time, so we just set
-        # `start_slot` to GENESIS_SLOT.
-        sn.attachedValidators.addLocalValidator(
-          keystore, feeRecipient, GENESIS_SLOT)
+        discard sn.attachedValidators.addLocalValidator(keystore, feeRecipient)
        publicKeyIdents.add("\"0x" & keystore.pubkey.toHex() & "\"")
      of KeystoreKind.Remote:
        error "Signing node do not support remote validators",

@@ -210,7 +210,7 @@ proc new*(T: type ValidatorClientRef,
      graffitiBytes: config.graffiti.get(defaultGraffitiBytes()),
      nodesAvailable: newAsyncEvent(),
      forksAvailable: newAsyncEvent(),
-      gracefulExit: newAsyncEvent(),
+      doppelExit: newAsyncEvent(),
      indicesAvailable: newAsyncEvent(),
      dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()),
      sigintHandleFut: waitSignal(SIGINT),

@@ -225,7 +225,7 @@ proc new*(T: type ValidatorClientRef,
      nodesAvailable: newAsyncEvent(),
      forksAvailable: newAsyncEvent(),
      indicesAvailable: newAsyncEvent(),
-      gracefulExit: newAsyncEvent(),
+      doppelExit: newAsyncEvent(),
      dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()),
      sigintHandleFut: newFuture[void]("sigint_placeholder"),
      sigtermHandleFut: newFuture[void]("sigterm_placeholder")

@@ -255,7 +255,8 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {.async.} =
      SlashingProtectionDB.init(
        vc.beaconGenesis.genesis_validators_root,
        vc.config.validatorsDir(), "slashing_protection")
-    validatorPool = newClone(ValidatorPool.init(slashingProtectionDB))
+    validatorPool = newClone(ValidatorPool.init(
+      slashingProtectionDB, vc.config.doppelgangerDetection))
 
  vc.attachedValidators = validatorPool
 
@@ -315,10 +316,10 @@ proc asyncRun*(vc: ValidatorClientRef) {.async.} =
    vc.keymanagerServer.router.installKeymanagerHandlers(vc.keymanagerHost[])
    vc.keymanagerServer.start()
 
-  var exitEventFut = vc.gracefulExit.wait()
+  var doppelEventFut = vc.doppelExit.wait()
  try:
    vc.runSlotLoopFut = runSlotLoop(vc, vc.beaconClock.now(), onSlotStart)
-    discard await race(vc.runSlotLoopFut, exitEventFut)
+    discard await race(vc.runSlotLoopFut, doppelEventFut)
    if not(vc.runSlotLoopFut.finished()):
      notice "Received shutdown event, exiting"
  except CancelledError:

@@ -329,12 +330,20 @@ proc asyncRun*(vc: ValidatorClientRef) {.async.} =
 
  await vc.shutdownMetrics()
  vc.shutdownSlashingProtection()
+
+  if doppelEventFut.finished:
+    # Critically, database has been shut down - the rest doesn't matter, we need
+    # to stop as soon as possible
+    # TODO we need to actually quit _before_ any other async tasks have had the
+    # chance to happen
+    quitDoppelganger()
+
  debug "Stopping main processing loop"
  var pending: seq[Future[void]]
  if not(vc.runSlotLoopFut.finished()):
    pending.add(vc.runSlotLoopFut.cancelAndWait())
-  if not(exitEventFut.finished()):
-    pending.add(exitEventFut.cancelAndWait())
+  if not(doppelEventFut.finished()):
+    pending.add(doppelEventFut.cancelAndWait())
  debug "Stopping running services"
  pending.add(vc.fallbackService.stop())
  pending.add(vc.forkService.stop())

@@ -127,22 +127,13 @@ proc handleAddRemoteValidatorReq(host: KeymanagerHost,
  let res = importKeystore(host.validatorPool[], host.validatorsDir, keystore)
  if res.isOk:
    let
-      slot = host.getBeaconTimeFn().slotOrZero
-      validator = host.getValidatorData(keystore.pubkey)
+      data = host.getValidatorData(keystore.pubkey)
      feeRecipient = host.getSuggestedFeeRecipient(keystore.pubkey).valueOr(
        host.defaultFeeRecipient)
-      index =
-        if validator.isSome():
-          Opt.some(validator.get().index)
-        else:
-          Opt.none(ValidatorIndex)
-      activationEpoch =
-        if validator.isSome():
-          Opt.some(validator.get().validator.activation_epoch)
-        else:
-          Opt.none(Epoch)
-    host.validatorPool[].addRemoteValidator(
-      res.get, index, feeRecipient, slot, activationEpoch)
+      v = host.validatorPool[].addRemoteValidator(res.get, feeRecipient)
+    if data.isSome():
+      v.updateValidator(data.get().index, data.get().validator.activation_epoch)
 
    RequestItemStatus(status: $KeystoreStatus.imported)
  else:
    case res.error().status

@@ -858,6 +858,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
 
    return RestApiResponse.response("", Http200, "text/plain")
 
+  # https://github.com/ethereum/beacon-APIs/blob/master/apis/validator/liveness.yaml
  router.api(MethodPost, "/eth/v1/validator/liveness/{epoch}") do (
    epoch: Epoch, contentBody: Option[ContentBody]) -> RestApiResponse:
    let

@@ -878,6 +879,11 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
        if (res < prevEpoch) or (res > nextEpoch):
          return RestApiResponse.jsonError(Http400, InvalidEpochValueError,
            "Requested epoch is more than one epoch from current epoch")
+
+        if res < node.processor[].doppelgangerDetection.broadcastStartEpoch:
+          # We can't accurately respond if we're not in sync and aren't
+          # processing gossip
+          return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
        res
      indexList =
        block:

@@ -25,7 +25,8 @@ type
 proc serveAttestation(service: AttestationServiceRef, adata: AttestationData,
                      duty: DutyAndProof): Future[bool] {.async.} =
  let vc = service.client
-  let validator = vc.getValidator(duty.data.pubkey).valueOr: return false
+  let validator = vc.getValidatorForDuties(duty.data.pubkey, adata.slot).valueOr:
+    return false
  let fork = vc.forkAtEpoch(adata.slot.epoch)
 
  doAssert(validator.index.isSome())

@@ -259,23 +260,25 @@ proc produceAndPublishAggregates(service: AttestationServiceRef,
    block:
      var res: seq[AggregateItem]
      for duty in duties:
-        let validator = vc.attachedValidators[].getValidator(duty.data.pubkey)
-        if not(isNil(validator)):
-          if (duty.data.slot != slot) or
-             (duty.data.committee_index != committeeIndex):
-            error "Inconsistent validator duties during aggregate signing",
-                  duty_slot = duty.data.slot, slot = slot,
-                  duty_committee_index = duty.data.committee_index,
-                  committee_index = committeeIndex
-            continue
-          if duty.slotSig.isSome():
-            let slotSignature = duty.slotSig.get()
-            if is_aggregator(duty.data.committee_length, slotSignature):
-              res.add(AggregateItem(
-                aggregator_index: uint64(duty.data.validator_index),
-                selection_proof: slotSignature,
-                validator: validator
-              ))
+        let validator = vc.attachedValidators[].getValidatorForDuties(
+            duty.data.pubkey, slot).valueOr:
+          continue
+
+        if (duty.data.slot != slot) or
+           (duty.data.committee_index != committeeIndex):
+          error "Inconsistent validator duties during aggregate signing",
+                duty_slot = duty.data.slot, slot = slot,
+                duty_committee_index = duty.data.committee_index,
+                committee_index = committeeIndex
+          continue
+        if duty.slotSig.isSome():
+          let slotSignature = duty.slotSig.get()
+          if is_aggregator(duty.data.committee_length, slotSignature):
+            res.add(AggregateItem(
+              aggregator_index: uint64(duty.data.validator_index),
+              selection_proof: slotSignature,
+              validator: validator
+            ))
      res
 
  if len(aggregateItems) > 0:

@@ -395,11 +398,7 @@ proc spawnAttestationTasks(service: AttestationServiceRef,
 
-  var dutiesSkipped: seq[string]
  for index, duties in dutiesByCommittee:
-    let (protectedDuties, skipped) = vc.doppelgangerFilter(duties)
-    if len(skipped) > 0: dutiesSkipped.add(skipped)
-    if len(protectedDuties) > 0:
-      asyncSpawn service.publishAttestationsAndAggregates(slot, index,
-                                                          protectedDuties)
+    asyncSpawn service.publishAttestationsAndAggregates(slot, index, duties)
-  if len(dutiesSkipped) > 0:
-    info "Doppelganger protection disabled validator duties",
-         validators = len(dutiesSkipped)

@@ -107,10 +107,6 @@ proc publishBlock(vc: ValidatorClientRef, currentSlot, slot: Slot,
          slot = slot
          wall_slot = currentSlot
 
-  if not(vc.doppelgangerCheck(validator)):
-    info "Block has not been produced (doppelganger check still active)"
-    return
-
  debug "Publishing block", delay = vc.getDelay(slot.block_deadline()),
        genesis_root = genesisRoot,
        graffiti = graffiti, fork = fork

@@ -316,7 +312,7 @@ proc proposeBlock(vc: ValidatorClientRef, slot: Slot,
  if sres.isSome():
    let
      currentSlot = sres.get()
-      validator = vc.getValidator(proposerKey).valueOr: return
+      validator = vc.getValidatorForDuties(proposerKey, slot).valueOr: return
    await vc.publishBlock(currentSlot, slot, validator)
  except CancelledError as exc:
    debug "Block proposing was interrupted", slot = slot,

@@ -141,16 +141,6 @@
  DoppelgangerAttempt* {.pure.} = enum
    None, Failure, SuccessTrue, SuccessFalse
 
-  DoppelgangerState* = object
-    startEpoch*: Epoch
-    epochsCount*: uint64
-    lastAttempt*: DoppelgangerAttempt
-    status*: DoppelgangerStatus
-
-  DoppelgangerDetection* = object
-    startSlot*: Slot
-    validators*: Table[ValidatorIndex, DoppelgangerState]
-
  ValidatorClient* = object
    config*: ValidatorClientConf
    metricsServer*: Option[MetricsHttpServerRef]

@@ -169,13 +159,12 @@
    keymanagerHost*: ref KeymanagerHost
    keymanagerServer*: RestServerRef
    beaconClock*: BeaconClock
-    doppelgangerDetection*: DoppelgangerDetection
    attachedValidators*: ref ValidatorPool
    forks*: seq[Fork]
    forksAvailable*: AsyncEvent
    nodesAvailable*: AsyncEvent
    indicesAvailable*: AsyncEvent
-    gracefulExit*: AsyncEvent
+    doppelExit*: AsyncEvent
    attesters*: AttesterMap
    proposers*: ProposerMap
    syncCommitteeDuties*: SyncCommitteeDutiesMap

@@ -492,18 +481,9 @@ proc syncMembersSubscriptionInfoForEpoch*(
 proc getDelay*(vc: ValidatorClientRef, deadline: BeaconTime): TimeDiff =
  vc.beaconClock.now() - deadline
 
-proc getValidator*(vc: ValidatorClientRef,
-                   key: ValidatorPubKey): Opt[AttachedValidator] =
-  let validator = vc.attachedValidators[].getValidator(key)
-  if isNil(validator):
-    info "Validator not in pool anymore", validator = shortLog(validator)
-    Opt.none(AttachedValidator)
-  else:
-    if validator.index.isNone():
-      info "Validator index is missing", validator = shortLog(validator)
-      Opt.none(AttachedValidator)
-    else:
-      Opt.some(validator)
+proc getValidatorForDuties*(vc: ValidatorClientRef,
+                            key: ValidatorPubKey, slot: Slot): Opt[AttachedValidator] =
+  vc.attachedValidators[].getValidatorForDuties(key, slot)
 
 proc forkAtEpoch*(vc: ValidatorClientRef, epoch: Epoch): Fork =
  # If schedule is present, it MUST not be empty.

@@ -522,91 +502,6 @@ proc getSubcommitteeIndex*(index: IndexInSyncCommittee): SyncSubcommitteeIndex =
 proc currentSlot*(vc: ValidatorClientRef): Slot =
  vc.beaconClock.now().slotOrZero()
 
-proc addDoppelganger*(vc: ValidatorClientRef,
-                      validators: openArray[AttachedValidator]) =
-  if vc.config.doppelgangerDetection:
-    let startEpoch = vc.currentSlot().epoch()
-    var
-      check: seq[string]
-      skip: seq[string]
-      exist: seq[string]
-
-    for validator in validators:
-      let
-        vindex = validator.index.get()
-        state =
-          if (startEpoch == GENESIS_EPOCH) and
-             (validator.startSlot == GENESIS_SLOT):
-            DoppelgangerState(startEpoch: startEpoch, epochsCount: 0'u64,
-                              lastAttempt: DoppelgangerAttempt.None,
-                              status: DoppelgangerStatus.Passed)
-          else:
-            if validator.activationEpoch.isSome() and
-               (validator.activationEpoch.get() >= startEpoch):
-              DoppelgangerState(startEpoch: startEpoch, epochsCount: 0'u64,
-                                lastAttempt: DoppelgangerAttempt.None,
-                                status: DoppelgangerStatus.Passed)
-            else:
-              DoppelgangerState(startEpoch: startEpoch, epochsCount: 0'u64,
-                                lastAttempt: DoppelgangerAttempt.None,
-                                status: DoppelgangerStatus.Checking)
-        res = vc.doppelgangerDetection.validators.hasKeyOrPut(vindex, state)
-      if res:
-        exist.add(validatorLog(validator.pubkey, vindex))
-      else:
-        if state.status == DoppelgangerStatus.Checking:
-          check.add(validatorLog(validator.pubkey, vindex))
-        else:
-          skip.add(validatorLog(validator.pubkey, vindex))
-    info "Validator's doppelganger protection activated",
-         validators_count = len(validators),
-         pending_check_count = len(check),
-         skipped_count = len(skip),
-         exist_count = len(exist)
-    debug "Validator's doppelganger protection dump",
-          checking_validators = check,
-          skip_validators = skip,
-          existing_validators = exist
-
-proc addDoppelganger*(vc: ValidatorClientRef, validator: AttachedValidator) =
-  logScope:
-    validator = shortLog(validator)
-
-  if vc.config.doppelgangerDetection:
-    let
-      vindex = validator.index.get()
-      startEpoch = vc.currentSlot().epoch()
-      state =
-        if (startEpoch == GENESIS_EPOCH) and
-           (validator.startSlot == GENESIS_SLOT):
-          DoppelgangerState(startEpoch: startEpoch, epochsCount: 0'u64,
-                            lastAttempt: DoppelgangerAttempt.None,
-                            status: DoppelgangerStatus.Passed)
-        else:
-          DoppelgangerState(startEpoch: startEpoch, epochsCount: 0'u64,
-                            lastAttempt: DoppelgangerAttempt.None,
-                            status: DoppelgangerStatus.Checking)
-      res = vc.doppelgangerDetection.validators.hasKeyOrPut(vindex, state)
-
-    if res:
-      warn "Validator is already in doppelganger table",
-           validator_index = vindex, start_epoch = startEpoch,
-           start_slot = validator.startSlot
-    else:
-      if state.status == DoppelgangerStatus.Checking:
-        info "Doppelganger protection activated", validator_index = vindex,
-             start_epoch = startEpoch, start_slot = validator.startSlot
-      else:
-        info "Doppelganger protection skipped", validator_index = vindex,
-             start_epoch = startEpoch, start_slot = validator.startSlot
-
-proc removeDoppelganger*(vc: ValidatorClientRef, index: ValidatorIndex) =
-  if vc.config.doppelgangerDetection:
-    var state: DoppelgangerState
-    # We do not care about race condition, when validator is not yet added to
-    # the doppelganger's table, but it should be removed.
-    discard vc.doppelgangerDetection.validators.pop(index, state)
-
 proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
  let
    slot = vc.currentSlot()

@@ -615,9 +510,7 @@ proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
      vc.config.defaultFeeRecipient)
  case keystore.kind
  of KeystoreKind.Local:
-    vc.attachedValidators[].addLocalValidator(keystore, Opt.none ValidatorIndex,
-                                              feeRecipient, slot,
-                                              Opt.none(Epoch))
+    discard vc.attachedValidators[].addLocalValidator(keystore, feeRecipient)
  of KeystoreKind.Remote:
    let
      httpFlags =

@@ -641,10 +534,8 @@ proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
        res.add((client.get(), remote))
      res
  if len(clients) > 0:
-    vc.attachedValidators[].addRemoteValidator(keystore, clients,
-                                               Opt.none(ValidatorIndex),
-                                               feeRecipient, slot,
-                                               Opt.none(Epoch))
+    discard vc.attachedValidators[].addRemoteValidator(keystore, clients,
+                                                       feeRecipient)
  else:
    warn "Unable to initialize remote validator",
         validator = $keystore.pubkey

@@ -653,9 +544,6 @@ proc removeValidator*(vc: ValidatorClientRef,
    pubkey: ValidatorPubKey) {.async.} =
  let validator = vc.attachedValidators[].getValidator(pubkey)
  if not(isNil(validator)):
-    if vc.config.doppelgangerDetection:
-      if validator.index.isSome():
-        vc.removeDoppelganger(validator.index.get())
    case validator.kind
    of ValidatorKind.Local:
      discard

@@ -671,43 +559,6 @@ proc removeValidator*(vc: ValidatorClientRef,
  # Remove validator from ValidatorPool.
  vc.attachedValidators[].removeValidator(pubkey)
 
-proc doppelgangerCheck*(vc: ValidatorClientRef,
-                        validator: AttachedValidator): bool =
-  if vc.config.doppelgangerDetection:
-    if validator.index.isNone():
-      false
-    else:
-      let
-        vindex = validator.index.get()
-        default = DoppelgangerState(status: DoppelgangerStatus.None)
-        state = vc.doppelgangerDetection.validators.getOrDefault(vindex,
-                                                                 default)
-      state.status == DoppelgangerStatus.Passed
-  else:
-    true
-
-proc doppelgangerCheck*(vc: ValidatorClientRef,
-                        key: ValidatorPubKey): bool =
-  let validator = vc.getValidator(key).valueOr: return false
-  vc.doppelgangerCheck(validator)
-
-proc doppelgangerFilter*(
-    vc: ValidatorClientRef,
-    duties: openArray[DutyAndProof]
-): tuple[filtered: seq[DutyAndProof], skipped: seq[string]] =
-  var
-    pending: seq[string]
-    ready: seq[DutyAndProof]
-  for duty in duties:
-    let
-      vindex = duty.data.validator_index
-      vkey = duty.data.pubkey
-    if vc.doppelgangerCheck(vkey):
-      ready.add(duty)
-    else:
-      pending.add(validatorLog(vkey, vindex))
-  (ready, pending)
-
 proc getFeeRecipient*(vc: ValidatorClientRef, pubkey: ValidatorPubKey,
                      validatorIdx: ValidatorIndex,
                      epoch: Epoch): Opt[Eth1Address] =

@@ -13,14 +13,11 @@ const
 
 logScope: service = ServiceName
 
-const
-  DOPPELGANGER_EPOCHS_COUNT = 1
-
-proc getCheckingList*(vc: ValidatorClientRef): seq[ValidatorIndex] =
+proc getCheckingList*(vc: ValidatorClientRef, epoch: Epoch): seq[ValidatorIndex] =
  var res: seq[ValidatorIndex]
-  for index, value in vc.doppelgangerDetection.validators.pairs():
-    if value.status == DoppelgangerStatus.Checking:
-      res.add(index)
+  for validator in vc.attachedValidators[]:
+    if validator.index.isSome and validator.triggersDoppelganger(epoch):
+      res.add validator.index.get()
  res
 
 proc waitForNextEpoch(service: DoppelgangerServiceRef) {.async.} =

@@ -34,34 +31,17 @@ proc processActivities(service: DoppelgangerServiceRef, epoch: Epoch,
  let vc = service.client
  if len(activities.data) == 0:
    debug "Unable to monitor validator's activity for epoch", epoch = epoch
-    for index, value in vc.doppelgangerDetection.validators.mpairs():
-      if value.status == DoppelgangerStatus.Checking:
-        value.epochsCount = 0'u64
-        value.lastAttempt = DoppelgangerAttempt.Failure
  else:
    for item in activities.data:
      let vindex = item.index
-      vc.doppelgangerDetection.validators.withValue(vindex, value):
-        if item.is_live:
-          if value.status == DoppelgangerStatus.Checking:
-            value.epochsCount = 0'u64
-            value.lastAttempt = DoppelgangerAttempt.SuccessTrue
-            warn "Validator's activity has been seen",
-                 validator_index = vindex, epoch = epoch
-            vc.gracefulExit.fire()
-            return
-        else:
-          if value.status == DoppelgangerStatus.Checking:
-            value.lastAttempt = DoppelgangerAttempt.SuccessFalse
-            if value.epochsCount == DOPPELGANGER_EPOCHS_COUNT:
-              value.status = DoppelgangerStatus.Passed
-              info "Validator successfully passed doppelganger detection",
-                   validator_index = vindex
-            else:
-              inc(value.epochsCount)
-              debug "Validator's activity was not seen",
-                    validator_index = vindex, epoch = epoch,
-                    epochs_count = value.epochsCount
+      for validator in vc.attachedValidators[]:
+        if validator.index == Opt.some(vindex):
+          if item.is_live:
+            if validator.triggersDoppelganger(epoch):
+              vc.doppelExit.fire()
+              return
+          else:
+            validator.updateDoppelganger(epoch)
 
 proc mainLoop(service: DoppelgangerServiceRef) {.async.} =
  let vc = service.client

@@ -73,36 +53,39 @@ proc mainLoop(service: DoppelgangerServiceRef) {.async.} =
    debug "Service disabled because of configuration settings"
    return
 
-  while true:
-    let breakLoop =
-      try:
-        await service.waitForNextEpoch()
-        let
-          currentEpoch = vc.currentSlot().epoch()
-          previousEpoch =
-            if currentEpoch == Epoch(0):
-              currentEpoch
-            else:
-              currentEpoch - 1'u64
-          validators = vc.getCheckingList()
-        if len(validators) > 0:
-          let activities = await vc.getValidatorsLiveness(previousEpoch,
-                                                          validators)
-          service.processActivities(previousEpoch, activities)
-        else:
-          debug "No validators found that require doppelganger protection"
-          discard
-        false
-      except CancelledError:
-        debug "Service interrupted"
-        true
-      except CatchableError as exc:
-        warn "Service crashed with unexpected error", err_name = exc.name,
-             err_msg = exc.msg
-        true
+  # On (re)start, we skip the remainder of the epoch before we start monitoring
+  # for doppelgangers so we don't trigger on the attestations we produced before
+  # the epoch
+  await service.waitForNextEpoch()
 
-    if breakLoop:
-      break
+  while try:
+    # Wait for the epoch to end - at the end (or really, the beginning of the
+    # next one, we ask what happened
+    await service.waitForNextEpoch()
+    let
+      currentEpoch = vc.currentSlot().epoch()
+      previousEpoch =
+        if currentEpoch == Epoch(0):
+          currentEpoch
+        else:
+          currentEpoch - 1'u64
+      validators = vc.getCheckingList(previousEpoch)
+    if len(validators) > 0:
+      let activities = await vc.getValidatorsLiveness(previousEpoch,
+                                                      validators)
+      service.processActivities(previousEpoch, activities)
+    else:
+      debug "No validators found that require doppelganger protection"
+      discard
+    true
+  except CancelledError:
+    debug "Service interrupted"
+    false
+  except CatchableError as exc:
+    warn "Service crashed with unexpected error", err_name = exc.name,
+         err_msg = exc.msg
+    false
+  : discard
 
 proc init*(t: type DoppelgangerServiceRef,
           vc: ValidatorClientRef): Future[DoppelgangerServiceRef] {.async.} =

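Stripped of its error handling, one iteration of the reworked service loop above reduces to the following steps (a condensed restatement of the hunk, not additional logic):

```nim
await service.waitForNextEpoch()      # let the current epoch finish first
let
  currentEpoch = vc.currentSlot().epoch()
  previousEpoch =                     # epoch 0 has no predecessor
    if currentEpoch == Epoch(0): currentEpoch
    else: currentEpoch - 1'u64
  validators = vc.getCheckingList(previousEpoch)
if len(validators) > 0:
  let activities = await vc.getValidatorsLiveness(previousEpoch, validators)
  service.processActivities(previousEpoch, activities)
```
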
@@ -43,7 +43,7 @@ proc pollForValidatorIndices*(vc: ValidatorClientRef) {.async.} =
    block:
      var res: seq[ValidatorIdent]
      for validator in vc.attachedValidators[].items():
-        if validator.index.isNone():
+        if validator.needsUpdate():
          res.add(ValidatorIdent.init(validator.pubkey))
      res
 
@@ -91,8 +91,7 @@ proc pollForValidatorIndices*(vc: ValidatorClientRef) {.async.} =
      if isNil(validator):
        missing.add(validatorLog(item.validator.pubkey, item.index))
      else:
-        validator.index = Opt.some(item.index)
-        validator.activationEpoch = Opt.some(item.validator.activation_epoch)
+        validator.updateValidator(item.index, item.validator.activation_epoch)
        updated.add(validatorLog(item.validator.pubkey, item.index))
        list.add(validator)
 
@@ -102,7 +101,6 @@ proc pollForValidatorIndices*(vc: ValidatorClientRef) {.async.} =
    trace "Validator indices update dump", missing_validators = missing,
          updated_validators = updated
    vc.indicesAvailable.fire()
-    vc.addDoppelganger(list)
 
 proc pollForAttesterDuties*(vc: ValidatorClientRef,
                            epoch: Epoch): Future[int] {.async.} =

@@ -36,7 +36,7 @@ proc serveSyncCommitteeMessage*(service: SyncCommitteeServiceRef,
    vindex = duty.validator_index
    subcommitteeIdx = getSubcommitteeIndex(
      duty.validator_sync_committee_index)
-    validator = vc.getValidator(duty.pubkey).valueOr: return false
+    validator = vc.getValidatorForDuties(duty.pubkey, slot).valueOr: return false
    message =
      block:
        let res = await getSyncCommitteeMessage(validator, fork,

@@ -212,8 +212,9 @@ proc produceAndPublishContributions(service: SyncCommitteeServiceRef,
  var validators: seq[(AttachedValidator, SyncSubcommitteeIndex)]
 
  for duty in duties:
-    let validator = vc.attachedValidators[].getValidator(duty.pubkey)
-    if isNil(validator): continue # This should never happen
+    let validator = vc.attachedValidators[].getValidatorForDuties(
+        duty.pubkey, slot).valueOr:
+      continue
    let
      subCommitteeIdx =
        getSubcommitteeIndex(duty.validator_sync_committee_index)

@@ -1336,23 +1336,13 @@ proc getSuggestedFeeRecipient*(
 
 proc addLocalValidator*(host: KeymanagerHost, keystore: KeystoreData) =
  let
-    slot = host.getBeaconTimeFn().slotOrZero
    data = host.getValidatorData(keystore.pubkey)
    feeRecipient = host.getSuggestedFeeRecipient(keystore.pubkey).valueOr(
      host.defaultFeeRecipient)
-    index =
-      if data.isSome():
-        Opt.some(data.get().index)
-      else:
-        Opt.none(ValidatorIndex)
-    activationEpoch =
-      if data.isSome():
-        Opt.some(data.get().validator.activation_epoch)
-      else:
-        Opt.none(Epoch)
-
-  host.validatorPool[].addLocalValidator(keystore, index, feeRecipient, slot,
-                                         activationEpoch)
+  let v = host.validatorPool[].addLocalValidator(keystore, feeRecipient)
+  if data.isSome():
+    v.updateValidator(data.get().index, data.get().validator.activation_epoch)
 
 proc generateDeposits*(cfg: RuntimeConfig,
                       rng: var HmacDrbgContext,

@@ -16,7 +16,7 @@ else:
 
 import
  # Standard library
-  std/[os, tables],
+  std/[os, tables, sequtils],
 
  # Nimble packages
  stew/byteutils,

@@ -107,7 +107,8 @@ proc getValidator*(validators: auto,
 
 proc addValidators*(node: BeaconNode) =
  debug "Loading validators", validatorsDir = node.config.validatorsDir()
-  let slot = node.currentSlot()
+  let
+    epoch = node.currentSlot().epoch
  for keystore in listLoadableKeystores(node.config):
    let
      data = withState(node.dag.headState):

@@ -118,76 +119,29 @@ proc addValidators*(node: BeaconNode) =
      else:
        Opt.none(ValidatorIndex)
      feeRecipient = node.consensusManager[].getFeeRecipient(
-        keystore.pubkey, index, slot.epoch)
-      activationEpoch =
-        if data.isSome():
-          Opt.some(data.get().validator.activation_epoch)
-        else:
-          Opt.none(Epoch)
+        keystore.pubkey, index, epoch)
 
-    case keystore.kind
+    let v = case keystore.kind
    of KeystoreKind.Local:
-      node.attachedValidators[].addLocalValidator(
-        keystore, index, feeRecipient, slot, activationEpoch)
+      node.attachedValidators[].addLocalValidator(keystore, feeRecipient)
    of KeystoreKind.Remote:
-      node.attachedValidators[].addRemoteValidator(
-        keystore, index, feeRecipient, slot, activationEpoch)
+      node.attachedValidators[].addRemoteValidator(keystore, feeRecipient)
+
+    if data.isSome:
+      v.updateValidator(data.get().index, data.get().validator.activation_epoch)
 
 proc getAttachedValidator(node: BeaconNode,
                          pubkey: ValidatorPubKey): AttachedValidator =
  node.attachedValidators[].getValidator(pubkey)
 
-proc getAttachedValidator(node: BeaconNode,
-                          idx: ValidatorIndex,
-                          slot: Slot): Opt[AttachedValidator] =
+proc getValidatorForDuties*(
+    node: BeaconNode,
+    idx: ValidatorIndex, slot: Slot): Opt[AttachedValidator] =
  let key = node.dag.validatorKey(idx)
-  if key.isNone():
-    return Opt.some(AttachedValidator(nil))
+  if key.isNone:
+    return Opt.none(AttachedValidator)
 
-  let validator = node.getAttachedValidator(key.get().toPubKey())
-  if isNil(validator):
-    return Opt.some(AttachedValidator(nil))
-
-  if validator.index != Opt.some(idx):
-    # Update index and activation_epoch, in case the validator was activated!
-    notice "Validator activated", pubkey = validator.pubkey, index = idx,
-           activation_epoch = slot.epoch()
-    validator.index = Opt.some(idx)
-    if validator.activationEpoch.isNone():
-      validator.activationEpoch = Opt.some(slot.epoch())
-
-  if not(node.config.doppelgangerDetection):
-    Opt.some(validator)
-  else:
-    let
-      doppelgangerDetection = node.processor[].doppelgangerDetection
-      broadcastStartEpoch = doppelgangerDetection.broadcastStartEpoch
-      res = validator.doppelgangerCheck(slot.epoch(), broadcastStartEpoch)
-
-    if res.isErr():
-      if validator.lastWarning != Opt.some(slot):
-        validator.lastWarning = Opt.some(slot)
-        warn "Doppelganger detection check failed - skipping validator " &
-             "duties while observing activity on the network",
-             pubkey = validator.pubkey, index = idx,
-             activation_epoch = validator.activationEpoch,
-             broadcast_start_epoch = broadcastStartEpoch,
-             slot = slot, epoch = slot.epoch(),
-             error_msg = res.error()
-      Opt.none(AttachedValidator)
-    else:
-      if res.get():
-        Opt.some(validator)
-      else:
-        if validator.lastWarning != Opt.some(slot):
-          validator.lastWarning = Opt.some(slot)
-          notice "Doppelganger detection active - skipping validator " &
-                 "duties while observing activity on the network",
-                 slot = slot, epoch = slot.epoch(),
-                 validator = shortLog(validator),
-                 index = idx, broadcast_start_epoch = broadcastStartEpoch,
-                 activation_epoch = validator.activationEpoch
-        Opt.none(AttachedValidator)
+  node.attachedValidators[].getValidatorForDuties(key.get().toPubKey(), slot)
 
 proc isSynced*(node: BeaconNode, head: BlockRef): SyncStatus =
  ## TODO This function is here as a placeholder for some better heurestics to

@@ -1013,9 +967,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
      epochRef.shufflingRef, slot, committee_index)
 
    for index_in_committee, validator_index in committee:
-      let validator = node.getAttachedValidator(validator_index, slot).valueOr:
-        continue
-      if isNil(validator):
+      let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
        continue
 
      let

@@ -1086,9 +1038,7 @@ proc handleSyncCommitteeMessages(node: BeaconNode, head: BlockRef, slot: Slot) =
 
  for subcommitteeIdx in SyncSubcommitteeIndex:
    for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
-      let validator = node.getAttachedValidator(valIdx, slot).valueOr:
-        continue
-      if isNil(validator) or validator.index.isNone():
+      let validator = node.getValidatorForDuties(valIdx, slot).valueOr:
        continue
      asyncSpawn createAndSendSyncCommitteeMessage(node, validator, slot,
                                                   subcommitteeIdx, head)

@@ -1155,10 +1105,7 @@ proc handleSyncCommitteeContributions(
 
  for subcommitteeIdx in SyncSubCommitteeIndex:
    for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
-      let validator = node.getAttachedValidator(valIdx, slot).valueOr:
-        continue
-      # Why there no check on `validator.index`?
-      if isNil(validator):
+      let validator = node.getValidatorForDuties(valIdx, slot).valueOr:
        continue
 
      asyncSpawn signAndSendContribution(

@@ -1176,18 +1123,14 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
 
  let
    proposerKey = node.dag.validatorKey(proposer.get()).get().toPubKey
-    validator = node.getAttachedValidator(proposer.get(), slot).valueOr:
-      return head
-
-  return
-    if isNil(validator):
+    validator = node.getValidatorForDuties(proposer.get(), slot).valueOr:
      debug "Expecting block proposal", headRoot = shortLog(head.root),
            slot = shortLog(slot),
            proposer_index = proposer.get(),
            proposer = shortLog(proposerKey)
-      head
-    else:
-      await proposeBlock(node, validator, proposer.get(), head, slot)
+      return head
 
+  return await proposeBlock(node, validator, proposer.get(), head, slot)
 
 proc signAndSendAggregate(
    node: BeaconNode, validator: AttachedValidator, shufflingRef: ShufflingRef,

@@ -1245,7 +1188,7 @@ proc signAndSendAggregate(
 proc sendAggregatedAttestations(
    node: BeaconNode, head: BlockRef, slot: Slot) {.async.} =
  # Aggregated attestations must be sent by members of the beacon committees for
-  # the given slot, for which `is_aggregator` returns `true.
+  # the given slot, for which `is_aggregator` returns `true`.
 
  let
    shufflingRef = node.dag.getShufflingRef(head, slot.epoch, false).valueOr:

@@ -1257,11 +1200,10 @@ proc sendAggregatedAttestations(
  for committee_index in get_committee_indices(committees_per_slot):
    for _, validator_index in
        get_beacon_committee(shufflingRef, slot, committee_index):
-      let validator = node.getAttachedValidator(validator_index, slot).valueOr:
+      let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
        continue
-      if not(isNil(validator)):
-        asyncSpawn signAndSendAggregate(node, validator, shufflingRef, slot,
-                                        committee_index)
+      asyncSpawn signAndSendAggregate(node, validator, shufflingRef, slot,
+                                      committee_index)
 
 proc updateValidatorMetrics*(node: BeaconNode) =
  # Technically, this only needs to be done on epoch transitions and if there's

@@ -1492,6 +1434,19 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
  of SyncStatus.synced:
    discard # keep going
 
+  for validator in node.attachedValidators[]:
+    # Check if any validators have been activated
+    # TODO we could do this the other way around and update only validators
+    #      based on a "last-updated" index since only the tail end of the
+    #      validator list in the state ever changes
+    if validator.needsUpdate:
+      let data = withState(node.dag.headState):
+        getValidator(forkyState.data.validators.asSeq(), validator.pubkey)
+      if data.isSome():
+        # Activation epoch can change after index is assigned..
+        validator.updateValidator(
+          data.get().index, data.get().validator.activation_epoch)
+
  var curSlot = lastSlot + 1
 
  # Start by checking if there's work we should have done in the past that we

@@ -1623,21 +1578,19 @@ proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
      let committee = get_beacon_committee(shufflingRef, slot, committee_index)
 
      for index_in_committee, validator_index in committee:
-        let validator = node.getAttachedValidator(validator_index,
-                                                  slot).valueOr:
+        let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
          continue
-        if not(isNil(validator)):
-          let
-            subnet_id = compute_subnet_for_attestation(
-              committees_per_slot, slot, committee_index)
-            slotSigRes = await validator.getSlotSignature(
-              fork, genesis_validators_root, slot)
-          if slotSigRes.isErr():
-            error "Unable to create slot signature",
-                  validator = shortLog(validator),
-                  error_msg = slotSigRes.error()
-            continue
-          let isAggregator = is_aggregator(committee.lenu64, slotSigRes.get())
+        let
+          subnet_id = compute_subnet_for_attestation(
+            committees_per_slot, slot, committee_index)
+          slotSigRes = await validator.getSlotSignature(
+            fork, genesis_validators_root, slot)
+        if slotSigRes.isErr():
+          error "Unable to create slot signature",
+                validator = shortLog(validator),
+                error_msg = slotSigRes.error()
+          continue
+        let isAggregator = is_aggregator(committee.lenu64, slotSigRes.get())
 
-          node.consensusManager[].actionTracker.registerDuty(
-            slot, subnet_id, validator_index, isAggregator)
+        node.consensusManager[].actionTracker.registerDuty(
+          slot, subnet_id, validator_index, isAggregator)

@ -30,7 +30,9 @@ export
|
|||
|
||||
const
|
||||
WEB3_SIGNER_DELAY_TOLERANCE = 3.seconds
|
||||
DOPPELGANGER_EPOCHS_COUNT = 2
|
||||
DOPPELGANGER_EPOCHS_COUNT = 1
|
||||
## The number of full epochs that we monitor validators for doppelganger
|
||||
## protection
|
||||
|
||||
declareGauge validators,
|
||||
"Number of validators attached to the beacon node"
|
||||
|
@ -41,6 +43,9 @@ type
|
|||
|
||||
ValidatorConnection* = RestClientRef
|
||||
|
||||
DoppelgangerStatus {.pure.} = enum
|
||||
Unknown, Checking, Checked
|
||||
|
||||
AttachedValidator* = ref object
|
||||
data*: KeystoreData
|
||||
case kind*: ValidatorKind
|
||||
|
@ -50,14 +55,16 @@ type
|
|||
clients*: seq[(RestClientRef, RemoteSignerInfo)]
|
||||
threshold*: uint32
|
||||
|
||||
# The index at which this validator has been observed in the chain -
|
||||
# it does not change as long as there are no reorgs on eth1 - however, the
|
||||
# index might not be valid in all eth2 histories, so it should not be
|
||||
# assumed that a valid index is stored here!
|
||||
index*: Opt[ValidatorIndex]
|
||||
## Validator index which is assigned after the eth1 deposit has been
|
||||
## processed - this index is valid across all eth2 forks for fork depths
|
||||
## up to ETH1_FOLLOW_DISTANCE - we don't support changing indices.
|
||||
|
||||
# Epoch when validator activated.
|
||||
activationEpoch*: Opt[Epoch]
|
||||
activationEpoch*: Epoch
|
||||
## Epoch when validator activated - this happens at the time or some time
|
||||
## after the validator index has been assigned depending on how many
|
||||
## validators are in the activation queue - this is the first epoch that
|
||||
## the validator starts performing duties
|
||||
|
||||
# Cache the latest slot signature - the slot signature is used to determine
|
||||
# if the validator will be aggregating (in the near future)
|
||||
|
@ -67,7 +74,9 @@ type
|
|||
# builder should be informed of current validators
|
||||
externalBuilderRegistration*: Opt[SignedValidatorRegistrationV1]
|
||||
|
||||
startSlot*: Slot
|
||||
doppelStatus: DoppelgangerStatus
|
||||
doppelEpoch*: Opt[Epoch]
|
||||
## The epoch where doppelganger detection started doing its monitoring
|
||||
|
||||
lastWarning*: Opt[Slot]
|
||||
|
||||
|
@ -79,6 +88,7 @@ type
|
|||
ValidatorPool* = object
|
||||
validators*: Table[ValidatorPubKey, AttachedValidator]
|
||||
slashingProtection*: SlashingProtectionDB
|
||||
doppelgangerDetectionEnabled*: bool
|
||||
|
||||
template pubkey*(v: AttachedValidator): ValidatorPubKey =
|
||||
v.data.pubkey
|
||||
|
@ -91,27 +101,28 @@ func shortLog*(v: AttachedValidator): string =
|
|||
shortLog(v.pubkey)
|
||||
|
||||
func init*(T: type ValidatorPool,
|
||||
slashingProtectionDB: SlashingProtectionDB): T =
|
||||
slashingProtectionDB: SlashingProtectionDB,
|
||||
doppelgangerDetectionEnabled: bool): T =
|
||||
## Initialize the validator pool and the slashing protection service
|
||||
## `genesis_validators_root` is used as an unique ID for the
|
||||
## blockchain
|
||||
## `backend` is the KeyValue Store backend
|
||||
T(slashingProtection: slashingProtectionDB)
|
||||
T(
|
||||
slashingProtection: slashingProtectionDB,
|
||||
doppelgangerDetectionEnabled: doppelgangerDetectionEnabled)
|
||||
|
||||
template count*(pool: ValidatorPool): int =
|
||||
len(pool.validators)
|
||||
|
||||
proc addLocalValidator*(
|
||||
pool: var ValidatorPool, keystore: KeystoreData, index: Opt[ValidatorIndex],
|
||||
feeRecipient: Eth1Address, slot: Slot, activationEpoch: Opt[Epoch]) =
|
||||
pool: var ValidatorPool, keystore: KeystoreData,
|
||||
feeRecipient: Eth1Address): AttachedValidator =
|
||||
doAssert keystore.kind == KeystoreKind.Local
|
||||
let v = AttachedValidator(
|
||||
kind: ValidatorKind.Local,
|
||||
index: index,
|
||||
data: keystore,
|
||||
externalBuilderRegistration: Opt.none SignedValidatorRegistrationV1,
|
||||
startSlot: slot,
|
||||
activationEpoch: activationEpoch
|
||||
activationEpoch: FAR_FUTURE_EPOCH
|
||||
)
|
||||
pool.validators[v.pubkey] = v
|
||||
|
||||
|
@ -119,38 +130,33 @@ proc addLocalValidator*(
|
|||
notice "Local validator attached",
|
||||
pubkey = v.pubkey,
|
||||
validator = shortLog(v),
|
||||
initial_fee_recipient = feeRecipient.toHex(),
|
||||
start_slot = slot
|
||||
initial_fee_recipient = feeRecipient.toHex()
|
||||
validators.set(pool.count().int64)
|
||||
|
||||
proc addLocalValidator*(
|
||||
pool: var ValidatorPool, keystore: KeystoreData, feeRecipient: Eth1Address,
|
||||
slot: Slot) =
|
||||
addLocalValidator(pool, keystore, feeRecipient, slot)
|
||||
v
|
||||
|
||||
proc addRemoteValidator*(pool: var ValidatorPool, keystore: KeystoreData,
|
||||
clients: seq[(RestClientRef, RemoteSignerInfo)],
|
||||
index: Opt[ValidatorIndex], feeRecipient: Eth1Address,
|
||||
slot: Slot, activationEpoch: Opt[Epoch]) =
|
||||
feeRecipient: Eth1Address): AttachedValidator =
|
||||
doAssert keystore.kind == KeystoreKind.Remote
|
||||
let v = AttachedValidator(
|
||||
kind: ValidatorKind.Remote,
|
||||
index: index,
|
||||
data: keystore,
|
||||
clients: clients,
|
||||
externalBuilderRegistration: Opt.none SignedValidatorRegistrationV1,
|
||||
startSlot: slot,
|
||||
activationEpoch: activationEpoch
|
||||
activationEpoch: FAR_FUTURE_EPOCH,
|
||||
)
|
||||
pool.validators[v.pubkey] = v
|
||||
notice "Remote validator attached",
|
||||
pubkey = v.pubkey,
|
||||
validator = shortLog(v),
|
||||
remote_signer = $keystore.remotes,
|
||||
initial_fee_recipient = feeRecipient.toHex(),
|
||||
start_slot = slot
|
||||
initial_fee_recipient = feeRecipient.toHex()
|
||||
|
||||
validators.set(pool.count().int64)
|
||||
|
||||
v
|
||||
|
||||
proc getValidator*(pool: ValidatorPool,
|
||||
validatorKey: ValidatorPubKey): AttachedValidator =
|
||||
pool.validators.getOrDefault(validatorKey)
|
||||
|
@ -172,16 +178,22 @@ proc removeValidator*(pool: var ValidatorPool, pubkey: ValidatorPubKey) =
|
|||
validator = shortLog(validator)
|
||||
validators.set(pool.count().int64)
|
||||
|
||||
proc updateValidator*(pool: var ValidatorPool, pubkey: ValidatorPubKey,
|
||||
index: ValidatorIndex) =
|
||||
## Set validator ``index`` to validator with public key ``pubkey`` stored
|
||||
## in ``pool``.
|
||||
## This procedure will not raise if validator with public key ``pubkey`` is
|
||||
## not present in the pool.
|
||||
var v: AttachedValidator
|
||||
if pool.validators.pop(pubkey, v):
|
||||
v.index = Opt.some(index)
|
||||
pool.validators[pubkey] = v
|
||||
proc needsUpdate*(validator: AttachedValidator): bool =
|
||||
validator.index.isNone() or validator.activationEpoch == FAR_FUTURE_EPOCH
|
||||
|
||||
proc updateValidator*(validator: AttachedValidator,
|
||||
index: ValidatorIndex, activationEpoch: Epoch) =
|
||||
## Update activation information for a validator
|
||||
validator.index = Opt.some(index)
|
||||
validator.activationEpoch = activationEpoch
|
||||
|
||||
if validator.doppelStatus == DoppelgangerStatus.Unknown:
|
||||
if validator.doppelEpoch.isSome() and activationEpoch != FAR_FUTURE_EPOCH:
|
||||
let doppelEpoch = validator.doppelEpoch.get()
|
||||
if doppelEpoch >= validator.activationEpoch + DOPPELGANGER_EPOCHS_COUNT:
|
||||
validator.doppelStatus = DoppelgangerStatus.Checking
|
||||
else:
|
||||
validator.doppelStatus = DoppelgangerStatus.Checked
|
||||
|
||||
proc close*(pool: var ValidatorPool) =
|
||||
## Unlock and close all validator keystore's files managed by ``pool``.
|
||||
|
@ -193,10 +205,7 @@ proc close*(pool: var ValidatorPool) =
|
|||
|
||||
proc addRemoteValidator*(pool: var ValidatorPool,
|
||||
keystore: KeystoreData,
|
||||
index: Opt[ValidatorIndex],
|
||||
feeRecipient: Eth1Address,
|
||||
slot: Slot,
|
||||
activationEpoch: Opt[Epoch]) =
|
||||
feeRecipient: Eth1Address): AttachedValidator =
|
||||
var clients: seq[(RestClientRef, RemoteSignerInfo)]
|
||||
let httpFlags =
|
||||
block:
|
||||
|
@ -212,8 +221,7 @@ proc addRemoteValidator*(pool: var ValidatorPool,
|
|||
warn "Unable to resolve distributed signer address",
|
||||
remote_url = $remote.url, validator = $remote.pubkey
|
||||
clients.add((client.get(), remote))
|
||||
pool.addRemoteValidator(keystore, clients, index, feeRecipient, slot,
|
||||
activationEpoch)
|
||||
pool.addRemoteValidator(keystore, clients, feeRecipient)
|
||||
|
||||
iterator publicKeys*(pool: ValidatorPool): ValidatorPubKey =
|
||||
for item in pool.validators.keys():
|
||||
|
@ -228,46 +236,69 @@ iterator items*(pool: ValidatorPool): AttachedValidator =
|
|||
for item in pool.validators.values():
|
||||
yield item
|
||||
|
||||
proc doppelgangerCheck*(validator: AttachedValidator,
|
||||
epoch: Epoch,
|
||||
broadcastEpoch: Epoch): Result[bool, cstring] =
|
||||
## Perform check of ``validator`` for doppelganger.
|
||||
##
|
||||
## Returns ``true`` if `validator` do not have doppelganger and could perform
|
||||
## validator actions.
|
||||
##
|
||||
## Returns ``false`` if `validator` has doppelganger in network and MUST not
|
||||
## perform any validator actions.
|
||||
##
|
||||
## Returns error, if its impossible to perform doppelganger check.
|
||||
let
|
||||
startEpoch = validator.startSlot.epoch() # startEpoch is epoch when /
|
||||
# validator appeared in beacon_node.
|
||||
activationEpoch = validator.activationEpoch # validator's activation_epoch
|
||||
currentStartEpoch = max(startEpoch, broadcastEpoch)
|
||||
|
||||
if activationEpoch.isNone() or activationEpoch.get() > epoch:
|
||||
# If validator's `activation_epoch` is not set or `activation_epoch` is far
|
||||
# from current wall epoch - it should not participate in the network.
|
||||
err("Validator is not activated yet, or beacon node clock is invalid")
|
||||
else:
|
||||
if currentStartEpoch > epoch:
|
||||
err("Validator is not started or broadcast is not started, or " &
|
||||
"beacon node clock is invalid")
|
||||
proc triggersDoppelganger*(v: AttachedValidator, epoch: Epoch): bool =
|
||||
## Returns true iff detected activity in the given epoch would trigger
|
||||
## doppelganger detection
|
||||
if v.doppelStatus != DoppelgangerStatus.Checked:
|
||||
if v.activationEpoch == FAR_FUTURE_EPOCH:
|
||||
false
|
||||
elif epoch < v.activationEpoch + DOPPELGANGER_EPOCHS_COUNT:
|
||||
v.doppelStatus = DoppelgangerStatus.Checked
|
||||
false
|
||||
else:
|
||||
let actEpoch = activationEpoch.get()
|
||||
# max(startEpoch, broadcastEpoch) <= activateEpoch <= epoch
|
||||
if (currentStartEpoch <= actEpoch) and (actEpoch <= epoch):
|
||||
# Validator was activated, we going to skip doppelganger protection
|
||||
ok(true)
|
||||
else:
|
||||
if epoch - currentStartEpoch < DOPPELGANGER_EPOCHS_COUNT:
|
||||
# Validator is started in unsafe period.
|
||||
ok(false)
|
||||
else:
|
||||
# Validator is already passed checking period, so we allow
|
||||
# validator to participate in the network.
|
||||
ok(true)
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
proc updateDoppelganger*(validator: AttachedValidator, epoch: Epoch) =
  ## Called when the validator has proven to be inactive in the given epoch -
  ## this call should be made after the end of `epoch` before acting on duties
  ## in `epoch + 1`.

  if validator.doppelStatus == DoppelgangerStatus.Checked:
    return

  if validator.doppelEpoch.isNone():
    validator.doppelEpoch = Opt.some epoch

  let doppelEpoch = validator.doppelEpoch.get()

  if validator.doppelStatus == DoppelgangerStatus.Unknown:
    if validator.activationEpoch == FAR_FUTURE_EPOCH:
      return

    # We don't do doppelganger checking for validators that are about to be
    # activated since both clients would be waiting for the other to start
    # performing duties - this accounts for genesis as well
    # The slot is rounded up to ensure we cover all slots
    if doppelEpoch + 1 <= validator.activationEpoch + DOPPELGANGER_EPOCHS_COUNT:
      validator.doppelStatus = DoppelgangerStatus.Checked
      return

    validator.doppelStatus = DoppelgangerStatus.Checking

  if epoch + 1 >= doppelEpoch + DOPPELGANGER_EPOCHS_COUNT:
    validator.doppelStatus = DoppelgangerStatus.Checked
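Taken together, `triggersDoppelganger` and `updateDoppelganger` implement a small per-validator state machine: `Unknown` until the activation epoch is known, `Checking` while the network is watched for quiet epochs, and `Checked` once duties may be performed. The following self-contained Nim sketch models that life cycle; the type and proc names (`ModelValidator`, `markQuietEpoch`) and the 2-epoch window are illustrative stand-ins, not the actual `validator_pool` API:

```nim
# Minimal model of the doppelganger life cycle described above - not the
# actual validator_pool implementation.
type
  Epoch = uint64
  DoppelStatus = enum
    dsUnknown, dsChecking, dsChecked
  ModelValidator = ref object
    activationEpoch: Epoch   # farFuture when not yet known
    doppelEpoch: Epoch       # first epoch observed quiet; farFuture when none
    status: DoppelStatus

const
  farFuture = high(Epoch)
  epochsCount = 2'u64        # assumed check window (DOPPELGANGER_EPOCHS_COUNT)

proc triggersDoppelganger(v: ModelValidator, epoch: Epoch): bool =
  # Activity in `epoch` is only suspicious while the check is in progress.
  if v.status == dsChecked:
    false
  elif v.activationEpoch == farFuture:
    false
  elif epoch < v.activationEpoch + epochsCount:
    v.status = dsChecked     # too close to activation - skip the check
    false
  else:
    true

proc markQuietEpoch(v: ModelValidator, epoch: Epoch) =
  # Call after `epoch` has ended without observing activity for `v`.
  if v.status == dsChecked:
    return
  if v.doppelEpoch == farFuture:
    v.doppelEpoch = epoch
  v.status = dsChecking
  if epoch + 1 >= v.doppelEpoch + epochsCount:
    v.status = dsChecked     # window observed quiet - duties may start

when isMainModule:
  let v = ModelValidator(
    activationEpoch: 5, doppelEpoch: farFuture, status: dsUnknown)
  doAssert v.triggersDoppelganger(10)     # activity during the watch window
  v.markQuietEpoch(10)
  doAssert v.triggersDoppelganger(11)     # still checking after one quiet epoch
  v.markQuietEpoch(11)
  doAssert not v.triggersDoppelganger(12) # two quiet epochs - checked
```

The model omits one refinement visible in the real code above: validators whose activation falls inside the observation window are marked `Checked` immediately, since two freshly started clients would otherwise wait for each other forever.
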
proc getValidatorForDuties*(
    pool: ValidatorPool, key: ValidatorPubKey, slot: Slot):
    Opt[AttachedValidator] =
  ## Return validator only if it is ready for duties (has index and has passed
  ## doppelganger check where applicable)
  let validator = pool.getValidator(key)
  if isNil(validator) or validator.index.isNone():
    return Opt.none(AttachedValidator)

  if pool.doppelgangerDetectionEnabled and
      validator.triggersDoppelganger(slot.epoch):
    # If the validator would trigger for an activity in the given slot, we
    # don't return it for duties
    notice "Doppelganger detection active - " &
      "skipped validator duty while observing the network",
      validator = shortLog(validator)
    return Opt.none(AttachedValidator)

  return Opt.some(validator)

proc signWithDistributedKey(v: AttachedValidator,
                            request: Web3SignerRequest): Future[SignatureResult]

@ -97,6 +97,7 @@ nav:
      - 'logging.md'
      - 'validator-client-options.md'
      - 'validator-monitor.md'
      - 'doppelganger-detection.md'
    - "REST APIs":
      - 'rest-api.md'
      - 'keymanager-api.md'

@ -0,0 +1,27 @@
# Doppelganger detection

Doppelganger detection is a safety feature that prevents slashing in the event that two setups are using the same validator keys, for example after a migration of keys from one setup to another.

Doppelganger detection works by monitoring network activity for a short period for each validator while preventing duties from being performed.

If any activity is detected, the node shuts down with exit code 129.

Because detection relies on observing network activity, there are cases where it may fail to find duplicate validators even though they are live - you should never use it as a mechanism for running redundant setups!

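In a setup where the node is supervised by a shell script, the exit code can be used to stop automatic restarts after a doppelganger hit. A minimal sketch - the script name and the restart policy below are placeholders, not part of Nimbus:

```sh
#!/bin/sh
# Illustrative supervisor loop: restart on ordinary failures, but stop
# permanently when doppelganger detection fires (exit code 129).
while true; do
  ./run-mainnet-beacon-node.sh "$@"
  status=$?
  if [ "$status" -eq 129 ]; then
    echo "Doppelganger detected (exit code 129) - not restarting" >&2
    exit "$status"
  fi
  echo "Node exited with status $status - restarting in 10 seconds" >&2
  sleep 10
done
```
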
## Command line

Doppelganger detection is turned on by default - disable it with:

=== "Beacon node"

    ```sh
    # Disable doppelganger detection
    ./run-mainnet-beacon-node.sh --doppelganger-detection=off ...
    ```

=== "Validator client"

    ```sh
    # Disable doppelganger detection
    build/nimbus_validator_client --doppelganger-detection=off ...
    ```

@ -20,6 +20,7 @@ import # Unit test
  ./test_block_quarantine,
  ./test_conf,
  ./test_datatypes,
  ./test_deposit_snapshots,
  ./test_discovery,
  ./test_engine_authentication,
  ./test_eth1_monitor,

@ -31,24 +32,23 @@ import # Unit test
  ./test_helpers,
  ./test_honest_validator,
  ./test_interop,
  ./test_light_client,
  ./test_key_splitting,
  ./test_light_client_processor,
  ./test_light_client,
  ./test_message_signatures,
  ./test_peer_pool,
  ./test_remote_keystore,
  ./test_serialization,
  ./test_spec,
  ./test_statediff,
  ./test_sync_committee_pool,
  ./test_sync_manager,
  ./test_validator_pool,
  ./test_zero_signature,
  ./test_key_splitting,
  ./test_remote_keystore,
  ./test_serialization,
  ./test_deposit_snapshots,
  ./fork_choice/tests_fork_choice,
  ./consensus_spec/all_tests as consensus_all_tests,
  ./slashing_protection/test_fixtures,
  ./slashing_protection/test_slashing_protection_db,
  ./test_doppelganger
  ./slashing_protection/test_slashing_protection_db

import # Refactor state transition unit tests
  # In mainnet these take 2 minutes and are empty TODOs

@ -1,111 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  # Stdlib modules
  std/strutils, std/options,
  # Status modules
  stew/results,
  unittest2,
  # Local modules
  ../beacon_chain/validators/validator_pool

proc createValidator*(startEpoch: Epoch,
                      activatedEpoch: Option[Epoch]): AttachedValidator =
  let aepoch =
    if activatedEpoch.isSome():
      Opt.some(activatedEpoch.get())
    else:
      Opt.none(Epoch)
  AttachedValidator(
    startSlot: startEpoch.start_slot(),
    activationEpoch: aepoch
  )

suite "Doppelganger protection test suite":
|
||||
test "doppelgangerCheck() test":
|
||||
const TestVectors = [
|
||||
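      # Each vector is (firstEpoch, lastEpoch, startEpoch, activationEpoch,
      # broadcastEpoch, expected). The expected string holds one character per
      # checked epoch: "1" and "2" for the two error cases matched in the loop
      # below, "T" for ok(true) and "F" for ok(false).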
      (0, 9, Epoch(0), some(Epoch(0)), Epoch(0), "TTTTTTTTTT"),
      (0, 9, Epoch(0), some(Epoch(1)), Epoch(0), "1TTTTTTTTT"),
      (0, 9, Epoch(0), some(Epoch(2)), Epoch(0), "11TTTTTTTT"),
      (0, 9, Epoch(0), some(Epoch(3)), Epoch(0), "111TTTTTTT"),
      (0, 9, Epoch(0), some(Epoch(4)), Epoch(0), "1111TTTTTT"),
      (0, 9, Epoch(0), some(Epoch(5)), Epoch(0), "11111TTTTT"),
      (0, 9, Epoch(0), some(Epoch(6)), Epoch(0), "111111TTTT"),
      (0, 9, Epoch(0), some(Epoch(7)), Epoch(0), "1111111TTT"),
      (0, 9, Epoch(0), some(Epoch(8)), Epoch(0), "11111111TT"),
      (0, 9, Epoch(0), some(Epoch(9)), Epoch(0), "111111111T"),

      (0, 9, Epoch(0), some(Epoch(5)), Epoch(0), "11111TTTTT"),
      (0, 9, Epoch(0), some(Epoch(5)), Epoch(1), "11111TTTTT"),
      (0, 9, Epoch(0), some(Epoch(5)), Epoch(2), "11111TTTTT"),
      (0, 9, Epoch(0), some(Epoch(5)), Epoch(3), "11111TTTTT"),
      (0, 9, Epoch(0), some(Epoch(5)), Epoch(4), "11111TTTTT"),
      (0, 9, Epoch(0), some(Epoch(5)), Epoch(5), "11111TTTTT"),
      (0, 9, Epoch(0), some(Epoch(5)), Epoch(6), "111112FFTT"),
      (0, 9, Epoch(0), some(Epoch(5)), Epoch(7), "1111122FFT"),
      (0, 9, Epoch(0), some(Epoch(5)), Epoch(8), "11111222FF"),
      (0, 9, Epoch(0), some(Epoch(5)), Epoch(9), "111112222F"),

      (0, 9, Epoch(1), some(Epoch(0)), Epoch(0), "2FFTTTTTTT"),
      (0, 9, Epoch(2), some(Epoch(0)), Epoch(0), "22FFTTTTTT"),
      (0, 9, Epoch(3), some(Epoch(0)), Epoch(0), "222FFTTTTT"),
      (0, 9, Epoch(4), some(Epoch(0)), Epoch(0), "2222FFTTTT"),
      (0, 9, Epoch(5), some(Epoch(0)), Epoch(0), "22222FFTTT"),
      (0, 9, Epoch(6), some(Epoch(0)), Epoch(0), "222222FFTT"),
      (0, 9, Epoch(7), some(Epoch(0)), Epoch(0), "2222222FFT"),
      (0, 9, Epoch(8), some(Epoch(0)), Epoch(0), "22222222FF"),
      (0, 9, Epoch(9), some(Epoch(0)), Epoch(0), "222222222F"),

      (0, 9, Epoch(0), none(Epoch), Epoch(0), "1111111111"),
      (0, 9, Epoch(1), none(Epoch), Epoch(0), "1111111111"),
      (0, 9, Epoch(2), none(Epoch), Epoch(0), "1111111111"),
      (0, 9, Epoch(3), none(Epoch), Epoch(0), "1111111111"),
      (0, 9, Epoch(4), none(Epoch), Epoch(0), "1111111111"),
      (0, 9, Epoch(5), none(Epoch), Epoch(0), "1111111111"),
      (0, 9, Epoch(6), none(Epoch), Epoch(0), "1111111111"),
      (0, 9, Epoch(7), none(Epoch), Epoch(0), "1111111111"),
      (0, 9, Epoch(8), none(Epoch), Epoch(0), "1111111111"),
      (0, 9, Epoch(9), none(Epoch), Epoch(0), "1111111111"),

      (0, 9, Epoch(0), none(Epoch), Epoch(0), "1111111111"),
      (0, 9, Epoch(0), none(Epoch), Epoch(1), "1111111111"),
      (0, 9, Epoch(0), none(Epoch), Epoch(2), "1111111111"),
      (0, 9, Epoch(0), none(Epoch), Epoch(3), "1111111111"),
      (0, 9, Epoch(0), none(Epoch), Epoch(4), "1111111111"),
      (0, 9, Epoch(0), none(Epoch), Epoch(5), "1111111111"),
      (0, 9, Epoch(0), none(Epoch), Epoch(6), "1111111111"),
      (0, 9, Epoch(0), none(Epoch), Epoch(7), "1111111111"),
      (0, 9, Epoch(0), none(Epoch), Epoch(8), "1111111111"),
      (0, 9, Epoch(0), none(Epoch), Epoch(9), "1111111111")
    ]

    for test in TestVectors:
      let validator = createValidator(test[2], test[3])
      let value =
        block:
          var res = ""
          for index in test[0] .. test[1]:
            let epoch = Epoch(uint64(index))
            let dres = validator.doppelgangerCheck(epoch, test[4])
            if dres.isErr():
              let errorMsg = $dres.error()
              if errorMsg.startsWith("Validator is not activated"):
                res.add("1")
              elif errorMsg.startsWith("Validator is not started"):
                res.add("2")
              else:
                res.add("E")
            else:
              if dres.get():
                res.add("T")
              else:
                res.add("F")
          res
      check value == test[5]

@ -0,0 +1,109 @@
# beacon_chain
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  unittest2,
  ../beacon_chain/validators/validator_pool

suite "Validator pool":
|
||||
test "Doppelganger for genesis validator":
|
||||
let
|
||||
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
|
||||
|
||||
check:
|
||||
not v.triggersDoppelganger(GENESIS_EPOCH)
|
||||
|
||||
v.updateValidator(ValidatorIndex(1), GENESIS_EPOCH)
|
||||
|
||||
check:
|
||||
not v.triggersDoppelganger(GENESIS_EPOCH)
|
||||
|
||||
test "Doppelganger for validator that activates in same epoch as check":
|
||||
let
|
||||
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
|
||||
now = Epoch(10).start_slot()
|
||||
|
||||
check: # We don't know when validator activates so we wouldn't trigger
|
||||
not v.triggersDoppelganger(GENESIS_EPOCH)
|
||||
not v.triggersDoppelganger(now.epoch())
|
||||
|
||||
v.updateValidator(ValidatorIndex(5), FAR_FUTURE_EPOCH)
|
||||
|
||||
check: # We still don't know when validator activates so we wouldn't trigger
|
||||
not v.triggersDoppelganger(GENESIS_EPOCH)
|
||||
not v.triggersDoppelganger(now.epoch())
|
||||
|
||||
v.updateValidator(ValidatorIndex(5), now.epoch())
|
||||
|
||||
check:
|
||||
# Activates in current epoch, shouldn't trigger
|
||||
not v.triggersDoppelganger(now.epoch())
|
||||
|
||||
test "Doppelganger for validator that activates in previous epoch":
|
||||
let
|
||||
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
|
||||
now = Epoch(10).start_slot()
|
||||
|
||||
v.updateValidator(ValidatorIndex(5), now.epoch() - 1)
|
||||
|
||||
check:
|
||||
# Already activated, should trigger
|
||||
v.triggersDoppelganger(now.epoch())
|
||||
|
||||
test "Doppelganger for validator that activates in future epoch":
|
||||
let
|
||||
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
|
||||
now = Epoch(10).start_slot()
|
||||
|
||||
v.updateValidator(ValidatorIndex(5), now.epoch() + 1)
|
||||
|
||||
check:
|
||||
# Activates in the future, should not be checked
|
||||
not v.triggersDoppelganger(now.epoch())
|
||||
|
||||
test "Doppelganger for already active validator":
|
||||
let
|
||||
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
|
||||
now = Epoch(10).start_slot()
|
||||
|
||||
v.updateValidator(ValidatorIndex(5), now.epoch() - 4)
|
||||
|
||||
check:
|
||||
v.triggersDoppelganger(now.epoch)
|
||||
|
||||
v.updateDoppelganger(now.epoch())
|
||||
|
||||
check:
|
||||
not v.triggersDoppelganger(now.epoch + 1)
|
||||
|
||||
test "Activation after check":
|
||||
let
|
||||
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
|
||||
now = Epoch(10).start_slot()
|
||||
|
||||
v.updateDoppelganger(now.epoch())
|
||||
|
||||
check:
|
||||
not v.triggersDoppelganger(now.epoch)
|
||||
|
||||
v.updateValidator(ValidatorIndex(5), now.epoch())
|
||||
|
||||
check: # already proven not to validate
|
||||
not v.triggersDoppelganger(now.epoch)
|
||||
|
||||
test "Future activation after check":
|
||||
let
|
||||
v = AttachedValidator(activationEpoch: FAR_FUTURE_EPOCH)
|
||||
now = Epoch(10).start_slot()
|
||||
|
||||
v.updateDoppelganger(now.epoch())
|
||||
v.updateValidator(ValidatorIndex(5), now.epoch() + 1)
|
||||
|
||||
check: # doesn't trigger check just after activation
|
||||
not v.triggersDoppelganger(now.epoch() + 1)
|