Per-validator doppelganger protection. (#4304)

* Initial commit.

* NextAttestationEntry type.

* Add doppelgangerCheck and actual check.

* Recover deleted check.

* Remove NextAttestationEntry changes.

* More cleanups for NextAttestationEntry.

* Address review comments.

* Remove GENESIS_EPOCH specific check branch.

* Decrease number of full epochs for doppelganger check in VC.

Co-authored-by: zah <zahary@status.im>
Eugene Kabanov 2022-11-20 15:55:43 +02:00 committed by GitHub
parent cc1464a935
commit eb661565ed
12 changed files with 281 additions and 67 deletions

View File

@@ -826,7 +826,7 @@ proc prune*(pool: var AttestationPool) =
# but we'll keep running hoping that the fork choice will recover eventually
error "Couldn't prune fork choice, bug?", err = v.error()
proc validatorSeenAtEpoch*(pool: var AttestationPool, epoch: Epoch,
proc validatorSeenAtEpoch*(pool: AttestationPool, epoch: Epoch,
vindex: ValidatorIndex): bool =
if uint64(vindex) < lenu64(pool.nextAttestationEpoch):
let mark = pool.nextAttestationEpoch[vindex]

View File

@@ -247,24 +247,11 @@ proc setupDoppelgangerDetection*(self: var Eth2Processor, slot: Slot) =
# can be up to around 10,000 Wei. Thus, skipping attestations isn't cheap
# and one should gauge the likelihood of this simultaneous launch to tune
# the epoch delay to one's perceived risk.
const duplicateValidatorEpochs = 2
# TODO:
# We should switch to a model where this value is set for each validator
# as it gets added to the validator pool.
# Currently, we set it here because otherwise if the client is started
# without any validators, it will remain set to FAR_FUTURE_EPOCH and
# any new validators added through the Keymanager API will never get
# activated.
self.doppelgangerDetection.broadcastStartEpoch =
slot.epoch + duplicateValidatorEpochs
if self.validatorPool[].count() > 0:
if self.doppelgangerDetectionEnabled:
notice "Setting up doppelganger detection",
epoch = slot.epoch,
broadcastStartEpoch = self.doppelgangerDetection.broadcastStartEpoch
if self.doppelgangerDetectionEnabled:
notice "Setting up doppelganger detection",
epoch = slot.epoch,
broadcast_epoch = self.doppelgangerDetection.broadcastStartEpoch,
nodestart_epoch = self.doppelgangerDetection.nodeLaunchSlot.epoch()
proc checkForPotentialDoppelganger(
self: var Eth2Processor, attestation: Attestation,
@@ -278,17 +265,32 @@ proc checkForPotentialDoppelganger(
if attestation.data.slot <= self.doppelgangerDetection.nodeLaunchSlot + 1:
return
if attestation.data.slot.epoch <
self.doppelgangerDetection.broadcastStartEpoch and
self.doppelgangerDetection.nodeLaunchSlot > GENESIS_SLOT:
for validatorIndex in attesterIndices:
let validatorPubkey = self.dag.validatorKey(validatorIndex).get().toPubKey()
if not isNil(self.validatorPool[].getValidator(validatorPubkey)):
warn "We believe you are currently running another instance of the same validator. We've disconnected you from the network as this presents a significant slashing risk. Possible next steps are (a) making sure you've disconnected your validator from your old machine before restarting the client; and (b) running the client again with the gossip-slashing-protection option disabled, only if you are absolutely sure this is the only instance of your validator running, and reporting the issue at https://github.com/status-im/nimbus-eth2/issues.",
validatorIndex,
validatorPubkey,
attestation = shortLog(attestation)
let broadcastStartEpoch = self.doppelgangerDetection.broadcastStartEpoch
for validatorIndex in attesterIndices:
let
validatorPubkey = self.dag.validatorKey(validatorIndex).get().toPubKey()
validator = self.validatorPool[].getValidator(validatorPubkey)
if not(isNil(validator)):
let res = validator.doppelgangerCheck(attestation.data.slot.epoch(),
broadcastStartEpoch)
if res.isOk() and not(res.get()):
warn "We believe you are currently running another instance of the " &
"same validator. We've disconnected you from the network as " &
"this presents a significant slashing risk. Possible next steps "&
"are (a) making sure you've disconnected your validator from " &
"your old machine before restarting the client; and (b) running " &
"the client again with the gossip-slashing-protection option " &
"disabled, only if you are absolutely sure this is the only " &
"instance of your validator running, and reporting the issue " &
"at https://github.com/status-im/nimbus-eth2/issues.",
validator = shortLog(validator),
start_slot = validator.startSlot,
validator_index = validatorIndex,
activation_epoch = validator.activationEpoch.get(),
broadcast_epoch = broadcastStartEpoch,
attestation = shortLog(attestation)
# Avoid colliding with
# https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Process%20Exit%20Codes
const QuitDoppelganger = 129
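For orientation, a minimal standalone sketch of the broadcast-start computation from the first hunk above (plain uint64 stands in for the Slot/Epoch distinct types, and slotsPerEpoch is assumed to be the mainnet value):

const
  slotsPerEpoch = 32'u64            # assumed mainnet SLOTS_PER_EPOCH
  duplicateValidatorEpochs = 2'u64  # the constant used in the hunk above

func epochOf(slot: uint64): uint64 =
  slot div slotsPerEpoch

func broadcastStartEpoch(launchSlot: uint64): uint64 =
  # Validator duties stay muted until this epoch is reached.
  epochOf(launchSlot) + duplicateValidatorEpochs

when isMainModule:
  doAssert broadcastStartEpoch(96) == 5  # launched in epoch 3, watch 2 epochs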

View File

@@ -654,9 +654,9 @@ proc init*(T: type BeaconNode,
info "Loading slashing protection database (v2)",
path = config.validatorsDir()
proc getValidatorIdx(pubkey: ValidatorPubKey): Opt[ValidatorIndex] =
proc getValidatorAndIdx(pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] =
withState(dag.headState):
findValidator(forkyState().data.validators.asSeq(), pubkey)
getValidator(forkyState().data.validators.asSeq(), pubkey)
let
slashingProtectionDB =
@@ -674,7 +674,7 @@ proc init*(T: type BeaconNode,
config.validatorsDir,
config.secretsDir,
config.defaultFeeRecipient,
getValidatorIdx,
getValidatorAndIdx,
getBeaconTime)
else: nil

View File

@@ -276,8 +276,8 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {.async.} =
vc.syncCommitteeService = await SyncCommitteeServiceRef.init(vc)
vc.keymanagerServer = keymanagerInitResult.server
if vc.keymanagerServer != nil:
func getValidatorIdx(pubkey: ValidatorPubKey): Opt[ValidatorIndex] =
Opt.none ValidatorIndex
func getValidatorData(pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] =
Opt.none(ValidatorAndIndex)
vc.keymanagerHost = newClone KeymanagerHost.init(
validatorPool,
@@ -286,7 +286,7 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {.async.} =
vc.config.validatorsDir,
vc.config.secretsDir,
vc.config.defaultFeeRecipient,
getValidatorIdx,
getValidatorData,
vc.beaconClock.getBeaconTimeFn)
except CatchableError as exc:

View File

@@ -128,11 +128,21 @@ proc handleAddRemoteValidatorReq(host: KeymanagerHost,
if res.isOk:
let
slot = host.getBeaconTimeFn().slotOrZero
validatorIdx = host.getValidatorIdx(keystore.pubkey)
validator = host.getValidatorData(keystore.pubkey)
feeRecipient = host.getSuggestedFeeRecipient(keystore.pubkey).valueOr(
host.defaultFeeRecipient)
index =
if validator.isSome():
Opt.some(validator.get().index)
else:
Opt.none(ValidatorIndex)
activationEpoch =
if validator.isSome():
Opt.some(validator.get().validator.activation_epoch)
else:
Opt.none(Epoch)
host.validatorPool[].addRemoteValidator(
res.get, validatorIdx, feeRecipient, slot)
res.get, index, feeRecipient, slot, activationEpoch)
RequestItemStatus(status: $KeystoreStatus.imported)
else:
case res.error().status
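The index/activation-epoch unwrapping above reappears at several call sites in this commit; the following standalone sketch factors out the same logic (splitValidatorData is a hypothetical helper, with the relevant types minimally stubbed):

import stew/results

type
  ValidatorIndex = distinct uint64
  Epoch = distinct uint64
  Validator = object
    activation_epoch: Epoch
  ValidatorAndIndex = object
    index: ValidatorIndex
    validator: Validator

func splitValidatorData(data: Opt[ValidatorAndIndex]):
    tuple[index: Opt[ValidatorIndex], activationEpoch: Opt[Epoch]] =
  # Unwrap both fields, defaulting each to Opt.none when absent.
  if data.isSome():
    (Opt.some(data.get().index),
     Opt.some(data.get().validator.activation_epoch))
  else:
    (Opt.none(ValidatorIndex), Opt.none(Epoch))

when isMainModule:
  let data = Opt.some(ValidatorAndIndex(
    index: ValidatorIndex(7),
    validator: Validator(activation_epoch: Epoch(5))))
  doAssert splitValidatorData(data).index.isSome()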

View File

@@ -631,7 +631,8 @@ proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
case keystore.kind
of KeystoreKind.Local:
vc.attachedValidators[].addLocalValidator(keystore, Opt.none ValidatorIndex,
feeRecipient, slot)
feeRecipient, slot,
Opt.none(Epoch))
of KeystoreKind.Remote:
let
httpFlags =
@@ -656,8 +657,9 @@ proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
res
if len(clients) > 0:
vc.attachedValidators[].addRemoteValidator(keystore, clients,
Opt.none ValidatorIndex,
feeRecipient, slot)
Opt.none(ValidatorIndex),
feeRecipient, slot,
Opt.none(Epoch))
else:
warn "Unable to initialize remote validator",
validator = $keystore.pubkey

View File

@@ -14,7 +14,7 @@ const
logScope: service = ServiceName
const
DOPPELGANGER_EPOCHS_COUNT = 2
DOPPELGANGER_EPOCHS_COUNT = 1
proc getCheckingList*(vc: ValidatorClientRef): seq[ValidatorIndex] =
var res: seq[ValidatorIndex]

View File

@@ -67,8 +67,12 @@ type
ImportResult*[T] = Result[T, AddValidatorFailure]
ValidatorPubKeyToIdxFn* =
proc (pubkey: ValidatorPubKey): Opt[ValidatorIndex]
ValidatorAndIndex* = object
index*: ValidatorIndex
validator*: Validator
ValidatorPubKeyToDataFn* =
proc (pubkey: ValidatorPubKey): Opt[ValidatorAndIndex]
{.raises: [Defect], gcsafe.}
KeymanagerHost* = object
@@ -78,7 +82,7 @@ type
validatorsDir*: string
secretsDir*: string
defaultFeeRecipient*: Eth1Address
getValidatorIdxFn*: ValidatorPubKeyToIdxFn
getValidatorAndIdxFn*: ValidatorPubKeyToDataFn
getBeaconTimeFn*: GetBeaconTimeFn
const
@@ -97,7 +101,7 @@ func init*(T: type KeymanagerHost,
validatorsDir: string,
secretsDir: string,
defaultFeeRecipient: Eth1Address,
getValidatorIdxFn: ValidatorPubKeyToIdxFn,
getValidatorAndIdxFn: ValidatorPubKeyToDataFn,
getBeaconTimeFn: GetBeaconTimeFn): T =
T(validatorPool: validatorPool,
rng: rng,
@@ -105,16 +109,25 @@ func init*(T: type KeymanagerHost,
validatorsDir: validatorsDir,
secretsDir: secretsDir,
defaultFeeRecipient: defaultFeeRecipient,
getValidatorIdxFn: getValidatorIdxFn,
getValidatorAndIdxFn: getValidatorAndIdxFn,
getBeaconTimeFn: getBeaconTimeFn)
proc getValidatorIdx*(host: KeymanagerHost,
pubkey: ValidatorPubKey): Opt[ValidatorIndex] =
if host.getValidatorIdxFn != nil:
host.getValidatorIdxFn(pubkey)
if not(isNil(host.getValidatorAndIdxFn)):
let res = host.getValidatorAndIdxFn(pubkey).valueOr:
return Opt.none ValidatorIndex
Opt.some res.index
else:
Opt.none ValidatorIndex
proc getValidatorData*(host: KeymanagerHost,
pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] =
if not(isNil(host.getValidatorAndIdxFn)):
host.getValidatorAndIdxFn(pubkey)
else:
Opt.none ValidatorAndIndex
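The valueOr block above exits the surrounding proc early when the Opt is empty; a small illustration of that stew/results idiom (tryParseInt and double are made-up names for this example):

import std/strutils
import stew/results

func tryParseInt(s: string): Opt[int] =
  try:
    Opt.some(parseInt(s))
  except ValueError:
    Opt.none(int)

proc double(s: string): Opt[int] =
  let v = tryParseInt(s).valueOr:
    return Opt.none(int)  # early exit, as in getValidatorIdx above
  Opt.some(v * 2)

when isMainModule:
  doAssert double("21") == Opt.some(42)
  doAssert double("x").isNone()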
proc echoP*(msg: string) =
## Prints a paragraph aligned to 80 columns
echo ""
@@ -1324,11 +1337,22 @@ proc getSuggestedFeeRecipient*(
proc addLocalValidator*(host: KeymanagerHost, keystore: KeystoreData) =
let
slot = host.getBeaconTimeFn().slotOrZero
validatorIdx = host.getValidatorIdx(keystore.pubkey)
data = host.getValidatorData(keystore.pubkey)
feeRecipient = host.getSuggestedFeeRecipient(keystore.pubkey).valueOr(
host.defaultFeeRecipient)
host.validatorPool[].addLocalValidator(
keystore, validatorIdx, feeRecipient, slot)
index =
if data.isSome():
Opt.some(data.get().index)
else:
Opt.none(ValidatorIndex)
activationEpoch =
if data.isSome():
Opt.some(data.get().validator.activation_epoch)
else:
Opt.none(Epoch)
host.validatorPool[].addLocalValidator(keystore, index, feeRecipient, slot,
activationEpoch)
proc generateDeposits*(cfg: RuntimeConfig,
rng: var HmacDrbgContext,

View File

@@ -93,33 +93,45 @@ type
unsynced
optimistic
proc findValidator*(validators: auto, pubkey: ValidatorPubKey): Opt[ValidatorIndex] =
proc getValidator*(validators: auto,
pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] =
let idx = validators.findIt(it.pubkey == pubkey)
if idx == -1:
# We allow adding a validator even if its key is not in the state registry:
# it might be that the deposit for this validator has not yet been processed
notice "Validator deposit not yet processed, monitoring", pubkey
Opt.none ValidatorIndex
Opt.none ValidatorAndIndex
else:
Opt.some idx.ValidatorIndex
Opt.some ValidatorAndIndex(index: ValidatorIndex(idx),
validator: validators[idx])
proc addValidators*(node: BeaconNode) =
debug "Loading validators", validatorsDir = node.config.validatorsDir()
let slot = node.currentSlot()
for keystore in listLoadableKeystores(node.config):
let
index = withState(node.dag.headState):
findValidator(forkyState.data.validators.asSeq(), keystore.pubkey)
data = withState(node.dag.headState):
getValidator(forkyState.data.validators.asSeq(), keystore.pubkey)
index =
if data.isSome():
Opt.some(data.get().index)
else:
Opt.none(ValidatorIndex)
feeRecipient = node.consensusManager[].getFeeRecipient(
keystore.pubkey, index, slot.epoch)
activationEpoch =
if data.isSome():
Opt.some(data.get().validator.activation_epoch)
else:
Opt.none(Epoch)
case keystore.kind
of KeystoreKind.Local:
node.attachedValidators[].addLocalValidator(
keystore, index, feeRecipient, slot)
keystore, index, feeRecipient, slot, activationEpoch)
of KeystoreKind.Remote:
node.attachedValidators[].addRemoteValidator(
keystore, index, feeRecipient, slot)
keystore, index, feeRecipient, slot, activationEpoch)
proc getAttachedValidator(node: BeaconNode,
pubkey: ValidatorPubKey): AttachedValidator =

View File

@@ -30,6 +30,7 @@ export
const
WEB3_SIGNER_DELAY_TOLERANCE = 3.seconds
DOPPELGANGER_EPOCHS_COUNT = 2
declareGauge validators,
"Number of validators attached to the beacon node"
@@ -100,12 +101,16 @@ template count*(pool: ValidatorPool): int =
proc addLocalValidator*(
pool: var ValidatorPool, keystore: KeystoreData, index: Opt[ValidatorIndex],
feeRecipient: Eth1Address, slot: Slot) =
feeRecipient: Eth1Address, slot: Slot, activationEpoch: Opt[Epoch]) =
doAssert keystore.kind == KeystoreKind.Local
let v = AttachedValidator(
kind: ValidatorKind.Local, index: index, data: keystore,
kind: ValidatorKind.Local,
index: index,
data: keystore,
externalBuilderRegistration: Opt.none SignedValidatorRegistrationV1,
startSlot: slot)
startSlot: slot,
activationEpoch: activationEpoch
)
pool.validators[v.pubkey] = v
# Fee recipient may change after startup, but we log the initial value here
@@ -124,13 +129,17 @@ proc addLocalValidator*(
proc addRemoteValidator*(pool: var ValidatorPool, keystore: KeystoreData,
clients: seq[(RestClientRef, RemoteSignerInfo)],
index: Opt[ValidatorIndex], feeRecipient: Eth1Address,
slot: Slot) =
slot: Slot, activationEpoch: Opt[Epoch]) =
doAssert keystore.kind == KeystoreKind.Remote
let v = AttachedValidator(
kind: ValidatorKind.Remote, index: index, data: keystore,
kind: ValidatorKind.Remote,
index: index,
data: keystore,
clients: clients,
externalBuilderRegistration: Opt.none SignedValidatorRegistrationV1,
startSlot: slot)
startSlot: slot,
activationEpoch: activationEpoch
)
pool.validators[v.pubkey] = v
notice "Remote validator attached",
pubkey = v.pubkey,
@@ -184,7 +193,8 @@ proc addRemoteValidator*(pool: var ValidatorPool,
keystore: KeystoreData,
index: Opt[ValidatorIndex],
feeRecipient: Eth1Address,
slot: Slot) =
slot: Slot,
activationEpoch: Opt[Epoch]) =
var clients: seq[(RestClientRef, RemoteSignerInfo)]
let httpFlags =
block:
@@ -200,7 +210,8 @@ proc addRemoteValidator*(pool: var ValidatorPool,
warn "Unable to resolve distributed signer address",
remote_url = $remote.url, validator = $remote.pubkey
clients.add((client.get(), remote))
pool.addRemoteValidator(keystore, clients, index, feeRecipient, slot)
pool.addRemoteValidator(keystore, clients, index, feeRecipient, slot,
activationEpoch)
iterator publicKeys*(pool: ValidatorPool): ValidatorPubKey =
for item in pool.validators.keys():
@@ -215,6 +226,47 @@ iterator items*(pool: ValidatorPool): AttachedValidator =
for item in pool.validators.values():
yield item
proc doppelgangerCheck*(validator: AttachedValidator,
epoch: Epoch,
broadcastEpoch: Epoch): Result[bool, cstring] =
## Check ``validator`` for a doppelganger.
##
## Returns ``true`` if `validator` has no doppelganger and may perform
## validator actions.
##
## Returns ``false`` if `validator` has a doppelganger on the network and
## MUST NOT perform any validator actions.
##
## Returns an error if the doppelganger check cannot be performed.
let
# Epoch in which the validator was attached to this beacon node.
startEpoch = validator.startSlot.epoch()
# The validator's activation_epoch, if known.
activationEpoch = validator.activationEpoch
currentStartEpoch = max(startEpoch, broadcastEpoch)
if activationEpoch.isNone() or activationEpoch.get() > epoch:
# The validator's `activation_epoch` is unknown or still in the future,
# so it must not participate in the network yet.
err("Validator is not activated yet, or beacon node clock is invalid")
else:
if currentStartEpoch > epoch:
err("Validator is not started or broadcast is not started, or " &
"beacon node clock is invalid")
else:
let actEpoch = activationEpoch.get()
# max(startEpoch, broadcastEpoch) <= activationEpoch <= epoch
if (currentStartEpoch <= actEpoch) and (actEpoch <= epoch):
# The validator was activated inside the watched window, so no
# doppelganger can exist yet and protection can be skipped.
ok(true)
else:
if epoch - currentStartEpoch < DOPPELGANGER_EPOCHS_COUNT:
# The validator started inside the unsafe period.
ok(false)
else:
# The validator has passed the checking period, so it is allowed to
# participate in the network.
ok(true)
proc signWithDistributedKey(v: AttachedValidator,
request: Web3SignerRequest): Future[SignatureResult]
{.async.} =
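To make the branch order in doppelgangerCheck concrete, here is a standalone restatement of its decision rules (a sketch with plain uint64 epochs; doppelgangerEpochsCount mirrors DOPPELGANGER_EPOCHS_COUNT above):

import stew/results

const doppelgangerEpochsCount = 2'u64

func check(epoch, startEpoch, broadcastEpoch: uint64,
           activationEpoch: Opt[uint64]): Result[bool, cstring] =
  let curStart = max(startEpoch, broadcastEpoch)
  if activationEpoch.isNone() or activationEpoch.get() > epoch:
    err("not activated")    # the key cannot be attesting anywhere yet
  elif curStart > epoch:
    err("not started")      # broadcasting has not begun for this node
  elif curStart <= activationEpoch.get() and activationEpoch.get() <= epoch:
    ok(true)                # activated inside the watched window
  elif epoch - curStart < doppelgangerEpochsCount:
    ok(false)               # still inside the unsafe observation period
  else:
    ok(true)                # watched long enough to be considered safe

when isMainModule:
  # start 0, activation 5, broadcast 6: epochs 6..7 stay muted and epoch 8
  # is the first safe one, matching the "111112FFTT" vector below.
  doAssert not check(6, 0, 6, Opt.some(5'u64)).get()
  doAssert not check(7, 0, 6, Opt.some(5'u64)).get()
  doAssert check(8, 0, 6, Opt.some(5'u64)).get()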

View File

@@ -46,7 +46,8 @@ import # Unit test
./fork_choice/tests_fork_choice,
./consensus_spec/all_tests as consensus_all_tests,
./slashing_protection/test_fixtures,
./slashing_protection/test_slashing_protection_db
./slashing_protection/test_slashing_protection_db,
./test_doppelganger
import # Refactor state transition unit tests
# In mainnet these take 2 minutes and are empty TODOs

tests/test_doppelganger.nim (new file, 111 lines)
View File

@@ -0,0 +1,111 @@
# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.used.}
import
# Stdlib modules
std/strutils, std/options,
# Status modules
stew/results,
unittest2,
# Local modules
../beacon_chain/validators/validator_pool
proc createValidator*(startEpoch: Epoch,
activatedEpoch: Option[Epoch]): AttachedValidator =
let aepoch =
if activatedEpoch.isSome():
Opt.some(activatedEpoch.get())
else:
Opt.none(Epoch)
AttachedValidator(
startSlot: startEpoch.start_slot(),
activationEpoch: aepoch
)
suite "Doppelganger protection test suite":
test "doppelgangerCheck() test":
const TestVectors = [
(0, 9, Epoch(0), some(Epoch(0)), Epoch(0), "TTTTTTTTTT"),
(0, 9, Epoch(0), some(Epoch(1)), Epoch(0), "1TTTTTTTTT"),
(0, 9, Epoch(0), some(Epoch(2)), Epoch(0), "11TTTTTTTT"),
(0, 9, Epoch(0), some(Epoch(3)), Epoch(0), "111TTTTTTT"),
(0, 9, Epoch(0), some(Epoch(4)), Epoch(0), "1111TTTTTT"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(0), "11111TTTTT"),
(0, 9, Epoch(0), some(Epoch(6)), Epoch(0), "111111TTTT"),
(0, 9, Epoch(0), some(Epoch(7)), Epoch(0), "1111111TTT"),
(0, 9, Epoch(0), some(Epoch(8)), Epoch(0), "11111111TT"),
(0, 9, Epoch(0), some(Epoch(9)), Epoch(0), "111111111T"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(0), "11111TTTTT"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(1), "11111TTTTT"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(2), "11111TTTTT"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(3), "11111TTTTT"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(4), "11111TTTTT"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(5), "11111TTTTT"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(6), "111112FFTT"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(7), "1111122FFT"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(8), "11111222FF"),
(0, 9, Epoch(0), some(Epoch(5)), Epoch(9), "111112222F"),
(0, 9, Epoch(1), some(Epoch(0)), Epoch(0), "2FFTTTTTTT"),
(0, 9, Epoch(2), some(Epoch(0)), Epoch(0), "22FFTTTTTT"),
(0, 9, Epoch(3), some(Epoch(0)), Epoch(0), "222FFTTTTT"),
(0, 9, Epoch(4), some(Epoch(0)), Epoch(0), "2222FFTTTT"),
(0, 9, Epoch(5), some(Epoch(0)), Epoch(0), "22222FFTTT"),
(0, 9, Epoch(6), some(Epoch(0)), Epoch(0), "222222FFTT"),
(0, 9, Epoch(7), some(Epoch(0)), Epoch(0), "2222222FFT"),
(0, 9, Epoch(8), some(Epoch(0)), Epoch(0), "22222222FF"),
(0, 9, Epoch(9), some(Epoch(0)), Epoch(0), "222222222F"),
(0, 9, Epoch(0), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(1), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(2), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(3), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(4), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(5), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(6), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(7), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(8), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(9), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(0), none(Epoch), Epoch(0), "1111111111"),
(0, 9, Epoch(0), none(Epoch), Epoch(1), "1111111111"),
(0, 9, Epoch(0), none(Epoch), Epoch(2), "1111111111"),
(0, 9, Epoch(0), none(Epoch), Epoch(3), "1111111111"),
(0, 9, Epoch(0), none(Epoch), Epoch(4), "1111111111"),
(0, 9, Epoch(0), none(Epoch), Epoch(5), "1111111111"),
(0, 9, Epoch(0), none(Epoch), Epoch(6), "1111111111"),
(0, 9, Epoch(0), none(Epoch), Epoch(7), "1111111111"),
(0, 9, Epoch(0), none(Epoch), Epoch(8), "1111111111"),
(0, 9, Epoch(0), none(Epoch), Epoch(9), "1111111111")
]
for test in TestVectors:
let validator = createValidator(test[2], test[3])
let value =
block:
var res = ""
for index in test[0] .. test[1]:
let epoch = Epoch(uint64(index))
let dres = validator.doppelgangerCheck(epoch, test[4])
if dres.isErr():
let errorMsg = $dres.error()
if errorMsg.startsWith("Validator is not activated"):
res.add("1")
elif errorMsg.startsWith("Validator is not started"):
res.add("2")
else:
res.add("E")
else:
if dres.get():
res.add("T")
else:
res.add("F")
res
check value == test[5]