also load suggested fee recipient file when keymanager is disabled (#4078)
Since these files may have been created in a previous run or manually, we want to keep loading them even on nodes that don't enable the keystore API (for example, static setups).

Other changes:
* log keystore loading progressively (#3699)
* print the initial fee recipient when loading validators
* log dynamic fee recipient updates
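The lookup order this establishes, as a minimal Nim sketch (simplified from the new ConsensusManager.getFeeRecipient below; resolveFeeRecipient and its parameters are illustrative, not the actual API):

import std/options

type Eth1Address = array[20, byte]  # stand-in for web3/ethtypes.Eth1Address

proc resolveFeeRecipient(
    dynamic: Option[Eth1Address],   # set at runtime via the keymanager API
    fromFile: Option[Eth1Address],  # per-validator file under validatorsDir
    fallback: Eth1Address): Eth1Address =
  # Priority: dynamic mapping first, then the on-disk per-validator file
  # (now loaded even when the keymanager API is disabled), then the default.
  if dynamic.isSome: dynamic.get()
  elif fromFile.isSome: fromFile.get()
  else: fallback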
This commit is contained in: parent dc21897e48 · commit ef8bab58eb
@@ -55,7 +55,7 @@ type
     # Allow determination of preferred fee recipient during proposals
     # ----------------------------------------------------------------
     dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore
-    keymanagerHost: ref KeymanagerHost
+    validatorsDir: string
     defaultFeeRecipient: Eth1Address

     # Tracking last proposal forkchoiceUpdated payload information

@@ -73,7 +73,7 @@ func new*(T: type ConsensusManager,
           eth1Monitor: Eth1Monitor,
           actionTracker: ActionTracker,
           dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore,
-          keymanagerHost: ref KeymanagerHost,
+          validatorsDir: string,
           defaultFeeRecipient: Eth1Address
          ): ref ConsensusManager =
   (ref ConsensusManager)(

@@ -83,7 +83,7 @@ func new*(T: type ConsensusManager,
     eth1Monitor: eth1Monitor,
     actionTracker: actionTracker,
     dynamicFeeRecipientsStore: dynamicFeeRecipientsStore,
-    keymanagerHost: keymanagerHost,
+    validatorsDir: validatorsDir,
     forkchoiceUpdatedInfo: Opt.none ForkchoiceUpdatedInformation,
     defaultFeeRecipient: defaultFeeRecipient
   )

@@ -319,13 +319,18 @@ proc checkNextProposer*(self: ref ConsensusManager, wallSlot: Slot):
     self.actionTracker, self.dynamicFeeRecipientsStore, wallSlot)

 proc getFeeRecipient*(
-    self: ref ConsensusManager, pubkey: ValidatorPubKey, validatorIdx: ValidatorIndex,
-    epoch: Epoch): Eth1Address =
-  self.dynamicFeeRecipientsStore[].getDynamicFeeRecipient(validatorIdx, epoch).valueOr:
-    if self.keymanagerHost != nil:
-      self.keymanagerHost[].getSuggestedFeeRecipient(pubkey).valueOr:
-        self.defaultFeeRecipient
-    else:
+    self: ConsensusManager, pubkey: ValidatorPubKey,
+    validatorIdx: Opt[ValidatorIndex], epoch: Epoch): Eth1Address =
+  let dynFeeRecipient = if validatorIdx.isSome:
+    self.dynamicFeeRecipientsStore[].getDynamicFeeRecipient(
+      validatorIdx.get(), epoch)
+  else:
+    Opt.none(Eth1Address)
+
+  dynFeeRecipient.valueOr:
+    self.validatorsDir.getSuggestedFeeRecipient(
+      pubkey, self.defaultFeeRecipient).valueOr:
+      # Ignore errors and use default - errors are logged in gsfr
       self.defaultFeeRecipient

 from ../spec/datatypes/bellatrix import PayloadID

@@ -346,8 +351,8 @@ proc runProposalForkchoiceUpdated*(
       compute_timestamp_at_slot(forkyState.data, nextWallSlot)
     randomData = withState(self.dag.headState):
       get_randao_mix(forkyState.data, get_current_epoch(forkyState.data)).data
-    feeRecipient = self.getFeeRecipient(
-      nextProposer, validatorIndex, nextWallSlot.epoch)
+    feeRecipient = self[].getFeeRecipient(
+      nextProposer, Opt.some(validatorIndex), nextWallSlot.epoch)
     beaconHead = self.attestationPool[].getBeaconHead(self.dag.head)
     headBlockRoot = self.dag.loadExecutionBlockRoot(beaconHead.blck)

@@ -293,7 +293,7 @@ proc initFullNode(
     consensusManager = ConsensusManager.new(
       dag, attestationPool, quarantine, node.eth1Monitor,
       ActionTracker.init(rng, config.subscribeAllSubnets),
-      node.dynamicFeeRecipientsStore, node.keymanagerHost,
+      node.dynamicFeeRecipientsStore, config.validatorsDir,
       config.defaultFeeRecipient)
     blockProcessor = BlockProcessor.new(
       config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming,

@@ -359,8 +359,6 @@ proc initFullNode(
   node.backfiller = backfiller
   node.router = router

-  debug "Loading validators", validatorsDir = config.validatorsDir()
-
   node.addValidators()

   block:

@@ -1830,7 +1828,6 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai
   # There are no managed event loops in here, to do a graceful shutdown, but
   # letting the default Ctrl+C handler exit is safe, since we only read from
   # the db.
-
   var metadata = config.loadEth2Network()

   if config.terminalTotalDifficultyOverride.isSome:

@@ -98,11 +98,15 @@ proc initValidators(sn: var SigningNode): bool =
   info "Initializaing validators", path = sn.config.validatorsDir()
   var publicKeyIdents: seq[string]
   for keystore in listLoadableKeystores(sn.config):
+    # Not relevant in signing node
+    # TODO don't print when loading validators
+    let feeRecipient = default(Eth1Address)
     case keystore.kind
     of KeystoreKind.Local:
       # Signing node is not supposed to know genesis time, so we just set
       # `start_slot` to GENESIS_SLOT.
-      sn.attachedValidators.addLocalValidator(keystore, GENESIS_SLOT)
+      sn.attachedValidators.addLocalValidator(
+        keystore, feeRecipient, GENESIS_SLOT)
       publicKeyIdents.add("\"0x" & keystore.pubkey.toHex() & "\"")
     of KeystoreKind.Remote:
       error "Signing node do not support remote validators",

@@ -125,7 +125,10 @@ proc handleAddRemoteValidatorReq(host: KeymanagerHost,
       let
         slot = host.getBeaconTimeFn().slotOrZero
         validatorIdx = host.getValidatorIdx(keystore.pubkey)
-      host.validatorPool[].addRemoteValidator(validatorIdx, res.get, slot)
+        feeRecipient = host.getSuggestedFeeRecipient(keystore.pubkey).valueOr(
+          host.defaultFeeRecipient)
+      host.validatorPool[].addRemoteValidator(
+        res.get, validatorIdx, feeRecipient, slot)
       RequestItemStatus(status: $KeystoreStatus.imported)
     else:
       case res.error().status

@@ -1,6 +1,7 @@
 import
   std/tables,
   stew/results,
+  chronicles,
   web3/ethtypes,
   ../datatypes/base

@@ -15,10 +16,12 @@ type
 func init*(T: type DynamicFeeRecipientsStore): T =
   T(mappings: initTable[ValidatorIndex, Entry]())

-func addMapping*(store: var DynamicFeeRecipientsStore,
+proc addMapping*(store: var DynamicFeeRecipientsStore,
                  validator: ValidatorIndex,
                  feeRecipient: Eth1Address,
                  currentEpoch: Epoch) =
+  info "Updating fee recipient",
+    validator, feeRecipient = feeRecipient.toHex(), currentEpoch
   store.mappings[validator] = Entry(recipient: feeRecipient,
                                     addedAt: currentEpoch)

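The signature change from func to proc is required, not cosmetic: Nim's func implies {.noSideEffect.}, and the new "Updating fee recipient" log writes to the log sink. A reduced illustration, with echo standing in for chronicles' info:

func pureAdd(a, b: int): int =
  a + b                      # fine: no side effects, can stay a func

proc loggedAdd(a, b: int): int =
  echo "adding ", a, " ", b  # side effect: rejected inside a func
  a + b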
@@ -14,6 +14,8 @@ import
   std/[macros, strutils, parseutils, tables],
   stew/[byteutils], stint, web3/[ethtypes]

+export stint, ethtypes.toHex
+
 const
   # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/phase0/beacon-chain.md#withdrawal-prefixes
   BLS_WITHDRAWAL_PREFIX*: byte = 0

@@ -413,11 +413,15 @@ proc removeDoppelganger*(vc: ValidatorClientRef, index: ValidatorIndex) =
     discard vc.doppelgangerDetection.validators.pop(index, state)

 proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
-  let slot = vc.currentSlot()
+  let
+    slot = vc.currentSlot()
+    feeRecipient = vc.config.validatorsDir.getSuggestedFeeRecipient(
+      keystore.pubkey, vc.config.defaultFeeRecipient).valueOr(
+        vc.config.defaultFeeRecipient)
   case keystore.kind
   of KeystoreKind.Local:
     vc.attachedValidators[].addLocalValidator(keystore, Opt.none ValidatorIndex,
-                                              slot)
+                                              feeRecipient, slot)
   of KeystoreKind.Remote:
     let
       httpFlags =

@@ -442,7 +446,8 @@ proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
           res
     if len(clients) > 0:
       vc.attachedValidators[].addRemoteValidator(keystore, clients,
-                                                 Opt.none ValidatorIndex, slot)
+                                                 Opt.none ValidatorIndex,
+                                                 feeRecipient, slot)
     else:
       warn "Unable to initialize remote validator",
            validator = $keystore.pubkey

@@ -115,13 +115,6 @@ proc getValidatorIdx*(host: KeymanagerHost,
   else:
     Opt.none ValidatorIndex

-proc addLocalValidator*(host: KeymanagerHost, keystore: KeystoreData) =
-  let
-    slot = host.getBeaconTimeFn().slotOrZero
-    validatorIdx = host.getValidatorIdx(keystore.pubkey)
-
-  host.validatorPool[].addLocalValidator(keystore, validatorIdx, slot)
-
 proc echoP*(msg: string) =
   ## Prints a paragraph aligned to 80 columns
   echo ""

@@ -719,6 +712,48 @@ iterator listLoadableKeystores*(config: AnyConf): KeystoreData =
                                     {KeystoreKind.Local, KeystoreKind.Remote}):
     yield el

+type
+  FeeRecipientStatus* = enum
+    noSuchValidator
+    invalidFeeRecipientFile
+
+func validatorKeystoreDir(
+    validatorsDir: string, pubkey: ValidatorPubKey): string =
+  validatorsDir / pubkey.fsName
+
+func feeRecipientPath(validatorsDir: string,
+                      pubkey: ValidatorPubKey): string =
+  validatorsDir.validatorKeystoreDir(pubkey) / FeeRecipientFilename
+
+proc getSuggestedFeeRecipient*(
+    validatorsDir: string,
+    pubkey: ValidatorPubKey,
+    defaultFeeRecipient: Eth1Address): Result[Eth1Address, FeeRecipientStatus] =
+  # In this particular case, an error might be by design. If the file exists,
+  # but doesn't load or parse, that's a more urgent matter to fix. Many people
+  # might prefer, however, not to override their default suggested fee
+  # recipients per validator, so don't warn very loudly, if at all.
+  if not dirExists(validatorsDir.validatorKeystoreDir(pubkey)):
+    return err noSuchValidator
+
+  let feeRecipientPath = validatorsDir.feeRecipientPath(pubkey)
+  if not fileExists(feeRecipientPath):
+    return ok defaultFeeRecipient
+
+  try:
+    # Avoid being overly flexible initially. Trailing whitespace is common
+    # enough it probably should be allowed, but it is reasonable to simply
+    # disallow the mostly-pointless flexibility of leading whitespace.
+    ok Eth1Address.fromHex(strutils.strip(
+      readFile(feeRecipientPath), leading = false, trailing = true))
+  except CatchableError as exc:
+    # Because the nonexistent validator case was already checked, any failure
+    # at this point is serious enough to alert the user.
+    warn "getSuggestedFeeRecipient: failed loading fee recipient file; falling back to default fee recipient",
+      feeRecipientPath,
+      err = exc.msg
+    err invalidFeeRecipientFile
+
 type
   KeystoreGenerationErrorKind* = enum
     FailedToCreateValidatorsDir

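For reference, the file this loader reads sits next to the validator's keystore and holds one hex-encoded execution address; trailing whitespace is stripped, while leading whitespace is not stripped and fails parsing. A hypothetical round-trip in Nim (the file name assumes FeeRecipientFilename is "suggested_fee_recipient"; the paths and pubkey directory are placeholders):

import std/[os, strutils]

let
  validatorsDir = "validators"                           # hypothetical path
  keystoreDir = validatorsDir / ("0x" & repeat('a', 96)) # placeholder pubkey
createDir(keystoreDir)
writeFile(keystoreDir / "suggested_fee_recipient",
          "0x0000000000000000000000000000000000000000\n")
let recipient = strip(readFile(keystoreDir / "suggested_fee_recipient"),
                      leading = false, trailing = true)
echo recipient  # 0x0000000000000000000000000000000000000000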
@@ -1254,11 +1289,11 @@ proc generateDistirbutedStore*(rng: var HmacDrbgContext,

 func validatorKeystoreDir(host: KeymanagerHost,
                           pubkey: ValidatorPubKey): string =
-  host.validatorsDir / pubkey.fsName
+  host.validatorsDir.validatorKeystoreDir(pubkey)

 func feeRecipientPath*(host: KeymanagerHost,
                        pubkey: ValidatorPubKey): string =
-  host.validatorKeystoreDir(pubkey) / FeeRecipientFilename
+  host.validatorsDir.feeRecipientPath(pubkey)

 proc removeFeeRecipientFile*(host: KeymanagerHost,
                              pubkey: ValidatorPubKey): Result[void, string] =

@@ -1279,40 +1314,19 @@ proc setFeeRecipient*(host: KeymanagerHost, pubkey: ValidatorPubKey, feeRecipien
   io2.writeFile(validatorKeystoreDir / FeeRecipientFilename, $feeRecipient)
     .mapErr(proc(e: auto): string = "Failed to write fee recipient file: " & $e)

-type
-  FeeRecipientStatus* = enum
-    noSuchValidator
-    invalidFeeRecipientFile
-
 proc getSuggestedFeeRecipient*(
     host: KeymanagerHost,
     pubkey: ValidatorPubKey): Result[Eth1Address, FeeRecipientStatus] =
-  let validatorDir = host.validatorKeystoreDir(pubkey)
+  host.validatorsDir.getSuggestedFeeRecipient(pubkey, host.defaultFeeRecipient)

-  # In this particular case, an error might be by design. If the file exists,
-  # but doesn't load or parse, that's a more urgent matter to fix. Many people
-  # might prefer, however, not to override their default suggested fee
-  # recipients per validator, so don't warn very loudly, if at all.
-  if not dirExists(validatorDir):
-    return err noSuchValidator
-
-  let feeRecipientPath = validatorDir / FeeRecipientFilename
-  if not fileExists(feeRecipientPath):
-    return ok host.defaultFeeRecipient
-
-  try:
-    # Avoid being overly flexible initially. Trailing whitespace is common
-    # enough it probably should be allowed, but it is reasonable to simply
-    # disallow the mostly-pointless flexibility of leading whitespace.
-    ok Eth1Address.fromHex(strutils.strip(
-      readFile(feeRecipientPath), leading = false, trailing = true))
-  except CatchableError as exc:
-    # Because the nonexistent validator case was already checked, any failure
-    # at this point is serious enough to alert the user.
-    warn "getSuggestedFeeRecipient: failed loading fee recipient file; falling back to default fee recipient",
-      feeRecipientPath,
-      err = exc.msg
-    err invalidFeeRecipientFile
+proc addLocalValidator*(host: KeymanagerHost, keystore: KeystoreData) =
+  let
+    slot = host.getBeaconTimeFn().slotOrZero
+    validatorIdx = host.getValidatorIdx(keystore.pubkey)
+    feeRecipient = host.getSuggestedFeeRecipient(keystore.pubkey).valueOr(
+      host.defaultFeeRecipient)
+  host.validatorPool[].addLocalValidator(
+    keystore, validatorIdx, feeRecipient, slot)

 proc generateDeposits*(cfg: RuntimeConfig,
                        rng: var HmacDrbgContext,

@@ -91,64 +91,46 @@ proc findValidator*(validators: auto, pubkey: ValidatorPubKey): Opt[ValidatorIndex]
   else:
     Opt.some idx.ValidatorIndex

-proc addLocalValidator(node: BeaconNode, validators: auto,
-                       item: KeystoreData, slot: Slot) =
-  let
-    pubkey = item.pubkey
-    index = findValidator(validators, pubkey)
-  node.attachedValidators[].addLocalValidator(item, index, slot)
-
 # TODO: This should probably be moved to the validator_pool module
 proc addRemoteValidator*(pool: var ValidatorPool,
+                         keystore: KeystoreData,
                          index: Opt[ValidatorIndex],
-                         item: KeystoreData,
+                         feeRecipient: Eth1Address,
                          slot: Slot) =
   var clients: seq[(RestClientRef, RemoteSignerInfo)]
   let httpFlags =
     block:
       var res: set[HttpClientFlag]
-      if RemoteKeystoreFlag.IgnoreSSLVerification in item.flags:
+      if RemoteKeystoreFlag.IgnoreSSLVerification in keystore.flags:
         res.incl({HttpClientFlag.NoVerifyHost,
                   HttpClientFlag.NoVerifyServerName})
       res
   let prestoFlags = {RestClientFlag.CommaSeparatedArray}
-  for remote in item.remotes:
+  for remote in keystore.remotes:
     let client = RestClientRef.new($remote.url, prestoFlags, httpFlags)
     if client.isErr():
       warn "Unable to resolve distributed signer address",
            remote_url = $remote.url, validator = $remote.pubkey
     clients.add((client.get(), remote))
-  pool.addRemoteValidator(item, clients, index, slot)
-
-proc addLocalValidators*(node: BeaconNode,
-                         validators: openArray[KeystoreData]) =
-  let slot = node.currentSlot()
-  withState(node.dag.headState):
-    for item in validators:
-      node.addLocalValidator(forkyState.data.validators.asSeq(), item, slot)
-
-proc addRemoteValidators*(node: BeaconNode,
-                          validators: openArray[KeystoreData]) =
-  let slot = node.currentSlot()
-  withState(node.dag.headState):
-    for item in validators:
-      let index = findValidator(
-        forkyState.data.validators.asSeq(), item.pubkey)
-      node.attachedValidators[].addRemoteValidator(index, item, slot)
+  pool.addRemoteValidator(keystore, clients, index, feeRecipient, slot)

 proc addValidators*(node: BeaconNode) =
-  let (localValidators, remoteValidators) =
-    block:
-      var local, remote, distributed: seq[KeystoreData]
-      for keystore in listLoadableKeystores(node.config):
-        case keystore.kind
-        of KeystoreKind.Local:
-          local.add(keystore)
-        of KeystoreKind.Remote:
-          remote.add(keystore)
-      (local, remote)
-  node.addLocalValidators(localValidators)
-  node.addRemoteValidators(remoteValidators)
+  debug "Loading validators", validatorsDir = node.config.validatorsDir()
+  let slot = node.currentSlot()
+  for keystore in listLoadableKeystores(node.config):
+    let
+      index = withState(node.dag.headState):
+        findValidator(forkyState.data.validators.asSeq(), keystore.pubkey)
+      feeRecipient = node.consensusManager[].getFeeRecipient(
+        keystore.pubkey, index, slot.epoch)
+
+    case keystore.kind
+    of KeystoreKind.Local:
+      node.attachedValidators[].addLocalValidator(
+        keystore, index, feeRecipient, slot)
+    of KeystoreKind.Remote:
+      node.attachedValidators[].addRemoteValidator(
+        keystore, index, feeRecipient, slot)

 proc getAttachedValidator(node: BeaconNode,
                           pubkey: ValidatorPubKey): AttachedValidator =

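The rewritten addValidators also realizes the progressive keystore loading mentioned in the commit message (#3699): each keystore is attached as soon as it is discovered, with its index and fee recipient resolved per keystore, instead of being collected into local/remote batches first. A reduced sketch of the pattern, with hypothetical types:

type Keystore = object
  name: string

iterator listKeystores(): Keystore =
  # stands in for listLoadableKeystores, which yields while scanning disk
  for n in ["validator-a", "validator-b"]:
    yield Keystore(name: n)

proc attach(k: Keystore) =
  echo "attached ", k.name  # progress is visible per keystore...

for k in listKeystores():
  attach(k)                 # ...rather than only after the full scan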
@@ -357,17 +339,11 @@ proc get_execution_payload(
       asConsensusExecutionPayload(
         await execution_engine.getPayload(payload_id.get))

+# TODO remove in favor of consensusManager copy
 proc getFeeRecipient(node: BeaconNode,
                      pubkey: ValidatorPubKey,
                      validatorIdx: ValidatorIndex,
                      epoch: Epoch): Eth1Address =
-  node.dynamicFeeRecipientsStore[].getDynamicFeeRecipient(validatorIdx, epoch).valueOr:
-    if node.keymanagerHost != nil:
-      node.keymanagerHost[].getSuggestedFeeRecipient(pubkey).valueOr:
-        node.config.defaultFeeRecipient
-    else:
-      node.config.defaultFeeRecipient
+  node.consensusManager[].getFeeRecipient(pubkey, Opt.some(validatorIdx), epoch)

 from web3/engine_api_types import PayloadExecutionStatus

@@ -41,7 +41,6 @@ type
   ValidatorConnection* = RestClientRef

   AttachedValidator* = ref object
-    pubkey*: ValidatorPubKey
     data*: KeystoreData
     case kind*: ValidatorKind
     of ValidatorKind.Local:

@@ -75,6 +74,9 @@ type
     validators*: Table[ValidatorPubKey, AttachedValidator]
     slashingProtection*: SlashingProtectionDB

+template pubkey*(v: AttachedValidator): ValidatorPubKey =
+  v.data.pubkey
+
 func shortLog*(v: AttachedValidator): string =
   case v.kind
   of ValidatorKind.Local:

@@ -93,37 +95,46 @@ func init*(T: type ValidatorPool,
 template count*(pool: ValidatorPool): int =
   len(pool.validators)

-proc addLocalValidator*(pool: var ValidatorPool, item: KeystoreData,
-                        index: Opt[ValidatorIndex], slot: Slot) =
-  doAssert item.kind == KeystoreKind.Local
-  let pubkey = item.pubkey
+proc addLocalValidator*(
+    pool: var ValidatorPool, keystore: KeystoreData, index: Opt[ValidatorIndex],
+    feeRecipient: Eth1Address, slot: Slot) =
+  doAssert keystore.kind == KeystoreKind.Local
   let v = AttachedValidator(
-    kind: ValidatorKind.Local, pubkey: pubkey, index: index, data: item,
+    kind: ValidatorKind.Local, index: index, data: keystore,
     externalBuilderRegistration: Opt.none SignedValidatorRegistrationV1,
     startSlot: slot)
-  pool.validators[pubkey] = v
-  notice "Local validator attached", pubkey, validator = shortLog(v),
-         start_slot = slot
+  pool.validators[v.pubkey] = v
+
+  # Fee recipient may change after startup, but we log the initial value here
+  notice "Local validator attached",
+    pubkey = v.pubkey,
+    validator = shortLog(v),
+    initial_fee_recipient = feeRecipient.toHex(),
+    start_slot = slot
   validators.set(pool.count().int64)

-proc addLocalValidator*(pool: var ValidatorPool, item: KeystoreData,
-                        slot: Slot) =
-  addLocalValidator(pool, item, Opt.none ValidatorIndex, slot)
+proc addLocalValidator*(
+    pool: var ValidatorPool, keystore: KeystoreData, feeRecipient: Eth1Address,
+    slot: Slot) =
+  addLocalValidator(pool, keystore, Opt.none ValidatorIndex, feeRecipient, slot)

-proc addRemoteValidator*(pool: var ValidatorPool, item: KeystoreData,
+proc addRemoteValidator*(pool: var ValidatorPool, keystore: KeystoreData,
                          clients: seq[(RestClientRef, RemoteSignerInfo)],
-                         index: Opt[ValidatorIndex], slot: Slot) =
-  doAssert item.kind == KeystoreKind.Remote
-  let pubkey = item.pubkey
+                         index: Opt[ValidatorIndex], feeRecipient: Eth1Address,
+                         slot: Slot) =
+  doAssert keystore.kind == KeystoreKind.Remote
   let v = AttachedValidator(
-    kind: ValidatorKind.Remote, pubkey: pubkey, index: index, data: item,
+    kind: ValidatorKind.Remote, index: index, data: keystore,
     clients: clients,
     externalBuilderRegistration: Opt.none SignedValidatorRegistrationV1,
     startSlot: slot)
-  pool.validators[pubkey] = v
-  notice "Remote validator attached", pubkey, validator = shortLog(v),
-         remote_signer = $item.remotes,
-         start_slot = slot
+  pool.validators[v.pubkey] = v
+  notice "Remote validator attached",
+    pubkey = v.pubkey,
+    validator = shortLog(v),
+    remote_signer = $keystore.remotes,
+    initial_fee_recipient = feeRecipient.toHex(),
+    start_slot = slot
   validators.set(pool.count().int64)

 proc getValidator*(pool: ValidatorPool,

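Dropping the stored pubkey field removes a second copy of data that already lives in the keystore, and the new template keeps v.pubkey working unchanged. A self-contained illustration with simplified types:

type
  KeystoreData = object
    pubkey: string       # simplified; the real field is a ValidatorPubKey
  AttachedValidator = ref object
    data: KeystoreData   # the pubkey now lives only here

template pubkey(v: AttachedValidator): string =
  v.data.pubkey          # forwards the old field access

let v = AttachedValidator(data: KeystoreData(pubkey: "0xab..."))
echo v.pubkey            # resolves through the template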
@@ -45,7 +45,7 @@ suite "Block processor" & preset():
       keymanagerHost: ref KeymanagerHost
     consensusManager = ConsensusManager.new(
       dag, attestationPool, quarantine, eth1Monitor, actionTracker,
-      newClone(DynamicFeeRecipientsStore.init()), keymanagerHost,
+      newClone(DynamicFeeRecipientsStore.init()), "",
       default(Eth1Address))
     state = newClone(dag.headState)
     cache = StateCache()

@@ -220,8 +220,9 @@ suite "Gossip validation - Extra": # Not based on preset config
         expectedCount = subcommittee.count(index)
         pubkey = state[].data.validators.item(index).pubkey
         keystoreData = KeystoreData(kind: KeystoreKind.Local,
                                     pubkey: pubkey,
                                     privateKey: MockPrivKeys[index])
-        validator = AttachedValidator(pubkey: pubkey,
+        validator = AttachedValidator(
           kind: ValidatorKind.Local, data: keystoreData, index: Opt.some index)
         resMsg = waitFor getSyncCommitteeMessage(
           validator, state[].data.fork, state[].data.genesis_validators_root, slot,