# beacon_chain
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  std/[tables, os, sets, sequtils, strutils, uri, algorithm],
  stew/[base10, results, byteutils],
  bearssl/rand, chronos, presto, presto/client as presto_client,
  chronicles, confutils,
  metrics, metrics/chronos_httpserver,
  ".."/spec/datatypes/[base, phase0, altair],
  ".."/spec/[eth2_merkleization, helpers, signatures, validator],
  ".."/spec/eth2_apis/[eth2_rest_serialization, rest_beacon_client,
                       dynamic_fee_recipients],
  ".."/consensus_object_pools/block_pools_types,
  ".."/validators/[keystore_management, validator_pool, slashing_protection,
                   validator_duties],
  ".."/[conf, beacon_clock, version, nimbus_binary_common]

from std/times import Time, toUnix, fromUnix, getTime

export
  os, sets, sequtils, chronos, presto, chronicles, confutils,
  nimbus_binary_common, version, conf, tables, results, base10,
  byteutils, presto_client, eth2_rest_serialization, rest_beacon_client,
  phase0, altair, helpers, signatures, validator, eth2_merkleization,
  beacon_clock, keystore_management, slashing_protection, validator_pool,
  dynamic_fee_recipients, Time, toUnix, fromUnix, getTime, block_pools_types,
  base, metrics

const
  SYNC_TOLERANCE* = 4'u64
  SLOT_LOOKAHEAD* = 1.seconds
  HISTORICAL_DUTIES_EPOCHS* = 2'u64
  TIME_DELAY_FROM_SLOT* = 79.milliseconds
  SUBSCRIPTION_BUFFER_SLOTS* = 2'u64
  EPOCHS_BETWEEN_VALIDATOR_REGISTRATION* = 1

  DelayBuckets* = [-Inf, -4.0, -2.0, -1.0, -0.5, -0.1, -0.05,
                   0.05, 0.1, 0.5, 1.0, 2.0, 4.0, 8.0, Inf]

  ZeroTimeDiff* = TimeDiff(nanoseconds: 0'i64)

type
  ServiceState* {.pure.} = enum
    Initialized, Running, Error, Closing, Closed

  RegistrationKind* {.pure.} = enum
    Cached, IncorrectTime, MissingIndex, MissingFee, MissingGasLimit,
    ErrorSignature, NoSignature

  PendingValidatorRegistration* = object
    registration*: SignedValidatorRegistrationV1
    future*: Future[SignatureResult]

  ClientServiceRef* = ref object of RootObj
    name*: string
    state*: ServiceState
    lifeFut*: Future[void]
    client*: ValidatorClientRef

  DutiesServiceRef* = ref object of ClientServiceRef
    pollingAttesterDutiesTask*: Future[void]
    pollingSyncDutiesTask*: Future[void]
    syncSubscriptionEpoch*: Opt[Epoch]

  FallbackServiceRef* = ref object of ClientServiceRef
    changesEvent*: AsyncEvent

  ForkServiceRef* = ref object of ClientServiceRef

  AttestationServiceRef* = ref object of ClientServiceRef

  BlockServiceRef* = ref object of ClientServiceRef

  SyncCommitteeServiceRef* = ref object of ClientServiceRef

  DoppelgangerServiceRef* = ref object of ClientServiceRef
    enabled*: bool

  DutyAndProof* = object
    epoch*: Epoch
    dependentRoot*: Eth2Digest
    data*: RestAttesterDuty
    slotSig*: Opt[ValidatorSig]

  SyncCommitteeDuty* = RestSyncCommitteeDuty

  SyncCommitteeSubscriptionInfo* = object
    validator_index*: ValidatorIndex
    validator_sync_committee_indices*: seq[IndexInSyncCommittee]

  ProposerTask* = object
    duty*: RestProposerDuty
    future*: Future[void]

  ProposedData* = object
    epoch*: Epoch
    dependentRoot*: Eth2Digest
    duties*: seq[ProposerTask]

  BeaconNodeRole* {.pure.} = enum
    Duties,
    AttestationData, AttestationPublish,
    AggregatedData, AggregatedPublish,
    BlockProposalData, BlockProposalPublish,
    SyncCommitteeData, SyncCommitteePublish,
    NoTimeCheck

  RestBeaconNodeFeature* {.pure.} = enum
    NoNimbusExtensions ## BN does not support Nimbus Extensions.

  TimeOffset* = object
    value: int64

  BeaconNodeServer* = object
    client*: RestClientRef
    uri*: Uri
    endpoint*: string
    config*: VCRuntimeConfig
    ident*: Opt[string]
    genesis*: Opt[RestGenesis]
    syncInfo*: Opt[RestSyncInfo]
    status*: RestBeaconNodeStatus
    features*: set[RestBeaconNodeFeature]
    roles*: set[BeaconNodeRole]
    logIdent*: string
    index*: int
    timeOffset*: Opt[TimeOffset]

  EpochSelectionProof* = object
    signatures*: array[SLOTS_PER_EPOCH.int, Opt[ValidatorSig]]
    sync_committee_index*: IndexInSyncCommittee

  SyncCommitteeSelectionProof* = seq[EpochSelectionProof]

  EpochDuties* = object
    duties*: Table[Epoch, DutyAndProof]

  SyncPeriodDuties* = object
    duties*: Table[SyncCommitteePeriod, SyncCommitteeDuty]

  SyncCommitteeProofs* = object
    proofs*: Table[ValidatorPubKey, SyncCommitteeSelectionProof]

  RestBeaconNodeStatus* {.pure.} = enum
    Invalid, ## BN address is invalid.
    Noname, ## BN address could not be resolved yet.
    Offline, ## BN is offline.
    Online, ## BN is online, passed checkOnline() check.
    Incompatible, ## BN configuration is NOT compatible with VC.
    Compatible, ## BN configuration is compatible with VC configuration.
    NotSynced, ## BN is not in sync.
    OptSynced, ## BN is optimistically synced (EL is not in sync).
    Synced, ## BN and EL are synced.
    UnexpectedCode, ## BN sends unexpected/incorrect HTTP status code.
    UnexpectedResponse, ## BN sends unexpected/incorrect response.
    BrokenClock, ## BN wall clock is broken or has a significant offset.
    InternalError ## BN reports internal error.

  BeaconNodesCounters* = object
    data*: array[int(high(RestBeaconNodeStatus)) + 1, int]

  BeaconNodeServerRef* = ref BeaconNodeServer

  AttesterMap* = Table[ValidatorPubKey, EpochDuties]
  SyncCommitteeDutiesMap* = Table[ValidatorPubKey, SyncPeriodDuties]
  ProposerMap* = Table[Epoch, ProposedData]
  SyncCommitteeProofsMap* = Table[Epoch, SyncCommitteeProofs]

  DoppelgangerStatus* {.pure.} = enum
    None, Checking, Passed

  DoppelgangerAttempt* {.pure.} = enum
    None, Failure, SuccessTrue, SuccessFalse

  BlockWaiter* = object
    future*: Future[seq[Eth2Digest]]
    count*: int

  BlockDataItem* = object
    blocks: seq[Eth2Digest]
    waiters*: seq[BlockWaiter]

  ValidatorRuntimeConfig* = object
    altairEpoch*: Opt[Epoch]

  ValidatorClient* = object
    config*: ValidatorClientConf
    runtimeConfig*: ValidatorRuntimeConfig
    metricsServer*: Opt[MetricsHttpServerRef]
    graffitiBytes*: GraffitiBytes
    beaconNodes*: seq[BeaconNodeServerRef]
    fallbackService*: FallbackServiceRef
    forkService*: ForkServiceRef
    dutiesService*: DutiesServiceRef
    attestationService*: AttestationServiceRef
    blockService*: BlockServiceRef
    syncCommitteeService*: SyncCommitteeServiceRef
    doppelgangerService*: DoppelgangerServiceRef
    runSlotLoopFut*: Future[void]
    runKeystoreCachePruningLoopFut*: Future[void]
    sigintHandleFut*: Future[void]
    sigtermHandleFut*: Future[void]
    keymanagerHost*: ref KeymanagerHost
    keymanagerServer*: RestServerRef
    keystoreCache*: KeystoreCacheRef
    beaconClock*: BeaconClock
    attachedValidators*: ref ValidatorPool
    forks*: seq[Fork]
    preGenesisEvent*: AsyncEvent
    genesisEvent*: AsyncEvent
    forksAvailable*: AsyncEvent
    nodesAvailable*: AsyncEvent
    indicesAvailable*: AsyncEvent
    doppelExit*: AsyncEvent
    attesters*: AttesterMap
    proposers*: ProposerMap
    syncCommitteeDuties*: SyncCommitteeDutiesMap
    syncCommitteeProofs*: SyncCommitteeProofsMap
    beaconGenesis*: RestGenesis
    proposerTasks*: Table[Slot, seq[ProposerTask]]
    dynamicFeeRecipientsStore*: ref DynamicFeeRecipientsStore
    validatorsRegCache*: Table[ValidatorPubKey, SignedValidatorRegistrationV1]
    blocksSeen*: Table[Slot, BlockDataItem]
    rootsSeen*: Table[Eth2Digest, Slot]
    processingDelay*: Opt[Duration]
    rng*: ref HmacDrbgContext

  ApiStrategyKind* {.pure.} = enum
    Priority, Best, First

  ApiFailure* {.pure.} = enum
    Communication, Invalid, NotFound, OptSynced, NotSynced, Internal,
    UnexpectedCode, UnexpectedResponse, NoError

  ApiNodeFailure* = object
    node*: BeaconNodeServerRef
    request*: string
    strategy*: Opt[ApiStrategyKind]
    failure*: ApiFailure
    status*: Opt[int]
    reason*: string

  ValidatorClientRef* = ref ValidatorClient

  ValidatorClientError* = object of CatchableError
  ValidatorApiError* = object of ValidatorClientError
    data*: seq[ApiNodeFailure]

  FillSignaturesResult* = object
    signaturesRequested*: int
    signaturesReceived*: int

  AttestationSlotRequest* = object
    validator*: AttachedValidator
    fork*: Fork
    slot*: Slot

  SyncCommitteeSlotRequest* = object
    validator*: AttachedValidator
    fork*: Fork
    slot*: Slot
    sync_committee_index*: IndexInSyncCommittee
    duty*: SyncCommitteeDuty

const
  DefaultDutyAndProof* = DutyAndProof(epoch: FAR_FUTURE_EPOCH)
  DefaultSyncCommitteeDuty* = SyncCommitteeDuty()
  SlotDuration* = int64(SECONDS_PER_SLOT).seconds
  OneThirdDuration* = int64(SECONDS_PER_SLOT).seconds div INTERVALS_PER_SLOT
  AllBeaconNodeRoles* = {
    BeaconNodeRole.Duties,
    BeaconNodeRole.AttestationData,
    BeaconNodeRole.AttestationPublish,
    BeaconNodeRole.AggregatedData,
    BeaconNodeRole.AggregatedPublish,
    BeaconNodeRole.BlockProposalData,
    BeaconNodeRole.BlockProposalPublish,
    BeaconNodeRole.SyncCommitteeData,
    BeaconNodeRole.SyncCommitteePublish
  }
    ## `AllBeaconNodeRoles` does not include `BeaconNodeRole.NoTimeCheck`,
    ## because time checks are enabled by default.

  AllBeaconNodeStatuses* = {
    RestBeaconNodeStatus.Invalid,
    RestBeaconNodeStatus.Noname,
    RestBeaconNodeStatus.Offline,
    RestBeaconNodeStatus.Online,
    RestBeaconNodeStatus.Incompatible,
    RestBeaconNodeStatus.Compatible,
    RestBeaconNodeStatus.NotSynced,
    RestBeaconNodeStatus.OptSynced,
    RestBeaconNodeStatus.Synced,
    RestBeaconNodeStatus.UnexpectedCode,
    RestBeaconNodeStatus.UnexpectedResponse,
    RestBeaconNodeStatus.BrokenClock,
    RestBeaconNodeStatus.InternalError
  }

  ResolvedBeaconNodeStatuses* = {
    RestBeaconNodeStatus.Offline,
    RestBeaconNodeStatus.Online,
    RestBeaconNodeStatus.Incompatible,
    RestBeaconNodeStatus.Compatible,
    RestBeaconNodeStatus.NotSynced,
    RestBeaconNodeStatus.OptSynced,
    RestBeaconNodeStatus.Synced,
    RestBeaconNodeStatus.UnexpectedCode,
    RestBeaconNodeStatus.UnexpectedResponse,
    RestBeaconNodeStatus.BrokenClock,
    RestBeaconNodeStatus.InternalError
  }
proc `$`*(to: TimeOffset): string =
|
|
|
|
if to.value < 0:
|
|
|
|
"-" & $chronos.nanoseconds(-to.value)
|
|
|
|
else:
|
|
|
|
$chronos.nanoseconds(to.value)
|
|
|
|
|
|
|
|
chronicles.formatIt(TimeOffset):
|
|
|
|
$it
|
|
|
|
|
|
|
|
chronicles.formatIt(Opt[TimeOffset]):
|
|
|
|
if it.isSome(): $(it.get()) else: "<unknown>"
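
# Example (illustrative only, not part of the original module): negative
# offsets are rendered with a leading minus sign, so a `TimeOffset` holding
# -1_500_000 ns logs as the chronos duration string for 1_500_000 ns prefixed
# with "-", and an unset `Opt[TimeOffset]` logs as "<unknown>".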

proc `$`*(roles: set[BeaconNodeRole]): string =
  if card(roles) > 0:
    if roles != AllBeaconNodeRoles:
      var res: seq[string]
      if BeaconNodeRole.Duties in roles:
        res.add("duties")
      if BeaconNodeRole.AttestationData in roles:
        res.add("attestation-data")
      if BeaconNodeRole.AttestationPublish in roles:
        res.add("attestation-publish")
      if BeaconNodeRole.AggregatedData in roles:
        res.add("aggregated-data")
      if BeaconNodeRole.AggregatedPublish in roles:
        res.add("aggregated-publish")
      if BeaconNodeRole.BlockProposalData in roles:
        res.add("block-data")
      if BeaconNodeRole.BlockProposalPublish in roles:
        res.add("block-publish")
      if BeaconNodeRole.SyncCommitteeData in roles:
        res.add("sync-data")
      if BeaconNodeRole.SyncCommitteePublish in roles:
        res.add("sync-publish")
      if BeaconNodeRole.NoTimeCheck in roles:
        res.add("no-timecheck")
      res.join(",")
    else:
      "{all}"
  else:
    "{}"

proc `$`*(status: RestBeaconNodeStatus): string =
  case status
  of RestBeaconNodeStatus.Invalid: "invalid-address"
  of RestBeaconNodeStatus.Noname: "dns-error"
  of RestBeaconNodeStatus.Offline: "offline"
  of RestBeaconNodeStatus.Online: "online"
  of RestBeaconNodeStatus.Incompatible: "incompatible"
  of RestBeaconNodeStatus.Compatible: "compatible"
  of RestBeaconNodeStatus.NotSynced: "bn-unsynced"
  of RestBeaconNodeStatus.OptSynced: "el-unsynced"
  of RestBeaconNodeStatus.Synced: "synced"
  of RestBeaconNodeStatus.UnexpectedCode: "unexpected code"
  of RestBeaconNodeStatus.UnexpectedResponse: "unexpected data"
  of RestBeaconNodeStatus.InternalError: "internal error"
  of RestBeaconNodeStatus.BrokenClock: "broken clock"

proc `$`*(failure: ApiFailure): string =
  case failure
  of ApiFailure.Communication: "communication"
  of ApiFailure.Invalid: "invalid-request"
  of ApiFailure.NotFound: "not-found"
  of ApiFailure.NotSynced: "not-synced"
  of ApiFailure.OptSynced: "opt-synced"
  of ApiFailure.Internal: "internal-issue"
  of ApiFailure.UnexpectedCode: "unexpected-code"
  of ApiFailure.UnexpectedResponse: "unexpected-data"
  of ApiFailure.NoError: "status-update"

proc getNodeCounts*(vc: ValidatorClientRef): BeaconNodesCounters =
  var res = BeaconNodesCounters()
  for node in vc.beaconNodes: inc(res.data[int(node.status)])
  res
proc hash*(f: ApiNodeFailure): Hash =
  hash(f.failure)

proc toString*(strategy: ApiStrategyKind): string =
  case strategy
  of ApiStrategyKind.First:
    "first"
  of ApiStrategyKind.Best:
    "best"
  of ApiStrategyKind.Priority:
    "priority"

func getFailureReason*(failure: ApiNodeFailure): string =
  let status =
    if failure.status.isSome():
      Base10.toString(uint32(failure.status.get()))
    else:
      "n/a"
  let request =
    if failure.strategy.isSome():
      failure.request & "(" & failure.strategy.get().toString() & ")"
    else:
      failure.request & "()"
  [failure.reason, status, request, $failure.failure].join(";")
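
# Example (illustrative, with a hypothetical request name): a timeout while
# querying duties with the `first` strategy and no HTTP status produces a
# string of the form "request timed out;n/a;getDutiesRequest(first);communication".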

proc getFailureReason*(exc: ref ValidatorApiError): string =
  let
    errors = exc[].data
    errorsCount = len(errors)

  if errorsCount > 1:
    let distinctErrors =
      block:
        var res: seq[ApiNodeFailure]
        for item in errors.toHashSet().items():
          res.add(item)
        res
    if len(distinctErrors) > 1:
      # If there are many distinct errors, only the failure kinds are
      # reported; the full reasons can be obtained from previously emitted
      # log statements.
      "[" & distinctErrors.mapIt($it.failure).join(",") & "]"
    else:
      getFailureReason(distinctErrors[0])
  elif errorsCount == 1:
    getFailureReason(errors[0])
  else:
    exc.msg
proc shortLog*(roles: set[BeaconNodeRole]): string =
  var r = "AGBSDT"
  if BeaconNodeRole.AttestationData in roles:
    if BeaconNodeRole.AttestationPublish in roles: r[0] = 'A' else: r[0] = 'a'
  else:
    if BeaconNodeRole.AttestationPublish in roles: r[0] = '+' else: r[0] = '-'
  if BeaconNodeRole.AggregatedData in roles:
    if BeaconNodeRole.AggregatedPublish in roles: r[1] = 'G' else: r[1] = 'g'
  else:
    if BeaconNodeRole.AggregatedPublish in roles: r[1] = '+' else: r[1] = '-'
  if BeaconNodeRole.BlockProposalData in roles:
    if BeaconNodeRole.BlockProposalPublish in roles: r[2] = 'B' else: r[2] = 'b'
  else:
    if BeaconNodeRole.BlockProposalPublish in roles: r[2] = '+' else: r[2] = '-'
  if BeaconNodeRole.SyncCommitteeData in roles:
    if BeaconNodeRole.SyncCommitteePublish in roles:
      r[3] = 'S'
    else:
      r[3] = 's'
  else:
    if BeaconNodeRole.SyncCommitteePublish in roles:
      r[3] = '+'
    else:
      r[3] = '-'
  if BeaconNodeRole.Duties in roles: r[4] = 'D' else: r[4] = '-'
  if BeaconNodeRole.NoTimeCheck notin roles: r[5] = 'T' else: r[5] = '-'
  r
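
# Example (illustrative only): a node carrying every role logs as "AGBSDT";
# lowercase marks data-only roles, '+' marks publish-only, '-' marks absent,
# and the trailing 'T' means time checks are enabled (no NoTimeCheck role).
# An empty role set therefore logs as "-----T".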

proc `$`*(bn: BeaconNodeServerRef): string =
  if bn.ident.isSome():
    bn.logIdent & "[" & bn.ident.get() & "]"
  else:
    bn.logIdent

proc validatorLog*(key: ValidatorPubKey,
                   index: ValidatorIndex): string =
  var res = shortLog(key)
  res.add('@')
  res.add(Base10.toString(uint64(index)))
  res

proc validatorLog*(validator: AttachedValidator): string =
  var res = shortLog(validator)
  res.add('@')
  if validator.index.isSome():
    res.add(Base10.toString(uint64(validator.index.get())))
  else:
    res.add("<missing>")
  res

chronicles.expandIt(BeaconNodeServerRef):
  node = $it
  node_index = it.index
  node_roles = shortLog(it.roles)

chronicles.expandIt(RestAttesterDuty):
  pubkey = shortLog(it.pubkey)
  slot = it.slot
  validator_index = it.validator_index
  committee_index = it.committee_index
  committee_length = it.committee_length
  committees_at_slot = it.committees_at_slot
  validator_committee_index = it.validator_committee_index

chronicles.expandIt(SyncCommitteeDuty):
  pubkey = shortLog(it.pubkey)
  validator_index = it.validator_index
  validator_sync_committee_indices = it.validator_sync_committee_indices
proc equals*(info: VCRuntimeConfig, name: string, check: uint64): bool =
  let numstr = info.getOrDefault(name, "missing")
  if numstr == "missing": return false
  let value = Base10.decode(uint64, numstr).valueOr:
    return false
  value == check

proc equals*(info: VCRuntimeConfig, name: string, check: DomainType): bool =
  let domstr = info.getOrDefault(name, "missing")
  if domstr == "missing": return false
  let value =
    try:
      var dres: DomainType
      hexToByteArray(domstr, distinctBase(dres))
      dres
    except ValueError:
      return false
  value == check

proc equals*(info: VCRuntimeConfig, name: string, check: Epoch): bool =
  info.equals(name, uint64(check))

proc getOrDefault*(info: VCRuntimeConfig, name: string,
                   default: uint64): uint64 =
  let numstr = info.getOrDefault(name, "missing")
  if numstr == "missing": return default
  Base10.decode(uint64, numstr).valueOr:
    return default

proc getOrDefault*(info: VCRuntimeConfig, name: string, default: Epoch): Epoch =
  Epoch(info.getOrDefault(name, uint64(default)))
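
# Example (illustrative, hypothetical table contents): with an entry
# SLOTS_PER_EPOCH = "32" in the runtime config,
# `info.equals("SLOTS_PER_EPOCH", 32'u64)` is true, while a missing or
# non-numeric entry makes `equals` return false and `getOrDefault` fall back
# to the supplied default.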

proc checkConfig*(c: VCRuntimeConfig): bool =
  c.equals("MAX_VALIDATORS_PER_COMMITTEE", MAX_VALIDATORS_PER_COMMITTEE) and
  c.equals("SLOTS_PER_EPOCH", SLOTS_PER_EPOCH) and
  c.equals("SECONDS_PER_SLOT", SECONDS_PER_SLOT) and
  c.equals("EPOCHS_PER_ETH1_VOTING_PERIOD", EPOCHS_PER_ETH1_VOTING_PERIOD) and
  c.equals("SLOTS_PER_HISTORICAL_ROOT", SLOTS_PER_HISTORICAL_ROOT) and
  c.equals("EPOCHS_PER_HISTORICAL_VECTOR", EPOCHS_PER_HISTORICAL_VECTOR) and
  c.equals("EPOCHS_PER_SLASHINGS_VECTOR", EPOCHS_PER_SLASHINGS_VECTOR) and
  c.equals("HISTORICAL_ROOTS_LIMIT", HISTORICAL_ROOTS_LIMIT) and
  c.equals("VALIDATOR_REGISTRY_LIMIT", VALIDATOR_REGISTRY_LIMIT) and
  c.equals("MAX_PROPOSER_SLASHINGS", MAX_PROPOSER_SLASHINGS) and
  c.equals("MAX_ATTESTER_SLASHINGS", MAX_ATTESTER_SLASHINGS) and
  c.equals("MAX_ATTESTATIONS", MAX_ATTESTATIONS) and
  c.equals("MAX_DEPOSITS", MAX_DEPOSITS) and
  c.equals("MAX_VOLUNTARY_EXITS", MAX_VOLUNTARY_EXITS) and
  c.equals("DOMAIN_BEACON_PROPOSER", DOMAIN_BEACON_PROPOSER) and
  c.equals("DOMAIN_BEACON_ATTESTER", DOMAIN_BEACON_ATTESTER) and
  c.equals("DOMAIN_RANDAO", DOMAIN_RANDAO) and
  c.equals("DOMAIN_DEPOSIT", DOMAIN_DEPOSIT) and
  c.equals("DOMAIN_VOLUNTARY_EXIT", DOMAIN_VOLUNTARY_EXIT) and
  c.equals("DOMAIN_SELECTION_PROOF", DOMAIN_SELECTION_PROOF) and
  c.equals("DOMAIN_AGGREGATE_AND_PROOF", DOMAIN_AGGREGATE_AND_PROOF) and
  c.hasKey("ALTAIR_FORK_VERSION") and c.hasKey("ALTAIR_FORK_EPOCH") and
  not(c.equals("ALTAIR_FORK_EPOCH", FAR_FUTURE_EPOCH))
proc updateStatus*(node: BeaconNodeServerRef,
                   status: RestBeaconNodeStatus,
                   failure: ApiNodeFailure) =
  logScope:
    node = node

  case status
  of RestBeaconNodeStatus.Invalid:
    if node.status != status:
      warn "Beacon node could not be used"
      node.status = status
  of RestBeaconNodeStatus.Noname:
    if node.status != status:
      warn "Beacon node address cannot be resolved"
      node.status = status
  of RestBeaconNodeStatus.Offline:
    if node.status != status:
      if node.status in {RestBeaconNodeStatus.Invalid,
                         RestBeaconNodeStatus.Noname}:
        notice "Beacon node address has been resolved"
        node.status = status
      else:
        warn "Beacon node down", reason = failure.getFailureReason()
        node.status = status
  of RestBeaconNodeStatus.Online:
    if node.status != status:
      let version = if node.ident.isSome(): node.ident.get() else: "<missing>"
      notice "Beacon node is online", agent_version = version
      node.status = status
  of RestBeaconNodeStatus.Incompatible:
    if node.status != status:
      warn "Beacon node has incompatible configuration",
           reason = failure.getFailureReason()
      node.status = status
  of RestBeaconNodeStatus.Compatible:
    if node.status != status:
      notice "Beacon node is compatible"
      node.status = status
  of RestBeaconNodeStatus.NotSynced:
    if node.status notin {RestBeaconNodeStatus.NotSynced,
                          RestBeaconNodeStatus.OptSynced}:
      doAssert(node.syncInfo.isSome())
      let si = node.syncInfo.get()
      warn "Beacon node not in sync", reason = failure.getFailureReason(),
           last_head_slot = si.head_slot,
           last_sync_distance = si.sync_distance,
           last_optimistic = si.is_optimistic.get(false)
      node.status = status
  of RestBeaconNodeStatus.OptSynced:
    if node.status != status:
      doAssert(node.syncInfo.isSome())
      let si = node.syncInfo.get()
      notice "Beacon node optimistically synced (Execution client not in sync)",
             reason = failure.getFailureReason(),
             last_head_slot = si.head_slot,
             last_sync_distance = si.sync_distance,
             last_optimistic = si.is_optimistic.get(false)
      node.status = status
  of RestBeaconNodeStatus.Synced:
    if node.status != status:
      doAssert(node.syncInfo.isSome())
      let si = node.syncInfo.get()
      notice "Beacon node is in sync",
             head_slot = si.head_slot,
             sync_distance = si.sync_distance,
             is_optimistic = si.is_optimistic.get(false)
      node.status = status
  of RestBeaconNodeStatus.UnexpectedResponse:
    if node.status != status:
      error "Beacon node provides unexpected response",
            reason = failure.getFailureReason()
      node.status = status
  of RestBeaconNodeStatus.UnexpectedCode:
    if node.status != status:
      error "Beacon node provides unexpected status code",
            reason = failure.getFailureReason()
      node.status = status
  of RestBeaconNodeStatus.InternalError:
    if node.status != status:
      warn "Beacon node reports internal error",
           reason = failure.getFailureReason()
      node.status = status
  of RestBeaconNodeStatus.BrokenClock:
    if node.status != status:
      warn "Beacon node's clock is out of order (beacon node is unusable)"
      node.status = status

proc stop*(csr: ClientServiceRef) {.async.} =
  debug "Stopping service", service = csr.name
  if csr.state == ServiceState.Running:
    csr.state = ServiceState.Closing
    if not(csr.lifeFut.finished()):
      await csr.lifeFut.cancelAndWait()
    csr.state = ServiceState.Closed
    debug "Service stopped", service = csr.name

proc isDefault*(dap: DutyAndProof): bool =
  dap.epoch == FAR_FUTURE_EPOCH

proc isDefault*(prd: ProposedData): bool =
  prd.epoch == FAR_FUTURE_EPOCH

proc isDefault*(scd: SyncCommitteeDuty): bool =
  len(scd.validator_sync_committee_indices) == 0

proc parseRoles*(data: string): Result[set[BeaconNodeRole], cstring] =
  var res: set[BeaconNodeRole]
  if len(data) == 0:
    return ok(AllBeaconNodeRoles)
  let parts = data.split("roles=")
  if (len(parts) != 2) or (len(parts[0]) != 0):
    return err("Invalid beacon node roles string")
  let sroles = parts[1].split(",")
  for srole in sroles:
    case toLower(strip(srole))
    of "":
      discard
    of "all":
      res.incl(AllBeaconNodeRoles)
    of "attestation":
      res.incl({BeaconNodeRole.AttestationData,
                BeaconNodeRole.AttestationPublish})
    of "block":
      res.incl({BeaconNodeRole.BlockProposalData,
                BeaconNodeRole.BlockProposalPublish})
    of "aggregated":
      res.incl({BeaconNodeRole.AggregatedData,
                BeaconNodeRole.AggregatedPublish})
    of "sync":
      res.incl({BeaconNodeRole.SyncCommitteeData,
                BeaconNodeRole.SyncCommitteePublish})
    of "attestation-data":
      res.incl(BeaconNodeRole.AttestationData)
    of "attestation-publish":
      res.incl(BeaconNodeRole.AttestationPublish)
    of "aggregated-data":
      res.incl(BeaconNodeRole.AggregatedData)
    of "aggregated-publish":
      res.incl(BeaconNodeRole.AggregatedPublish)
    of "block-data":
      res.incl(BeaconNodeRole.BlockProposalData)
    of "block-publish":
      res.incl(BeaconNodeRole.BlockProposalPublish)
    of "sync-data":
      res.incl(BeaconNodeRole.SyncCommitteeData)
    of "sync-publish":
      res.incl(BeaconNodeRole.SyncCommitteePublish)
    of "duties":
      res.incl(BeaconNodeRole.Duties)
    of "no-timecheck":
      res.incl(BeaconNodeRole.NoTimeCheck)
    else:
      return err("Invalid beacon node role string found")
  if res == {BeaconNodeRole.NoTimeCheck}:
    res.incl(AllBeaconNodeRoles)
  ok(res)
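
# Example (illustrative only): the role string comes from the URL anchor of a
# beacon node endpoint, e.g. "http://127.0.0.1:5052#roles=attestation,duties".
# parseRoles("roles=attestation,duties") yields the attestation data/publish
# roles plus Duties; parseRoles("") yields AllBeaconNodeRoles; and
# parseRoles("roles=no-timecheck") yields all roles with time checks disabled.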

proc normalizeUri*(r: Uri): Result[Uri, cstring] =
  const
    MissingPortNumber = cstring("Missing port number")
    MissingHostname = cstring("Missing hostname")
    UnknownScheme = cstring("Unknown scheme value")

  if ($r).toLowerAscii().startsWith("http://") or
     ($r).toLowerAscii().startsWith("https://"):
    # When a scheme is provided, only a hostname is required
    if len(r.hostname) == 0: return err(MissingHostname)
    return ok(r)

  # Check for unknown scheme
  if ($r).contains("://"):
    return err(UnknownScheme)

  # Add the default scheme (http)
  let normalized =
    if ($r).startsWith("//"):
      parseUri("http:" & $r)
    else:
      parseUri("http://" & $r)

  if len(normalized.hostname) == 0:
    return err(MissingHostname)

  if len(normalized.port) == 0:
    return err(MissingPortNumber)

  ok(normalized)
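
# Example (illustrative only): "localhost:5052" normalizes to
# "http://localhost:5052" and "//127.0.0.1:5052" to "http://127.0.0.1:5052",
# while "localhost" fails with "Missing port number" and "ftp://host:21"
# fails with "Unknown scheme value". A port is only mandatory when no
# explicit http(s) scheme is given.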

proc initClient*(uri: Uri): Result[RestClientRef, HttpAddressErrorType] =
  let
    flags = {RestClientFlag.CommaSeparatedArray}
    socketFlags = {SocketFlags.TcpNoDelay}
    address = ? getHttpAddress(uri)
    client = RestClientRef.new(address, flags = flags,
                               socketFlags = socketFlags)
  ok(client)

proc init*(t: typedesc[BeaconNodeServerRef], remote: Uri,
           index: int): Result[BeaconNodeServerRef, string] =
  doAssert(index >= 0)
  let
    remoteUri = normalizeUri(remote).valueOr:
      return err($error)
    roles = parseRoles(remoteUri.anchor).valueOr:
      return err($error)
    server =
      block:
        let res = initClient(remoteUri)
        if res.isOk():
          BeaconNodeServerRef(
            client: res.get(), endpoint: $remoteUri, index: index,
            roles: roles, logIdent: $(res.get().address.getUri()),
            uri: remoteUri, status: RestBeaconNodeStatus.Offline)
        else:
          if res.error.isCriticalError():
            return err(res.error.toString())
          BeaconNodeServerRef(
            client: nil, endpoint: $remoteUri, index: index,
            roles: roles, logIdent: $remoteUri, uri: remoteUri,
            status: RestBeaconNodeStatus.Noname)
  ok(server)

proc getMissingRoles*(n: openArray[BeaconNodeServerRef]): set[BeaconNodeRole] =
  var res: set[BeaconNodeRole] = AllBeaconNodeRoles
  for node in n.items():
    res.excl(node.roles)
  res

proc init*(t: typedesc[DutyAndProof], epoch: Epoch, dependentRoot: Eth2Digest,
           duty: RestAttesterDuty,
           slotSig: Opt[ValidatorSig]): DutyAndProof =
  DutyAndProof(epoch: epoch, dependentRoot: dependentRoot, data: duty,
               slotSig: slotSig)

proc init*(t: typedesc[ProposedData], epoch: Epoch, dependentRoot: Eth2Digest,
           data: openArray[ProposerTask]): ProposedData =
  ProposedData(epoch: epoch, dependentRoot: dependentRoot, duties: @data)

proc getCurrentSlot*(vc: ValidatorClientRef): Opt[Slot] =
  let res = vc.beaconClock.now().toSlot()
  if res.afterGenesis:
    Opt.some(res.slot)
  else:
    Opt.none(Slot)

proc getAttesterDutiesForSlot*(vc: ValidatorClientRef,
                               slot: Slot): seq[DutyAndProof] =
  ## Returns all `DutyAndProof` for the given `slot`.
  var res: seq[DutyAndProof]
  let epoch = slot.epoch()
  for key, item in mpairs(vc.attesters):
    item.duties.withValue(epoch, duty):
      if duty[].data.slot == slot:
        res.add(duty[])
  res

proc getSyncCommitteeDutiesForSlot*(vc: ValidatorClientRef,
                                    slot: Slot): seq[SyncCommitteeDuty] =
  ## Returns all `SyncCommitteeDuty` for the given `slot`.
  var res: seq[SyncCommitteeDuty]
  let period = slot.sync_committee_period()
  for key, item in mpairs(vc.syncCommitteeDuties):
    item.duties.withValue(period, duty):
      res.add(duty[])
  res

proc getDurationToNextAttestation*(vc: ValidatorClientRef,
                                   slot: Slot): string =
  var minSlot = FAR_FUTURE_SLOT
  let currentEpoch = slot.epoch()
  for epoch in [currentEpoch, currentEpoch + 1'u64]:
    for key, item in vc.attesters:
      let duty = item.duties.getOrDefault(epoch, DefaultDutyAndProof)
      if not(duty.isDefault()):
        if (duty.data.slot < minSlot) and (duty.data.slot >= slot):
          minSlot = duty.data.slot
    if minSlot != FAR_FUTURE_SLOT:
      break

  if minSlot == FAR_FUTURE_SLOT:
    "<unknown>"
  else:
    $(minSlot.attestation_deadline() - slot.start_beacon_time())

proc getDurationToNextBlock*(vc: ValidatorClientRef, slot: Slot): string =
  var minSlot = FAR_FUTURE_SLOT
  let currentEpoch = slot.epoch()
  for epoch in [currentEpoch, currentEpoch + 1'u64]:
    let data = vc.proposers.getOrDefault(epoch)
    if not(data.isDefault()):
      for item in data.duties:
        if item.duty.pubkey in vc.attachedValidators[]:
          if (item.duty.slot < minSlot) and (item.duty.slot >= slot):
            minSlot = item.duty.slot
    if minSlot != FAR_FUTURE_SLOT:
      break
  if minSlot == FAR_FUTURE_SLOT:
    "<unknown>"
  else:
    $(minSlot.block_deadline() - slot.start_beacon_time())

iterator attesterDutiesForEpoch*(vc: ValidatorClientRef,
                                 epoch: Epoch): DutyAndProof =
  for key, item in vc.attesters:
    let epochDuties = item.duties.getOrDefault(epoch)
    if not(isDefault(epochDuties)):
      yield epochDuties

iterator syncDutiesForPeriod*(vc: ValidatorClientRef,
                              period: SyncCommitteePeriod): SyncCommitteeDuty =
  for key, item in vc.syncCommitteeDuties:
    let periodDuties = item.duties.getOrDefault(period)
    if not(isDefault(periodDuties)):
      yield periodDuties

proc syncMembersSubscriptionInfoForPeriod*(
       vc: ValidatorClientRef,
       period: SyncCommitteePeriod
     ): seq[SyncCommitteeSubscriptionInfo] =
  var res: seq[SyncCommitteeSubscriptionInfo]
  for key, item in mpairs(vc.syncCommitteeDuties):
    var cur: SyncCommitteeSubscriptionInfo
    var initialized = false

    item.duties.withValue(period, periodDuties):
      if not(initialized):
        cur.validator_index = periodDuties[].validator_index
        initialized = true
      cur.validator_sync_committee_indices.add(
        periodDuties[].validator_sync_committee_indices)

    if initialized:
      res.add(cur)
  res

proc getDelay*(vc: ValidatorClientRef, deadline: BeaconTime): TimeDiff =
  vc.beaconClock.now() - deadline
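
# Example (illustrative only): a positive `TimeDiff` means the wall clock is
# already past `deadline`, i.e. the action is late; calling getDelay() 150 ms
# after an attestation deadline yields roughly 150_000_000 ns.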

proc getValidatorForDuties*(vc: ValidatorClientRef,
                            key: ValidatorPubKey, slot: Slot,
                            slashingSafe = false): Opt[AttachedValidator] =
  vc.attachedValidators[].getValidatorForDuties(key, slot, slashingSafe)

proc forkAtEpoch*(vc: ValidatorClientRef, epoch: Epoch): Fork =
  # If the fork schedule is present, it MUST not be empty.
  doAssert(len(vc.forks) > 0)
  var res: Fork
  for item in vc.forks:
    if item.epoch <= epoch:
      res = item
    else:
      break
  res
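
# Example (illustrative, hypothetical schedule): with forks at epochs 0
# (phase0) and 74240 (altair) stored in ascending order,
# forkAtEpoch(vc, Epoch(80000)) returns the altair `Fork`, because iteration
# stops at the first entry whose epoch exceeds the requested one.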

proc getSubcommitteeIndex*(index: IndexInSyncCommittee): SyncSubcommitteeIndex =
  SyncSubcommitteeIndex(uint16(index) div SYNC_SUBCOMMITTEE_SIZE)
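
# Worked example (illustrative only): with mainnet presets,
# SYNC_SUBCOMMITTEE_SIZE = SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT
# = 512 div 4 = 128, so sync committee index 130 maps to subcommittee
# 130 div 128 = 1.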

proc currentSlot*(vc: ValidatorClientRef): Slot =
  vc.beaconClock.now().slotOrZero()

proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
  let
    withdrawalAddress =
      if vc.keymanagerHost.isNil:
        Opt.none Eth1Address
      else:
        vc.keymanagerHost[].getValidatorWithdrawalAddress(keystore.pubkey)
    perValidatorDefaultFeeRecipient = getPerValidatorDefaultFeeRecipient(
      vc.config.defaultFeeRecipient, withdrawalAddress)
    feeRecipient = vc.config.validatorsDir.getSuggestedFeeRecipient(
      keystore.pubkey, perValidatorDefaultFeeRecipient).valueOr(
        perValidatorDefaultFeeRecipient)
    gasLimit = vc.config.validatorsDir.getSuggestedGasLimit(
      keystore.pubkey, vc.config.suggestedGasLimit).valueOr(
        vc.config.suggestedGasLimit)

  discard vc.attachedValidators[].addValidator(keystore, feeRecipient, gasLimit)

proc removeValidator*(vc: ValidatorClientRef,
                      pubkey: ValidatorPubKey) {.async.} =
  let validator = vc.attachedValidators[].getValidator(pubkey).valueOr:
    return
  # Remove validator from ValidatorPool.
  vc.attachedValidators[].removeValidator(pubkey)

  case validator.kind
  of ValidatorKind.Local:
    discard
  of ValidatorKind.Remote:
    # We must close all the REST clients running for the remote validator.
    let pending =
      block:
        var res: seq[Future[void]]
        for item in validator.clients:
          res.add(item[0].closeWait())
        res
    await allFutures(pending)

proc getFeeRecipient*(vc: ValidatorClientRef, pubkey: ValidatorPubKey,
                      validatorIdx: ValidatorIndex,
                      epoch: Epoch): Opt[Eth1Address] =
  let dynamicRecipient = vc.dynamicFeeRecipientsStore[].getDynamicFeeRecipient(
    validatorIdx, epoch)
  if dynamicRecipient.isSome():
    Opt.some(dynamicRecipient.get())
  else:
    let
      withdrawalAddress =
        if vc.keymanagerHost.isNil:
          Opt.none Eth1Address
        else:
          vc.keymanagerHost[].getValidatorWithdrawalAddress(pubkey)
      perValidatorDefaultFeeRecipient = getPerValidatorDefaultFeeRecipient(
        vc.config.defaultFeeRecipient, withdrawalAddress)
      staticRecipient = getSuggestedFeeRecipient(
        vc.config.validatorsDir, pubkey, perValidatorDefaultFeeRecipient)
    if staticRecipient.isOk():
      Opt.some(staticRecipient.get())
    else:
      Opt.none(Eth1Address)

proc getGasLimit*(vc: ValidatorClientRef,
                  pubkey: ValidatorPubKey): uint64 =
  getSuggestedGasLimit(
    vc.config.validatorsDir, pubkey, vc.config.suggestedGasLimit).valueOr:
      vc.config.suggestedGasLimit

proc prepareProposersList*(vc: ValidatorClientRef,
                           epoch: Epoch): seq[PrepareBeaconProposer] =
  var res: seq[PrepareBeaconProposer]
  for validator in vc.attachedValidators[].items():
    if validator.index.isSome():
      let
        index = validator.index.get()
        feeRecipient = vc.getFeeRecipient(validator.pubkey, index, epoch)
      if feeRecipient.isSome():
        res.add(PrepareBeaconProposer(validator_index: index,
                                      fee_recipient: feeRecipient.get()))
  res

proc isDefault*(reg: SignedValidatorRegistrationV1): bool =
  (reg.message.timestamp == 0'u64) or (reg.message.gas_limit == 0'u64)

proc isExpired*(vc: ValidatorClientRef,
                reg: SignedValidatorRegistrationV1, slot: Slot): bool =
  let
    regTime = fromUnix(int64(reg.message.timestamp))
    regSlot =
      block:
        let res = vc.beaconClock.toSlot(regTime)
        if not(res.afterGenesis):
          # This should not happen, but it can occur after time jumps
          # (the clock could be modified by an admin or by ntpd).
          return false
        uint64(res.slot)

  if regSlot > slot:
    # This should not happen either, but it can if the clock was moved
    # backwards (by an admin or by ntpd).
    false
  else:
    # The registration is expired once at least
    # EPOCHS_BETWEEN_VALIDATOR_REGISTRATION epochs have passed since it was
    # created.
    if (slot - regSlot) div SLOTS_PER_EPOCH >=
       EPOCHS_BETWEEN_VALIDATOR_REGISTRATION:
      true
    else:
      false
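
# Example (illustrative only): with EPOCHS_BETWEEN_VALIDATOR_REGISTRATION = 1
# and SLOTS_PER_EPOCH = 32, a registration signed at slot 100 is considered
# expired from slot 132 onwards ((132 - 100) div 32 >= 1), prompting
# getValidatorRegistration below to sign a fresh one.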

proc getValidatorRegistration(
       vc: ValidatorClientRef,
       validator: AttachedValidator,
       timestamp: Time,
       fork: Fork
     ): Result[PendingValidatorRegistration, RegistrationKind] =
  if validator.index.isNone():
    debug "Validator registration missing validator index",
          validator = shortLog(validator)
    return err(RegistrationKind.MissingIndex)

  let
    vindex = validator.index.get()
    cached = vc.validatorsRegCache.getOrDefault(validator.pubkey)
    currentSlot =
      block:
        let res = vc.beaconClock.toSlot(timestamp)
        if not(res.afterGenesis):
          return err(RegistrationKind.IncorrectTime)
        res.slot

  if cached.isDefault() or vc.isExpired(cached, currentSlot):
    let feeRecipient = vc.getFeeRecipient(validator.pubkey, vindex,
                                          currentSlot.epoch())
    if feeRecipient.isNone():
      debug "Could not get fee recipient for registration data",
            validator = shortLog(validator)
      return err(RegistrationKind.MissingFee)
    let gasLimit = vc.getGasLimit(validator.pubkey)
    var registration =
      SignedValidatorRegistrationV1(
        message: ValidatorRegistrationV1(
          fee_recipient:
            ExecutionAddress(data: distinctBase(feeRecipient.get())),
          gas_limit: gasLimit,
          timestamp: uint64(timestamp.toUnix()),
          pubkey: validator.pubkey
        )
      )

    let sigfut = validator.getBuilderSignature(fork, registration.message)
    if sigfut.finished():
      # Fast path: we were able to create the signature locally.
      if not(sigfut.completed()):
        let exc = sigfut.readError()
        debug "Got unexpected exception while signing validator registration",
              validator = shortLog(validator), error_name = $exc.name,
              error_msg = $exc.msg
        return err(RegistrationKind.ErrorSignature)
      let sigres = sigfut.read()
      if sigres.isErr():
        debug "Failed to get signature for validator registration",
              validator = shortLog(validator), error = sigres.error()
        return err(RegistrationKind.NoSignature)
      registration.signature = sigres.get()
      # Updating cache table with new signed registration data.
      vc.validatorsRegCache[registration.message.pubkey] = registration
      ok(PendingValidatorRegistration(registration: registration, future: nil))
    else:
      # A remote signing service is involved; the cache will be updated later.
      ok(PendingValidatorRegistration(registration: registration,
                                      future: sigfut))
  else:
    # Returning cached result.
    err(RegistrationKind.Cached)

proc prepareRegistrationList*(
       vc: ValidatorClientRef,
       timestamp: Time,
       fork: Fork
     ): Future[seq[SignedValidatorRegistrationV1]] {.async.} =

  var
    messages: seq[SignedValidatorRegistrationV1]
    futures: seq[Future[SignatureResult]]
    registrations: seq[SignedValidatorRegistrationV1]
    total = vc.attachedValidators[].count()
    succeed = 0
    bad = 0
    errors = 0
    indexMissing = 0
    feeMissing = 0
    gasLimit = 0
    cached = 0
    timed = 0

  for validator in vc.attachedValidators[].items():
    let res = vc.getValidatorRegistration(validator, timestamp, fork)
    if res.isOk():
      let preg = res.get()
      if preg.future.isNil():
        registrations.add(preg.registration)
      else:
        messages.add(preg.registration)
        futures.add(preg.future)
    else:
      case res.error()
      of RegistrationKind.Cached: inc(cached)
      of RegistrationKind.IncorrectTime: inc(timed)
      of RegistrationKind.NoSignature: inc(bad)
      of RegistrationKind.ErrorSignature: inc(errors)
      of RegistrationKind.MissingIndex: inc(indexMissing)
      of RegistrationKind.MissingFee: inc(feeMissing)
      of RegistrationKind.MissingGasLimit: inc(gasLimit)

  succeed = len(registrations)

  if len(futures) > 0:
    await allFutures(futures)

  for index, future in futures.pairs():
    if future.completed():
      let sres = future.read()
      if sres.isOk():
        var reg = messages[index]
        reg.signature = sres.get()
        registrations.add(reg)
        # Updating cache table.
        vc.validatorsRegCache[reg.message.pubkey] = reg
        inc(succeed)
      else:
        inc(bad)
    else:
      inc(errors)

  debug "Validator registrations prepared", total = total, succeed = succeed,
        cached = cached, bad = bad, errors = errors,
        index_missing = indexMissing, fee_missing = feeMissing,
        incorrect_time = timed

  return registrations

func init*(t: typedesc[ApiNodeFailure], failure: ApiFailure,
           request: string, strategy: ApiStrategyKind,
           node: BeaconNodeServerRef): ApiNodeFailure =
  ApiNodeFailure(node: node, request: request, strategy: Opt.some(strategy),
                 failure: failure)

func init*(t: typedesc[ApiNodeFailure], failure: ApiFailure,
           request: string, strategy: ApiStrategyKind,
           node: BeaconNodeServerRef, reason: string): ApiNodeFailure =
  ApiNodeFailure(node: node, request: request, strategy: Opt.some(strategy),
                 failure: failure, reason: reason)

func init*(t: typedesc[ApiNodeFailure], failure: ApiFailure,
           request: string, strategy: ApiStrategyKind,
           node: BeaconNodeServerRef, status: int,
           reason: string): ApiNodeFailure =
  ApiNodeFailure(node: node, request: request, strategy: Opt.some(strategy),
                 failure: failure, status: Opt.some(status), reason: reason)

func init*(t: typedesc[ApiNodeFailure], failure: ApiFailure,
           request: string, node: BeaconNodeServerRef, status: int,
           reason: string): ApiNodeFailure =
  ApiNodeFailure(node: node, request: request,
                 failure: failure, status: Opt.some(status), reason: reason)

func init*(t: typedesc[ApiNodeFailure], failure: ApiFailure,
           request: string, node: BeaconNodeServerRef,
           reason: string): ApiNodeFailure =
  ApiNodeFailure(node: node, request: request, failure: failure, reason: reason)
proc checkedWaitForSlot*(vc: ValidatorClientRef, destinationSlot: Slot,
                         offset: TimeDiff,
                         showLogs: bool): Future[Opt[Slot]] {.async.} =
  let
    currentTime = vc.beaconClock.now()
    currentSlot = currentTime.slotOrZero()
    chronosOffset = chronos.nanoseconds(
      if offset.nanoseconds < 0: 0'i64 else: offset.nanoseconds)

  var timeToSlot = (destinationSlot.start_beacon_time() - currentTime) +
                   chronosOffset

  logScope:
    start_time = shortLog(currentTime)
    start_slot = shortLog(currentSlot)
    dest_slot = shortLog(destinationSlot)
    time_to_slot = shortLog(timeToSlot)

  while true:
    await sleepAsync(timeToSlot)

    let
      wallTime = vc.beaconClock.now()
      wallSlot = wallTime.slotOrZero()

    logScope:
      wall_time = shortLog(wallTime)
      wall_slot = shortLog(wallSlot)

    if wallSlot < destinationSlot:
      # While we were sleeping, the system clock changed and time moved
      # backwards!
      if wallSlot + 1 < destinationSlot:
        # This is a critical condition where it's hard to reason about what
        # to do next - we'll call the attention of the user here by shutting
        # down.
        if showLogs:
          fatal "System time adjusted backwards significantly - " &
                "clock may be inaccurate - shutting down"
        return Opt.none(Slot)
      else:
        # Time moved back by a single slot - this could be a minor adjustment,
        # for example when NTP does its thing after not working for a while
        timeToSlot = destinationSlot.start_beacon_time() - wallTime +
                     chronosOffset
        if showLogs:
          warn "System time adjusted backwards, rescheduling slot actions"
        continue

    elif wallSlot > destinationSlot + SLOTS_PER_EPOCH:
      if showLogs:
        warn "Time moved forwards by more than an epoch, skipping ahead"
      return Opt.some(wallSlot)

    elif wallSlot > destinationSlot:
      if showLogs:
        notice "Missed expected slot start, catching up"
      return Opt.some(wallSlot)

    else:
      return Opt.some(destinationSlot)

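# A minimal usage sketch (`vc` and `wallSlot` assumed to exist in the calling
# scope): the `Opt[Slot]` result separates a normal wakeup from a wall clock
# that moved backwards beyond recovery.
#
#   let res = await vc.checkedWaitForSlot(wallSlot + 1, ZeroTimeDiff, true)
#   if res.isNone():
#     return # clock moved backwards significantly; initiate shutdown
#   let slot = res.get() # may be later than requested if time jumped forward
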
proc checkedWaitForNextSlot*(vc: ValidatorClientRef, curSlot: Opt[Slot],
                             offset: TimeDiff,
                             showLogs: bool): Future[Opt[Slot]] =
  let
    currentTime = vc.beaconClock.now()
    currentSlot = curSlot.valueOr: currentTime.slotOrZero()
    nextSlot = currentSlot + 1

  vc.checkedWaitForSlot(nextSlot, offset, showLogs)

proc checkedWaitForNextSlot*(vc: ValidatorClientRef, offset: TimeDiff,
                             showLogs: bool): Future[Opt[Slot]] =
  let
    currentTime = vc.beaconClock.now()
    currentSlot = currentTime.slotOrZero()
    nextSlot = currentSlot + 1

  vc.checkedWaitForSlot(nextSlot, offset, showLogs)

proc expectBlock*(vc: ValidatorClientRef, slot: Slot,
                  confirmations: int = 1): Future[seq[Eth2Digest]] =
  var
    retFuture = newFuture[seq[Eth2Digest]]("expectBlock")
    waiter = BlockWaiter(future: retFuture, count: confirmations)

  proc cancellation(udata: pointer) =
    vc.blocksSeen.withValue(slot, adata):
      adata[].waiters.keepItIf(it.future != retFuture)

  proc scheduleCallbacks(data: var BlockDataItem,
                         waiter: BlockWaiter) =
    data.waiters.add(waiter)
    for mitem in data.waiters.mitems():
      if mitem.count <= len(data.blocks):
        if not(mitem.future.finished()): mitem.future.complete(data.blocks)

  vc.blocksSeen.mgetOrPut(slot, BlockDataItem()).scheduleCallbacks(waiter)
  if not(retFuture.finished()): retFuture.cancelCallback = cancellation
  retFuture

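# Usage sketch (`vc` and `slot` assumed): wait, with an external timeout,
# until the beacon node has reported at least two blocks for `slot`.
#
#   let roots =
#     try:
#       await vc.expectBlock(slot, confirmations = 2).wait(2.seconds)
#     except AsyncTimeoutError:
#       newSeq[Eth2Digest]()
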
proc registerBlock*(vc: ValidatorClientRef, eblck: EventBeaconBlockObject,
                    node: BeaconNodeServerRef) =
  let
    wallTime = vc.beaconClock.now()
    delay = wallTime - eblck.slot.start_beacon_time()

  debug "Block received", slot = eblck.slot,
        block_root = shortLog(eblck.block_root), optimistic = eblck.optimistic,
        node = node, delay = delay

  proc scheduleCallbacks(data: var BlockDataItem,
                         blck: EventBeaconBlockObject) =
    vc.rootsSeen[blck.block_root] = blck.slot
    data.blocks.add(blck.block_root)
    for mitem in data.waiters.mitems():
      # Complete a waiter once enough confirmations have been seen, mirroring
      # the condition used in `expectBlock` above.
      if mitem.count <= len(data.blocks):
        if not(mitem.future.finished()): mitem.future.complete(data.blocks)

  vc.blocksSeen.mgetOrPut(eblck.slot, BlockDataItem()).scheduleCallbacks(eblck)

proc pruneBlocksSeen*(vc: ValidatorClientRef, epoch: Epoch) =
  var blocksSeen: Table[Slot, BlockDataItem]
  for slot, item in vc.blocksSeen.pairs():
    if (slot.epoch() + HISTORICAL_DUTIES_EPOCHS) >= epoch:
      blocksSeen[slot] = item
    else:
      for root in item.blocks: vc.rootsSeen.del(root)
      let blockRoot =
        if len(item.blocks) == 0:
          "<missing>"
        elif len(item.blocks) == 1:
          shortLog(item.blocks[0])
        else:
          "[" & item.blocks.mapIt(shortLog(it)).join(", ") & "]"
      debug "Block data has been pruned", slot = slot, blocks = blockRoot
  vc.blocksSeen = blocksSeen

proc waitForBlock*(
       vc: ValidatorClientRef,
       slot: Slot,
       timediff: TimeDiff,
       confirmations: int = 1
     ) {.async.} =
  ## This procedure waits for a block proposal for ``slot`` to be received
  ## by the beacon node.
  let
    startTime = Moment.now()
    waitTime = (start_beacon_time(slot) + timediff) - vc.beaconClock.now()

  logScope:
    slot = slot
    timediff = timediff
    wait_time = waitTime

  debug "Waiting for block proposal"

  if waitTime.nanoseconds <= 0'i64:
    # We do not have time to wait for the block.
    return

  let blocks =
    try:
      let timeout = nanoseconds(waitTime.nanoseconds)
      await vc.expectBlock(slot, confirmations).wait(timeout)
    except AsyncTimeoutError:
      let dur = Moment.now() - startTime
      debug "Block has not been received in time", duration = dur
      return
    except CancelledError as exc:
      let dur = Moment.now() - startTime
      debug "Block awaiting was interrupted", duration = dur
      raise exc
    except CatchableError as exc:
      let dur = Moment.now() - startTime
      error "Unexpected error occurred while waiting for block publication",
            err_name = exc.name, err_msg = exc.msg, duration = dur
      return

  let
    dur = Moment.now() - startTime
    blockRoot =
      if len(blocks) == 0:
        "<missing>"
      elif len(blocks) == 1:
        shortLog(blocks[0])
      else:
        "[" & blocks.mapIt(shortLog(it)).join(", ") & "]"

  debug "Block proposal awaited", duration = dur,
        block_root = blockRoot

  try:
    await waitAfterBlockCutoff(vc.beaconClock, slot)
  except CancelledError as exc:
    let dur = Moment.now() - startTime
    debug "Waiting for block cutoff was interrupted", duration = dur
    raise exc

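# Usage sketch (the offset value is hypothetical): give the proposer roughly
# a third of the slot to publish before moving on to attestation work.
#
#   await vc.waitForBlock(slot, TimeDiff(nanoseconds: 4_000_000_000'i64))
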
iterator chunks*[T](data: openArray[T], maxCount: Positive): seq[T] =
  for i in countup(0, len(data) - 1, maxCount):
    yield @(data.toOpenArray(i, min(i + maxCount, len(data)) - 1))

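# For example, `chunks([1, 2, 3, 4, 5], 2)` yields `@[1, 2]`, `@[3, 4]` and
# `@[5]`, which is useful for splitting large request lists into bounded
# batches.
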
func init*(t: typedesc[TimeOffset], duration: Duration): TimeOffset =
  TimeOffset(value: duration.nanoseconds)

func init*(t: typedesc[TimeOffset], offset: int64): TimeOffset =
  TimeOffset(value: offset)

func abs*(to: TimeOffset): TimeOffset =
  TimeOffset(value: abs(to.value))

func milliseconds*(to: TimeOffset): int64 =
  if to.value < 0:
    -nanoseconds(-to.value).milliseconds
  else:
    nanoseconds(to.value).milliseconds

func `<`*(a, b: TimeOffset): bool = a.value < b.value
func `<=`*(a, b: TimeOffset): bool = a.value <= b.value
func `==`*(a, b: TimeOffset): bool = a.value == b.value

func nanoseconds*(to: TimeOffset): int64 = to.value

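# Example of the sign-preserving conversion (values illustrative):
#
#   let to = TimeOffset.init(-1_500_000_000'i64) # -1.5s in nanoseconds
#   doAssert to.milliseconds == -1500
#   doAssert abs(to).milliseconds == 1500
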
proc waitForNextEpoch*(service: ClientServiceRef,
                       delay: Duration) {.async.} =
  let
    vc = service.client
    sleepTime = vc.beaconClock.durationToNextEpoch() + delay
  debug "Sleeping until next epoch", service = service.name,
        sleep_time = sleepTime, delay = delay
  await sleepAsync(sleepTime)

proc waitForNextEpoch*(service: ClientServiceRef): Future[void] =
  waitForNextEpoch(service, ZeroDuration)

proc waitForNextSlot*(service: ClientServiceRef) {.async.} =
  let vc = service.client
  let sleepTime = vc.beaconClock.durationToNextSlot()
  await sleepAsync(sleepTime)

func compareUnsorted*[T](a, b: openArray[T]): bool =
  if len(a) != len(b):
    return false

  return
    case len(a)
    of 0:
      true
    of 1:
      a[0] == b[0]
    of 2:
      ((a[0] == b[0]) and (a[1] == b[1])) or ((a[0] == b[1]) and (a[1] == b[0]))
    else:
      let asorted = sorted(a)
      let bsorted = sorted(b)
      for index, item in asorted.pairs():
        if item != bsorted[index]:
          return false
      true

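# Example of the order-insensitive comparison:
#
#   doAssert compareUnsorted([1, 2, 3], [3, 1, 2])
#   doAssert not compareUnsorted([1, 2], [1, 1])
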
func `==`*(a, b: SyncCommitteeDuty): bool =
  (a.pubkey == b.pubkey) and
  (a.validator_index == b.validator_index) and
  compareUnsorted(a.validator_sync_committee_indices,
                  b.validator_sync_committee_indices)

proc cmp(x, y: AttestationSlotRequest|SyncCommitteeSlotRequest): int =
  cmp(x.slot, y.slot)

func getIndex*(proof: SyncCommitteeSelectionProof,
               inindex: IndexInSyncCommittee): Opt[int] =
  if len(proof) == 0:
    return Opt.none(int)
  for index, value in proof.pairs():
    if value.sync_committee_index == inindex:
      return Opt.some(index)
  Opt.none(int)

func hasSignature*(proof: SyncCommitteeSelectionProof,
                   inindex: IndexInSyncCommittee,
                   slot: Slot): bool =
  let index = proof.getIndex(inindex).valueOr: return false
  proof[index].signatures[int(slot.since_epoch_start())].isSome()

proc setSignature*(proof: var SyncCommitteeSelectionProof,
                   inindex: IndexInSyncCommittee, slot: Slot,
                   signature: Opt[ValidatorSig]) =
  let index = proof.getIndex(inindex).expect(
    "EpochSelectionProof should be present at this moment")
  proof[index].signatures[int(slot.since_epoch_start())] = signature

proc setSyncSelectionProof*(vc: ValidatorClientRef, pubkey: ValidatorPubKey,
                            inindex: IndexInSyncCommittee, slot: Slot,
                            duty: SyncCommitteeDuty,
                            signature: Opt[ValidatorSig]) =
  let
    proof =
      block:
        let length = len(duty.validator_sync_committee_indices)
        var res = newSeq[EpochSelectionProof](length)
        for i in 0 ..< length:
          res[i].sync_committee_index = duty.validator_sync_committee_indices[i]
        res

  vc.syncCommitteeProofs.
    mgetOrPut(slot.epoch(), default(SyncCommitteeProofs)).proofs.
    mgetOrPut(pubkey, proof).setSignature(inindex, slot, signature)

proc getSyncCommitteeSelectionProof*(
       vc: ValidatorClientRef,
       pubkey: ValidatorPubKey,
       epoch: Epoch
     ): Opt[SyncCommitteeSelectionProof] =
  vc.syncCommitteeProofs.withValue(epoch, epochProofs):
    epochProofs[].proofs.withValue(pubkey, validatorProofs):
      return Opt.some(validatorProofs[])
    do:
      return Opt.none(SyncCommitteeSelectionProof)
  do:
    return Opt.none(SyncCommitteeSelectionProof)

proc getSyncCommitteeSelectionProof*(
       vc: ValidatorClientRef,
       pubkey: ValidatorPubKey,
       slot: Slot,
       inindex: IndexInSyncCommittee
     ): Opt[ValidatorSig] =
  vc.syncCommitteeProofs.withValue(slot.epoch(), epochProofs):
    epochProofs[].proofs.withValue(pubkey, validatorProofs):
      let index = getIndex(validatorProofs[], inindex).valueOr:
        return Opt.none(ValidatorSig)
      return validatorProofs[][index].signatures[int(slot.since_epoch_start())]
    do:
      return Opt.none(ValidatorSig)
  do:
    return Opt.none(ValidatorSig)

proc fillSyncCommitteeSelectionProofs*(
       service: DutiesServiceRef,
       start, finish: Slot
     ): Future[FillSignaturesResult] {.async.} =
  let
    vc = service.client
    genesisRoot = vc.beaconGenesis.genesis_validators_root
  var
    requests =
      block:
        var res: seq[SyncCommitteeSlotRequest]
        for epoch in start.epoch() .. finish.epoch():
          let
            fork = vc.forkAtEpoch(epoch)
            period = epoch.sync_committee_period()
          for duty in vc.syncDutiesForPeriod(period):
            let validator = vc.attachedValidators[].
              getValidator(duty.pubkey).valueOr:
                # Ignore all the validators which are not here anymore.
                continue
            if validator.index.isNone():
              # Ignore all the validators which do not have an index yet.
              continue
            let proof = vc.getSyncCommitteeSelectionProof(duty.pubkey, epoch).
              get(default(SyncCommitteeSelectionProof))
            for inindex in duty.validator_sync_committee_indices:
              for slot in epoch.slots():
                if slot < start: continue
                if slot > finish: break
                if not(proof.hasSignature(inindex, slot)):
                  res.add(
                    SyncCommitteeSlotRequest(
                      validator: validator,
                      fork: fork,
                      slot: slot,
                      duty: duty,
                      sync_committee_index: inindex))
        # We make requests sorted by slot number.
        sorted(res, cmp, order = SortOrder.Ascending)
    sigres = FillSignaturesResult(signaturesRequested: len(requests))
    pendingRequests = requests.mapIt(
      FutureBase(getSyncCommitteeSelectionProof(
        it.validator, it.fork, genesisRoot, it.slot,
        getSubcommitteeIndex(it.sync_committee_index))))

  while len(pendingRequests) > 0:
    try:
      discard await race(pendingRequests)
    except CancelledError as exc:
      let pending = pendingRequests
        .filterIt(not(it.finished())).mapIt(it.cancelAndWait())
      await noCancel allFutures(pending)
      raise exc

    (requests, pendingRequests) =
      block:
        var
          res1: seq[SyncCommitteeSlotRequest]
          res2: seq[FutureBase]
        for index, fut in pendingRequests.pairs():
          if not(fut.finished()):
            res1.add(requests[index])
            res2.add(fut)
          else:
            let
              request = requests[index]
              signature =
                if fut.completed():
                  let sres = Future[SignatureResult](fut).read()
                  if sres.isErr():
                    warn "Unable to create slot signature using remote signer",
                         reason = sres.error(), epoch = request.slot.epoch(),
                         slot = request.slot
                    Opt.none(ValidatorSig)
                  else:
                    inc(sigres.signaturesReceived)
                    Opt.some(sres.get())
                else:
                  Opt.none(ValidatorSig)
            vc.setSyncSelectionProof(request.validator.pubkey,
                                     request.sync_committee_index,
                                     request.slot, request.duty,
                                     signature)
        (res1, res2)
  sigres

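# Usage sketch from a duties loop (slot values assumed): request any missing
# proofs up to the end of the wall epoch and log the fill ratio.
#
#   let res = await service.fillSyncCommitteeSelectionProofs(
#     wallSlot, wallSlot.epoch().finish_slot())
#   debug "Sync committee selection proofs filled",
#         requested = res.signaturesRequested,
#         received = res.signaturesReceived
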
proc fillAttestationSelectionProofs*(
       service: DutiesServiceRef,
       start, finish: Slot
     ): Future[FillSignaturesResult] {.async.} =
  let
    vc = service.client
    genesisRoot = vc.beaconGenesis.genesis_validators_root
  var
    requests =
      block:
        var res: seq[AttestationSlotRequest]
        for epoch in start.epoch() .. finish.epoch():
          for duty in vc.attesterDutiesForEpoch(epoch):
            if (duty.data.slot < start) or (duty.data.slot > finish):
              # Ignore all the slots which are not in range.
              continue
            if duty.slotSig.isSome():
              # Ignore all the duties which already have a selection proof.
              continue
            let validator = vc.attachedValidators[].
              getValidator(duty.data.pubkey).valueOr:
                # Ignore all the validators which are not here anymore.
                continue
            if validator.index.isNone():
              # Ignore all the validators which do not have an index yet.
              continue
            res.add(AttestationSlotRequest(
              validator: validator,
              slot: duty.data.slot,
              fork: vc.forkAtEpoch(duty.data.slot.epoch())
            ))
        # We make requests sorted by slot number.
        sorted(res, cmp, order = SortOrder.Ascending)
    sigres = FillSignaturesResult(signaturesRequested: len(requests))
    pendingRequests = requests.mapIt(
      FutureBase(getSlotSignature(it.validator, it.fork, genesisRoot, it.slot)))

  while len(pendingRequests) > 0:
    try:
      discard await race(pendingRequests)
    except CancelledError as exc:
      let pending = pendingRequests
        .filterIt(not(it.finished())).mapIt(it.cancelAndWait())
      await noCancel allFutures(pending)
      raise exc

    (requests, pendingRequests) =
      block:
        var
          res1: seq[AttestationSlotRequest]
          res2: seq[FutureBase]
        for index, fut in pendingRequests.pairs():
          if not(fut.finished()):
            res1.add(requests[index])
            res2.add(fut)
          else:
            let
              request = requests[index]
              signature =
                if fut.completed():
                  let sres = Future[SignatureResult](fut).read()
                  if sres.isErr():
                    warn "Unable to create slot signature using remote signer",
                         reason = sres.error(), epoch = request.slot.epoch(),
                         slot = request.slot
                    Opt.none(ValidatorSig)
                  else:
                    inc(sigres.signaturesReceived)
                    Opt.some(sres.get())
                else:
                  Opt.none(ValidatorSig)
            vc.attesters.withValue(request.validator.pubkey, map):
              map[].duties.withValue(request.slot.epoch(), dap):
                dap[].slotSig = signature
        (res1, res2)
  sigres

proc updateRuntimeConfig*(vc: ValidatorClientRef,
                          node: BeaconNodeServerRef,
                          info: VCRuntimeConfig): Result[void, string] =
  if not(info.hasKey("ALTAIR_FORK_EPOCH")):
    debug "Beacon node's configuration missing ALTAIR_FORK_EPOCH value",
          node = node

  let
    res = info.getOrDefault("ALTAIR_FORK_EPOCH", FAR_FUTURE_EPOCH)
    wallEpoch = vc.beaconClock.now().slotOrZero().epoch()

  return
    if vc.runtimeConfig.altairEpoch.get(FAR_FUTURE_EPOCH) == FAR_FUTURE_EPOCH:
      vc.runtimeConfig.altairEpoch = Opt.some(res)
      ok()
    else:
      if res == vc.runtimeConfig.altairEpoch.get():
        ok()
      else:
        if res == FAR_FUTURE_EPOCH:
          if wallEpoch < vc.runtimeConfig.altairEpoch.get():
            debug "Beacon node must be updated before Altair activates",
                  node = node,
                  altairForkEpoch = vc.runtimeConfig.altairEpoch.get()
            ok()
          else:
            err("Beacon node must be updated and report correct " &
                "ALTAIR_FORK_EPOCH value")
        else:
          err("Beacon node has conflicting ALTAIR_FORK_EPOCH value")

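# In summary, comparing the node's ALTAIR_FORK_EPOCH against the local value:
#   local value unset                          -> adopt the node's value, ok
#   node value == local value                  -> ok
#   node value missing, Altair not yet active  -> ok (node may still update)
#   node value missing, Altair already active  -> error (node must be updated)
#   node value conflicts with the local value  -> error
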
proc `+`*(slot: Slot, epochs: Epoch): Slot =
  slot + uint64(epochs) * SLOTS_PER_EPOCH

func finish_slot*(epoch: Epoch): Slot =
  ## Return the last slot of ``epoch``.
  Slot((epoch + 1).start_slot() - 1)

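# Worked example, assuming mainnet SLOTS_PER_EPOCH = 32:
#
#   doAssert Slot(10) + Epoch(2) == Slot(74)    # 10 + 2 * 32
#   doAssert finish_slot(Epoch(1)) == Slot(63)  # (1 + 1) * 32 - 1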