enable `styleCheck:usages` (#3573)
Some upstream repos still need fixes, but this gets us close enough that style hints can be enabled by default. In general, "canonical" spellings are preferred even when they violate NEP-1 - this applies in particular to spec-related identifiers such as `genesis_validators_root`, which appears throughout the codebase.
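For context, a minimal, hypothetical sketch of what the checker enforces (the identifiers below are made up for the example and are not taken from this diff). Nim normally treats `fooBar` and `foo_bar` as the same identifier; with `--styleCheck:usages`, every use must match the spelling at the definition site, and `--styleCheck:hint` (both flags are added to nim.cfg in this commit) reports mismatches as hints rather than errors:

```nim
# Spec-canonical snake_case definition; NEP-1 would prefer camelCase,
# but under --styleCheck:usages the definition's spelling wins.
func get_slot_signature(slot: int): int =
  slot * 2

echo get_slot_signature(3)  # ok: spelling matches the definition
echo getSlotSignature(3)    # still resolves (identifiers are style-insensitive),
                            # but the compiler now hints that it should be
                            # spelled `get_slot_signature`
```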
parent f20c53be82
commit f70ff38b53
@ -1159,7 +1159,7 @@ proc loadStateRoots*(db: BeaconChainDB): Table[(Slot, Eth2Digest), Eth2Digest] =
|
|||
## mean we also have a state (and vice versa)!
|
||||
var state_roots = initTable[(Slot, Eth2Digest), Eth2Digest](1024)
|
||||
|
||||
discard db.state_roots.find([], proc(k, v: openArray[byte]) =
|
||||
discard db.stateRoots.find([], proc(k, v: openArray[byte]) =
|
||||
if k.len() == 40 and v.len() == 32:
|
||||
# For legacy reasons, the first byte of the slot is not part of the slot
|
||||
# but rather a subkey identifier - see subkey
|
||||
|
|
|
@ -59,8 +59,8 @@ type
|
|||
eventBus*: AsyncEventBus
|
||||
vcProcess*: Process
|
||||
requestManager*: RequestManager
|
||||
syncManager*: SyncManager[Peer, PeerID]
|
||||
backfiller*: SyncManager[Peer, PeerID]
|
||||
syncManager*: SyncManager[Peer, PeerId]
|
||||
backfiller*: SyncManager[Peer, PeerId]
|
||||
genesisSnapshotContent*: string
|
||||
actionTracker*: ActionTracker
|
||||
processor*: ref Eth2Processor
|
||||
|
|
|
@ -379,7 +379,7 @@ proc addAttestation*(pool: var AttestationPool,
|
|||
pool.onAttestationAdded(attestation)
|
||||
|
||||
func covers*(
|
||||
pool: var AttestationPool, data: Attestationdata,
|
||||
pool: var AttestationPool, data: AttestationData,
|
||||
bits: CommitteeValidatorsBits): bool =
|
||||
## Return true iff the given attestation already is fully covered by one of
|
||||
## the existing aggregates, making it redundant
|
||||
|
|
|
@ -367,6 +367,6 @@ proc addBackfillBlock*(
|
|||
let putBlockTick = Moment.now
|
||||
debug "Block backfilled",
|
||||
sigVerifyDur = sigVerifyTick - startTick,
|
||||
putBlockDur = putBlocktick - sigVerifyTick
|
||||
putBlockDur = putBlockTick - sigVerifyTick
|
||||
|
||||
ok()
|
||||
|
|
|
@ -506,7 +506,7 @@ proc getForkedBlock*(
|
|||
type T = type(blck)
|
||||
blck = getBlock(dag, bid, T).valueOr:
|
||||
getBlock(
|
||||
dag.era, getStateField(dag.headState, historicalRoots).asSeq,
|
||||
dag.era, getStateField(dag.headState, historical_roots).asSeq,
|
||||
bid.slot, Opt[Eth2Digest].ok(bid.root), T).valueOr:
|
||||
result.err()
|
||||
return
|
||||
|
@ -792,7 +792,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
|
|||
of BeaconStateFork.Phase0: genesisFork(cfg)
|
||||
of BeaconStateFork.Altair: altairFork(cfg)
|
||||
of BeaconStateFork.Bellatrix: bellatrixFork(cfg)
|
||||
statefork = getStateField(dag.headState, fork)
|
||||
stateFork = getStateField(dag.headState, fork)
|
||||
|
||||
if stateFork != configFork:
|
||||
error "State from database does not match network, check --network parameter",
|
||||
|
@ -954,7 +954,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
|
|||
|
||||
dag
|
||||
|
||||
template genesisValidatorsRoot*(dag: ChainDAGRef): Eth2Digest =
|
||||
template genesis_validators_root*(dag: ChainDAGRef): Eth2Digest =
|
||||
getStateField(dag.headState, genesis_validators_root)
|
||||
|
||||
func getEpochRef*(
|
||||
|
@ -1050,7 +1050,7 @@ proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef =
|
|||
func stateCheckpoint*(dag: ChainDAGRef, bsi: BlockSlotId): BlockSlotId =
|
||||
## The first ancestor BlockSlot that is a state checkpoint
|
||||
var bsi = bsi
|
||||
while not dag.isStateCheckPoint(bsi):
|
||||
while not dag.isStateCheckpoint(bsi):
|
||||
if bsi.isProposed:
|
||||
bsi.bid = dag.parent(bsi.bid).valueOr:
|
||||
break
|
||||
|
|
|
@ -229,7 +229,7 @@ proc deleteLightClientData*(dag: ChainDAGRef, bid: BlockId) =
|
|||
template lazy_header(name: untyped): untyped {.dirty.} =
|
||||
## `createLightClientUpdates` helper to lazily load a known block header.
|
||||
var `name ptr`: ptr[BeaconBlockHeader]
|
||||
template `assign name`(target: var BeaconBlockHeader,
|
||||
template `assign _ name`(target: var BeaconBlockHeader,
|
||||
bid: BlockId): untyped =
|
||||
if `name ptr` != nil:
|
||||
target = `name ptr`[]
|
||||
|
@ -243,7 +243,7 @@ template lazy_data(name: untyped): untyped {.dirty.} =
|
|||
## `createLightClientUpdates` helper to lazily load cached light client state.
|
||||
var `name` {.noinit.}: CachedLightClientData
|
||||
`name`.finalized_bid.slot = FAR_FUTURE_SLOT
|
||||
template `load name`(bid: BlockId): untyped =
|
||||
template `load _ name`(bid: BlockId): untyped =
|
||||
if `name`.finalized_bid.slot == FAR_FUTURE_SLOT:
|
||||
`name` = dag.getLightClientData(bid)
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@ proc getSignedExitMessage(config: BeaconNodeConf,
|
|||
exitAtEpoch: Epoch,
|
||||
validatorIdx: uint64 ,
|
||||
fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest): SignedVoluntaryExit =
|
||||
genesis_validators_root: Eth2Digest): SignedVoluntaryExit =
|
||||
let
|
||||
validatorsDir = config.validatorsDir
|
||||
keystoreDir = validatorsDir / validatorKeyAsStr
|
||||
|
@ -48,7 +48,7 @@ proc getSignedExitMessage(config: BeaconNodeConf,
|
|||
signedExit.signature =
|
||||
block:
|
||||
let key = signingItem.get.privateKey
|
||||
get_voluntary_exit_signature(fork, genesisValidatorsRoot,
|
||||
get_voluntary_exit_signature(fork, genesis_validators_root,
|
||||
signedExit.message, key).toValidatorSig()
|
||||
|
||||
signedExit
|
||||
|
@ -153,7 +153,7 @@ proc rpcValidatorExit(config: BeaconNodeConf) {.async.} =
|
|||
fatal "Failed to obtain the fork id of the head state", err = err.msg
|
||||
quit 1
|
||||
|
||||
let genesisValidatorsRoot = try:
|
||||
let genesis_validators_root = try:
|
||||
(await rpcClient.get_v1_beacon_genesis()).genesis_validators_root
|
||||
except CatchableError as err:
|
||||
fatal "Failed to obtain the genesis validators root of the network",
|
||||
|
@ -167,7 +167,7 @@ proc rpcValidatorExit(config: BeaconNodeConf) {.async.} =
|
|||
exitAtEpoch,
|
||||
validatorIdx,
|
||||
fork,
|
||||
genesisValidatorsRoot)
|
||||
genesis_validators_root)
|
||||
|
||||
try:
|
||||
let choice = askForExitConfirmation()
|
||||
|
@ -281,14 +281,14 @@ proc restValidatorExit(config: BeaconNodeConf) {.async.} =
|
|||
quit 1
|
||||
|
||||
let
|
||||
genesisValidatorsRoot = genesis.genesis_validators_root
|
||||
genesis_validators_root = genesis.genesis_validators_root
|
||||
validatorKeyAsStr = "0x" & $validator.pubkey
|
||||
signedExit = getSignedExitMessage(config,
|
||||
validatorKeyAsStr,
|
||||
exitAtEpoch,
|
||||
validatorIdx,
|
||||
fork,
|
||||
genesisValidatorsRoot)
|
||||
genesis_validators_root)
|
||||
|
||||
try:
|
||||
let choice = askForExitConfirmation()
|
||||
|
|
|
@ -204,7 +204,6 @@ proc getPartialState(
|
|||
# performs
|
||||
var tmp: seq[byte]
|
||||
if (let e = db.getStateSSZ(historical_roots, slot, tmp); e.isErr):
|
||||
debugecho e.error()
|
||||
return false
|
||||
|
||||
static: doAssert isFixedSize(PartialBeaconState)
|
||||
|
@ -243,7 +242,7 @@ iterator getBlockIds*(
|
|||
proc new*(
|
||||
T: type EraDB, cfg: RuntimeConfig, path: string,
|
||||
genesis_validators_root: Eth2Digest): EraDB =
|
||||
EraDb(cfg: cfg, path: path, genesis_validators_root: genesis_validators_root)
|
||||
EraDB(cfg: cfg, path: path, genesis_validators_root: genesis_validators_root)
|
||||
|
||||
when isMainModule:
|
||||
# Testing EraDB gets messy because of the large amounts of data involved:
|
||||
|
|
|
@ -336,7 +336,7 @@ func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload):
|
|||
|
||||
engine_api.ExecutionPayloadV1(
|
||||
parentHash: executionPayload.parent_hash.asBlockHash,
|
||||
feeRecipient: Address(executionPayload.feeRecipient.data),
|
||||
feeRecipient: Address(executionPayload.fee_recipient.data),
|
||||
stateRoot: executionPayload.state_root.asBlockHash,
|
||||
receiptsRoot: executionPayload.receipts_root.asBlockHash,
|
||||
logsBloom:
|
||||
|
@ -362,7 +362,7 @@ template findBlock(chain: Eth1Chain, eth1Data: Eth1Data): Eth1Block =
|
|||
getOrDefault(chain.blocksByHash, asBlockHash(eth1Data.block_hash), nil)
|
||||
|
||||
func makeSuccessorWithoutDeposits(existingBlock: Eth1Block,
|
||||
successor: BlockObject): ETh1Block =
|
||||
successor: BlockObject): Eth1Block =
|
||||
result = Eth1Block(
|
||||
number: Eth1BlockNumber successor.number,
|
||||
timestamp: Eth1BlockTimestamp successor.timestamp,
|
||||
|
@ -409,11 +409,9 @@ template awaitWithRetries*[T](lazyFutExpr: Future[T],
|
|||
if not f.finished:
|
||||
await cancelAndWait(f)
|
||||
elif f.failed:
|
||||
if f.error[] of Defect:
|
||||
raise f.error
|
||||
else:
|
||||
debug "Web3 request failed", req = reqType, err = f.error.msg
|
||||
inc failed_web3_requests
|
||||
static: doAssert f.error of CatchableError
|
||||
debug "Web3 request failed", req = reqType, err = f.error.msg
|
||||
inc failed_web3_requests
|
||||
else:
|
||||
break
|
||||
|
||||
|
|
|
@ -190,8 +190,8 @@ func applyScoreChanges*(self: var ProtoArray,
|
|||
# the delta by the new score amount.
|
||||
#
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/fork-choice.md#get_latest_attesting_balance
|
||||
if useProposerBoost and (not proposer_boost_root.isZero) and
|
||||
proposer_boost_root == node.root:
|
||||
if useProposerBoost and (not proposerBoostRoot.isZero) and
|
||||
proposerBoostRoot == node.root:
|
||||
proposerBoostScore = calculateProposerBoost(newBalances)
|
||||
if nodeDelta >= 0 and
|
||||
high(Delta) - nodeDelta < self.previousProposerBoostScore:
|
||||
|
|
|
@ -285,8 +285,8 @@ proc addBlock*(
|
|||
# because there are no state rewinds to deal with
|
||||
let res = self.storeBackfillBlock(blck)
|
||||
|
||||
if resFut != nil:
|
||||
resFut.complete(res)
|
||||
if resfut != nil:
|
||||
resfut.complete(res)
|
||||
return
|
||||
|
||||
try:
|
||||
|
|
|
@ -697,7 +697,7 @@ proc validateAggregate*(
|
|||
let deferredCrypto = batchCrypto
|
||||
.scheduleAggregateChecks(
|
||||
fork, genesis_validators_root,
|
||||
signed_aggregate_and_proof, epochRef, attesting_indices
|
||||
signedAggregateAndProof, epochRef, attesting_indices
|
||||
)
|
||||
if deferredCrypto.isErr():
|
||||
return checkedReject(deferredCrypto.error)
|
||||
|
@ -881,7 +881,7 @@ proc validateSyncCommitteeMessage*(
|
|||
let
|
||||
epoch = msg.slot.epoch
|
||||
fork = dag.forkAtEpoch(epoch)
|
||||
genesisValidatorsRoot = dag.genesisValidatorsRoot
|
||||
genesis_validators_root = dag.genesis_validators_root
|
||||
senderPubKey = dag.validatorKey(msg.validator_index)
|
||||
|
||||
if senderPubKey.isNone():
|
||||
|
@ -967,7 +967,7 @@ proc validateContribution*(
|
|||
let
|
||||
epoch = msg.message.contribution.slot.epoch
|
||||
fork = dag.forkAtEpoch(epoch)
|
||||
genesis_validators_root = dag.genesisValidatorsRoot
|
||||
genesis_validators_root = dag.genesis_validators_root
|
||||
|
||||
if msg.message.contribution.aggregation_bits.countOnes() == 0:
|
||||
# [REJECT] The contribution has participants
|
||||
|
|
|
@ -61,7 +61,7 @@ type
|
|||
didInitializeStoreCallback: DidInitializeStoreCallback
|
||||
|
||||
cfg: RuntimeConfig
|
||||
genesisValidatorsRoot: Eth2Digest
|
||||
genesis_validators_root: Eth2Digest
|
||||
trustedBlockRoot: Eth2Digest
|
||||
|
||||
lastProgressTick: BeaconTime # Moment when last update made progress
|
||||
|
@ -83,7 +83,7 @@ proc new*(
|
|||
dumpEnabled: bool,
|
||||
dumpDirInvalid, dumpDirIncoming: string,
|
||||
cfg: RuntimeConfig,
|
||||
genesisValidatorsRoot, trustedBlockRoot: Eth2Digest,
|
||||
genesis_validators_root, trustedBlockRoot: Eth2Digest,
|
||||
store: ref Option[LightClientStore],
|
||||
getBeaconTime: GetBeaconTimeFn,
|
||||
didInitializeStoreCallback: DidInitializeStoreCallback = nil
|
||||
|
@ -96,7 +96,7 @@ proc new*(
|
|||
getBeaconTime: getBeaconTime,
|
||||
didInitializeStoreCallback: didInitializeStoreCallback,
|
||||
cfg: cfg,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
genesis_validators_root: genesis_validators_root,
|
||||
trustedBlockRoot: trustedBlockRoot
|
||||
)
|
||||
|
||||
|
@ -172,14 +172,14 @@ proc storeObject*(
|
|||
err(BlockError.MissingParent)
|
||||
else:
|
||||
store[].get.process_light_client_update(
|
||||
obj, wallSlot, self.cfg, self.genesisValidatorsRoot,
|
||||
obj, wallSlot, self.cfg, self.genesis_validators_root,
|
||||
allowForceUpdate = false)
|
||||
elif obj is altair.OptimisticLightClientUpdate:
|
||||
if store[].isNone:
|
||||
err(BlockError.MissingParent)
|
||||
else:
|
||||
store[].get.process_optimistic_light_client_update(
|
||||
obj, wallSlot, self.cfg, self.genesisValidatorsRoot)
|
||||
obj, wallSlot, self.cfg, self.genesis_validators_root)
|
||||
|
||||
self.dumpObject(obj, res)
|
||||
|
||||
|
|
|
@ -52,5 +52,5 @@ func makeDeposit*(
|
|||
pubkey: pubkey,
|
||||
withdrawal_credentials: makeWithdrawalCredentials(pubkey))
|
||||
|
||||
if skipBLSValidation notin flags:
|
||||
if skipBlsValidation notin flags:
|
||||
result.signature = preset.get_deposit_signature(result, privkey).toValidatorSig()
|
||||
|
|
|
@ -55,7 +55,7 @@ type
|
|||
GossipMsg = messages.Message
|
||||
|
||||
SeenItem* = object
|
||||
peerId*: PeerID
|
||||
peerId*: PeerId
|
||||
stamp*: chronos.Moment
|
||||
|
||||
Eth2Node* = ref object of RootObj
|
||||
|
@ -65,20 +65,20 @@ type
|
|||
discoveryEnabled*: bool
|
||||
wantedPeers*: int
|
||||
hardMaxPeers*: int
|
||||
peerPool*: PeerPool[Peer, PeerID]
|
||||
peerPool*: PeerPool[Peer, PeerId]
|
||||
protocolStates*: seq[RootRef]
|
||||
metadata*: altair.MetaData
|
||||
connectTimeout*: chronos.Duration
|
||||
seenThreshold*: chronos.Duration
|
||||
connQueue: AsyncQueue[PeerAddr]
|
||||
seenTable: Table[PeerID, SeenItem]
|
||||
seenTable: Table[PeerId, SeenItem]
|
||||
connWorkers: seq[Future[void]]
|
||||
connTable: HashSet[PeerID]
|
||||
connTable: HashSet[PeerId]
|
||||
forkId*: ENRForkID
|
||||
discoveryForkId*: ENRForkID
|
||||
forkDigests*: ref ForkDigests
|
||||
rng*: ref BrHmacDrbgContext
|
||||
peers*: Table[PeerID, Peer]
|
||||
peers*: Table[PeerId, Peer]
|
||||
validTopics: HashSet[string]
|
||||
peerPingerHeartbeatFut: Future[void]
|
||||
peerTrimmerHeartbeatFut: Future[void]
|
||||
|
@ -110,7 +110,7 @@ type
|
|||
disconnectedFut: Future[void]
|
||||
|
||||
PeerAddr* = object
|
||||
peerId*: PeerID
|
||||
peerId*: PeerId
|
||||
addrs*: seq[MultiAddress]
|
||||
|
||||
ConnectionState* = enum
|
||||
|
@ -342,13 +342,13 @@ proc openStream(node: Eth2Node,
|
|||
|
||||
proc init*(T: type Peer, network: Eth2Node, peerId: PeerId): Peer {.gcsafe.}
|
||||
|
||||
func peerId*(node: Eth2Node): PeerID =
|
||||
func peerId*(node: Eth2Node): PeerId =
|
||||
node.switch.peerInfo.peerId
|
||||
|
||||
func enrRecord*(node: Eth2Node): Record =
|
||||
node.discovery.localNode.record
|
||||
|
||||
proc getPeer*(node: Eth2Node, peerId: PeerID): Peer =
|
||||
proc getPeer*(node: Eth2Node, peerId: PeerId): Peer =
|
||||
node.peers.withValue(peerId, peer) do:
|
||||
return peer[]
|
||||
do:
|
||||
|
@ -359,7 +359,7 @@ proc peerFromStream(network: Eth2Node, conn: Connection): Peer =
|
|||
result = network.getPeer(conn.peerId)
|
||||
result.peerId = conn.peerId
|
||||
|
||||
proc getKey*(peer: Peer): PeerID {.inline.} =
|
||||
proc getKey*(peer: Peer): PeerId {.inline.} =
|
||||
peer.peerId
|
||||
|
||||
proc getFuture*(peer: Peer): Future[void] {.inline.} =
|
||||
|
@ -437,7 +437,7 @@ template awaitNonNegativeRequestQuota*(peer: Peer) =
|
|||
func allowedOpsPerSecondCost*(n: int): float =
|
||||
(replenishRate * 1000000000'f / n.float)
|
||||
|
||||
proc isSeen*(network: Eth2Node, peerId: PeerID): bool =
|
||||
proc isSeen*(network: Eth2Node, peerId: PeerId): bool =
|
||||
## Returns ``true`` if ``peerId`` present in SeenTable and time period is not
|
||||
## yet expired.
|
||||
let currentTime = now(chronos.Moment)
|
||||
|
@ -453,9 +453,9 @@ proc isSeen*(network: Eth2Node, peerId: PeerID): bool =
|
|||
else:
|
||||
true
|
||||
|
||||
proc addSeen*(network: Eth2Node, peerId: PeerID,
|
||||
proc addSeen*(network: Eth2Node, peerId: PeerId,
|
||||
period: chronos.Duration) =
|
||||
## Adds peer with PeerID ``peerId`` to SeenTable and timeout ``period``.
|
||||
## Adds peer with PeerId ``peerId`` to SeenTable and timeout ``period``.
|
||||
let item = SeenItem(peerId: peerId, stamp: now(chronos.Moment) + period)
|
||||
withValue(network.seenTable, peerId, entry) do:
|
||||
if entry.stamp < item.stamp:
|
||||
|
@ -854,7 +854,7 @@ proc toPeerAddr*(r: enr.TypedRecord,
|
|||
|
||||
let
|
||||
pubKey = ? keys.PublicKey.fromRaw(r.secp256k1.get)
|
||||
peerId = ? PeerID.init(crypto.PublicKey(
|
||||
peerId = ? PeerId.init(crypto.PublicKey(
|
||||
scheme: Secp256k1, skkey: secp.SkPublicKey(pubKey)))
|
||||
|
||||
var addrs = newSeq[MultiAddress]()
|
||||
|
@ -1044,7 +1044,7 @@ proc queryRandom*(
|
|||
proc trimConnections(node: Eth2Node, count: int) =
|
||||
# Kill `count` peers, scoring them to remove the least useful ones
|
||||
|
||||
var scores = initOrderedTable[PeerID, int]()
|
||||
var scores = initOrderedTable[PeerId, int]()
|
||||
|
||||
# Take into account the stabilitySubnets
|
||||
# During sync, only this will be used to score peers
|
||||
|
@ -1084,7 +1084,7 @@ proc trimConnections(node: Eth2Node, count: int) =
|
|||
# Then, use the average of all topics per peers, to avoid giving too much
|
||||
# point to big peers
|
||||
|
||||
var gossipScores = initTable[PeerID, tuple[sum: int, count: int]]()
|
||||
var gossipScores = initTable[PeerId, tuple[sum: int, count: int]]()
|
||||
for topic, _ in node.pubsub.gossipsub:
|
||||
let
|
||||
peersInMesh = node.pubsub.mesh.peers(topic)
|
||||
|
@ -1116,7 +1116,7 @@ proc trimConnections(node: Eth2Node, count: int) =
|
|||
scores[peerId] =
|
||||
scores.getOrDefault(peerId) + (gScore.sum div gScore.count)
|
||||
|
||||
proc sortPerScore(a, b: (PeerID, int)): int =
|
||||
proc sortPerScore(a, b: (PeerId, int)): int =
|
||||
system.cmp(a[1], b[1])
|
||||
|
||||
scores.sort(sortPerScore)
|
||||
|
@ -1153,7 +1153,7 @@ proc getLowSubnets(node: Eth2Node, epoch: Epoch): (AttnetBits, SyncnetBits) =
|
|||
|
||||
for subNetId in 0 ..< totalSubnets:
|
||||
let topic =
|
||||
topicNameGenerator(node.forkId.forkDigest, SubnetIdType(subNetId))
|
||||
topicNameGenerator(node.forkId.fork_digest, SubnetIdType(subNetId))
|
||||
|
||||
if node.pubsub.gossipsub.peers(topic) < node.pubsub.parameters.dLow:
|
||||
lowOutgoingSubnets.setBit(subNetId)
|
||||
|
@ -1228,7 +1228,7 @@ proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
|
|||
let res = discNode.toPeerAddr()
|
||||
if res.isErr():
|
||||
debug "Failed to decode discovery's node address",
|
||||
node = discnode, errMsg = res.error
|
||||
node = discNode, errMsg = res.error
|
||||
continue
|
||||
|
||||
let peerAddr = res.get()
|
||||
|
@ -1279,7 +1279,7 @@ proc resolvePeer(peer: Peer) =
|
|||
let nodeId =
|
||||
block:
|
||||
var key: PublicKey
|
||||
# `secp256k1` keys are always stored inside PeerID.
|
||||
# `secp256k1` keys are always stored inside PeerId.
|
||||
discard peer.peerId.extractPublicKey(key)
|
||||
keys.PublicKey.fromRaw(key.skkey.getBytes()).get().toNodeId()
|
||||
|
||||
|
@ -1327,7 +1327,7 @@ proc handlePeer*(peer: Peer) {.async.} =
|
|||
debug "Peer successfully connected", peer = peer,
|
||||
connections = peer.connections
|
||||
|
||||
proc onConnEvent(node: Eth2Node, peerId: PeerID, event: ConnEvent) {.async.} =
|
||||
proc onConnEvent(node: Eth2Node, peerId: PeerId, event: ConnEvent) {.async.} =
|
||||
let peer = node.getPeer(peerId)
|
||||
case event.kind
|
||||
of ConnEventKind.Connected:
|
||||
|
@ -1402,7 +1402,7 @@ proc onConnEvent(node: Eth2Node, peerId: PeerID, event: ConnEvent) {.async.} =
|
|||
peer.connectionState = Disconnected
|
||||
|
||||
proc new*(T: type Eth2Node, config: BeaconNodeConf, runtimeCfg: RuntimeConfig,
|
||||
enrForkId: ENRForkID, discoveryForkId: ENRForkId, forkDigests: ref ForkDigests,
|
||||
enrForkId: ENRForkID, discoveryForkId: ENRForkID, forkDigests: ref ForkDigests,
|
||||
getBeaconTime: GetBeaconTimeFn, switch: Switch,
|
||||
pubsub: GossipSub, ip: Option[ValidIpAddress], tcpPort,
|
||||
udpPort: Option[Port], privKey: keys.PrivateKey, discovery: bool,
|
||||
|
@ -1428,7 +1428,7 @@ proc new*(T: type Eth2Node, config: BeaconNodeConf, runtimeCfg: RuntimeConfig,
|
|||
wantedPeers: config.maxPeers,
|
||||
hardMaxPeers: config.hardMaxPeers.get(config.maxPeers * 3 div 2), #*1.5
|
||||
cfg: runtimeCfg,
|
||||
peerPool: newPeerPool[Peer, PeerID](),
|
||||
peerPool: newPeerPool[Peer, PeerId](),
|
||||
# Its important here to create AsyncQueue with limited size, otherwise
|
||||
# it could produce HIGH cpu usage.
|
||||
connQueue: newAsyncQueue[PeerAddr](ConcurrentConnections),
|
||||
|
@ -1705,7 +1705,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
|
|||
#Must import here because of cyclicity
|
||||
import ../sync/sync_protocol
|
||||
|
||||
proc updatePeerMetadata(node: Eth2Node, peerId: PeerID) {.async.} =
|
||||
proc updatePeerMetadata(node: Eth2Node, peerId: PeerId) {.async.} =
|
||||
trace "updating peer metadata", peerId
|
||||
|
||||
var peer = node.getPeer(peerId)
|
||||
|
@ -1803,9 +1803,9 @@ proc getPersistentNetKeys*(rng: var BrHmacDrbgContext,
|
|||
let
|
||||
privKey = res.get()
|
||||
pubKey = privKey.getPublicKey().expect("working public key from random")
|
||||
pres = PeerID.init(pubKey)
|
||||
pres = PeerId.init(pubKey)
|
||||
if pres.isErr():
|
||||
fatal "Could not obtain PeerID from network key"
|
||||
fatal "Could not obtain PeerId from network key"
|
||||
quit QuitFailure
|
||||
info "Generating new networking key", network_public_key = pubKey,
|
||||
network_peer_id = $pres.get()
|
||||
|
@ -1973,14 +1973,14 @@ proc createEth2Node*(rng: ref BrHmacDrbgContext,
|
|||
cfg: RuntimeConfig,
|
||||
forkDigests: ref ForkDigests,
|
||||
getBeaconTime: GetBeaconTimeFn,
|
||||
genesisValidatorsRoot: Eth2Digest): Eth2Node
|
||||
genesis_validators_root: Eth2Digest): Eth2Node
|
||||
{.raises: [Defect, CatchableError].} =
|
||||
let
|
||||
enrForkId = getENRForkID(
|
||||
cfg, getBeaconTime().slotOrZero.epoch, genesisValidatorsRoot)
|
||||
cfg, getBeaconTime().slotOrZero.epoch, genesis_validators_root)
|
||||
|
||||
discoveryForkId = getDiscoveryForkID(
|
||||
cfg, getBeaconTime().slotOrZero.epoch, genesisValidatorsRoot)
|
||||
cfg, getBeaconTime().slotOrZero.epoch, genesis_validators_root)
|
||||
|
||||
(extIp, extTcpPort, extUdpPort) = try: setupAddress(
|
||||
config.nat, config.listenAddress, config.tcpPort, config.udpPort, clientId)
|
||||
|
@ -2053,7 +2053,7 @@ proc createEth2Node*(rng: ref BrHmacDrbgContext,
|
|||
let
|
||||
maddress = MultiAddress.init(s).tryGet()
|
||||
mpeerId = maddress[multiCodec("p2p")].tryGet()
|
||||
peerId = PeerID.init(mpeerId.protoAddress().tryGet()).tryGet()
|
||||
peerId = PeerId.init(mpeerId.protoAddress().tryGet()).tryGet()
|
||||
res.mgetOrPut(peerId, @[]).add(maddress)
|
||||
info "Adding priviledged direct peer", peerId, address = maddress
|
||||
res
|
||||
|
@ -2086,7 +2086,7 @@ proc announcedENR*(node: Eth2Node): enr.Record =
|
|||
node.discovery.localNode.record
|
||||
|
||||
proc shortForm*(id: NetKeyPair): string =
|
||||
$PeerID.init(id.pubkey)
|
||||
$PeerId.init(id.pubkey)
|
||||
|
||||
proc subscribe*(
|
||||
node: Eth2Node, topic: string, topicParams: TopicParams,
|
||||
|
@ -2270,9 +2270,9 @@ proc updateForkId(node: Eth2Node, value: ENRForkID) =
|
|||
else:
|
||||
debug "ENR fork id changed", value
|
||||
|
||||
proc updateForkId*(node: Eth2Node, epoch: Epoch, genesisValidatorsRoot: Eth2Digest) =
|
||||
node.updateForkId(getENRForkId(node.cfg, epoch, genesisValidatorsRoot))
|
||||
node.discoveryForkId = getDiscoveryForkID(node.cfg, epoch, genesisValidatorsRoot)
|
||||
proc updateForkId*(node: Eth2Node, epoch: Epoch, genesis_validators_root: Eth2Digest) =
|
||||
node.updateForkId(getENRForkID(node.cfg, epoch, genesis_validators_root))
|
||||
node.discoveryForkId = getDiscoveryForkID(node.cfg, epoch, genesis_validators_root)
|
||||
|
||||
func forkDigestAtEpoch(node: Eth2Node, epoch: Epoch): ForkDigest =
|
||||
case node.cfg.stateForkAtEpoch(epoch)
|
||||
|
|
|
@ -10,13 +10,13 @@
|
|||
import libp2p/[peerid, multiaddress], json_serialization
|
||||
export json_serialization
|
||||
|
||||
proc writeValue*(writer: var JsonWriter, value: PeerID) {.
|
||||
proc writeValue*(writer: var JsonWriter, value: PeerId) {.
|
||||
raises: [Defect, IOError].} =
|
||||
writer.writeValue $value
|
||||
|
||||
proc readValue*(reader: var JsonReader, value: var PeerID) {.
|
||||
proc readValue*(reader: var JsonReader, value: var PeerId) {.
|
||||
raises: [Defect, IOError, SerializationError].} =
|
||||
let res = PeerID.init reader.readValue(string)
|
||||
let res = PeerId.init reader.readValue(string)
|
||||
if res.isOk:
|
||||
value = res.get()
|
||||
else:
|
||||
|
|
|
@ -2,3 +2,8 @@
|
|||
-d:"libp2p_pki_schemes=secp256k1"
|
||||
|
||||
-d:chronosStrictException
|
||||
--styleCheck:usages
|
||||
--styleCheck:hint
|
||||
--hint[XDeclaredButNotUsed]:off
|
||||
--hint[ConvFromXtoItselfNotNeeded]:off
|
||||
--hint[Processing]:off
|
||||
|
|
|
@ -206,7 +206,7 @@ proc initFullNode(
|
|||
node: BeaconNode,
|
||||
rng: ref BrHmacDrbgContext,
|
||||
dag: ChainDAGRef,
|
||||
taskpool: TaskpoolPtr,
|
||||
taskpool: TaskPoolPtr,
|
||||
getBeaconTime: GetBeaconTimeFn) =
|
||||
template config(): auto = node.config
|
||||
|
||||
|
@ -274,11 +274,11 @@ proc initFullNode(
|
|||
blockProcessor, node.validatorMonitor, dag, attestationPool, exitPool,
|
||||
node.attachedValidators, syncCommitteeMsgPool, quarantine, rng,
|
||||
getBeaconTime, taskpool)
|
||||
syncManager = newSyncManager[Peer, PeerID](
|
||||
syncManager = newSyncManager[Peer, PeerId](
|
||||
node.network.peerPool, SyncQueueKind.Forward, getLocalHeadSlot,
|
||||
getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
|
||||
getFrontfillSlot, dag.tail.slot, blockVerifier)
|
||||
backfiller = newSyncManager[Peer, PeerID](
|
||||
backfiller = newSyncManager[Peer, PeerId](
|
||||
node.network.peerPool, SyncQueueKind.Backward, getLocalHeadSlot,
|
||||
getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
|
||||
getFrontfillSlot, dag.backfill.slot, blockVerifier, maxHeadAge = 0)
|
||||
|
@ -336,7 +336,7 @@ proc init*(T: type BeaconNode,
|
|||
depositContractSnapshotContents: string): BeaconNode {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
|
||||
var taskpool: TaskpoolPtr
|
||||
var taskpool: TaskPoolPtr
|
||||
|
||||
let depositContractSnapshot = if depositContractSnapshotContents.len > 0:
|
||||
try:
|
||||
|
@ -352,9 +352,9 @@ proc init*(T: type BeaconNode,
|
|||
fatal "The number of threads --numThreads cannot be negative."
|
||||
quit 1
|
||||
elif config.numThreads == 0:
|
||||
taskpool = TaskpoolPtr.new(numThreads = min(countProcessors(), 16))
|
||||
taskpool = TaskPoolPtr.new(numThreads = min(countProcessors(), 16))
|
||||
else:
|
||||
taskpool = TaskpoolPtr.new(numThreads = config.numThreads)
|
||||
taskpool = TaskPoolPtr.new(numThreads = config.numThreads)
|
||||
|
||||
info "Threadpool started", numThreads = taskpool.numThreads
|
||||
except Exception as exc:
|
||||
|
@ -1155,7 +1155,7 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
|
|||
let epoch = slot.epoch
|
||||
if epoch + 1 >= node.network.forkId.next_fork_epoch:
|
||||
# Update 1 epoch early to block non-fork-ready peers
|
||||
node.network.updateForkId(epoch, node.dag.genesisValidatorsRoot)
|
||||
node.network.updateForkId(epoch, node.dag.genesis_validators_root)
|
||||
|
||||
# When we're not behind schedule, we'll speculatively update the clearance
|
||||
# state in anticipation of receiving the next block - we do it after logging
|
||||
|
@ -1892,10 +1892,10 @@ proc doSlashingImport(conf: BeaconNodeConf) {.raises: [SerializationError, IOErr
|
|||
|
||||
var spdir: SPDIR
|
||||
try:
|
||||
spdir = JSON.loadFile(interchange, SPDIR)
|
||||
spdir = Json.loadFile(interchange, SPDIR)
|
||||
except SerializationError as err:
|
||||
writeStackTrace()
|
||||
stderr.write $JSON & " load issue for file \"", interchange, "\"\n"
|
||||
stderr.write $Json & " load issue for file \"", interchange, "\"\n"
|
||||
stderr.write err.formatMsg(interchange), "\n"
|
||||
quit 1
|
||||
|
||||
|
@ -1936,7 +1936,7 @@ proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [Defect, CatchableEr
|
|||
of BNStartUpCmd.record: doRecord(config, rng[])
|
||||
of BNStartUpCmd.web3: doWeb3Cmd(config, rng[])
|
||||
of BNStartUpCmd.slashingdb: doSlashingInterchange(config)
|
||||
of BNStartupCmd.trustedNodeSync:
|
||||
of BNStartUpCmd.trustedNodeSync:
|
||||
let
|
||||
network = loadEth2Network(config)
|
||||
cfg = network.cfg
|
||||
|
|
|
@ -222,7 +222,7 @@ proc installApiHandlers*(node: SigningNode) =
|
|||
let
|
||||
forkInfo = request.forkInfo.get()
|
||||
cooked = get_slot_signature(forkInfo.fork,
|
||||
forkInfo.genesisValidatorsRoot,
|
||||
forkInfo.genesis_validators_root,
|
||||
request.aggregationSlot.slot, validator.data.privateKey)
|
||||
signature = cooked.toValidatorSig().toHex()
|
||||
signatureResponse(Http200, signature)
|
||||
|
@ -230,7 +230,7 @@ proc installApiHandlers*(node: SigningNode) =
|
|||
let
|
||||
forkInfo = request.forkInfo.get()
|
||||
cooked = get_aggregate_and_proof_signature(forkInfo.fork,
|
||||
forkInfo.genesisValidatorsRoot, request.aggregateAndProof,
|
||||
forkInfo.genesis_validators_root, request.aggregateAndProof,
|
||||
validator.data.privateKey)
|
||||
signature = cooked.toValidatorSig().toHex()
|
||||
signatureResponse(Http200, signature)
|
||||
|
@ -238,7 +238,7 @@ proc installApiHandlers*(node: SigningNode) =
|
|||
let
|
||||
forkInfo = request.forkInfo.get()
|
||||
cooked = get_attestation_signature(forkInfo.fork,
|
||||
forkInfo.genesisValidatorsRoot, request.attestation,
|
||||
forkInfo.genesis_validators_root, request.attestation,
|
||||
validator.data.privateKey)
|
||||
signature = cooked.toValidatorSig().toHex()
|
||||
signatureResponse(Http200, signature)
|
||||
|
@ -248,7 +248,7 @@ proc installApiHandlers*(node: SigningNode) =
|
|||
blck = request.blck
|
||||
blockRoot = hash_tree_root(blck)
|
||||
cooked = get_block_signature(forkInfo.fork,
|
||||
forkInfo.genesisValidatorsRoot, blck.slot, blockRoot,
|
||||
forkInfo.genesis_validators_root, blck.slot, blockRoot,
|
||||
validator.data.privateKey)
|
||||
signature = cooked.toValidatorSig().toHex()
|
||||
signatureResponse(Http200, signature)
|
||||
|
@ -260,7 +260,7 @@ proc installApiHandlers*(node: SigningNode) =
|
|||
cooked =
|
||||
withBlck(forked):
|
||||
get_block_signature(forkInfo.fork,
|
||||
forkInfo.genesisValidatorsRoot, blck.slot, blockRoot,
|
||||
forkInfo.genesis_validators_root, blck.slot, blockRoot,
|
||||
validator.data.privateKey)
|
||||
signature = cooked.toValidatorSig().toHex()
|
||||
signatureResponse(Http200, signature)
|
||||
|
@ -277,7 +277,7 @@ proc installApiHandlers*(node: SigningNode) =
|
|||
let
|
||||
forkInfo = request.forkInfo.get()
|
||||
cooked = get_epoch_signature(forkInfo.fork,
|
||||
forkInfo.genesisValidatorsRoot, request.randaoReveal.epoch,
|
||||
forkInfo.genesis_validators_root, request.randaoReveal.epoch,
|
||||
validator.data.privateKey)
|
||||
signature = cooked.toValidatorSig().toHex()
|
||||
signatureResponse(Http200, signature)
|
||||
|
@ -285,7 +285,7 @@ proc installApiHandlers*(node: SigningNode) =
|
|||
let
|
||||
forkInfo = request.forkInfo.get()
|
||||
cooked = get_voluntary_exit_signature(forkInfo.fork,
|
||||
forkInfo.genesisValidatorsRoot, request.voluntaryExit,
|
||||
forkInfo.genesis_validators_root, request.voluntaryExit,
|
||||
validator.data.privateKey)
|
||||
signature = cooked.toValidatorSig().toHex()
|
||||
signatureResponse(Http200, signature)
|
||||
|
@ -294,7 +294,7 @@ proc installApiHandlers*(node: SigningNode) =
|
|||
forkInfo = request.forkInfo.get()
|
||||
msg = request.syncCommitteeMessage
|
||||
cooked = get_sync_committee_message_signature(forkInfo.fork,
|
||||
forkInfo.genesisValidatorsRoot, msg.slot, msg.beaconBlockRoot,
|
||||
forkInfo.genesis_validators_root, msg.slot, msg.beaconBlockRoot,
|
||||
validator.data.privateKey)
|
||||
signature = cooked.toValidatorSig().toHex()
|
||||
signatureResponse(Http200, signature)
|
||||
|
@ -303,7 +303,7 @@ proc installApiHandlers*(node: SigningNode) =
|
|||
forkInfo = request.forkInfo.get()
|
||||
msg = request.syncAggregatorSelectionData
|
||||
cooked = get_sync_committee_selection_proof(forkInfo.fork,
|
||||
forkInfo.genesisValidatorsRoot, msg.slot, msg.subcommittee_index,
|
||||
forkInfo.genesis_validators_root, msg.slot, msg.subcommittee_index,
|
||||
validator.data.privateKey)
|
||||
signature = cooked.toValidatorSig().toHex()
|
||||
signatureResponse(Http200, signature)
|
||||
|
@ -312,7 +312,7 @@ proc installApiHandlers*(node: SigningNode) =
|
|||
forkInfo = request.forkInfo.get()
|
||||
msg = request.syncCommitteeContributionAndProof
|
||||
cooked = get_contribution_and_proof_signature(
|
||||
forkInfo.fork, forkInfo.genesisValidatorsRoot, msg,
|
||||
forkInfo.fork, forkInfo.genesis_validators_root, msg,
|
||||
validator.data.privateKey)
|
||||
signature = cooked.toValidatorSig().toHex()
|
||||
signatureResponse(Http200, signature)
|
||||
|
|
|
@ -52,7 +52,7 @@ type
|
|||
state*: string
|
||||
|
||||
RestPubSubPeer* = object
|
||||
peerId*: PeerID
|
||||
peerId*: PeerId
|
||||
score*: float64
|
||||
iWantBudget*: int
|
||||
iHaveBudget*: int
|
||||
|
@ -67,14 +67,14 @@ type
|
|||
agent*: string
|
||||
|
||||
RestPeerStats* = object
|
||||
peerId*: PeerID
|
||||
peerId*: PeerId
|
||||
null*: bool
|
||||
connected*: bool
|
||||
expire*: string
|
||||
score*: float64
|
||||
|
||||
RestPeerStatus* = object
|
||||
peerId*: PeerID
|
||||
peerId*: PeerId
|
||||
connected*: bool
|
||||
|
||||
proc toInfo(node: BeaconNode, peerId: PeerId): RestPeerInfo =
|
||||
|
@ -146,7 +146,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
|
||||
router.api(MethodGet, "/nimbus/v1/network/ids") do (
|
||||
) -> RestApiResponse:
|
||||
var res: seq[PeerID]
|
||||
var res: seq[PeerId]
|
||||
for peerId, peer in node.network.peerPool:
|
||||
res.add(peerId)
|
||||
return RestApiResponse.jsonResponse((peerids: res))
|
||||
|
@ -273,7 +273,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
var peers: seq[RestPubSubPeer]
|
||||
let backoff = node.network.pubsub.backingOff.getOrDefault(topic)
|
||||
for peer in v:
|
||||
peers.add(peer.toNode(backOff.getOrDefault(peer.peerId)))
|
||||
peers.add(peer.toNode(backoff.getOrDefault(peer.peerId)))
|
||||
res.add((topic: topic, peers: peers))
|
||||
res
|
||||
let meshPeers =
|
||||
|
@ -283,14 +283,14 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
var peers: seq[RestPubSubPeer]
|
||||
let backoff = node.network.pubsub.backingOff.getOrDefault(topic)
|
||||
for peer in v:
|
||||
peers.add(peer.toNode(backOff.getOrDefault(peer.peerId)))
|
||||
peers.add(peer.toNode(backoff.getOrDefault(peer.peerId)))
|
||||
res.add((topic: topic, peers: peers))
|
||||
res
|
||||
let colocationPeers =
|
||||
block:
|
||||
var res: seq[tuple[address: string, peerids: seq[PeerID]]]
|
||||
var res: seq[tuple[address: string, peerids: seq[PeerId]]]
|
||||
for k, v in node.network.pubsub.peersInIP:
|
||||
var peerids: seq[PeerID]
|
||||
var peerids: seq[PeerId]
|
||||
for id in v:
|
||||
peerids.add(id)
|
||||
res.add(($k, peerids))
|
||||
|
|
|
@ -104,7 +104,7 @@ proc getDiscoveryAddresses(node: BeaconNode): Option[seq[string]] =
|
|||
if respa.isErr():
|
||||
return none[seq[string]]()
|
||||
let pa = respa.get()
|
||||
let mpa = MultiAddress.init(multicodec("p2p"), pa.peerId)
|
||||
let mpa = MultiAddress.init(multiCodec("p2p"), pa.peerId)
|
||||
if mpa.isErr():
|
||||
return none[seq[string]]()
|
||||
var addresses = newSeqOfCap[string](len(pa.addrs))
|
||||
|
@ -116,7 +116,7 @@ proc getDiscoveryAddresses(node: BeaconNode): Option[seq[string]] =
|
|||
|
||||
proc getP2PAddresses(node: BeaconNode): Option[seq[string]] =
|
||||
let pinfo = node.network.switch.peerInfo
|
||||
let mpa = MultiAddress.init(multicodec("p2p"), pinfo.peerId)
|
||||
let mpa = MultiAddress.init(multiCodec("p2p"), pinfo.peerId)
|
||||
if mpa.isErr():
|
||||
return none[seq[string]]()
|
||||
var addresses = newSeqOfCap[string](len(pinfo.addrs))
|
||||
|
@ -152,7 +152,7 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
return RestApiResponse.jsonResponse(
|
||||
(
|
||||
peer_id: $node.network.peerId(),
|
||||
enr: node.network.enrRecord().toUri(),
|
||||
enr: node.network.enrRecord().toURI(),
|
||||
p2p_addresses: p2pAddresses,
|
||||
discovery_addresses: discoveryAddresses,
|
||||
metadata: (
|
||||
|
@ -196,7 +196,7 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
(peer.direction in directionMask):
|
||||
let peer = (
|
||||
peer_id: $peer.peerId,
|
||||
enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
|
||||
enr: if peer.enr.isSome(): peer.enr.get().toURI() else: "",
|
||||
last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
|
||||
state: peer.connectionState.toString(),
|
||||
direction: peer.direction.toString(),
|
||||
|
@ -225,7 +225,7 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
|
||||
# https://ethereum.github.io/beacon-APIs/#/Node/getPeer
|
||||
router.api(MethodGet, "/eth/v1/node/peers/{peer_id}") do (
|
||||
peer_id: PeerID) -> RestApiResponse:
|
||||
peer_id: PeerId) -> RestApiResponse:
|
||||
let peer =
|
||||
block:
|
||||
if peer_id.isErr():
|
||||
|
@ -238,7 +238,7 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
return RestApiResponse.jsonResponse(
|
||||
(
|
||||
peer_id: $peer.peerId,
|
||||
enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
|
||||
enr: if peer.enr.isSome(): peer.enr.get().toURI() else: "",
|
||||
last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
|
||||
state: peer.connectionState.toString(),
|
||||
direction: peer.direction.toString(),
|
||||
|
|
|
@ -15,7 +15,7 @@ type
|
|||
ValidatorIndexError* {.pure.} = enum
|
||||
UnsupportedValue, TooHighValue
|
||||
|
||||
func match(data: openarray[char], charset: set[char]): int =
|
||||
func match(data: openArray[char], charset: set[char]): int =
|
||||
for ch in data:
|
||||
if ch notin charset:
|
||||
return 1
|
||||
|
|
|
@ -60,7 +60,7 @@ proc createIdQuery(ids: openArray[string]): Result[ValidatorQuery, cstring] =
|
|||
|
||||
for item in ids:
|
||||
if item.startsWith("0x"):
|
||||
let pubkey = ? ValidatorPubkey.fromHex(item)
|
||||
let pubkey = ? ValidatorPubKey.fromHex(item)
|
||||
res.keyset.incl(pubkey)
|
||||
else:
|
||||
var tmp: uint64
|
||||
|
|
|
@ -170,7 +170,7 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
var peers = newJArray()
|
||||
let backoff = node.network.pubsub.backingOff.getOrDefault(topic)
|
||||
for peer in v:
|
||||
peers.add(peer.toNode(backOff.getOrDefault(peer.peerId)))
|
||||
peers.add(peer.toNode(backoff.getOrDefault(peer.peerId)))
|
||||
|
||||
gossipsub.add(topic, peers)
|
||||
|
||||
|
@ -181,7 +181,7 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
var peers = newJArray()
|
||||
let backoff = node.network.pubsub.backingOff.getOrDefault(topic)
|
||||
for peer in v:
|
||||
peers.add(peer.toNode(backOff.getOrDefault(peer.peerId)))
|
||||
peers.add(peer.toNode(backoff.getOrDefault(peer.peerId)))
|
||||
|
||||
mesh.add(topic, peers)
|
||||
|
||||
|
|
|
@ -128,7 +128,7 @@ proc getDiscoveryAddresses(node: BeaconNode): Option[seq[string]] =
|
|||
if respa.isErr():
|
||||
return none[seq[string]]()
|
||||
let pa = respa.get()
|
||||
let mpa = MultiAddress.init(multicodec("p2p"), pa.peerId)
|
||||
let mpa = MultiAddress.init(multiCodec("p2p"), pa.peerId)
|
||||
if mpa.isErr():
|
||||
return none[seq[string]]()
|
||||
var addresses = newSeqOfCap[string](len(pa.addrs))
|
||||
|
@ -140,7 +140,7 @@ proc getDiscoveryAddresses(node: BeaconNode): Option[seq[string]] =
|
|||
|
||||
proc getP2PAddresses(node: BeaconNode): Option[seq[string]] =
|
||||
let pinfo = node.network.switch.peerInfo
|
||||
let mpa = MultiAddress.init(multicodec("p2p"), pinfo.peerId)
|
||||
let mpa = MultiAddress.init(multiCodec("p2p"), pinfo.peerId)
|
||||
if mpa.isErr():
|
||||
return none[seq[string]]()
|
||||
var addresses = newSeqOfCap[string](len(pinfo.addrs))
|
||||
|
@ -171,7 +171,7 @@ proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
|
||||
return (
|
||||
peer_id: $node.network.peerId(),
|
||||
enr: node.network.enrRecord().toUri(),
|
||||
enr: node.network.enrRecord().toURI(),
|
||||
p2p_addresses: p2pAddresses,
|
||||
discovery_addresses: discoveryAddresses,
|
||||
metadata: (node.network.metadata.seq_number,
|
||||
|
@ -193,7 +193,7 @@ proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
if (peer.connectionState in states) and (peer.direction in dirs):
|
||||
let resPeer = (
|
||||
peer_id: $peer.peerId,
|
||||
enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
|
||||
enr: if peer.enr.isSome(): peer.enr.get().toURI() else: "",
|
||||
last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
|
||||
state: peer.connectionState.toString(),
|
||||
direction: peer.direction.toString(),
|
||||
|
@ -221,7 +221,7 @@ proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
|
||||
rpcServer.rpc("get_v1_node_peers_peerId") do (
|
||||
peer_id: string) -> RpcNodePeer:
|
||||
let pres = PeerID.init(peer_id)
|
||||
let pres = PeerId.init(peer_id)
|
||||
if pres.isErr():
|
||||
raise newException(CatchableError,
|
||||
"The peer ID supplied could not be parsed")
|
||||
|
@ -232,7 +232,7 @@ proc installNodeApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
|
||||
return (
|
||||
peer_id: $peer.peerId,
|
||||
enr: if peer.enr.isSome(): peer.enr.get().toUri() else: "",
|
||||
enr: if peer.enr.isSome(): peer.enr.get().toURI() else: "",
|
||||
last_seen_p2p_address: getLastSeenAddress(node, peer.peerId),
|
||||
state: peer.connectionState.toString(),
|
||||
direction: peer.direction.toString(),
|
||||
|
|
|
@ -401,9 +401,9 @@ type
|
|||
# serialized. They're represented in memory to allow in-place SSZ reading
|
||||
# and writing compatibly with the full Validator object.
|
||||
|
||||
pubkey* {.dontserialize.}: ValidatorPubKey
|
||||
pubkey* {.dontSerialize.}: ValidatorPubKey
|
||||
|
||||
withdrawal_credentials* {.dontserialize.}: Eth2Digest ##\
|
||||
withdrawal_credentials* {.dontSerialize.}: Eth2Digest ##\
|
||||
## Commitment to pubkey for withdrawals and transfers
|
||||
|
||||
effective_balance*: uint64 ##\
|
||||
|
@ -908,7 +908,7 @@ func prune*(cache: var StateCache, epoch: Epoch) =
|
|||
block:
|
||||
for k in cache.shuffled_active_validator_indices.keys:
|
||||
if k < pruneEpoch:
|
||||
drops.add prune_epoch.start_slot
|
||||
drops.add pruneEpoch.start_slot
|
||||
for drop in drops:
|
||||
cache.shuffled_active_validator_indices.del drop.epoch
|
||||
drops.setLen(0)
|
||||
|
|
|
@ -37,14 +37,14 @@ proc getSignedToken*(key: openArray[byte], payload: string): string =
|
|||
|
||||
# https://datatracker.ietf.org/doc/html/rfc7515#appendix-A.1.1
|
||||
const jwsProtectedHeader =
|
||||
base64url_encode($ %* {"typ": "JWT", "alg": "HS256"}) & "."
|
||||
base64urlEncode($ %* {"typ": "JWT", "alg": "HS256"}) & "."
|
||||
# In theory, std/json might change how it encodes, and it doesn't per-se
|
||||
# matter but can also simply specify the base64-encoded form directly if
|
||||
# useful, since it's never checked here on its own.
|
||||
static: doAssert jwsProtectedHeader == "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9."
|
||||
let signingInput = jwsProtectedHeader & base64urlEncode(payload)
|
||||
|
||||
signingInput & "." & base64_urlencode(sha256.hmac(key, signingInput).data)
|
||||
signingInput & "." & base64urlEncode(sha256.hmac(key, signingInput).data)
|
||||
|
||||
proc getSignedIatToken*(key: openArray[byte], time: int64): string =
|
||||
getSignedToken(key, $getIatToken(time))
|
||||
|
|
|
@ -271,7 +271,7 @@ proc jsonError*(t: typedesc[RestApiResponse], status: HttpCode = Http200,
|
|||
|
||||
proc jsonError*(t: typedesc[RestApiResponse], status: HttpCode = Http200,
|
||||
msg: string = "",
|
||||
stacktraces: openarray[string]): RestApiResponse =
|
||||
stacktraces: openArray[string]): RestApiResponse =
|
||||
let data =
|
||||
block:
|
||||
var default: string
|
||||
|
@ -326,7 +326,7 @@ proc sszResponse*(t: typedesc[RestApiResponse], data: auto): RestApiResponse =
|
|||
default
|
||||
RestApiResponse.response(res, Http200, "application/octet-stream")
|
||||
|
||||
template hexOriginal(data: openarray[byte]): string =
|
||||
template hexOriginal(data: openArray[byte]): string =
|
||||
to0xHex(data)
|
||||
|
||||
proc decodeJsonString*[T](t: typedesc[T],
|
||||
|
@ -920,7 +920,7 @@ proc readValue*(reader: var JsonReader[RestJson],
|
|||
"RestPublishedBeaconBlock")
|
||||
proposer_index = some(reader.readValue(uint64))
|
||||
of "parent_root":
|
||||
if parentRoot.isSome():
|
||||
if parent_root.isSome():
|
||||
reader.raiseUnexpectedField("Multiple `parent_root` fields found",
|
||||
"RestPublishedBeaconBlock")
|
||||
parent_root = some(reader.readValue(Eth2Digest))
|
||||
|
@ -1941,7 +1941,7 @@ proc encodeBytes*[T: EncodeArrays](value: T,
|
|||
else:
|
||||
err("Content-Type not supported")
|
||||
|
||||
proc decodeBytes*[T: DecodeTypes](t: typedesc[T], value: openarray[byte],
|
||||
proc decodeBytes*[T: DecodeTypes](t: typedesc[T], value: openArray[byte],
|
||||
contentType: string): RestResult[T] =
|
||||
const isExtensibleType = t is ExtensibleDecodeTypes
|
||||
case contentType
|
||||
|
@ -1953,7 +1953,7 @@ proc decodeBytes*[T: DecodeTypes](t: typedesc[T], value: openarray[byte],
|
|||
else:
|
||||
err("Content-Type not supported")
|
||||
|
||||
proc decodeBytes*[T: SszDecodeTypes](t: typedesc[T], value: openarray[byte],
|
||||
proc decodeBytes*[T: SszDecodeTypes](t: typedesc[T], value: openArray[byte],
|
||||
contentType: string, updateRoot = true): RestResult[T] =
|
||||
case contentType
|
||||
of "application/octet-stream":
|
||||
|
@ -2066,7 +2066,7 @@ proc encodeString*(value: PeerDirectKind): Result[string, cstring] =
|
|||
of PeerDirectKind.Outbound:
|
||||
ok("outbound")
|
||||
|
||||
proc encodeString*(peerid: PeerID): Result[string, cstring] =
|
||||
proc encodeString*(peerid: PeerId): Result[string, cstring] =
|
||||
ok($peerid)
|
||||
|
||||
proc decodeString*(t: typedesc[EventTopic],
|
||||
|
@ -2211,9 +2211,9 @@ proc decodeString*(t: typedesc[ValidatorIdent],
|
|||
ok(ValidatorIdent(kind: ValidatorQueryKind.Index,
|
||||
index: RestValidatorIndex(res)))
|
||||
|
||||
proc decodeString*(t: typedesc[PeerID],
|
||||
value: string): Result[PeerID, cstring] =
|
||||
PeerID.init(value)
|
||||
proc decodeString*(t: typedesc[PeerId],
|
||||
value: string): Result[PeerId, cstring] =
|
||||
PeerId.init(value)
|
||||
|
||||
proc decodeString*(t: typedesc[CommitteeIndex],
|
||||
value: string): Result[CommitteeIndex, cstring] =
|
||||
|
|
|
@ -435,8 +435,7 @@ type
|
|||
|
||||
Web3SignerForkInfo* = object
|
||||
fork*: Fork
|
||||
genesisValidatorsRoot* {.
|
||||
serializedFieldName: "genesis_validators_root".}: Eth2Digest
|
||||
genesis_validators_root*: Eth2Digest
|
||||
|
||||
Web3SignerAggregationSlotData* = object
|
||||
slot*: Slot
|
||||
|
@ -593,65 +592,65 @@ func init*(t: typedesc[RestValidatorBalance], index: ValidatorIndex,
|
|||
RestValidatorBalance(index: index, balance: Base10.toString(balance))
|
||||
|
||||
func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest, data: Slot,
|
||||
genesis_validators_root: Eth2Digest, data: Slot,
|
||||
signingRoot: Option[Eth2Digest] = none[Eth2Digest]()
|
||||
): Web3SignerRequest =
|
||||
Web3SignerRequest(
|
||||
kind: Web3SignerRequestKind.AggregationSlot,
|
||||
forkInfo: some(Web3SignerForkInfo(
|
||||
fork: fork, genesisValidatorsRoot: genesisValidatorsRoot
|
||||
fork: fork, genesis_validators_root: genesis_validators_root
|
||||
)),
|
||||
signingRoot: signingRoot,
|
||||
aggregationSlot: Web3SignerAggregationSlotData(slot: data)
|
||||
)
|
||||
|
||||
func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest, data: AggregateAndProof,
|
||||
genesis_validators_root: Eth2Digest, data: AggregateAndProof,
|
||||
signingRoot: Option[Eth2Digest] = none[Eth2Digest]()
|
||||
): Web3SignerRequest =
|
||||
Web3SignerRequest(
|
||||
kind: Web3SignerRequestKind.AggregateAndProof,
|
||||
forkInfo: some(Web3SignerForkInfo(
|
||||
fork: fork, genesisValidatorsRoot: genesisValidatorsRoot
|
||||
fork: fork, genesis_validators_root: genesis_validators_root
|
||||
)),
|
||||
signingRoot: signingRoot,
|
||||
aggregateAndProof: data
|
||||
)
|
||||
|
||||
func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest, data: AttestationData,
|
||||
genesis_validators_root: Eth2Digest, data: AttestationData,
|
||||
signingRoot: Option[Eth2Digest] = none[Eth2Digest]()
|
||||
): Web3SignerRequest =
|
||||
Web3SignerRequest(
|
||||
kind: Web3SignerRequestKind.Attestation,
|
||||
forkInfo: some(Web3SignerForkInfo(
|
||||
fork: fork, genesisValidatorsRoot: genesisValidatorsRoot
|
||||
fork: fork, genesis_validators_root: genesis_validators_root
|
||||
)),
|
||||
signingRoot: signingRoot,
|
||||
attestation: data
|
||||
)
|
||||
|
||||
func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest, data: phase0.BeaconBlock,
|
||||
genesis_validators_root: Eth2Digest, data: phase0.BeaconBlock,
|
||||
signingRoot: Option[Eth2Digest] = none[Eth2Digest]()
|
||||
): Web3SignerRequest =
|
||||
Web3SignerRequest(
|
||||
kind: Web3SignerRequestKind.Block,
|
||||
forkInfo: some(Web3SignerForkInfo(
|
||||
fork: fork, genesisValidatorsRoot: genesisValidatorsRoot
|
||||
fork: fork, genesis_validators_root: genesis_validators_root
|
||||
)),
|
||||
signingRoot: signingRoot,
|
||||
blck: data
|
||||
)
|
||||
|
||||
func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest, data: ForkedBeaconBlock,
|
||||
genesis_validators_root: Eth2Digest, data: ForkedBeaconBlock,
|
||||
signingRoot: Option[Eth2Digest] = none[Eth2Digest]()
|
||||
): Web3SignerRequest =
|
||||
Web3SignerRequest(
|
||||
kind: Web3SignerRequestKind.BlockV2,
|
||||
forkInfo: some(Web3SignerForkInfo(
|
||||
fork: fork, genesisValidatorsRoot: genesisValidatorsRoot
|
||||
fork: fork, genesis_validators_root: genesis_validators_root
|
||||
)),
|
||||
signingRoot: signingRoot,
|
||||
beaconBlock: data
|
||||
|
@ -673,39 +672,39 @@ func init*(t: typedesc[Web3SignerRequest], genesisForkVersion: Version,
|
|||
)
|
||||
|
||||
func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest, data: Epoch,
|
||||
genesis_validators_root: Eth2Digest, data: Epoch,
|
||||
signingRoot: Option[Eth2Digest] = none[Eth2Digest]()
|
||||
): Web3SignerRequest =
|
||||
Web3SignerRequest(
|
||||
kind: Web3SignerRequestKind.RandaoReveal,
|
||||
forkInfo: some(Web3SignerForkInfo(
|
||||
fork: fork, genesisValidatorsRoot: genesisValidatorsRoot
|
||||
fork: fork, genesis_validators_root: genesis_validators_root
|
||||
)),
|
||||
signingRoot: signingRoot,
|
||||
randaoReveal: Web3SignerRandaoRevealData(epoch: data)
|
||||
)
|
||||
|
||||
func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest, data: VoluntaryExit,
|
||||
genesis_validators_root: Eth2Digest, data: VoluntaryExit,
|
||||
signingRoot: Option[Eth2Digest] = none[Eth2Digest]()
|
||||
): Web3SignerRequest =
|
||||
Web3SignerRequest(
|
||||
kind: Web3SignerRequestKind.VoluntaryExit,
|
||||
forkInfo: some(Web3SignerForkInfo(
|
||||
fork: fork, genesisValidatorsRoot: genesisValidatorsRoot
|
||||
fork: fork, genesis_validators_root: genesis_validators_root
|
||||
)),
|
||||
signingRoot: signingRoot,
|
||||
voluntaryExit: data
|
||||
)
|
||||
|
||||
func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest, blockRoot: Eth2Digest,
|
||||
genesis_validators_root: Eth2Digest, blockRoot: Eth2Digest,
|
||||
slot: Slot, signingRoot: Option[Eth2Digest] = none[Eth2Digest]()
|
||||
): Web3SignerRequest =
|
||||
Web3SignerRequest(
|
||||
kind: Web3SignerRequestKind.SyncCommitteeMessage,
|
||||
forkInfo: some(Web3SignerForkInfo(
|
||||
fork: fork, genesisValidatorsRoot: genesisValidatorsRoot
|
||||
fork: fork, genesis_validators_root: genesis_validators_root
|
||||
)),
|
||||
signingRoot: signingRoot,
|
||||
syncCommitteeMessage: Web3SignerSyncCommitteeMessageData(
|
||||
|
@ -714,28 +713,28 @@ func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
|||
)
|
||||
|
||||
func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest,
|
||||
genesis_validators_root: Eth2Digest,
|
||||
data: SyncAggregatorSelectionData,
|
||||
signingRoot: Option[Eth2Digest] = none[Eth2Digest]()
|
||||
): Web3SignerRequest =
|
||||
Web3SignerRequest(
|
||||
kind: Web3SignerRequestKind.SyncCommitteeSelectionProof,
|
||||
forkInfo: some(Web3SignerForkInfo(
|
||||
fork: fork, genesisValidatorsRoot: genesisValidatorsRoot
|
||||
fork: fork, genesis_validators_root: genesis_validators_root
|
||||
)),
|
||||
signingRoot: signingRoot,
|
||||
syncAggregatorSelectionData: data
|
||||
)
|
||||
|
||||
func init*(t: typedesc[Web3SignerRequest], fork: Fork,
|
||||
genesisValidatorsRoot: Eth2Digest,
|
||||
genesis_validators_root: Eth2Digest,
|
||||
data: ContributionAndProof,
|
||||
signingRoot: Option[Eth2Digest] = none[Eth2Digest]()
|
||||
): Web3SignerRequest =
|
||||
Web3SignerRequest(
|
||||
kind: Web3SignerRequestKind.SyncCommitteeContributionAndProof,
|
||||
forkInfo: some(Web3SignerForkInfo(
|
||||
fork: fork, genesisValidatorsRoot: genesisValidatorsRoot
|
||||
fork: fork, genesis_validators_root: genesis_validators_root
|
||||
)),
|
||||
signingRoot: signingRoot,
|
||||
syncCommitteeContributionAndProof: data
|
||||
|
|
|
@ -566,16 +566,16 @@ func compute_fork_digest*(current_version: Version,
|
|||
|
||||
func init*(T: type ForkDigests,
|
||||
cfg: RuntimeConfig,
|
||||
genesisValidatorsRoot: Eth2Digest): T =
|
||||
genesis_validators_root: Eth2Digest): T =
|
||||
T(
|
||||
phase0:
|
||||
compute_fork_digest(cfg.GENESIS_FORK_VERSION, genesisValidatorsRoot),
|
||||
compute_fork_digest(cfg.GENESIS_FORK_VERSION, genesis_validators_root),
|
||||
altair:
|
||||
compute_fork_digest(cfg.ALTAIR_FORK_VERSION, genesisValidatorsRoot),
|
||||
compute_fork_digest(cfg.ALTAIR_FORK_VERSION, genesis_validators_root),
|
||||
bellatrix:
|
||||
compute_fork_digest(cfg.BELLATRIX_FORK_VERSION, genesisValidatorsRoot),
|
||||
compute_fork_digest(cfg.BELLATRIX_FORK_VERSION, genesis_validators_root),
|
||||
sharding:
|
||||
compute_fork_digest(cfg.SHARDING_FORK_VERSION, genesisValidatorsRoot),
|
||||
compute_fork_digest(cfg.SHARDING_FORK_VERSION, genesis_validators_root),
|
||||
)
|
||||
|
||||
func toBlockId*(blck: SomeForkySignedBeaconBlock): BlockId =
|
||||
|
|
|
@ -29,7 +29,7 @@ import nimcrypto/utils as ncrutils
|
|||
export
|
||||
results, burnMem, writeValue, readValue
|
||||
|
||||
{.localPassC: "-fno-lto".} # no LTO for crypto
|
||||
{.localPassc: "-fno-lto".} # no LTO for crypto
|
||||
|
||||
type
|
||||
KeystoreMode* = enum
|
||||
|
@ -61,7 +61,7 @@ type
|
|||
|
||||
Cipher* = object
|
||||
case function*: CipherFunctionKind
|
||||
of aes128ctrCipher:
|
||||
of aes128CtrCipher:
|
||||
params*: Aes128CtrParams
|
||||
message*: CipherBytes
|
||||
|
||||
|
@ -293,7 +293,7 @@ static:
|
|||
|
||||
func validateKeyPath*(path: string): Result[KeyPath, cstring] =
|
||||
var digitCount: int
|
||||
var number: BiggestUint
|
||||
var number: BiggestUInt
|
||||
try:
|
||||
for elem in path.string.split("/"):
|
||||
# TODO: doesn't "m" have to be the first character and is it the only
|
||||
|
@@ -675,7 +675,7 @@ func scrypt(password: openArray[char], salt: openArray[byte],

discard scrypt(password, salt, N, r, p, xyv, b, result)

func areValid(params: Pbkdf2Params): bool =
if params.c == 0 or params.dkLen < 32 or params.salt.bytes.len == 0:
if params.c == 0 or params.dklen < 32 or params.salt.bytes.len == 0:
return false

# https://www.ietf.org/rfc/rfc2898.txt

@@ -875,7 +875,7 @@ proc createNetKeystore*(kdfKind: KdfKind,

proc createKeystore*(kdfKind: KdfKind,
rng: var BrHmacDrbgContext,
privKey: ValidatorPrivkey,
privKey: ValidatorPrivKey,
password = KeystorePass.init "",
path = KeyPath "",
description = "",
@@ -193,7 +193,7 @@ func getSyncSubnets*(

syncCommittee: SyncCommittee): SyncnetBits =
var res: SyncnetBits
for i, pubkey in syncCommittee.pubkeys:
if not nodeHasPubKey(pubkey):
if not nodeHasPubkey(pubkey):
continue

# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/altair/validator.md#broadcast-sync-committee-message
@@ -49,7 +49,7 @@ func get_slot_signature*(

let signing_root = compute_slot_signing_root(
fork, genesis_validators_root, slot)

blsSign(privKey, signing_root.data)
blsSign(privkey, signing_root.data)

proc verify_slot_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,

@@ -73,7 +73,7 @@ func get_epoch_signature*(

let signing_root = compute_epoch_signing_root(
fork, genesis_validators_root, epoch)

blsSign(privKey, signing_root.data)
blsSign(privkey, signing_root.data)

proc verify_epoch_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,

@@ -100,7 +100,7 @@ func get_block_signature*(

let signing_root = compute_block_signing_root(
fork, genesis_validators_root, slot, root)

blsSign(privKey, signing_root.data)
blsSign(privkey, signing_root.data)

proc verify_block_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,

@@ -125,11 +125,11 @@ func compute_aggregate_and_proof_signing_root*(

# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/validator.md#broadcast-aggregate
func get_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth2Digest,
aggregate_and_proof: AggregateAndProof,
privKey: ValidatorPrivKey): CookedSig =
privkey: ValidatorPrivKey): CookedSig =
let signing_root = compute_aggregate_and_proof_signing_root(
fork, genesis_validators_root, aggregate_and_proof)

blsSign(privKey, signing_root.data)
blsSign(privkey, signing_root.data)

proc verify_aggregate_and_proof_signature*(
fork: Fork, genesis_validators_root: Eth2Digest,

@@ -158,7 +158,7 @@ func get_attestation_signature*(

let signing_root = compute_attestation_signing_root(
fork, genesis_validators_root, attestation_data)

blsSign(privKey, signing_root.data)
blsSign(privkey, signing_root.data)

proc verify_attestation_signature*(
fork: Fork, genesis_validators_root: Eth2Digest,

@@ -185,7 +185,7 @@ func get_deposit_signature*(preset: RuntimeConfig,

let signing_root = compute_deposit_signing_root(
preset.GENESIS_FORK_VERSION, deposit.getDepositMessage())

blsSign(privKey, signing_root.data)
blsSign(privkey, signing_root.data)

func get_deposit_signature*(message: DepositMessage, version: Version,
privkey: ValidatorPrivKey): CookedSig =

@@ -218,7 +218,7 @@ func get_voluntary_exit_signature*(

let signing_root = compute_voluntary_exit_signing_root(
fork, genesis_validators_root, voluntary_exit)

blsSign(privKey, signing_root.data)
blsSign(privkey, signing_root.data)

proc verify_voluntary_exit_signature*(
fork: Fork, genesis_validators_root: Eth2Digest,
@@ -25,7 +25,7 @@ import

export results, altair, phase0, taskpools, bearssl, signatures

type
TaskPoolPtr* = TaskPool
TaskPoolPtr* = Taskpool

BatchVerifier* = object
sigVerifCache*: BatchedBLSVerifierCache ##\
@@ -340,7 +340,7 @@ proc makeBeaconBlock*(

var blck = partialBeaconBlock(cfg, state, proposer_index,
randao_reveal, eth1_data, graffiti, attestations, deposits,
exits, sync_aggregate, executionPayload)
exits, sync_aggregate, execution_payload)

let res = process_block(cfg, state.data, blck, {skipBlsValidation}, cache)
@@ -572,7 +572,7 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =

)

# Update status string
man.syncStatus = timeLeft.toTimeLeftString() & " (" &
man.syncStatus = timeleft.toTimeLeftString() & " (" &
(done * 100).formatBiggestFloat(ffDecimal, 2) & "%) " &
man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4) &
"slots/s (" & map & ":" & currentSlot & ")"

@@ -586,9 +586,9 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],

else:
some(sq.readyQueue.pop())
of SyncQueueKind.Backward:
let maxSlot = sq.readyQueue[0].request.slot +
let maxslot = sq.readyQueue[0].request.slot +
(sq.readyQueue[0].request.count - 1'u64)
if sq.outSlot > maxSlot:
if sq.outSlot > maxslot:
none[SyncResult[T]]()
else:
some(sq.readyQueue.pop())

@@ -820,10 +820,10 @@ proc pop*[T](sq: SyncQueue[T], maxslot: Slot, item: T): SyncRequest[T] =

## Create new request according to current SyncQueue parameters.
sq.handlePotentialSafeSlotAdvancement()
while len(sq.debtsQueue) > 0:
if maxSlot < sq.debtsQueue[0].slot:
if maxslot < sq.debtsQueue[0].slot:
# Peer's latest slot is less than starting request's slot.
return SyncRequest.empty(sq.kind, T)
if maxSlot < sq.debtsQueue[0].lastSlot():
if maxslot < sq.debtsQueue[0].lastSlot():
# Peer's latest slot is less than finishing request's slot.
return SyncRequest.empty(sq.kind, T)
var sr = sq.debtsQueue.pop()

@@ -837,7 +837,7 @@ proc pop*[T](sq: SyncQueue[T], maxslot: Slot, item: T): SyncRequest[T] =

case sq.kind
of SyncQueueKind.Forward:
if maxSlot < sq.inpSlot:
if maxslot < sq.inpSlot:
# Peer's latest slot is less than queue's input slot.
return SyncRequest.empty(sq.kind, T)
if sq.inpSlot > sq.finalSlot:

@@ -862,7 +862,7 @@ proc pop*[T](sq: SyncQueue[T], maxslot: Slot, item: T): SyncRequest[T] =

(baseSlot - count, count)
else:
(baseSlot - sq.chunkSize, sq.chunkSize)
if (maxSlot + 1'u64) < slot + count:
if (maxslot + 1'u64) < slot + count:
# Peer's latest slot is less than queue's input slot.
return SyncRequest.empty(sq.kind, T)
var sr = SyncRequest.init(sq.kind, slot, count, item)
@@ -108,7 +108,7 @@ proc publishBlock(vc: ValidatorClientRef, currentSlot, slot: Slot,

error_name = exc.name, error_msg = exc.msg

proc proposeBlock(vc: ValidatorClientRef, slot: Slot,
proposerKey: ValidatorPubkey) {.async.} =
proposerKey: ValidatorPubKey) {.async.} =
let (inFuture, timeToSleep) = vc.beaconClock.fromNow(slot)
try:
if inFuture:

@@ -156,7 +156,7 @@ proc init*(t: typedesc[DutyAndProof], epoch: Epoch, dependentRoot: Eth2Digest,

slotSig: slotSig)

proc init*(t: typedesc[ProposedData], epoch: Epoch, dependentRoot: Eth2Digest,
data: openarray[ProposerTask]): ProposedData =
data: openArray[ProposerTask]): ProposedData =
ProposedData(epoch: epoch, dependentRoot: dependentRoot, duties: @data)

proc getCurrentSlot*(vc: ValidatorClientRef): Option[Slot] =

@@ -231,7 +231,7 @@ proc getDelay*(vc: ValidatorClientRef, deadline: BeaconTime): TimeDiff =

vc.beaconClock.now() - deadline

proc getValidator*(vc: ValidatorClientRef,
key: ValidatorPubkey): Option[AttachedValidator] =
key: ValidatorPubKey): Option[AttachedValidator] =
let validator = vc.attachedValidators.getValidator(key)
if isNil(validator):
warn "Validator not in pool anymore", validator = shortLog(validator)
@@ -4,7 +4,7 @@ import common, api

logScope: service = "fork_service"

proc validateForkSchedule(forks: openarray[Fork]): bool {.raises: [Defect].} =
proc validateForkSchedule(forks: openArray[Fork]): bool {.raises: [Defect].} =
# Check if `forks` list is linked list.
var current_version = forks[0].current_version
for index, item in forks.pairs():

@@ -17,7 +17,7 @@ proc validateForkSchedule(forks: openarray[Fork]): bool {.raises: [Defect].} =

current_version = item.current_version
true

proc sortForks(forks: openarray[Fork]): Result[seq[Fork], cstring] {.
proc sortForks(forks: openArray[Fork]): Result[seq[Fork], cstring] {.
raises: [Defect].} =
proc cmp(x, y: Fork): int {.closure.} =
if uint64(x.epoch) == uint64(y.epoch): return 0
@@ -26,7 +26,7 @@ export

when defined(windows):
import stew/[windows/acl]

{.localPassC: "-fno-lto".} # no LTO for crypto
{.localPassc: "-fno-lto".} # no LTO for crypto

const
KeystoreFileName* = "keystore.json"

@@ -415,7 +415,7 @@ proc loadKeystoreImpl(validatorsDir, secretsDir, keyName: string,

let passphrasePath = secretsDir / keyName
if fileExists(passphrasePath):
if not(checkSensitiveFilePermissions(passphrasePath)):
error "Password file has insecure permissions", key_path = keyStorePath
error "Password file has insecure permissions", key_path = keystorePath
return

let passphrase =
@@ -636,12 +636,12 @@ proc mapErrTo*[T, E](r: Result[T, E], v: static KeystoreGenerationErrorKind):

r.mapErr(proc (e: E): KeystoreGenerationError =
KeystoreGenerationError(kind: v, error: $e))

proc loadNetKeystore*(keyStorePath: string,
proc loadNetKeystore*(keystorePath: string,
insecurePwd: Option[string]): Option[lcrypto.PrivateKey] =

if not(checkSensitiveFilePermissions(keystorePath)):
error "Network keystorage file has insecure permissions",
key_path = keyStorePath
key_path = keystorePath
return

let keyStore =

@@ -657,18 +657,18 @@ proc loadNetKeystore*(keyStorePath: string,

if insecurePwd.isSome():
warn "Using insecure password to unlock networking key"
let decrypted = decryptNetKeystore(keystore,
let decrypted = decryptNetKeystore(keyStore,
KeystorePass.init(insecurePwd.get()))
if decrypted.isOk:
return some(decrypted.get())
else:
error "Network keystore decryption failed", key_store = keyStorePath
error "Network keystore decryption failed", key_store = keystorePath
return
else:
let prompt = "Please enter passphrase to unlock networking key: "
let res = keyboardGetPassword[lcrypto.PrivateKey](prompt, 3,
proc (password: string): KsResult[lcrypto.PrivateKey] =
let decrypted = decryptNetKeystore(keystore, KeystorePass.init password)
let decrypted = decryptNetKeystore(keyStore, KeystorePass.init password)
if decrypted.isErr():
error "Keystore decryption failed. Please try again", keystorePath
decrypted

@@ -678,13 +678,13 @@ proc loadNetKeystore*(keyStorePath: string,

else:
return

proc saveNetKeystore*(rng: var BrHmacDrbgContext, keyStorePath: string,
proc saveNetKeystore*(rng: var BrHmacDrbgContext, keystorePath: string,
netKey: lcrypto.PrivateKey, insecurePwd: Option[string]
): Result[void, KeystoreGenerationError] =
let password =
if insecurePwd.isSome():
warn "Using insecure password to lock networking key",
key_path = keyStorePath
key_path = keystorePath
insecurePwd.get()
else:
let prompt = "Please enter NEW password to lock network key storage: "

@@ -698,16 +698,16 @@ proc saveNetKeystore*(rng: var BrHmacDrbgContext, keyStorePath: string,

try:
encodedStorage = Json.encode(keyStore)
except SerializationError as exc:
error "Could not serialize network key storage", key_path = keyStorePath
error "Could not serialize network key storage", key_path = keystorePath
return err(KeystoreGenerationError(
kind: FailedToCreateKeystoreFile, error: exc.msg))

let res = secureWriteFile(keyStorePath, encodedStorage)
let res = secureWriteFile(keystorePath, encodedStorage)
if res.isOk():
ok()
else:
error "Could not write to network key storage file",
key_path = keyStorePath
key_path = keystorePath
res.mapErrTo(FailedToCreateKeystoreFile)

proc createValidatorFiles*(secretsDir, validatorsDir, keystoreDir, secretFile,
@@ -90,7 +90,7 @@ proc init*(

if requiresMigration:
fatal "The slashing database predates Altair hardfork from October 2021." &
" You can migrate to the new DB format using Nimbus 1.6.0" &
" You can migrate to the new DB format using Nimbus 1.6.0" &
" for a few minutes at https://github.com/status-im/nimbus-eth2/releases/tag/v1.6.0" &
" until the messages \"Migrating local validators slashing DB from v1 to v2\"" &
" and \"Slashing DB migration successful.\""

@@ -206,7 +206,7 @@ proc registerAttestation*(

proc pruneBlocks*(
db: SlashingProtectionDB,
validator: ValidatorPubkey,
validator: ValidatorPubKey,
newMinSlot: Slot) =
## Prune all blocks from a validator before the specified newMinSlot
## This is intended for interchange import to ensure

@@ -220,7 +220,7 @@ proc pruneBlocks*(

proc pruneAttestations*(
db: SlashingProtectionDB,
validator: ValidatorPubkey,
validator: ValidatorPubKey,
newMinSourceEpoch: int64,
newMinTargetEpoch: int64) =
## Prune all blocks from a validator before the specified newMinSlot
@@ -225,7 +225,7 @@ proc writeValue*(w: var JsonWriter, a: SlotString or EpochString)

proc readValue*(r: var JsonReader, a: var (SlotString or EpochString))
{.raises: [SerializationError, IOError, Defect].} =
try:
a = (typeof a)(r.readValue(string).parseBiggestUint())
a = (typeof a)(r.readValue(string).parseBiggestUInt())
except ValueError:
raiseUnexpectedValue(r, "Integer in a string expected")
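For context on the round-trip above (a standalone sketch, not part of this diff): the slashing-protection interchange format (EIP-3076) encodes slots and epochs as decimal strings, so they are read back with parseBiggestUInt, whose canonical spelling this hunk fixes.

import std/strutils

# decode a string-encoded slot the way the reader above does
let raw = "123456"                # illustrative value, not from the diff
let slot = parseBiggestUInt(raw)  # returns BiggestUInt, raises ValueError on bad input
doAssert slot == 123456'u64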
@@ -361,7 +361,7 @@ proc importInterchangeV5Impl*(

# (the last before the earliest) the minEpochViolation check stays consistent.
var maxValidSourceEpochSeen = -1
var maxValidTargetEpochSeen = -1

if dbSource.isSome():
maxValidSourceEpochSeen = int dbSource.get()
if dbTarget.isSome():

@@ -384,7 +384,7 @@ proc importInterchangeV5Impl*(

# See formal proof https://github.com/michaelsproul/slashing-proofs
# of synthetic attestation
if not(maxValidSourceEpochSeen < maxValidTargetEpochSeen) and
if not(maxValidSourceEpochSeen < maxValidTargetEpochSeen) and
not(maxValidSourceEpochSeen == 0 and maxValidTargetEpochSeen == 0):
# Special-case genesis (Slashing prot is deactivated anyway)
warn "Invalid attestation(s), source epochs should be less than target epochs, skipping import",

@@ -393,7 +393,7 @@ proc importInterchangeV5Impl*(

maxValidTargetEpochSeen = maxValidTargetEpochSeen
result = siPartial
continue

db.registerSyntheticAttestation(
parsedKey,
Epoch maxValidSourceEpochSeen,
@@ -664,7 +664,7 @@ proc initCompatV1*(T: type SlashingProtectionDB_v2,

## or load an existing one with matching genesis root
## `dbname` MUST NOT end with .sqlite3

let alreadyExists = fileExists(basepath/dbname&".sqlite3")
let alreadyExists = fileExists(basePath/dbname&".sqlite3")

result.db = T(backend: SqStoreRef.init(
basePath, dbname,

@@ -698,7 +698,7 @@ proc init*(T: type SlashingProtectionDB_v2,

## or load an existing one with matching genesis root
## `dbname` MUST NOT end with .sqlite3

let alreadyExists = fileExists(basepath/dbname&".sqlite3")
let alreadyExists = fileExists(basePath/dbname&".sqlite3")

result = T(backend: SqStoreRef.init(basePath, dbname, keyspaces = []).get())
if alreadyExists:

@@ -718,7 +718,7 @@ proc loadUnchecked*(

## this doesn't check the genesis validator root
##
## Privacy: This leaks user folder hierarchy in case the file does not exist
let path = basepath/dbname&".sqlite3"
let path = basePath/dbname&".sqlite3"
let alreadyExists = fileExists(path)
if not alreadyExists:
raise newException(IOError, "DB '" & path & "' does not exist.")
@@ -734,7 +734,7 @@ proc close*(db: SlashingProtectionDB_v2) =

# DB Queries
# -------------------------------------------------------------

proc foundAnyResult(status: KVResult[bool]): bool {.inline.}=
proc foundAnyResult(status: KvResult[bool]): bool {.inline.}=
## Checks a DB query status for errors
## Then returns true if any result was found
## and false otherwise.

@@ -833,7 +833,7 @@ proc checkSlashableBlockProposalDoubleProposal(

# ---------------------------------
block:
# Condition 1 at https://eips.ethereum.org/EIPS/eip-3076
var root: ETH2Digest
var root: Eth2Digest
let status = db.sqlBlockForSameSlot.exec(
(valID, int64 slot)
) do (res: Hash32):

@@ -902,7 +902,7 @@ proc checkSlashableAttestationDoubleVote(

# ---------------------------------
block:
# Condition 3 part 1/3 at https://eips.ethereum.org/EIPS/eip-3076
var root: ETH2Digest
var root: Eth2Digest

# Overflows in 14 trillion years (minimal) or 112 trillion years (mainnet)
doAssert target <= high(int64).uint64

@@ -952,7 +952,7 @@ proc checkSlashableAttestationOther(

block:
# Condition 3 part 2/3 at https://eips.ethereum.org/EIPS/eip-3076
# Condition 3 part 3/3 at https://eips.ethereum.org/EIPS/eip-3076
var root: ETH2Digest
var root: Eth2Digest
var db_source, db_target: Epoch

# Overflows in 14 trillion years (minimal) or 112 trillion years (mainnet)
@@ -1167,7 +1167,7 @@ proc registerAttestation*(

proc pruneBlocks*(
db: SlashingProtectionDB_v2,
index: Option[ValidatorIndex],
validator: ValidatorPubkey, newMinSlot: Slot) =
validator: ValidatorPubKey, newMinSlot: Slot) =
## Prune all blocks from a validator before the specified newMinSlot
## This is intended for interchange import to ensure
## that in case of a gap, we don't allow signing in that gap.
@@ -1180,13 +1180,13 @@ proc pruneBlocks*(

proc pruneBlocks*(
db: SlashingProtectionDB_v2,
validator: ValidatorPubkey, newMinSlot: Slot) =
validator: ValidatorPubKey, newMinSlot: Slot) =
pruneBlocks(db, none(ValidatorIndex), validator, newMinSlot)

proc pruneAttestations*(
db: SlashingProtectionDB_v2,
index: Option[ValidatorIndex],
validator: ValidatorPubkey,
validator: ValidatorPubKey,
newMinSourceEpoch: int64,
newMinTargetEpoch: int64) =
## Prune all blocks from a validator before the specified newMinSlot

@@ -1205,7 +1205,7 @@ proc pruneAttestations*(

proc pruneAttestations*(
db: SlashingProtectionDB_v2,
validator: ValidatorPubkey,
validator: ValidatorPubKey,
newMinSourceEpoch: int64,
newMinTargetEpoch: int64) =
pruneAttestations(
@@ -1245,14 +1245,14 @@ proc pruneAfterFinalization*(

proc retrieveLatestValidatorData*(
db: SlashingProtectionDB_v2,
validator: ValidatorPubkey
validator: ValidatorPubKey
): tuple[
maxBlockSlot: Option[Slot],
maxBlockSlot: Option[Slot],
maxAttSourceEpoch: Option[Epoch],
maxAttTargetEpoch: Option[Epoch]] =

let valID = db.getOrRegisterValidator(none(ValidatorIndex), validator)

var slot, source, target: int64
let status = db.sqlMaxBlockAtt.exec(
valID
@@ -1283,7 +1283,7 @@ proc registerSyntheticAttestation*(

validator: ValidatorPubKey,
source, target: Epoch) =
## Add a synthetic attestation to the slashing protection DB

# Spec requires source < target (except genesis?); for a synthetic attestation for slashing protection we want max(source, target)
doAssert (source < target) or (source == Epoch(0) and target == Epoch(0))
@@ -1332,7 +1332,7 @@ proc toSPDIR*(db: SlashingProtectionDB_v2): SPDIR

# Can't capture var SPDIR in a closure
let genesis_validators_root {.byaddr.} = result.metadata.genesis_validators_root
let status = selectRootStmt.exec do (res: Hash32):
genesis_validators_root = Eth2Digest0x(ETH2Digest(data: res))
genesis_validators_root = Eth2Digest0x(Eth2Digest(data: res))
doAssert status.isOk()

selectRootStmt.dispose()

@@ -1430,7 +1430,7 @@ proc inclSPDIR*(db: SlashingProtectionDB_v2, spdir: SPDIR): SlashingImportStatus

# genesis_validators_root
# -----------------------------------------------------
block:
var dbGenValRoot: ETH2Digest
var dbGenValRoot: Eth2Digest

let selectRootStmt = db.backend.prepareStmt(
"SELECT genesis_validators_root FROM metadata;",
@@ -422,7 +422,7 @@ proc forkchoice_updated(state: bellatrix.BeaconState,

finalized_block_hash: Eth2Digest,
fee_recipient: ethtypes.Address,
execution_engine: Eth1Monitor):
Future[Option[bellatrix.PayloadId]] {.async.} =
Future[Option[bellatrix.PayloadID]] {.async.} =
const web3Timeout = 3.seconds

let

@@ -439,9 +439,9 @@ proc forkchoice_updated(state: bellatrix.BeaconState,

payloadId = forkchoiceResponse.payloadId

return if payloadId.isSome:
some(bellatrix.PayloadId(payloadId.get))
some(bellatrix.PayloadID(payloadId.get))
else:
none(bellatrix.PayloadId)
none(bellatrix.PayloadID)

proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
randao_reveal: ValidatorSig,
@@ -705,11 +705,11 @@ proc createAndSendSyncCommitteeMessage(node: BeaconNode,

try:
let
fork = node.dag.forkAtEpoch(slot.epoch)
genesisValidatorsRoot = node.dag.genesisValidatorsRoot
genesis_validators_root = node.dag.genesis_validators_root
msg =
block:
let res = await signSyncCommitteeMessage(validator, fork,
genesisValidatorsRoot,
genesis_validators_root,
slot, head.root)
if res.isErr():
error "Unable to sign committee message using remote signer",

@@ -781,7 +781,7 @@ proc signAndSendContribution(node: BeaconNode,

let res = await validator.sign(
msg, node.dag.forkAtEpoch(contribution.slot.epoch),
node.dag.genesisValidatorsRoot)
node.dag.genesis_validators_root)

if res.isErr():
error "Unable to sign sync committee contribution using remote signer",

@@ -801,7 +801,7 @@ proc handleSyncCommitteeContributions(node: BeaconNode,

# TODO Use a view type to avoid the copy
let
fork = node.dag.forkAtEpoch(slot.epoch)
genesisValidatorsRoot = node.dag.genesisValidatorsRoot
genesis_validators_root = node.dag.genesis_validators_root
syncCommittee = node.dag.syncCommitteeParticipants(slot + 1)

type

@@ -827,7 +827,7 @@ proc handleSyncCommitteeContributions(node: BeaconNode,

subcommitteeIdx: subcommitteeIdx)

selectionProofs.add validator.getSyncCommitteeSelectionProof(
fork, genesisValidatorsRoot, slot, subcommitteeIdx.asUInt64)
fork, genesis_validators_root, slot, subcommitteeIdx.asUInt64)

await allFutures(selectionProofs)
@@ -9,9 +9,9 @@ TODO

openArray is a parameter-only type that represents a (pointer, length) pair.
In other languages they are also known as slices, ranges, views, spans.

_The name openarray is inherited from Pascal, Oberon and Modula 2_
_The name openArray is inherited from Pascal, Oberon and Modula 2_

Arrays and sequences are implicitly converted to openarray.
Arrays and sequences are implicitly converted to openArray.

The compiler has a limited form of escape analysis to prevent capturing openarrays in closures
or returning them.
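To make the doc text above concrete (a minimal sketch, not part of this change): any array or seq can be passed where an openArray parameter is expected.

# both a fixed-size array and a seq convert implicitly to openArray[int]
func total(xs: openArray[int]): int =
  for x in xs:
    result += x

doAssert total([1, 2, 3]) == 6   # array
doAssert total(@[4, 5, 6]) == 15 # seq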
ncli/nim.cfg
@@ -1,2 +1,10 @@

hints:off
-u:metrics

-d:"libp2p_pki_schemes=secp256k1"

-d:chronosStrictException
--styleCheck:usages
--styleCheck:hint
--hint[XDeclaredButNotUsed]:off
--hint[ConvFromXtoItselfNotNeeded]:off
--hint[Processing]:off
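For context on the two styleCheck flags added above (an illustrative sketch, not a file in this commit): Nim treats identifiers that differ only in case and underscores after the first letter as the same symbol; --styleCheck:usages limits the check to usages whose spelling differs from the declaration, and --styleCheck:hint reports them as hints rather than errors.

# compile with: nim c --styleCheck:usages --styleCheck:hint example.nim
proc fetchBlockRoot(slot: int): int =
  slot * 2

# same symbol as fetchBlockRoot, but the inconsistent spelling
# triggers a style hint roughly like:
#   'fetch_block_root' should be: 'fetchBlockRoot'
echo fetch_block_root(1)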
@@ -265,6 +265,8 @@ proc getTestRules(conf: RestTesterConf): Result[seq[JsonNode], cstring] =

fatal "JSON processing error while reading rules file",
error_msg = exc.msg, filename = conf.rulesFilename
return err("Unable to parse json")
except Exception as exc:
raiseAssert exc.msg

let elems = node.getElems()
if len(elems) == 0:

@@ -727,13 +729,15 @@ proc validateHeaders(resp: HttpResponseHeader, expect: HeadersExpect): bool =

return false
true

proc jsonBody(body: openarray[byte]): Result[JsonNode, cstring] =
proc jsonBody(body: openArray[byte]): Result[JsonNode, cstring] =
var sbody = cast[string](@body)
let res =
try:
parseJson(sbody)
except CatchableError as exc:
return err("Unable to parse json")
except Exception as exc:
raiseAssert exc.msg
ok(res)

proc getPath(jobj: JsonNode, path: seq[string]): Result[JsonNode, cstring] =

@@ -784,7 +788,7 @@ proc structCmp(j1, j2: JsonNode, strict: bool): bool =

else:
true

proc validateBody(body: openarray[byte], expect: BodyExpect): bool =
proc validateBody(body: openArray[byte], expect: BodyExpect): bool =
if len(expect.items) == 0:
true
else:
@@ -150,7 +150,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

let
syncCommittee = @(dag.syncCommitteeParticipants(slot + 1))
genesis_validators_root = dag.genesisValidatorsRoot
genesis_validators_root = dag.genesis_validators_root
fork = dag.forkAtEpoch(slot.epoch)
messagesTime = slot.attestation_deadline()
contributionsTime = slot.sync_contribution_deadline()
@@ -1,2 +1 @@

-u:metrics
-d:"chronicles_sinks=json[file(block_sim.log)]"

@@ -0,0 +1,10 @@

-u:metrics

-d:"libp2p_pki_schemes=secp256k1"

-d:chronosStrictException
--styleCheck:usages
--styleCheck:hint
--hint[XDeclaredButNotUsed]:off
--hint[ConvFromXtoItselfNotNeeded]:off
--hint[Processing]:off

@@ -1 +0,0 @@

-u:metrics

@@ -1 +0,0 @@

-u:metrics
@@ -1,2 +1,9 @@

# Use only `secp256k1` public key cryptography as an identity in LibP2P.
-d:"libp2p_pki_schemes=secp256k1"

-d:chronosStrictException
--styleCheck:usages
--styleCheck:hint
--hint[XDeclaredButNotUsed]:off
--hint[ConvFromXtoItselfNotNeeded]:off
--hint[Processing]:off
@@ -14,7 +14,7 @@ proc new(T: type Eth2DiscoveryProtocol,

bindPort: Port, bindIp: ValidIpAddress,
enrFields: openArray[(string, seq[byte])] = [],
rng: ref BrHmacDrbgContext):
T {.raises: [Exception, Defect].} =
T {.raises: [CatchableError, Defect].} =

newProtocol(pk, enrIp, enrTcpPort, enrUdpPort, enrFields,
bindPort = bindPort, bindIp = bindIp, rng = rng)
@@ -78,11 +78,11 @@ proc contains*(keylist: openArray[KeystoreInfo], key: string): bool =

let pubkey = ValidatorPubKey.fromHex(key).tryGet()
contains(keylist, pubkey)

proc startSingleNodeNetwork =
proc startSingleNodeNetwork {.raises: [CatchableError, Defect].} =
let
rng = keys.newRng()
mnemonic = generateMnemonic(rng[])
seed = getSeed(mnemonic, KeyStorePass.init "")
seed = getSeed(mnemonic, KeystorePass.init "")
cfg = defaultRuntimeConfig

let vres = secureCreatePath(validatorsDir)

@@ -126,7 +126,7 @@ proc startSingleNodeNetwork =

fatal "Failed to create token file", err = deposits.error
quit 1

let createTestnetConf = BeaconNodeConf.load(cmdLine = mapIt([
let createTestnetConf = try: BeaconNodeConf.load(cmdLine = mapIt([
"--data-dir=" & dataDir,
"createTestnet",
"--total-validators=" & $simulationDepositsCount,

@@ -136,10 +136,12 @@ proc startSingleNodeNetwork =

"--netkey-file=network_key.json",
"--insecure-netkey-password=true",
"--genesis-offset=0"], it))
except Exception as exc: # TODO Fix confutils exceptions
raiseAssert exc.msg

doCreateTestnet(createTestnetConf, rng[])

let runNodeConf = BeaconNodeConf.load(cmdLine = mapIt([
let runNodeConf = try: BeaconNodeConf.load(cmdLine = mapIt([
"--tcp-port=49000",
"--udp-port=49000",
"--network=" & dataDir,

@@ -157,6 +159,8 @@ proc startSingleNodeNetwork =

"--serve-light-client-data=off",
"--import-light-client-data=none",
"--doppelganger-detection=off"], it))
except Exception as exc: # TODO fix confutils exceptions
raiseAssert exc.msg

let metadata = loadEth2NetworkMetadata(dataDir)
@@ -44,7 +44,7 @@ let

cfg = defaultRuntimeConfig
validatorDirRes = secureCreatePath(testValidatorsDir)

proc namesEqual(a, b: openarray[string]): bool =
proc namesEqual(a, b: openArray[string]): bool =
sorted(a) == sorted(b)

when not defined(windows):
@@ -82,7 +82,7 @@ suite "Light client" & preset():

serveLightClientData = true,
importLightClientData = ImportLightClientData.OnlyNew)
quarantine = newClone(Quarantine.init())
taskpool = TaskPool.new()
taskpool = Taskpool.new()
var verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)

test "Pre-Altair":

@@ -112,7 +112,7 @@ suite "Light client" & preset():

# Track trusted checkpoint for light client
let
genesis_validators_root = dag.genesisValidatorsRoot
genesis_validators_root = dag.genesis_validators_root
trusted_block_root = dag.head.root

# Advance to target slot
@@ -33,7 +33,7 @@ suite "Light client processor" & preset():

serveLightClientData = true,
importLightClientData = ImportLightClientData.OnlyNew)
quarantine = newClone(Quarantine.init())
taskpool = TaskPool.new()
taskpool = Taskpool.new()
var verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)

var cache: StateCache

@@ -56,7 +56,7 @@ suite "Light client processor" & preset():

addBlocks(SLOTS_PER_EPOCH, 0.75)
let
genesisValidatorsRoot = dag.genesisValidatorsRoot
genesis_validators_root = dag.genesis_validators_root
trustedBlockRoot = dag.head.root

const

@@ -89,7 +89,7 @@ suite "Light client processor" & preset():

let store = (ref Option[LightClientStore])()
var
processor = LightClientProcessor.new(
false, "", "", cfg, genesisValidatorsRoot, trustedBlockRoot,
false, "", "", cfg, genesis_validators_root, trustedBlockRoot,
store, getBeaconTime, didInitializeStore)
res: Result[void, BlockError]
@@ -7,7 +7,7 @@ import

../beacon_chain/consensus_object_pools/sync_committee_msg_pool,
./testblockutil

func aggregate(sigs: openarray[CookedSig]): CookedSig =
func aggregate(sigs: openArray[CookedSig]): CookedSig =
var agg {.noinit.}: AggregateSignature
agg.init sigs[0]
for i in 1 ..< sigs.len:

@@ -45,7 +45,7 @@ suite "Sync committee pool":

test "Aggregating votes":
let
fork = altairFork(defaultRuntimeConfig)
genesisValidatorsRoot = eth2digest(@[5.byte, 6, 7])
genesis_validators_root = eth2digest(@[5.byte, 6, 7])

privkey1 = MockPrivKeys[1.ValidatorIndex]
privkey2 = MockPrivKeys[2.ValidatorIndex]

@@ -64,13 +64,13 @@ suite "Sync committee pool":

subcommittee2 = SyncSubcommitteeIndex(1)

sig1 = get_sync_committee_message_signature(
fork, genesisValidatorsRoot, root1Slot, root1, privkey1)
fork, genesis_validators_root, root1Slot, root1, privkey1)
sig2 = get_sync_committee_message_signature(
fork, genesisValidatorsRoot, root2Slot, root2, privkey1)
fork, genesis_validators_root, root2Slot, root2, privkey1)
sig3 = get_sync_committee_message_signature(
fork, genesisValidatorsRoot, root3Slot, root3, privkey1)
fork, genesis_validators_root, root3Slot, root3, privkey1)
sig4 = get_sync_committee_message_signature(
fork, genesisValidatorsRoot, root3Slot, root2, privkey1)
fork, genesis_validators_root, root3Slot, root2, privkey1)

# Inserting sync committee messages
#
@@ -1 +1 @@

Subproject commit 0237e4e0e914fc19359c18a66406d33bc942775c
Subproject commit 71a30b926c2a66096a1925fd9b6e5a6ed1546d9b

@@ -1 +1 @@

Subproject commit 884e870048698a7dd17ee961f8f53b3c6f56c80a
Subproject commit 527b2bd3f44a47af03d3d3c940348e0cb8826652

@@ -1 +1 @@

Subproject commit a697e3585d583ab6b91a159ea7d023461002c927
Subproject commit b13d65940074ddf8abd1c3de00b6bcd6a32f994c