Merge branch 'unstable' of github.com:status-im/nim-beacon-chain into unstable
commit eb985b880c

CHANGELOG.md
@@ -8,6 +8,19 @@ TBD
* `getChronosFutures` is now `debug_getChronosFutures`


2021-03-10 v1.0.12
==================

This is a bugfix release correcting an error in the Prater testnet config
leading to incorrect Eth1 voting.


2021-03-10 v1.0.11
==================

This is a minor release adding support for connecting to the Prater testnet.


2021-03-10 v1.0.10
==================
@@ -161,9 +161,10 @@ type
subscribeSlot*: array[ATTESTATION_SUBNET_COUNT, Slot]
unsubscribeSlot*: array[ATTESTATION_SUBNET_COUNT, Slot]

# Used to track the next attestation slots, using an epoch-relative
# coordinate system. Defaults don't need initialization.
# Used to track the next attestation and proposal slots using an
# epoch-relative coordinate system. Doesn't need initialization.
attestingSlots*: array[2, uint32]
lastCalculatedAttestationEpoch*: Epoch
proposingSlots*: array[2, uint32]
lastCalculatedEpoch*: Epoch

func shortLog*(v: AttachedValidator): string = shortLog(v.pubKey)
@@ -273,6 +273,10 @@ type
desc: "Write SSZ dumps of blocks, attestations and states to data dir"
name: "dump" }: bool

directPeers* {.
desc: "The list of privileged, secure and known peers to connect to and maintain the connection to. This requires a non-random netkey-file. In the complete multiaddress format like: /ip4/<address>/tcp/<port>/p2p/<peerId-public-key>. Peering agreements are established out of band and must be reciprocal."
name: "direct-peer" .}: seq[string]

doppelgangerDetection* {.
defaultValue: true
desc: "Whether to detect whether another validator is running the same validator keys (default true)"
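For illustration only: a minimal sketch of pulling the peer-ID component out of a `--direct-peer` multiaddress string such as `/ip4/<address>/tcp/<port>/p2p/<peerId-public-key>`. The real option above goes through nim-libp2p's `MultiAddress`/`PeerID` types; this sketch, including the `peerIdFromDirectPeer` helper, is hypothetical and only splits the string.

```nim
import std/strutils

# Hypothetical helper: pull the peer ID out of a multiaddress string such as
# "/ip4/10.0.0.1/tcp/9000/p2p/16Uiu2HAm...". The real flag handling uses
# nim-libp2p's MultiAddress and PeerID; this only splits on '/'.
proc peerIdFromDirectPeer(s: string): string =
  let parts = s.split('/')
  # A well-formed entry ends in ".../p2p/<peerId>", so the ID is the last field.
  if parts.len >= 2 and parts[^2] == "p2p":
    parts[^1]
  else:
    ""

when isMainModule:
  echo peerIdFromDirectPeer("/ip4/10.0.0.1/tcp/9000/p2p/16Uiu2HAmExamplePeer")
```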
@@ -1,9 +1,10 @@
import
os, sequtils, strutils, options, json, terminal, random,
os, sequtils, strutils, options, json, terminal,
chronos, chronicles, confutils, stint, json_serialization,
../filepath,
../networking/network_metadata,
web3, web3/confutils_defs, eth/keys, stew/io2,
web3, web3/confutils_defs, eth/keys, eth/p2p/discoveryv5/random2,
stew/io2,
../spec/[datatypes, crypto, presets], ../ssz/merkleization,
../validators/keystore_management
@@ -260,7 +261,10 @@ proc main() {.async.} =

if cfg.maxDelay > 0.0:
delayGenerator = proc (): chronos.Duration =
chronos.milliseconds (rand(cfg.minDelay..cfg.maxDelay)*1000).int
let
minDelay = (cfg.minDelay*1000).int64
maxDelay = (cfg.maxDelay*1000).int64
chronos.milliseconds (rng[].rand(maxDelay - minDelay) + minDelay)

await sendDeposits(deposits, cfg.web3Url, cfg.privateKey,
cfg.depositContractAddress, delayGenerator)
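A self-contained sketch of the closure pattern used by `delayGenerator` above: pick a uniform delay between a lower and an upper bound, expressed in whole milliseconds. It uses `std/random` instead of the node's HMAC-DRBG `rng`, and the `makeDelayGenerator` name is made up for the example.

```nim
import std/random

type DelayGenerator = proc (): int64

# Build a closure yielding a uniform delay in [minDelaySec, maxDelaySec],
# expressed in whole milliseconds. std/random stands in for the CSPRNG
# (rng[]) used in the hunk above.
proc makeDelayGenerator(minDelaySec, maxDelaySec: float): DelayGenerator =
  let
    minMs = int64(minDelaySec * 1000)
    maxMs = int64(maxDelaySec * 1000)
  result = proc (): int64 =
    rand(minMs .. maxMs)

when isMainModule:
  randomize()
  let gen = makeDelayGenerator(0.5, 2.0)
  for _ in 0 ..< 3:
    echo gen(), " ms"
```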
@@ -10,15 +10,16 @@ import
faststreams/[inputs, outputs, buffers], snappy, snappy/framing,
json_serialization, json_serialization/std/[net, options],
chronos, chronicles, metrics,
libp2p/[switch, peerinfo,
libp2p/[switch, peerinfo, multicodec,
multiaddress, crypto/crypto, crypto/secp,
protocols/identify, protocols/protocol],
libp2p/muxers/muxer, libp2p/muxers/mplex/mplex,
libp2p/transports/[transport, tcptransport],
libp2p/protocols/secure/[secure, noise],
libp2p/protocols/pubsub/[pubsub, rpc/message, rpc/messages],
libp2p/protocols/pubsub/[pubsub, gossipsub, rpc/message, rpc/messages],
libp2p/transports/tcptransport,
libp2p/stream/connection,
libp2p/utils/semaphore,
eth/[keys, async_utils], eth/p2p/p2p_protocol_dsl,
eth/net/nat, eth/p2p/discoveryv5/[enr, node],
".."/[
@@ -28,8 +29,6 @@ import
../validators/keystore_management,
./eth2_discovery, ./peer_pool, ./libp2p_json_serialization

import libp2p/protocols/pubsub/gossipsub

when chronicles.enabledLogLevel == LogLevel.TRACE:
import std/sequtils
@@ -855,7 +854,11 @@ proc connectWorker(node: Eth2Node, index: int) {.async.} =
# This loop will never produce HIGH CPU usage because it will wait
# and block until it obtains a new peer from the queue ``connQueue``.
let remotePeerAddr = await node.connQueue.popFirst()
await node.dialPeer(remotePeerAddr, index)
# Previous worker dial might have hit the maximum peers.
# TODO: could clear the whole connTable and connQueue here also, best
# would be to have this event based coming from peer pool or libp2p.
if node.switch.connManager.outSema.count > 0:
await node.dialPeer(remotePeerAddr, index)
# Peer was added to `connTable` before adding it to `connQueue`, so we
# exclude the peer here after processing.
node.connTable.excl(remotePeerAddr.peerId)
@@ -865,12 +868,44 @@ proc toPeerAddr(node: Node): Result[PeerAddr, cstring] {.raises: [Defect].} =
let peerAddr = ? nodeRecord.toPeerAddr(tcpProtocol)
ok(peerAddr)

proc queryRandom*(d: Eth2DiscoveryProtocol, forkId: ENRForkID,
attnets: BitArray[ATTESTATION_SUBNET_COUNT]):
Future[seq[PeerAddr]] {.async, raises: [Exception, Defect].} =
## Perform a discovery query for a random target matching the eth2 field
## (forkId) and matching at least one of the attestation subnets.
let nodes = await d.queryRandom()
let eth2Field = SSZ.encode(forkId)

var filtered: seq[PeerAddr]
for n in nodes:
if n.record.contains(("eth2", eth2Field)):
let res = n.record.tryGet("attnets", seq[byte])

if res.isSome():
let attnetsNode =
try:
SSZ.decode(res.get(), BitArray[ATTESTATION_SUBNET_COUNT])
except SszError as e:
debug "Could not decode attestation subnet bitfield of peer",
peer = n.record.toURI(), exception = e.name, msg = e.msg
continue

for i in 0..<attnetsNode.bytes.len:
if (attnets.bytes[i] and attnetsNode.bytes[i]) > 0:
# we have at least one subnet match
let peerAddr = n.toPeerAddr()
if peerAddr.isOk():
filtered.add(peerAddr.get())
break

return filtered

proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
debug "Starting discovery loop"
let enrField = ("eth2", SSZ.encode(node.forkId))

while true:
if node.peerPool.lenSpace({PeerType.Outgoing}) > 0:
if node.switch.connManager.outSema.count > 0:
var discoveredNodes = await node.discovery.queryRandom(enrField)
var newPeers = 0
for discNode in discoveredNodes:
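The filtering step in `queryRandom` boils down to a byte-wise AND between the wanted attnets bitfield and the one advertised in the peer's ENR. A minimal sketch of that overlap test, with raw byte arrays standing in for stew's `BitArray` (the helper name and data are illustrative only):

```nim
# Two attnets bitfields overlap if any byte-wise AND of their backing bytes
# is non-zero. 64 subnets fit in 8 bytes.
proc hasSubnetOverlap(ours, theirs: openArray[byte]): bool =
  for i in 0 ..< min(ours.len, theirs.len):
    if (ours[i] and theirs[i]) > 0:
      return true
  false

when isMainModule:
  var ours, theirs: array[8, byte]
  # Set bit 34 on both sides -> overlap.
  ours[34 div 8] = byte(1 shl (34 mod 8))
  theirs[34 div 8] = byte(1 shl (34 mod 8))
  theirs[5] = 0xFF
  echo hasSubnetOverlap(ours, theirs)                           # true
  echo hasSubnetOverlap(ours, [byte 0, 0, 0, 0, 0, 0, 0, 0])    # false
```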
@@ -1541,7 +1576,19 @@ proc createEth2Node*(rng: ref BrHmacDrbgContext,
ipColocationFactorThreshold: 3.0,
behaviourPenaltyWeight: -15.9,
behaviourPenaltyDecay: 0.986,
disconnectBadPeers: true
disconnectBadPeers: true,
directPeers:
block:
var res = initTable[PeerId, seq[MultiAddress]]()
if config.directPeers.len > 0:
for s in config.directPeers:
let
maddress = MultiAddress.init(s).tryGet()
mpeerId = maddress[multiCodec("p2p")].tryGet()
peerId = PeerID.init(mpeerId.protoAddress().tryGet()).tryGet()
res.mGetOrPut(peerId, @[]).add(maddress)
info "Adding privileged direct peer", peerId, address = maddress
res
)
pubsub = GossipSub.init(
switch = switch,
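The `directPeers` table above groups every configured multiaddress under its peer ID via `mGetOrPut`. A small standalone sketch of the same grouping pattern with `std/tables`, using plain strings in place of `PeerID` and `MultiAddress` (all names and values here are illustrative):

```nim
import std/tables

# Group every listed address under its peer ID, as the directPeers block
# above does; plain strings stand in for PeerID and MultiAddress.
proc groupByPeer(entries: openArray[(string, string)]): Table[string, seq[string]] =
  result = initTable[string, seq[string]]()
  for (peerId, address) in entries:
    # mgetOrPut returns a mutable slot, creating it with @[] on first use.
    result.mgetOrPut(peerId, @[]).add(address)

when isMainModule:
  echo groupByPeer({
    "16Uiu2HAmA": "/ip4/10.0.0.1/tcp/9000/p2p/16Uiu2HAmA",
    "16Uiu2HAmA": "/ip6/::1/tcp/9000/p2p/16Uiu2HAmA",
    "16Uiu2HAmB": "/ip4/10.0.0.2/tcp/9000/p2p/16Uiu2HAmB"})
```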
@@ -7,8 +7,8 @@

import
# Standard library
std/[math, os, sequtils, strformat, strutils, tables, times,
terminal, osproc],
std/[math, os, osproc, random, sequtils, strformat, strutils,
tables, times, terminal],
system/ansi_c,

# Nimble packages
@@ -37,8 +37,10 @@ import
validator_api],
./spec/[
datatypes, digest, crypto, beaconstate, eth2_apis/beacon_rpc_client,
helpers, network, presets, validator, weak_subjectivity, signatures],
./consensus_object_pools/[blockchain_dag, block_quarantine, block_clearance, block_pools_types, attestation_pool, exit_pool],
helpers, network, presets, weak_subjectivity, signatures],
./consensus_object_pools/[
blockchain_dag, block_quarantine, block_clearance, block_pools_types,
attestation_pool, exit_pool, spec_cache],
./eth1/eth1_monitor

from eth/common/eth_types import BlockHashOrNumber
@@ -214,6 +216,9 @@ proc init*(T: type BeaconNode,
error "Failed to initialize database", err = e.msg
quit 1

# Doesn't use std/random directly, but dependencies might
randomize(rng[].rand(high(int)))

info "Loading block dag from database", path = config.databaseDir

let
@@ -453,32 +458,17 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#lookahead
# Only subscribe when this node should aggregate; libp2p broadcasting works
# on subnet topics regardless.
#
# Committee sizes in any given epoch vary by 1, i.e. committee sizes $n$ and
# $n+1$ can exist. Furthermore, according to
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#aggregation-selection
# is_aggregator uses `len(committee) div TARGET_AGGREGATORS_PER_COMMITTEE`
# to determine whether committee length/slot signature pairs aggregate the
# attestations in a slot/committee, where TARGET_AGGREGATORS_PER_COMMITTEE
# is currently 16 in all defined presets. Therefore, probe a committee len
# to determine whether it's possible that it's within a boundary such that
# either that length or other possible committee lengths don't cross those
# div/mod 16 boundaries which would change is_aggregator results.
static: doAssert TARGET_AGGREGATORS_PER_COMMITTEE == 16 # mainnet, minimal
let epochRef = node.chainDag.getEpochRef(node.chainDag.head, epoch)

let
probeCommitteeLen = get_beacon_committee_len(
node.chainDag.headState.data.data, compute_start_slot_at_epoch(epoch),
0.CommitteeIndex, cache)

# Without knowing whether probeCommitteeLen is the higher or lower, if it's
# [-1, 1] mod TARGET_AGGREGATORS_PER_COMMITTEE it might cross boundaries in
# is_aggregator, such that one can't hoist committee length calculation out
# of the anyIt(...) loop.
isConstAggregationLen =
(probeCommitteeLen mod TARGET_AGGREGATORS_PER_COMMITTEE) notin
[0'u64, 1'u64, TARGET_AGGREGATORS_PER_COMMITTEE - 1]
# Update proposals
node.attestationSubnets.proposingSlots[epoch mod 2] = 0
for i in 0 ..< SLOTS_PER_EPOCH:
let beaconProposer = epochRef.beacon_proposers[i]
if beaconProposer.isSome and beaconProposer.get()[0] in attachedValidators:
node.attestationsubnets.proposingSlots[epoch mod 2] =
node.attestationsubnets.proposingSlots[epoch mod 2] or (1'u32 shl i)

# Update attestations
template isAnyCommitteeValidatorAggregating(
validatorIndices, committeeLen: untyped, slot: Slot): bool =
anyIt(
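A worked example of the div/mod-16 reasoning in the comment above, assuming only that `is_aggregator` derives its modulo as `max(1, len(committee) div TARGET_AGGREGATORS_PER_COMMITTEE)` with the constant equal to 16 (the local constant and helper below are illustrative, not code from the repository):

```nim
# TARGET_AGGREGATORS_PER_COMMITTEE is 16 in all defined presets (see the
# comment above); is_aggregator effectively uses
# max(1, len(committee) div 16) as its modulo.
const TargetAggregatorsPerCommittee = 16'u64

func aggregatorModulo(committeeLen: uint64): uint64 =
  max(1'u64, committeeLen div TargetAggregatorsPerCommittee)

when isMainModule:
  # Committee sizes within an epoch differ by at most one. Lengths 31 and 32
  # straddle a div-16 boundary (modulo 1 vs 2), so a single probed length is
  # only reusable when it is not within 1 of such a boundary.
  for n in [30'u64, 31, 32, 33, 47, 48, 49]:
    echo "committee len ", n, " -> modulo ", aggregatorModulo(n)
```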
@@ -489,22 +479,17 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
node.chainDag.headState.data.data.fork,
node.chainDag.headState.data.data.genesis_validators_root, slot)))

# The relevant bitmap are 32 bits each.
static: doAssert SLOTS_PER_EPOCH <= 32
node.attestationSubnets.lastCalculatedAttestationEpoch = epoch
node.attestationSubnets.lastCalculatedEpoch = epoch
node.attestationSubnets.attestingSlots[epoch mod 2] = 0

# The relevant bitmaps are 32 bits each.
static: doAssert SLOTS_PER_EPOCH <= 32

for (validatorIndices, committeeIndex, subnetIndex, slot) in
get_committee_assignments(
node.chainDag.headState.data.data, epoch, validatorIndices, cache):

doAssert compute_epoch_at_slot(slot) == epoch
let committeeLen =
if isConstAggregationLen:
probeCommitteeLen
else:
get_beacon_committee_len(
node.chainDag.headState.data.data, slot, committeeIndex, cache)

# Each get_committee_assignments() call here is on the next epoch. At any
# given time, only care about two epochs, the current and next epoch. So,
@@ -524,7 +509,8 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
(1'u32 shl (slot mod SLOTS_PER_EPOCH))

if not isAnyCommitteeValidatorAggregating(
validatorIndices, committeeLen, slot):
validatorIndices,
get_beacon_committee_len(epochRef, slot, committeeIndex), slot):
continue

node.attestationSubnets.unsubscribeSlot[subnetIndex] =
@@ -541,11 +527,11 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
node.attestationSubnets.subscribeSlot[subnetIndex])

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
proc getStabilitySubnetLength(node: BeaconNode): uint64 =
func getStabilitySubnetLength(node: BeaconNode): uint64 =
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION +
node.network.rng[].rand(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION.int).uint64

proc updateStabilitySubnets(node: BeaconNode, slot: Slot): set[uint8] =
func updateStabilitySubnets(node: BeaconNode, slot: Slot): set[uint8] =
# Equivalent to wallSlot by cycleAttestationSubnets(), especially
# since it'll try to run early in epochs, avoiding race conditions.
static: doAssert ATTESTATION_SUBNET_COUNT <= high(uint8)
@@ -878,13 +864,14 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) =
# This exits early all but one call each epoch.
traceAsyncErrors node.cycleAttestationSubnets(slot)

func getNextAttestation(node: BeaconNode, slot: Slot): Slot =
# The relevant attestations are in, depending on calculated bounds:
func getNextValidatorAction(
actionSlotSource: auto, lastCalculatedEpoch: Epoch, slot: Slot): Slot =
# The relevant actions are in, depending on calculated bounds:
# [aS[epoch mod 2], aS[1 - (epoch mod 2)]]
# current epoch next epoch
let orderedAttestingSlots = [
node.attestationSubnets.attestingSlots[ slot.epoch mod 2'u64],
node.attestationSubnets.attestingSlots[1 - (slot.epoch mod 2'u64)]]
let orderedActionSlots = [
actionSlotSource[ slot.epoch mod 2'u64],
actionSlotSource[1 - (slot.epoch mod 2'u64)]]

static: doAssert MIN_ATTESTATION_INCLUSION_DELAY == 1
@@ -894,15 +881,15 @@ func getNextAttestation(node: BeaconNode, slot: Slot): Slot =
for i in [0'u64, 1'u64]:
let bitmapEpoch = slot.epoch + i

if bitmapEpoch > node.attestationSubnets.lastCalculatedAttestationEpoch:
if bitmapEpoch > lastCalculatedEpoch:
return FAR_FUTURE_SLOT

for slotOffset in 0 ..< SLOTS_PER_EPOCH:
let nextAttestationSlot =
let nextActionSlot =
compute_start_slot_at_epoch(bitmapEpoch) + slotOffset
if ((orderedAttestingSlots[i] and (1'u32 shl slotOffset)) != 0) and
nextAttestationSlot > slot:
return nextAttestationSlot
if ((orderedActionSlots[i] and (1'u32 shl slotOffset)) != 0) and
nextActionSlot > slot:
return nextActionSlot

FAR_FUTURE_SLOT
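A self-contained sketch of the bitmap scan performed by `getNextValidatorAction`, using plain `uint64` slot numbers and a fixed 32-slot epoch instead of the `Slot`/`Epoch` types (all names below are illustrative):

```nim
# actionSlots[0] holds the current epoch's bitmap, actionSlots[1] the next
# epoch's; bit k set means "act at slot k of that epoch".
const SlotsPerEpoch = 32

func nextActionSlot(actionSlots: array[2, uint32],
                    currentEpoch, currentSlot: uint64): uint64 =
  for i in 0 .. 1:
    let epochStart = (currentEpoch + uint64(i)) * uint64(SlotsPerEpoch)
    for offset in 0 ..< SlotsPerEpoch:
      let candidate = epochStart + uint64(offset)
      if ((actionSlots[i] and (1'u32 shl offset)) != 0) and candidate > currentSlot:
        return candidate
  high(uint64)  # nothing scheduled in the two tracked epochs

when isMainModule:
  # Slot 70 is slot 6 of epoch 2; bit 9 of the current-epoch bitmap maps to
  # slot 64 + 9 = 73, which is the next action.
  echo nextActionSlot([uint32(1 shl 9), uint32(1 shl 3)], 2'u64, 70'u64)
```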
@@ -928,10 +915,23 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
# the database are synced with the filesystem.
node.db.checkpoint()

# -1 is a more useful output than 18446744073709551615 as an indicator of
# no future attestation/proposal known.
template displayInt64(x: Slot): int64 =
if x == high(uint64).Slot:
-1'i64
else:
toGaugeValue(x)

let
nextAttestationSlot = node.getNextAttestation(slot)
nextActionWaitTime =
saturate(fromNow(node.beaconClock, nextAttestationSlot))
nextAttestationSlot = getNextValidatorAction(
node.attestationSubnets.attestingSlots,
node.attestationSubnets.lastCalculatedEpoch, slot)
nextProposalSlot = getNextValidatorAction(
node.attestationSubnets.proposingSlots,
node.attestationSubnets.lastCalculatedEpoch, slot)
nextActionWaitTime = saturate(fromNow(
node.beaconClock, min(nextAttestationSlot, nextProposalSlot)))

info "Slot end",
slot = shortLog(slot),
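The `displayInt64` template above maps the far-future sentinel to -1 for logging. An equivalent standalone sketch on bare `uint64`, assuming the sentinel is `high(uint64)` and skipping `toGaugeValue`:

```nim
# Map the "nothing scheduled" sentinel (high(uint64)) to -1 for display;
# real slot numbers comfortably fit in int64, so the conversion is safe.
func displaySlot(x: uint64): int64 =
  if x == high(uint64):
    -1'i64
  else:
    int64(x)

when isMainModule:
  echo displaySlot(high(uint64))  # -1
  echo displaySlot(12345'u64)     # 12345
```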
@@ -941,7 +941,8 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
finalizedHead = shortLog(node.chainDag.finalizedHead.blck),
finalizedEpoch =
shortLog(node.chainDag.finalizedHead.blck.slot.compute_epoch_at_slot()),
nextAttestationSlot,
nextAttestationSlot = displayInt64(nextAttestationSlot),
nextProposalSlot = displayInt64(nextProposalSlot),
nextActionWait =
if nextAttestationSlot == FAR_FUTURE_SLOT:
"n/a"
@@ -1135,7 +1136,7 @@ proc startSyncManager(node: BeaconNode) =
)
node.syncManager.start()

proc connectedPeersCount(node: BeaconNode): int =
func connectedPeersCount(node: BeaconNode): int =
len(node.network.peerPool)

proc installRpcHandlers(rpcServer: RpcServer, node: BeaconNode) =
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -9,7 +9,7 @@

import
# Standard library
std/[os, tables, random, strutils, typetraits],
std/[os, tables, strutils, typetraits],

# Nimble packages
chronos, confutils/defs,
@@ -46,8 +46,6 @@ proc updateLogLevel*(logLevel: string) =
warn "Unrecognized logging topic", topic = topicName

proc setupLogging*(logLevel: string, logFile: Option[OutFile]) =
randomize()

if logFile.isSome:
when defaultChroniclesStream.outputs.type.arity > 1:
block openLogFile:
@@ -7,7 +7,7 @@

import
# Standard library
os, strutils, json,
std/[os, json, random, strutils],

# Nimble packages
stew/shims/[tables, macros],
@@ -27,7 +27,8 @@ import
./ssz/merkleization,
./spec/eth2_apis/callsigs_types,
./validators/[attestation_aggregation, keystore_management, validator_pool, slashing_protection],
./eth/db/[kvstore, kvstore_sqlite3]
./eth/db/[kvstore, kvstore_sqlite3],
./eth/keys, ./eth/p2p/discoveryv5/random2

logScope: topics = "vc"
@@ -281,6 +282,13 @@ programMain:

setupLogging(config.logLevel, config.logFile)

# Doesn't use std/random directly, but dependencies might
let rng = keys.newRng()
if rng.isNil:
randomize()
else:
randomize(rng[].rand(high(int)))

case config.cmd
of VCNoCommand:
debug "Launching validator client",
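A sketch of the same seeding pattern for code that cannot use nim-eth's `keys.newRng`: seed `std/random` from the OS CSPRNG when available (here via `std/sysrand`, which assumes Nim 1.6 or newer) and fall back to time-based `randomize()` otherwise. The helper name is made up for the example.

```nim
import std/[random, sysrand]

# Prefer entropy from the OS CSPRNG (std/sysrand stands in for keys.newRng
# used above) and fall back to the time-based randomize() when unavailable.
proc seedStdRandom() =
  var seedBytes: array[8, byte]
  if urandom(seedBytes):
    var seed: int64
    for b in seedBytes:
      seed = (seed shl 8) or int64(b)
    randomize(seed)
  else:
    randomize()

when isMainModule:
  seedStdRandom()
  echo rand(100)
```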
@@ -124,6 +124,8 @@ proc process_deposit*(preset: RuntimePreset,
# New validator! Add validator and balance entries
state.validators.add(get_validator_from_deposit(deposit.data))
state.balances.add(amount)

doAssert state.validators.len == state.balances.len
else:
# Deposits may come with invalid signatures - in that case, they are not
# turned into a validator but still get processed to keep the deposit
@@ -75,10 +75,6 @@ const
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#misc
ATTESTATION_SUBNET_COUNT* = 64

# https://github.com/ethereum/eth2.0-specs/pull/2101
ATTESTATION_PRODUCTION_DIVISOR* = 3
ATTESTATION_ENTROPY_DIVISOR* = 12

template maxSize*(n: int) {.pragma.}

# Block validation flow
@@ -7,7 +7,7 @@

import
# Standard library
std/[os, osproc, random, sequtils, streams, tables],
std/[os, osproc, sequtils, streams, tables],

# Nimble packages
stew/[assign2, objects, shims/macros],
@@ -541,28 +541,6 @@ proc broadcastAggregatedAttestations(
validator = shortLog(curr[0].v),
aggregationSlot

proc getSlotTimingEntropy(): int64 =
# Ensure SECONDS_PER_SLOT / ATTESTATION_PRODUCTION_DIVISOR >
# SECONDS_PER_SLOT / ATTESTATION_ENTROPY_DIVISOR, which will
# ensure that the second condition can't go negative.
static: doAssert ATTESTATION_ENTROPY_DIVISOR > ATTESTATION_PRODUCTION_DIVISOR

# For each `slot`, a validator must generate a uniform random variable
# `slot_timing_entropy` between `(-SECONDS_PER_SLOT /
# ATTESTATION_ENTROPY_DIVISOR, SECONDS_PER_SLOT /
# ATTESTATION_ENTROPY_DIVISOR)` with millisecond resolution and using local
# entropy.
#
# Per issue discussion "validators served by the same beacon node can have
# the same attestation production time, i.e., they can share the source of
# the entropy and the actual slot_timing_entropy value."
const
slot_timing_entropy_upper_bound =
SECONDS_PER_SLOT.int64 * 1000 div ATTESTATION_ENTROPY_DIVISOR
slot_timing_entropy_lower_bound = 0-slot_timing_entropy_upper_bound
rand(range[(slot_timing_entropy_lower_bound + 1) ..
(slot_timing_entropy_upper_bound - 1)])

proc updateValidatorMetrics*(node: BeaconNode) =
when defined(metrics):
# Technically, this only needs to be done on epoch transitions and if there's
@@ -654,29 +632,14 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =

head = await handleProposal(node, head, slot)

# Fix timing attack: https://github.com/ethereum/eth2.0-specs/pull/2101
# A validator must create and broadcast the `attestation` to the associated
# attestation subnet when the earlier one of the following two events occurs:
#
# - The validator has received a valid block from the expected block
# proposer for the assigned `slot`. In this case, the validator must set a
# timer for `abs(slot_timing_entropy)`. The end of this timer will be the
# trigger for attestation production.
#
# - `SECONDS_PER_SLOT / ATTESTATION_PRODUCTION_DIVISOR +
# slot_timing_entropy` seconds have elapsed since the start of the `slot`
# (using the `slot_timing_entropy` generated for this slot)

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#attesting
# Milliseconds to wait from the start of the slot before sending out
# attestations - base value
const attestationOffset =
SECONDS_PER_SLOT.int64 * 1000 div ATTESTATION_PRODUCTION_DIVISOR
# attestations
const attestationOffset = SECONDS_PER_SLOT.int64 * 1000 div 3

let
slotTimingEntropy = getSlotTimingEntropy() # +/- 1s
# The latest point in time when we'll be sending out attestations
attestationCutoffTime = slot.toBeaconTime(
millis(attestationOffset + slotTimingEntropy))
attestationCutoffTime = slot.toBeaconTime(millis(attestationOffset))
attestationCutoff = node.beaconClock.fromNow(attestationCutoffTime)

if attestationCutoff.inFuture:
@@ -687,9 +650,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
# Wait either for the block or the attestation cutoff time to arrive
if await node.consensusManager[].expectBlock(slot).withTimeout(attestationCutoff.offset):
# The expected block arrived (or expectBlock was called again which
# shouldn't happen as this is the only place we use it) - according to the
# spec, we should now wait for abs(slotTimingEntropy) - in our async loop
# however, we might have been doing other processing that caused delays
# shouldn't happen as this is the only place we use it) - in our async
# loop however, we might have been doing other processing that caused delays
# here so we'll cap the waiting to the time when we would have sent out
# attestations had the block not arrived.
# An opposite case is that we received (or produced) a block that has
@@ -698,8 +660,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
# impose a minimum delay of 250ms. The delay is enforced only when we're
# not hitting the "normal" cutoff time for sending out attestations.

const afterBlockDelay = 250
let
afterBlockDelay = max(250, abs(slotTimingEntropy))
afterBlockTime = node.beaconClock.now() + millis(afterBlockDelay)
afterBlockCutoff = node.beaconClock.fromNow(
min(afterBlockTime, attestationCutoffTime))
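Putting the two hunks above together, attestations now go out at a fixed point a third into the slot, or 250 ms after the expected block arrives, whichever comes first. An illustrative millisecond-arithmetic sketch (the constants and names below are not from the codebase, and 12-second slots are assumed):

```nim
const
  SecondsPerSlot = 12
  AttestationOffsetMs = SecondsPerSlot * 1000 div 3   # 4000 ms cutoff
  AfterBlockDelayMs = 250

func attestationSendTimeMs(blockArrivalMs: int): int =
  ## When to send attestations, relative to slot start: 250 ms after the
  ## block arrives, but never later than the fixed cutoff.
  min(blockArrivalMs + AfterBlockDelayMs, AttestationOffsetMs)

when isMainModule:
  echo attestationSendTimeMs(1200)   # 1450: shortly after an early block
  echo attestationSendTimeMs(3900)   # 4000: capped at the slot cutoff
  echo attestationSendTimeMs(6000)   # 4000: block late, cutoff already hit
```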
@@ -9,7 +9,7 @@ when not defined(nimscript):
const
versionMajor* = 1
versionMinor* = 0
versionBuild* = 10
versionBuild* = 12

versionBlob* = "stateofus" # Single word - ends up in the default graffiti
@@ -67,3 +67,13 @@ All the same conventions apply:
WEB3_URL="ws://localhost:8545" ./run-mainnet-node.sh --max-peers=150
```

## Running a Prater node

`run-prater-beacon-node.sh` is a similar script intended for connecting to the Prater
testnet. All the same conventions apply:

```bash
# using a local Geth instance
WEB3_URL="ws://localhost:8545" ./run-prater-node.sh --max-peers=150
```
@@ -18,6 +18,7 @@ import # Unit test
./test_bitseqs,
./test_block_pool,
./test_datatypes,
./test_discovery,
./test_eth1_monitor,
./test_exit_pool,
./test_helpers,
@@ -0,0 +1,132 @@
import
  std/unittest,
  chronos, stew/shims/net, eth/keys, eth/p2p/discoveryv5/enr,
  ../beacon_chain/conf,
  ../beacon_chain/spec/datatypes,
  ../beacon_chain/networking/[eth2_network, eth2_discovery],
  ./testutil

template timedAsyncTest*(name, body: untyped) =
  timedTest name:
    proc scenario {.async.} = body
    waitFor scenario()

proc new*(T: type Eth2DiscoveryProtocol,
    pk: keys.PrivateKey,
    enrIp: Option[ValidIpAddress], enrTcpPort, enrUdpPort: Option[Port],
    bindPort: Port, bindIp: ValidIpAddress,
    enrFields: openArray[(string, seq[byte])] = [],
    rng: ref BrHmacDrbgContext):
    T {.raises: [Exception, Defect].} =

  newProtocol(pk, enrIp, enrTcpPort, enrUdpPort, enrFields,
    bindPort = bindPort, bindIp = bindIp, rng = rng)

proc generateNode(rng: ref BrHmacDrbgContext, port: Port,
    enrFields: openArray[(string, seq[byte])] = []): Eth2DiscoveryProtocol =
  let ip = ValidIpAddress.init("127.0.0.1")
  Eth2DiscoveryProtocol.new(keys.PrivateKey.random(rng[]),
    some(ip), some(port), some(port), port, ip, enrFields, rng = rng)

suiteReport "Eth2 specific discovery tests":
  let
    rng = keys.newRng()
    enrForkId = ENRForkID(
      fork_digest: ForkDigest([byte 0, 1, 2, 3]),
      next_fork_version: Version([byte 0, 0, 0, 0]),
      next_fork_epoch: Epoch(0))

  timedAsyncTest "Subnet query":
    var attnets: BitArray[ATTESTATION_SUBNET_COUNT]
    attnets.setBit(34)

    let
      node1 = generateNode(rng, Port(5000))
      node2 = generateNode(rng, Port(5001),
        {"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(attnets)})

    node1.open()
    node2.open()

    # ping in one direction to add node2 to routing table of node1
    check (await node2.ping(node1.localNode)).isOk()

    var attnetsSelected: BitArray[ATTESTATION_SUBNET_COUNT]
    attnetsSelected.setBit(42)
    attnetsSelected.setBit(34)

    let discovered = await node1.queryRandom(enrForkId, attnetsSelected)
    check discovered.len == 1

    await node1.closeWait()
    await node2.closeWait()

  timedAsyncTest "Invalid attnets field":
    var invalidAttnets: BitArray[ATTESTATION_SUBNET_COUNT div 2]
    invalidAttnets.setBit(15)
    # TODO: This doesn't fail actually.
    # var invalidAttnets2: BitArray[ATTESTATION_SUBNET_COUNT * 2]
    # invalidAttnets2.setBit(15)
    var attnets: BitArray[ATTESTATION_SUBNET_COUNT]
    attnets.setBit(15)

    let
      node1 = generateNode(rng, Port(5000))
      node2 = generateNode(rng, Port(5001),
        {"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(invalidAttnets)})
      node3 = generateNode(rng, Port(5002),
        {"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(attnets)})

    node1.open()
    node2.open()
    node3.open()

    check (await node2.ping(node1.localNode)).isOk()
    check (await node3.ping(node1.localNode)).isOk()

    var attnetsSelected: BitArray[ATTESTATION_SUBNET_COUNT]
    attnetsSelected.setBit(15)
    attnetsSelected.setBit(42)

    let discovered = await node1.queryRandom(enrForkId, attnetsSelected)
    check discovered.len == 1

    await node1.closeWait()
    await node2.closeWait()
    await node3.closeWait()

  timedAsyncTest "Subnet query after ENR update":
    var attnets: BitArray[ATTESTATION_SUBNET_COUNT]
    attnets.setBit(1)

    let
      node1 = generateNode(rng, Port(5000))
      node2 = generateNode(rng, Port(5001),
        {"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(attnets)})

    node1.open()
    node2.open()

    check (await node2.ping(node1.localNode)).isOk()

    var attnetsSelected: BitArray[ATTESTATION_SUBNET_COUNT]
    attnetsSelected.setBit(2)

    block:
      let discovered = await node1.queryRandom(enrForkId, attnetsSelected)
      check discovered.len == 0

    block:
      attnets.setBit(2)
      check node2.updateRecord(
        {"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(attnets)}).isOk()

    let nodes = await node1.findNode(node2.localNode, @[0'u32])
    check nodes.isOk() and nodes[].len > 0
    discard node1.addNode(nodes[][0])

    let discovered = await node1.queryRandom(enrForkId, attnetsSelected)
    check discovered.len == 1

    await node1.closeWait()
    await node2.closeWait()
@@ -1 +1 @@
Subproject commit 192c1b48ea5ff4adb4e6ef7d2a9e5f82fb5ffd72
Subproject commit 5b4e32779b66b6d84edb13aa4eee67f544313d4b

@@ -1 +1 @@
Subproject commit 0f890d4a667fcb2dcafd7243a079e5af2874db1d
Subproject commit b5956a1b4b74cc7f2c10eaf9cceb12c80ae33787

@@ -1 +1 @@
Subproject commit 93674cbdbd3ce59e2d4d0cbdfac9ab62d9a6d28f
Subproject commit e788deab3d59ff8a4fe103aeb5d82d3d82fcac7d