Merge branch 'unstable' of github.com:status-im/nim-beacon-chain into unstable

commit eb985b880c

CHANGELOG.md | 13

@@ -8,6 +8,19 @@ TBD
 * `getChronosFutures` is now `debug_getChronosFutures`
 
+2021-03-10 v1.0.12
+==================
+
+This is a bugfix release correcting an error in the Prater testnet config
+leading to incorrect Eth1 voting.
+
+
+2021-03-10 v1.0.11
+==================
+
+This is a minor release adding support for connecting to the Prater testnet.
+
+
 2021-03-10 v1.0.10
 ==================

@@ -161,9 +161,10 @@ type
    subscribeSlot*: array[ATTESTATION_SUBNET_COUNT, Slot]
    unsubscribeSlot*: array[ATTESTATION_SUBNET_COUNT, Slot]
 
-    # Used to track the next attestation slots, using an epoch-relative
-    # coordinate system. Defaults don't need initialization.
+    # Used to track the next attestation and proposal slots using an
+    # epoch-relative coordinate system. Doesn't need initialization.
    attestingSlots*: array[2, uint32]
-    lastCalculatedAttestationEpoch*: Epoch
+    proposingSlots*: array[2, uint32]
+    lastCalculatedEpoch*: Epoch
 
 func shortLog*(v: AttachedValidator): string = shortLog(v.pubKey)

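For context, a minimal sketch of how such epoch-relative bitmaps behave (illustrative names and values, not the node's actual API): one 32-bit word per epoch parity, with bit i meaning "act at slot i of that epoch".

```nim
# Assumes 32 slots per epoch, as on mainnet.
const SLOTS_PER_EPOCH = 32'u64

var attestingSlots: array[2, uint32]

proc markSlot(epoch, slotInEpoch: uint64) =
  # Set the bit for `slotInEpoch` in the word for this epoch's parity.
  assert slotInEpoch < SLOTS_PER_EPOCH
  attestingSlots[epoch mod 2] =
    attestingSlots[epoch mod 2] or (1'u32 shl slotInEpoch)

proc isMarked(epoch, slotInEpoch: uint64): bool =
  (attestingSlots[epoch mod 2] and (1'u32 shl slotInEpoch)) != 0

markSlot(10, 7)
assert isMarked(10, 7)
assert not isMarked(11, 7) # epoch 11 maps to the other word
```

Because only two epochs are live at any time (current and next), two words suffice and a stale parity is simply overwritten when the schedule advances.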
@@ -273,6 +273,10 @@ type
      desc: "Write SSZ dumps of blocks, attestations and states to data dir"
      name: "dump" }: bool
 
+    directPeers* {.
+      desc: "The list of privileged, secure and known peers to connect to and maintain the connection with. This requires a non-random netkey-file. Use the complete multiaddress format, e.g.: /ip4/<address>/tcp/<port>/p2p/<peerId-public-key>. Peering agreements are established out of band and must be reciprocal."
+      name: "direct-peer" .}: seq[string]
+
    doppelgangerDetection* {.
      defaultValue: true
      desc: "Whether to detect whether another validator is running the same validator keys (default true)"

@@ -1,9 +1,10 @@
 import
-  os, sequtils, strutils, options, json, terminal, random,
+  os, sequtils, strutils, options, json, terminal,
   chronos, chronicles, confutils, stint, json_serialization,
   ../filepath,
   ../networking/network_metadata,
-  web3, web3/confutils_defs, eth/keys, stew/io2,
+  web3, web3/confutils_defs, eth/keys, eth/p2p/discoveryv5/random2,
+  stew/io2,
   ../spec/[datatypes, crypto, presets], ../ssz/merkleization,
   ../validators/keystore_management

@@ -260,7 +261,10 @@ proc main() {.async.} =
 
   if cfg.maxDelay > 0.0:
     delayGenerator = proc (): chronos.Duration =
-      chronos.milliseconds (rand(cfg.minDelay..cfg.maxDelay)*1000).int
+      let
+        minDelay = (cfg.minDelay*1000).int64
+        maxDelay = (cfg.maxDelay*1000).int64
+      chronos.milliseconds (rng[].rand(maxDelay - minDelay) + minDelay)
 
   await sendDeposits(deposits, cfg.web3Url, cfg.privateKey,
                      cfg.depositContractAddress, delayGenerator)

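As a standalone sketch of the new delay computation (the 0.5 s and 2.0 s bounds are hypothetical stand-ins for cfg.minDelay and cfg.maxDelay, and nim-eth's random2.rand(max) is assumed to return a uniform value in [0, max]): the delay is now drawn in whole milliseconds from the keyed RNG instead of std/random's global generator.

```nim
import chronos, eth/keys, eth/p2p/discoveryv5/random2

let rng = newRng() # BearSSL HMAC-DRBG, as used elsewhere in the tree
let
  minDelay = (0.5 * 1000).int64 # hypothetical cfg.minDelay (seconds)
  maxDelay = (2.0 * 1000).int64 # hypothetical cfg.maxDelay (seconds)
  # Uniform in [minDelay, maxDelay] milliseconds
  delay = chronos.milliseconds(rng[].rand(maxDelay - minDelay) + minDelay)
echo delay
```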
@@ -10,15 +10,16 @@ import
   faststreams/[inputs, outputs, buffers], snappy, snappy/framing,
   json_serialization, json_serialization/std/[net, options],
   chronos, chronicles, metrics,
-  libp2p/[switch, peerinfo,
+  libp2p/[switch, peerinfo, multicodec,
           multiaddress, crypto/crypto, crypto/secp,
           protocols/identify, protocols/protocol],
   libp2p/muxers/muxer, libp2p/muxers/mplex/mplex,
   libp2p/transports/[transport, tcptransport],
   libp2p/protocols/secure/[secure, noise],
-  libp2p/protocols/pubsub/[pubsub, rpc/message, rpc/messages],
+  libp2p/protocols/pubsub/[pubsub, gossipsub, rpc/message, rpc/messages],
   libp2p/transports/tcptransport,
   libp2p/stream/connection,
+  libp2p/utils/semaphore,
   eth/[keys, async_utils], eth/p2p/p2p_protocol_dsl,
   eth/net/nat, eth/p2p/discoveryv5/[enr, node],
   ".."/[

@@ -28,8 +29,6 @@ import
   ../validators/keystore_management,
   ./eth2_discovery, ./peer_pool, ./libp2p_json_serialization
 
-import libp2p/protocols/pubsub/gossipsub
-
 when chronicles.enabledLogLevel == LogLevel.TRACE:
   import std/sequtils

@@ -855,7 +854,11 @@ proc connectWorker(node: Eth2Node, index: int) {.async.} =
    # This loop will never produce high CPU usage because it will wait
    # and block until it obtains a new peer from the ``connQueue`` queue.
    let remotePeerAddr = await node.connQueue.popFirst()
-    await node.dialPeer(remotePeerAddr, index)
+    # Previous worker dial might have hit the maximum peers.
+    # TODO: could clear the whole connTable and connQueue here also, best
+    # would be to have this event based coming from peer pool or libp2p.
+    if node.switch.connManager.outSema.count > 0:
+      await node.dialPeer(remotePeerAddr, index)
    # Peer was added to `connTable` before adding it to `connQueue`, so we
    # exclude the peer here after processing.
    node.connTable.excl(remotePeerAddr.peerId)

@@ -865,12 +868,44 @@ proc toPeerAddr(node: Node): Result[PeerAddr, cstring] {.raises: [Defect].} =
   let peerAddr = ? nodeRecord.toPeerAddr(tcpProtocol)
   ok(peerAddr)
 
+proc queryRandom*(d: Eth2DiscoveryProtocol, forkId: ENRForkID,
+                  attnets: BitArray[ATTESTATION_SUBNET_COUNT]):
+                  Future[seq[PeerAddr]] {.async, raises: [Exception, Defect].} =
+  ## Perform a discovery query for a random target matching the eth2 field
+  ## (forkId) and matching at least one of the attestation subnets.
+  let nodes = await d.queryRandom()
+  let eth2Field = SSZ.encode(forkId)
+
+  var filtered: seq[PeerAddr]
+  for n in nodes:
+    if n.record.contains(("eth2", eth2Field)):
+      let res = n.record.tryGet("attnets", seq[byte])
+
+      if res.isSome():
+        let attnetsNode =
+          try:
+            SSZ.decode(res.get(), BitArray[ATTESTATION_SUBNET_COUNT])
+          except SszError as e:
+            debug "Could not decode attestation subnet bitfield of peer",
+              peer = n.record.toURI(), exception = e.name, msg = e.msg
+            continue
+
+        for i in 0..<attnetsNode.bytes.len:
+          if (attnets.bytes[i] and attnetsNode.bytes[i]) > 0:
+            # we have at least one subnet match
+            let peerAddr = n.toPeerAddr()
+            if peerAddr.isOk():
+              filtered.add(peerAddr.get())
+            break
+
+  return filtered
+
 proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
   debug "Starting discovery loop"
   let enrField = ("eth2", SSZ.encode(node.forkId))
 
   while true:
-    if node.peerPool.lenSpace({PeerType.Outgoing}) > 0:
+    if node.switch.connManager.outSema.count > 0:
       var discoveredNodes = await node.discovery.queryRandom(enrField)
       var newPeers = 0
       for discNode in discoveredNodes:

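The heart of the filter is the byte-wise AND between the local and advertised attnets bitfields. A self-contained restatement (hypothetical helper, not part of the module):

```nim
func hasSubnetOverlap(a, b: openArray[byte]): bool =
  ## True when the two bitfields share at least one set bit.
  for i in 0 ..< min(a.len, b.len):
    if (a[i] and b[i]) > 0'u8:
      return true
  false

assert hasSubnetOverlap([0b0000_1001'u8], [0b0000_0001'u8]) # bit 0 shared
assert not hasSubnetOverlap([0b0000_0010'u8], [0b0000_0100'u8])
```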
@@ -1541,7 +1576,19 @@ proc createEth2Node*(rng: ref BrHmacDrbgContext,
      ipColocationFactorThreshold: 3.0,
      behaviourPenaltyWeight: -15.9,
      behaviourPenaltyDecay: 0.986,
-      disconnectBadPeers: true
+      disconnectBadPeers: true,
+      directPeers:
+        block:
+          var res = initTable[PeerId, seq[MultiAddress]]()
+          if config.directPeers.len > 0:
+            for s in config.directPeers:
+              let
+                maddress = MultiAddress.init(s).tryGet()
+                mpeerId = maddress[multiCodec("p2p")].tryGet()
+                peerId = PeerID.init(mpeerId.protoAddress().tryGet()).tryGet()
+              res.mGetOrPut(peerId, @[]).add(maddress)
+              info "Adding privileged direct peer", peerId, address = maddress
+          res
    )
    pubsub = GossipSub.init(
      switch = switch,

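For illustration, this is how a single --direct-peer value decomposes, mirroring the calls above; the multiaddress is a placeholder that needs a real /p2p/ peer ID substituted before it will parse.

```nim
import libp2p/[multiaddress, multicodec, peerid]

# Placeholder multiaddress - substitute a real peer ID before running.
let s = "/ip4/192.0.2.1/tcp/9000/p2p/<peerId-public-key>"
let
  maddress = MultiAddress.init(s).tryGet()       # the full dial address
  mpeerId = maddress[multiCodec("p2p")].tryGet() # the /p2p/<id> component
  peerId = PeerID.init(mpeerId.protoAddress().tryGet()).tryGet()
# `peerId` keys the directPeers table; `maddress` is stored as its address.
```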
@@ -7,8 +7,8 @@
 
 import
   # Standard library
-  std/[math, os, sequtils, strformat, strutils, tables, times,
-    terminal, osproc],
+  std/[math, os, osproc, random, sequtils, strformat, strutils,
+    tables, times, terminal],
   system/ansi_c,
 
   # Nimble packages

@@ -37,8 +37,10 @@ import
     validator_api],
   ./spec/[
     datatypes, digest, crypto, beaconstate, eth2_apis/beacon_rpc_client,
-    helpers, network, presets, validator, weak_subjectivity, signatures],
-  ./consensus_object_pools/[blockchain_dag, block_quarantine, block_clearance, block_pools_types, attestation_pool, exit_pool],
+    helpers, network, presets, weak_subjectivity, signatures],
+  ./consensus_object_pools/[
+    blockchain_dag, block_quarantine, block_clearance, block_pools_types,
+    attestation_pool, exit_pool, spec_cache],
   ./eth1/eth1_monitor
 
 from eth/common/eth_types import BlockHashOrNumber

@@ -214,6 +216,9 @@ proc init*(T: type BeaconNode,
     error "Failed to initialize database", err = e.msg
     quit 1
 
+  # Doesn't use std/random directly, but dependencies might
+  randomize(rng[].rand(high(int)))
+
   info "Loading block dag from database", path = config.databaseDir
 
   let

@@ -453,32 +458,17 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#lookahead
  # Only subscribe when this node should aggregate; libp2p broadcasting works
  # on subnet topics regardless.
-  #
-  # Committee sizes in any given epoch vary by 1, i.e. committee sizes $n$
-  # $n+1$ can exist. Furthermore, according to
-  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#aggregation-selection
-  # is_aggregator uses `len(committee) div TARGET_AGGREGATORS_PER_COMMITTEE`
-  # to determine whether committee length/slot signature pairs aggregate the
-  # attestations in a slot/committee, where TARGET_AGGREGATORS_PER_COMMITTEE
-  # is currently 16 in all defined presets. Therefore, probe a committee len
-  # to determine whether it's possible that it's within a boundary such that
-  # either that length or other possible committee lengths don't cross those
-  # div/mod 16 boundaries which would change is_aggregator results.
-  static: doAssert TARGET_AGGREGATORS_PER_COMMITTEE == 16 # mainnet, minimal
-
-  let
-    probeCommitteeLen = get_beacon_committee_len(
-      node.chainDag.headState.data.data, compute_start_slot_at_epoch(epoch),
-      0.CommitteeIndex, cache)
-
-    # Without knowing whether probeCommitteeLen is the higher or lower, if it's
-    # [-1, 1] mod TARGET_AGGREGATORS_PER_COMMITTEE it might cross boundaries in
-    # is_aggregator, such that one can't hoist committee length calculation out
-    # of the anyIt(...) loop.
-    isConstAggregationLen =
-      (probeCommitteeLen mod TARGET_AGGREGATORS_PER_COMMITTEE) notin
-        [0'u64, 1'u64, TARGET_AGGREGATORS_PER_COMMITTEE - 1]
+  let epochRef = node.chainDag.getEpochRef(node.chainDag.head, epoch)
+
+  # Update proposals
+  node.attestationSubnets.proposingSlots[epoch mod 2] = 0
+  for i in 0 ..< SLOTS_PER_EPOCH:
+    let beaconProposer = epochRef.beacon_proposers[i]
+    if beaconProposer.isSome and beaconProposer.get()[0] in attachedValidators:
+      node.attestationSubnets.proposingSlots[epoch mod 2] =
+        node.attestationSubnets.proposingSlots[epoch mod 2] or (1'u32 shl i)
+
+  # Update attestations
 
  template isAnyCommitteeValidatorAggregating(
      validatorIndices, committeeLen: untyped, slot: Slot): bool =
    anyIt(

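A worked example of the bitmap arithmetic (illustrative slots): if attached validators propose at slots 3 and 17 of an epoch, the epoch's proposal word ends up with exactly those two bits set.

```nim
assert ((1'u32 shl 3) or (1'u32 shl 17)) == 0x0002_0008'u32
```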
@@ -489,22 +479,17 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
        node.chainDag.headState.data.data.fork,
        node.chainDag.headState.data.data.genesis_validators_root, slot)))
 
-  # The relevant bitmap are 32 bits each.
-  static: doAssert SLOTS_PER_EPOCH <= 32
-
-  node.attestationSubnets.lastCalculatedAttestationEpoch = epoch
+  node.attestationSubnets.lastCalculatedEpoch = epoch
  node.attestationSubnets.attestingSlots[epoch mod 2] = 0
 
+  # The relevant bitmaps are 32 bits each.
+  static: doAssert SLOTS_PER_EPOCH <= 32
+
  for (validatorIndices, committeeIndex, subnetIndex, slot) in
      get_committee_assignments(
        node.chainDag.headState.data.data, epoch, validatorIndices, cache):
 
    doAssert compute_epoch_at_slot(slot) == epoch
-    let committeeLen =
-      if isConstAggregationLen:
-        probeCommitteeLen
-      else:
-        get_beacon_committee_len(
-          node.chainDag.headState.data.data, slot, committeeIndex, cache)
 
    # Each get_committee_assignments() call here is on the next epoch. At any
    # given time, only care about two epochs, the current and next epoch. So,

@@ -524,7 +509,8 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
      (1'u32 shl (slot mod SLOTS_PER_EPOCH))
 
    if not isAnyCommitteeValidatorAggregating(
-        validatorIndices, committeeLen, slot):
+        validatorIndices,
+        get_beacon_committee_len(epochRef, slot, committeeIndex), slot):
      continue
 
    node.attestationSubnets.unsubscribeSlot[subnetIndex] =

@@ -541,11 +527,11 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
      node.attestationSubnets.subscribeSlot[subnetIndex])
 
 # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
-proc getStabilitySubnetLength(node: BeaconNode): uint64 =
+func getStabilitySubnetLength(node: BeaconNode): uint64 =
  EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION +
    node.network.rng[].rand(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION.int).uint64
 
-proc updateStabilitySubnets(node: BeaconNode, slot: Slot): set[uint8] =
+func updateStabilitySubnets(node: BeaconNode, slot: Slot): set[uint8] =
  # Equivalent to wallSlot by cycleAttestationSubnets(), especially
  # since it'll try to run early in epochs, avoiding race conditions.
  static: doAssert ATTESTATION_SUBNET_COUNT <= high(uint8)

@@ -878,13 +864,14 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) =
  # This exits early all but one call each epoch.
  traceAsyncErrors node.cycleAttestationSubnets(slot)
 
-func getNextAttestation(node: BeaconNode, slot: Slot): Slot =
-  # The relevant attestations are in, depending on calculated bounds:
+func getNextValidatorAction(
+    actionSlotSource: auto, lastCalculatedEpoch: Epoch, slot: Slot): Slot =
+  # The relevant actions are in, depending on calculated bounds:
  # [aS[epoch mod 2], aS[1 - (epoch mod 2)]]
  #  current epoch          next epoch
-  let orderedAttestingSlots = [
-    node.attestationSubnets.attestingSlots[ slot.epoch mod 2'u64],
-    node.attestationSubnets.attestingSlots[1 - (slot.epoch mod 2'u64)]]
+  let orderedActionSlots = [
+    actionSlotSource[ slot.epoch mod 2'u64],
+    actionSlotSource[1 - (slot.epoch mod 2'u64)]]
 
  static: doAssert MIN_ATTESTATION_INCLUSION_DELAY == 1

@@ -894,15 +881,15 @@ func getNextAttestation(node: BeaconNode, slot: Slot): Slot =
  for i in [0'u64, 1'u64]:
    let bitmapEpoch = slot.epoch + i
 
-    if bitmapEpoch > node.attestationSubnets.lastCalculatedAttestationEpoch:
+    if bitmapEpoch > lastCalculatedEpoch:
      return FAR_FUTURE_SLOT
 
    for slotOffset in 0 ..< SLOTS_PER_EPOCH:
-      let nextAttestationSlot =
+      let nextActionSlot =
        compute_start_slot_at_epoch(bitmapEpoch) + slotOffset
-      if ((orderedAttestingSlots[i] and (1'u32 shl slotOffset)) != 0) and
-          nextAttestationSlot > slot:
-        return nextAttestationSlot
+      if ((orderedActionSlots[i] and (1'u32 shl slotOffset)) != 0) and
+          nextActionSlot > slot:
+        return nextActionSlot
 
  FAR_FUTURE_SLOT

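A self-contained sketch of this scan, with plain integers in place of Slot/Epoch and 32 slots per epoch assumed: it returns the first marked slot strictly after `slot`, or the far-future sentinel when nothing is scheduled within the two calculated epochs.

```nim
const
  SLOTS_PER_EPOCH = 32'u64
  FAR_FUTURE_SLOT = high(uint64)

func nextAction(actionSlots: array[2, uint32],
                lastCalculatedEpoch, slot: uint64): uint64 =
  let epoch = slot div SLOTS_PER_EPOCH
  # Current epoch's word first, then next epoch's word.
  let ordered = [actionSlots[epoch mod 2], actionSlots[1 - (epoch mod 2)]]
  for i in [0'u64, 1'u64]:
    let bitmapEpoch = epoch + i
    if bitmapEpoch > lastCalculatedEpoch:
      return FAR_FUTURE_SLOT
    for slotOffset in 0'u64 ..< SLOTS_PER_EPOCH:
      let candidate = bitmapEpoch * SLOTS_PER_EPOCH + slotOffset
      if ((ordered[i] and (1'u32 shl slotOffset)) != 0) and candidate > slot:
        return candidate
  FAR_FUTURE_SLOT

var bitmaps: array[2, uint32]
bitmaps[0] = 1'u32 shl 5 # a duty at slot 5 of an even-parity epoch
assert nextAction(bitmaps, lastCalculatedEpoch = 2, slot = 64) == 69
```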
@@ -928,10 +915,23 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
  # the database are synced with the filesystem.
  node.db.checkpoint()
 
+  # -1 is a more useful output than 18446744073709551615 as an indicator of
+  # no future attestation/proposal known.
+  template displayInt64(x: Slot): int64 =
+    if x == high(uint64).Slot:
+      -1'i64
+    else:
+      toGaugeValue(x)
+
  let
-    nextAttestationSlot = node.getNextAttestation(slot)
-    nextActionWaitTime =
-      saturate(fromNow(node.beaconClock, nextAttestationSlot))
+    nextAttestationSlot = getNextValidatorAction(
+      node.attestationSubnets.attestingSlots,
+      node.attestationSubnets.lastCalculatedEpoch, slot)
+    nextProposalSlot = getNextValidatorAction(
+      node.attestationSubnets.proposingSlots,
+      node.attestationSubnets.lastCalculatedEpoch, slot)
+    nextActionWaitTime = saturate(fromNow(
+      node.beaconClock, min(nextAttestationSlot, nextProposalSlot)))
 
  info "Slot end",
    slot = shortLog(slot),

@@ -941,7 +941,8 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
    finalizedHead = shortLog(node.chainDag.finalizedHead.blck),
    finalizedEpoch =
      shortLog(node.chainDag.finalizedHead.blck.slot.compute_epoch_at_slot()),
-    nextAttestationSlot,
+    nextAttestationSlot = displayInt64(nextAttestationSlot),
+    nextProposalSlot = displayInt64(nextProposalSlot),
    nextActionWait =
      if nextAttestationSlot == FAR_FUTURE_SLOT:
        "n/a"

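The display rule restated standalone (a sketch, not the node's template):

```nim
func displayInt64(x: uint64): int64 =
  # high(uint64) marks "no action scheduled" and prints as -1.
  if x == high(uint64): -1'i64 else: int64(x)

assert displayInt64(high(uint64)) == -1
assert displayInt64(12345'u64) == 12345
```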
@@ -1135,7 +1136,7 @@ proc startSyncManager(node: BeaconNode) =
  )
  node.syncManager.start()
 
-proc connectedPeersCount(node: BeaconNode): int =
+func connectedPeersCount(node: BeaconNode): int =
  len(node.network.peerPool)
 
 proc installRpcHandlers(rpcServer: RpcServer, node: BeaconNode) =

@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# Copyright (c) 2018-2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 #   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -9,7 +9,7 @@
 
 import
   # Standard library
-  std/[os, tables, random, strutils, typetraits],
+  std/[os, tables, strutils, typetraits],
 
   # Nimble packages
   chronos, confutils/defs,

@@ -46,8 +46,6 @@ proc updateLogLevel*(logLevel: string) =
      warn "Unrecognized logging topic", topic = topicName
 
 proc setupLogging*(logLevel: string, logFile: Option[OutFile]) =
-  randomize()
-
  if logFile.isSome:
    when defaultChroniclesStream.outputs.type.arity > 1:
      block openLogFile:

@@ -7,7 +7,7 @@
 
 import
   # Standard library
-  os, strutils, json,
+  std/[os, json, random, strutils],
 
   # Nimble packages
   stew/shims/[tables, macros],

@@ -27,7 +27,8 @@ import
   ./ssz/merkleization,
   ./spec/eth2_apis/callsigs_types,
   ./validators/[attestation_aggregation, keystore_management, validator_pool, slashing_protection],
-  ./eth/db/[kvstore, kvstore_sqlite3]
+  ./eth/db/[kvstore, kvstore_sqlite3],
+  ./eth/keys, ./eth/p2p/discoveryv5/random2
 
 logScope: topics = "vc"

@@ -281,6 +282,13 @@ programMain:
 
  setupLogging(config.logLevel, config.logFile)
 
+  # Doesn't use std/random directly, but dependencies might
+  let rng = keys.newRng()
+  if rng.isNil:
+    randomize()
+  else:
+    randomize(rng[].rand(high(int)))
+
  case config.cmd
  of VCNoCommand:
    debug "Launching validator client",

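The seeding pattern in isolation: prefer a seed drawn from the cryptographic RNG, falling back to std/random's time-based seeding only if RNG creation fails (a sketch assuming nim-eth's keys.newRng and random2.rand):

```nim
import std/random, eth/keys, eth/p2p/discoveryv5/random2

let rng = keys.newRng()
if rng.isNil:
  randomize()                       # time-based fallback seed
else:
  randomize(rng[].rand(high(int)))  # seed from the BearSSL HMAC-DRBG
```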
@@ -124,6 +124,8 @@ proc process_deposit*(preset: RuntimePreset,
    # New validator! Add validator and balance entries
    state.validators.add(get_validator_from_deposit(deposit.data))
    state.balances.add(amount)
+
+    doAssert state.validators.len == state.balances.len
  else:
    # Deposits may come with invalid signatures - in that case, they are not
    # turned into a validator but still get processed to keep the deposit

@@ -75,10 +75,6 @@ const
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#misc
  ATTESTATION_SUBNET_COUNT* = 64
 
-  # https://github.com/ethereum/eth2.0-specs/pull/2101
-  ATTESTATION_PRODUCTION_DIVISOR* = 3
-  ATTESTATION_ENTROPY_DIVISOR* = 12
-
 template maxSize*(n: int) {.pragma.}
 
 # Block validation flow

@@ -7,7 +7,7 @@
 
 import
   # Standard library
-  std/[os, osproc, random, sequtils, streams, tables],
+  std/[os, osproc, sequtils, streams, tables],
 
   # Nimble packages
   stew/[assign2, objects, shims/macros],

@@ -541,28 +541,6 @@ proc broadcastAggregatedAttestations(
        validator = shortLog(curr[0].v),
        aggregationSlot
 
-proc getSlotTimingEntropy(): int64 =
-  # Ensure SECONDS_PER_SLOT / ATTESTATION_PRODUCTION_DIVISOR >
-  # SECONDS_PER_SLOT / ATTESTATION_ENTROPY_DIVISOR, which will
-  # ensure that the second condition can't go negative.
-  static: doAssert ATTESTATION_ENTROPY_DIVISOR > ATTESTATION_PRODUCTION_DIVISOR
-
-  # For each `slot`, a validator must generate a uniform random variable
-  # `slot_timing_entropy` between `(-SECONDS_PER_SLOT /
-  # ATTESTATION_ENTROPY_DIVISOR, SECONDS_PER_SLOT /
-  # ATTESTATION_ENTROPY_DIVISOR)` with millisecond resolution and using local
-  # entropy.
-  #
-  # Per issue discussion "validators served by the same beacon node can have
-  # the same attestation production time, i.e., they can share the source of
-  # the entropy and the actual slot_timing_entropy value."
-  const
-    slot_timing_entropy_upper_bound =
-      SECONDS_PER_SLOT.int64 * 1000 div ATTESTATION_ENTROPY_DIVISOR
-    slot_timing_entropy_lower_bound = 0-slot_timing_entropy_upper_bound
-  rand(range[(slot_timing_entropy_lower_bound + 1) ..
-    (slot_timing_entropy_upper_bound - 1)])
-
 proc updateValidatorMetrics*(node: BeaconNode) =
  when defined(metrics):
    # Technically, this only needs to be done on epoch transitions and if there's

@@ -654,29 +632,14 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
 
  head = await handleProposal(node, head, slot)
 
-  # Fix timing attack: https://github.com/ethereum/eth2.0-specs/pull/2101
-  # A validator must create and broadcast the `attestation` to the associated
-  # attestation subnet when the earlier one of the following two events occurs:
-  #
-  # - The validator has received a valid block from the expected block
-  # proposer for the assigned `slot`. In this case, the validator must set a
-  # timer for `abs(slot_timing_entropy)`. The end of this timer will be the
-  # trigger for attestation production.
-  #
-  # - `SECONDS_PER_SLOT / ATTESTATION_PRODUCTION_DIVISOR +
-  # slot_timing_entropy` seconds have elapsed since the start of the `slot`
-  # (using the `slot_timing_entropy` generated for this slot)
-
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#attesting
  # Milliseconds to wait from the start of the slot before sending out
-  # attestations - base value
-  const attestationOffset =
-    SECONDS_PER_SLOT.int64 * 1000 div ATTESTATION_PRODUCTION_DIVISOR
+  # attestations
+  const attestationOffset = SECONDS_PER_SLOT.int64 * 1000 div 3
 
  let
-    slotTimingEntropy = getSlotTimingEntropy() # +/- 1s
    # The latest point in time when we'll be sending out attestations
-    attestationCutoffTime = slot.toBeaconTime(
-      millis(attestationOffset + slotTimingEntropy))
+    attestationCutoffTime = slot.toBeaconTime(millis(attestationOffset))
    attestationCutoff = node.beaconClock.fromNow(attestationCutoffTime)
 
  if attestationCutoff.inFuture:

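With mainnet's SECONDS_PER_SLOT of 12, the constant works out to a third of the slot:

```nim
const SECONDS_PER_SLOT = 12
# 12 * 1000 div 3 == 4000 ms after the start of the slot
assert SECONDS_PER_SLOT.int64 * 1000 div 3 == 4000
```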
@@ -687,9 +650,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
    # Wait either for the block or the attestation cutoff time to arrive
    if await node.consensusManager[].expectBlock(slot).withTimeout(attestationCutoff.offset):
      # The expected block arrived (or expectBlock was called again which
-      # shouldn't happen as this is the only place we use it) - according to the
-      # spec, we should now wait for abs(slotTimingEntropy) - in our async loop
-      # however, we might have been doing other processing that caused delays
+      # shouldn't happen as this is the only place we use it) - in our async
+      # loop however, we might have been doing other processing that caused delays
      # here so we'll cap the waiting to the time when we would have sent out
      # attestations had the block not arrived.
      # An opposite case is that we received (or produced) a block that has

@@ -698,8 +660,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
      # impose a minimum delay of 250ms. The delay is enforced only when we're
      # not hitting the "normal" cutoff time for sending out attestations.
 
+      const afterBlockDelay = 250
      let
-        afterBlockDelay = max(250, abs(slotTimingEntropy))
        afterBlockTime = node.beaconClock.now() + millis(afterBlockDelay)
        afterBlockCutoff = node.beaconClock.fromNow(
          min(afterBlockTime, attestationCutoffTime))

@@ -9,7 +9,7 @@ when not defined(nimscript):
  const
    versionMajor* = 1
    versionMinor* = 0
-    versionBuild* = 10
+    versionBuild* = 12
 
    versionBlob* = "stateofus" # Single word - ends up in the default graffiti

@@ -67,3 +67,13 @@ All the same conventions apply:
 WEB3_URL="ws://localhost:8545" ./run-mainnet-node.sh --max-peers=150
 ```
 
+## Running a Prater node
+
+`run-prater-beacon-node.sh` is a similar script intended for connecting to the Prater
+testnet. All the same conventions apply:
+
+```bash
+# using a local Geth instance
+WEB3_URL="ws://localhost:8545" ./run-prater-node.sh --max-peers=150
+```

@@ -18,6 +18,7 @@ import # Unit test
   ./test_bitseqs,
   ./test_block_pool,
   ./test_datatypes,
+  ./test_discovery,
   ./test_eth1_monitor,
   ./test_exit_pool,
   ./test_helpers,

@@ -0,0 +1,132 @@
+import
+  std/unittest,
+  chronos, stew/shims/net, eth/keys, eth/p2p/discoveryv5/enr,
+  ../beacon_chain/conf,
+  ../beacon_chain/spec/datatypes,
+  ../beacon_chain/networking/[eth2_network, eth2_discovery],
+  ./testutil
+
+template timedAsyncTest*(name, body: untyped) =
+  timedTest name:
+    proc scenario {.async.} = body
+    waitFor scenario()
+
+proc new*(T: type Eth2DiscoveryProtocol,
+    pk: keys.PrivateKey,
+    enrIp: Option[ValidIpAddress], enrTcpPort, enrUdpPort: Option[Port],
+    bindPort: Port, bindIp: ValidIpAddress,
+    enrFields: openArray[(string, seq[byte])] = [],
+    rng: ref BrHmacDrbgContext):
+    T {.raises: [Exception, Defect].} =
+
+  newProtocol(pk, enrIp, enrTcpPort, enrUdpPort, enrFields,
+    bindPort = bindPort, bindIp = bindIp, rng = rng)
+
+proc generateNode(rng: ref BrHmacDrbgContext, port: Port,
+    enrFields: openArray[(string, seq[byte])] = []): Eth2DiscoveryProtocol =
+  let ip = ValidIpAddress.init("127.0.0.1")
+  Eth2DiscoveryProtocol.new(keys.PrivateKey.random(rng[]),
+    some(ip), some(port), some(port), port, ip, enrFields, rng = rng)
+
+suiteReport "Eth2 specific discovery tests":
+  let
+    rng = keys.newRng()
+    enrForkId = ENRForkID(
+      fork_digest: ForkDigest([byte 0, 1, 2, 3]),
+      next_fork_version: Version([byte 0, 0, 0, 0]),
+      next_fork_epoch: Epoch(0))
+
+  timedAsyncTest "Subnet query":
+    var attnets: BitArray[ATTESTATION_SUBNET_COUNT]
+    attnets.setBit(34)
+
+    let
+      node1 = generateNode(rng, Port(5000))
+      node2 = generateNode(rng, Port(5001),
+        {"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(attnets)})
+
+    node1.open()
+    node2.open()
+
+    # ping in one direction to add node2 to routing table of node1
+    check (await node2.ping(node1.localNode)).isOk()
+
+    var attnetsSelected: BitArray[ATTESTATION_SUBNET_COUNT]
+    attnetsSelected.setBit(42)
+    attnetsSelected.setBit(34)
+
+    let discovered = await node1.queryRandom(enrForkId, attnetsSelected)
+    check discovered.len == 1
+
+    await node1.closeWait()
+    await node2.closeWait()
+
+  timedAsyncTest "Invalid attnets field":
+    var invalidAttnets: BitArray[ATTESTATION_SUBNET_COUNT div 2]
+    invalidAttnets.setBit(15)
+    # TODO: This doesn't fail actually.
+    # var invalidAttnets2: BitArray[ATTESTATION_SUBNET_COUNT * 2]
+    # invalidAttnets2.setBit(15)
+    var attnets: BitArray[ATTESTATION_SUBNET_COUNT]
+    attnets.setBit(15)
+
+    let
+      node1 = generateNode(rng, Port(5000))
+      node2 = generateNode(rng, Port(5001),
+        {"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(invalidAttnets)})
+      node3 = generateNode(rng, Port(5002),
+        {"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(attnets)})
+
+    node1.open()
+    node2.open()
+    node3.open()
+
+    check (await node2.ping(node1.localNode)).isOk()
+    check (await node3.ping(node1.localNode)).isOk()
+
+    var attnetsSelected: BitArray[ATTESTATION_SUBNET_COUNT]
+    attnetsSelected.setBit(15)
+    attnetsSelected.setBit(42)
+
+    let discovered = await node1.queryRandom(enrForkId, attnetsSelected)
+    check discovered.len == 1
+
+    await node1.closeWait()
+    await node2.closeWait()
+    await node3.closeWait()
+
+  timedAsyncTest "Subnet query after ENR update":
+    var attnets: BitArray[ATTESTATION_SUBNET_COUNT]
+    attnets.setBit(1)
+
+    let
+      node1 = generateNode(rng, Port(5000))
+      node2 = generateNode(rng, Port(5001),
+        {"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(attnets)})
+
+    node1.open()
+    node2.open()
+
+    check (await node2.ping(node1.localNode)).isOk()
+
+    var attnetsSelected: BitArray[ATTESTATION_SUBNET_COUNT]
+    attnetsSelected.setBit(2)
+
+    block:
+      let discovered = await node1.queryRandom(enrForkId, attnetsSelected)
+      check discovered.len == 0
+
+    block:
+      attnets.setBit(2)
+      check node2.updateRecord(
+        {"eth2": SSZ.encode(enrForkId), "attnets": SSZ.encode(attnets)}).isOk()
+
+      let nodes = await node1.findNode(node2.localNode, @[0'u32])
+      check nodes.isOk() and nodes[].len > 0
+      discard node1.addNode(nodes[][0])
+
+    let discovered = await node1.queryRandom(enrForkId, attnetsSelected)
+    check discovered.len == 1
+
+    await node1.closeWait()
+    await node2.closeWait()

@@ -1 +1 @@
-Subproject commit 192c1b48ea5ff4adb4e6ef7d2a9e5f82fb5ffd72
+Subproject commit 5b4e32779b66b6d84edb13aa4eee67f544313d4b

@@ -1 +1 @@
-Subproject commit 0f890d4a667fcb2dcafd7243a079e5af2874db1d
+Subproject commit b5956a1b4b74cc7f2c10eaf9cceb12c80ae33787

@@ -1 +1 @@
-Subproject commit 93674cbdbd3ce59e2d4d0cbdfac9ab62d9a6d28f
+Subproject commit e788deab3d59ff8a4fe103aeb5d82d3d82fcac7d