Merge branch 'devel' into cli-reforms

Zahary Karadjov 2020-07-10 16:23:27 +03:00
commit c06c60dcaf
11 changed files with 65 additions and 72 deletions


@@ -76,5 +76,5 @@ task test, "Run all tests":
buildAndRunBinary "all_fixtures_require_ssz", "tests/official/", "-d:chronicles_log_level=TRACE -d:const_preset=mainnet"
# State and block sims; getting to 4th epoch triggers consensus checks
buildAndRunBinary "state_sim", "research/", "-d:const_preset=mainnet", "--validators=2000 --slots=128"
buildAndRunBinary "block_sim", "research/", "-d:const_preset=mainnet", "--validators=2000 --slots=128"
buildAndRunBinary "state_sim", "research/", "-d:const_preset=mainnet", "--validators=3000 --slots=128"
buildAndRunBinary "block_sim", "research/", "-d:const_preset=mainnet", "--validators=3000 --slots=128"


@@ -274,8 +274,6 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
# sets by virtue of not overlapping with some other attestation
# and therefore being useful after all?
trace "Ignoring subset attestation",
existingParticipants = get_attesting_indices_seq(
state, a.data, v.aggregation_bits, cache),
newParticipants = participants,
cat = "filtering"
found = true
@@ -286,10 +284,6 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
# can now be removed per same logic as above
trace "Removing subset attestations",
existingParticipants = a.validations.filterIt(
it.aggregation_bits.isSubsetOf(validation.aggregation_bits)
).mapIt(get_attesting_indices_seq(
state, a.data, it.aggregation_bits, cache)),
newParticipants = participants,
cat = "pruning"


@@ -427,7 +427,7 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
epoch = scheduledSlot.compute_epoch_at_slot(),
slot = scheduledSlot
# Brute-force, but ensure it's reliably enough to run in CI.
# Brute-force, but ensure it's reliable enough to run in CI.
quit(0)
if not wallSlot.afterGenesis or (wallSlot.slot < lastSlot):


@@ -96,6 +96,5 @@ proc new*(T: type Eth2DiscoveryProtocol,
if fileExists(persistentBootstrapFile):
loadBootstrapFile(persistentBootstrapFile, bootstrapEnrs, ourPubKey)
let enrFieldPairs = mapIt(enrFields, toFieldPair(it[0], it[1]))
newProtocol(
pk, db, ip, tcpPort, udpPort, enrFieldPairs, bootstrapEnrs, rng = rng)
pk, db, ip, tcpPort, udpPort, enrFields, bootstrapEnrs, rng = rng)


@@ -408,14 +408,12 @@ proc bootstrapDiscovery(conf: InspectorConf,
let udpPort = Port(conf.discoveryPort)
let tcpPort = Port(conf.ethPort)
let host = host.toIpAddress()
var pairs: seq[FieldPair]
if enrFields.isSome():
let fields = enrFields.get()
pairs = @[toFieldPair("eth2", fields.eth2),
toFieldPair("attnets", fields.attnets)]
let pairs = {"eth2": fields.eth2, "attnets": fields.attnets}
result = newProtocol(pk, db, host, tcpPort, udpPort, pairs, bootnodes)
else:
pairs = @[]
result = newProtocol(pk, db, host, tcpPort, udpPort, pairs, bootnodes)
result = newProtocol(pk, db, host, tcpPort, udpPort, [], bootnodes)
result.open()
result.start()
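
The replacement code builds the ENR field pairs with Nim's {key: value} table constructor, which is plain sugar for an array of (key, value) tuples; presumably the bumped nim-eth newProtocol accepts such pairs (and the empty []) directly. A minimal sketch of the syntax itself, with made-up values:

# {key: value} is just an array of tuples, so it can be passed wherever
# a list of (name, value) pairs is expected.
let pairs = {"eth2": "0xAABB", "attnets": "0xCCDD"}
doAssert pairs == [("eth2", "0xAABB"), ("attnets", "0xCCDD")]
doAssert pairs.len == 2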


@@ -442,7 +442,6 @@ type
StateCache* = object
shuffled_active_validator_indices*:
Table[Epoch, seq[ValidatorIndex]]
committee_count_cache*: Table[Epoch, uint64]
beacon_proposer_indices*: Table[Slot, Option[ValidatorIndex]]
func shortValidatorKey*(state: BeaconState, validatorIdx: int): string =
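
The committee_count_cache field can go because the per-slot committee count is a pure function of the active-validator count, which the shuffled-indices table already provides; the get_beacon_committee hunks below recompute it on the fly. A rough sketch of that derivation, assuming mainnet-preset constants and a hypothetical helper name:

const
  SLOTS_PER_EPOCH = 32'u64
  MAX_COMMITTEES_PER_SLOT = 64'u64
  TARGET_COMMITTEE_SIZE = 128'u64

func committeeCountAtSlot(activeValidatorCount: uint64): uint64 =
  # Derived purely from the number of active validators, so no separate
  # per-epoch cache entry is needed.
  clamp(activeValidatorCount div SLOTS_PER_EPOCH div TARGET_COMMITTEE_SIZE,
        1'u64, MAX_COMMITTEES_PER_SLOT)

doAssert committeeCountAtSlot(3_000) == 1       # small sim networks: one committee
doAssert committeeCountAtSlot(300_000) == 64    # capped at MAX_COMMITTEES_PER_SLOT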


@@ -22,7 +22,7 @@
{.push raises: [Defect].}
import
math, sequtils, tables,
math, sequtils, tables, algorithm,
stew/[bitops2], chronicles, json_serialization/std/sets,
metrics, ../extras, ../ssz/merkleization,
beaconstate, crypto, datatypes, digest, helpers, validator,
@@ -330,41 +330,41 @@ func get_inclusion_delay_deltas(
state: BeaconState, total_balance: Gwei, cache: var StateCache):
seq[Gwei] =
# Return proposer and inclusion delay micro-rewards/penalties for each validator.
var rewards = repeat(0'u64, len(state.validators))
let
var
rewards = repeat(0'u64, len(state.validators))
matching_source_attestations =
get_matching_source_attestations(state, get_previous_epoch(state))
source_attestation_attesting_indices = mapIt(
matching_source_attestations,
get_attesting_indices(state, it.data, it.aggregation_bits, cache))
for index in get_unslashed_attesting_indices(state, matching_source_attestations, cache):
# Translation of attestation = min([...])
# Start by filtering the right attestations
var filtered_matching_source_attestations: seq[PendingAttestation]
# Translation of attestation = min([...])
# The spec (pseudo)code defines this in terms of Python's min(), which per
# https://docs.python.org/3/library/functions.html#min:
# If multiple items are minimal, the function returns the first one
# encountered.
# Therefore, this approach depends on Nim's default sort being stable, per
# https://nim-lang.org/docs/algorithm.html#sort,openArray[T],proc(T,T) via
# "The sorting is guaranteed to be stable and the worst case is guaranteed
# to be O(n log n)."
matching_source_attestations.sort do (x, y: PendingAttestation) -> int:
cmp(x.inclusion_delay, y.inclusion_delay)
for source_attestation_index, a in matching_source_attestations:
if index notin
# Order/indices in source_attestation_attesting_indices matches sorted order
let source_attestation_attesting_indices = mapIt(
matching_source_attestations,
get_attesting_indices(state, it.data, it.aggregation_bits, cache))
for index in get_unslashed_attesting_indices(
state, matching_source_attestations, cache):
for source_attestation_index, attestation in matching_source_attestations:
if index in
source_attestation_attesting_indices[source_attestation_index]:
continue
filtered_matching_source_attestations.add a
if filtered_matching_source_attestations.len == 0:
continue
# The first filtered attestation serves as min until we find something
# better
var attestation = filtered_matching_source_attestations[0]
for source_attestation_index, a in filtered_matching_source_attestations:
if a.inclusion_delay < attestation.inclusion_delay:
attestation = a
rewards[attestation.proposer_index] +=
get_proposer_reward(state, index, total_balance)
let max_attester_reward =
get_base_reward(state, index, total_balance) -
get_proposer_reward(state, index, total_balance)
rewards[index] += Gwei(max_attester_reward div attestation.inclusion_delay)
rewards[attestation.proposer_index] +=
get_proposer_reward(state, index, total_balance)
let max_attester_reward =
get_base_reward(state, index, total_balance) -
get_proposer_reward(state, index, total_balance)
rewards[index] +=
Gwei(max_attester_reward div attestation.inclusion_delay)
break
# No penalties associated with inclusion delay
# Spec constructs both and returns both; this doesn't
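
The rewritten loop leans on the stable-sort guarantee quoted in the comment: after a stable sort on inclusion_delay, taking the first matching attestation in order reproduces Python's min(), which returns the first minimal element encountered. A tiny self-contained illustration of the tie-break, using a hypothetical Att type:

import algorithm

type Att = object
  inclusion_delay: uint64
  tag: char   # only here to tell equal-delay items apart

var atts = @[Att(inclusion_delay: 3, tag: 'a'),
             Att(inclusion_delay: 1, tag: 'b'),
             Att(inclusion_delay: 1, tag: 'c')]

atts.sort do (x, y: Att) -> int:
  cmp(x.inclusion_delay, y.inclusion_delay)

# Stable sort keeps 'b' ahead of 'c', so atts[0] is the *first* minimal
# attestation, exactly what min() would return in the spec pseudocode.
doAssert atts[0].tag == 'b'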


@@ -9,7 +9,7 @@
{.push raises: [Defect].}
import
algorithm, options, sequtils, math, tables,
algorithm, options, sequtils, math, tables, sets,
./datatypes, ./digest, ./helpers
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_shuffled_index
@@ -140,18 +140,15 @@ func get_beacon_committee*(
cache.shuffled_active_validator_indices[epoch] =
get_shuffled_active_validator_indices(state, epoch)
# Constant throughout an epoch
if epoch notin cache.committee_count_cache:
cache.committee_count_cache[epoch] =
get_committee_count_at_slot(state, slot)
try:
let committee_count = get_committee_count_at_slot(
cache.shuffled_active_validator_indices[epoch].len)
compute_committee(
cache.shuffled_active_validator_indices[epoch],
get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
(slot mod SLOTS_PER_EPOCH) * cache.committee_count_cache[epoch] +
(slot mod SLOTS_PER_EPOCH) * committee_count +
index.uint64,
cache.committee_count_cache[epoch] * SLOTS_PER_EPOCH
committee_count * SLOTS_PER_EPOCH
)
except KeyError:
raiseAssert "values are added to cache before using them"
@@ -160,7 +157,6 @@ func get_beacon_committee*(
func get_empty_per_epoch_cache*(): StateCache =
result.shuffled_active_validator_indices =
initTable[Epoch, seq[ValidatorIndex]]()
result.committee_count_cache = initTable[Epoch, uint64]()
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#compute_proposer_index
func compute_proposer_index(state: BeaconState, indices: seq[ValidatorIndex],
@@ -234,7 +230,8 @@ func get_beacon_proposer_index*(state: BeaconState, cache: var StateCache):
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#validator-assignments
func get_committee_assignment*(
state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex):
state: BeaconState, epoch: Epoch,
validator_indices: HashSet[ValidatorIndex]):
Option[tuple[a: seq[ValidatorIndex], b: CommitteeIndex, c: Slot]] {.used.} =
# Return the committee assignment in the ``epoch`` for ``validator_index``.
# ``assignment`` returned is a tuple of the following form:
@@ -242,6 +239,9 @@ func get_committee_assignment*(
# * ``assignment[1]`` is the index to which the committee is assigned
# * ``assignment[2]`` is the slot at which the committee is assigned
# Return None if no assignment.
#
# Slightly adapted from spec version to support multiple validator indices,
# since each beacon_node supports many validators.
let next_epoch = get_current_epoch(state) + 1
doAssert epoch <= next_epoch
@@ -251,9 +251,8 @@ func get_committee_assignment*(
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
for index in 0 ..< get_committee_count_at_slot(state, slot):
let idx = index.CommitteeIndex
let committee =
get_beacon_committee(state, slot, idx, cache)
if validator_index in committee:
let committee = get_beacon_committee(state, slot, idx, cache)
if not disjoint(validator_indices, toHashSet(committee)):
return some((committee, idx, slot))
none(tuple[a: seq[ValidatorIndex], b: CommitteeIndex, c: Slot])
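
The adapted helper takes a set of validator indices and matches as soon as any of them sits in a committee, hence the not disjoint(...) test. A minimal sketch with illustrative indices:

import sets

let ourValidators = toHashSet([3, 7, 42])
let committee = @[40, 41, 42, 43]

doAssert not disjoint(ourValidators, toHashSet(committee))   # shares index 42
doAssert disjoint(ourValidators, toHashSet(@[10, 11]))       # no overlap, no match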


@@ -440,7 +440,8 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
request_count = item.request.count,
request_step = item.request.step,
blocks_map = getShortMap(item.request, item.data),
blocks_count = len(item.data), errCode = res.error
blocks_count = len(item.data), errCode = res.error,
topics = "syncman"
var resetSlot: Option[Slot]
@@ -455,7 +456,7 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
peer = req.item, rewind_to_slot = finalizedSlot,
request_slot = req.slot, request_count = req.count,
request_step = req.step, blocks_count = len(item.data),
blocks_map = getShortMap(req, item.data)
blocks_map = getShortMap(req, item.data), topics = "syncman"
resetSlot = some(finalizedSlot)
req.item.updateScore(PeerScoreMissingBlocks)
else:
@@ -463,21 +464,22 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
peer = req.item, to_slot = finalizedSlot,
request_slot = req.slot, request_count = req.count,
request_step = req.step, blocks_count = len(item.data),
blocks_map = getShortMap(req, item.data)
blocks_map = getShortMap(req, item.data), topics = "syncman"
req.item.updateScore(PeerScoreBadBlocks)
elif res.error == BlockError.Invalid:
let req = item.request
warn "Received invalid sequence of blocks", peer = req.item,
request_slot = req.slot, request_count = req.count,
request_step = req.step, blocks_count = len(item.data),
blocks_map = getShortMap(req, item.data)
blocks_map = getShortMap(req, item.data), topics = "syncman"
req.item.updateScore(PeerScoreBadBlocks)
else:
let req = item.request
warn "Received unexpected response from block_pool", peer = req.item,
request_slot = req.slot, request_count = req.count,
request_step = req.step, blocks_count = len(item.data),
blocks_map = getShortMap(req, item.data), errorCode = res.error
blocks_map = getShortMap(req, item.data), errorCode = res.error,
topics = "syncman"
req.item.updateScore(PeerScoreBadBlocks)
# We need to move failed response to the debts queue.
@@ -486,7 +488,8 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
await sq.resetWait(resetSlot)
debug "Rewind to slot was happened", reset_slot = reset_slot.get(),
queue_input_slot = sq.inpSlot,
queue_output_slot = sq.outSlot
queue_output_slot = sq.outSlot,
topics = "syncman"
break
proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T]) =
@@ -824,7 +827,7 @@ proc sync*[A, B](man: SyncManager[A, B]) {.async.} =
debug "Syncing process is not progressing, reset the queue",
workers_count = workersCount(),
to_slot = man.queue.outSlot,
local_head_slot = lsm1.slot
local_head_slot = lsm1.slot, topics = "syncman"
await man.queue.resetWait(none[Slot]())
else:
syncSpeed = speed(lsm1, lsm2)
@@ -924,7 +927,7 @@ proc sync*[A, B](man: SyncManager[A, B]) {.async.} =
await sleepAsync(man.sleepTime)
else:
debug "Synchronization loop waiting for workers completion",
workers_count = workersCount()
workers_count = workersCount(), topics = "syncman"
discard await withTimeout(one(pending), man.sleepTime)
else:
man.inProgress = true
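
The sync-manager hunks above only tag existing log statements with a "syncman" topic so their output can be filtered; a minimal sketch of the same chronicles pattern, with a made-up field value:

import chronicles

debug "Synchronization loop waiting for workers completion",
  workers_count = 4, topics = "syncman"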


@@ -7,7 +7,7 @@
import
# Standard library
tables, strutils, parseutils, sequtils,
tables, strutils, parseutils, sequtils, sets,
# Nimble packages
stew/[byteutils, objects],
@@ -358,7 +358,8 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
let idx = state.validators.asSeq.findIt(it.pubKey == pubkey)
if idx == -1:
continue
let ca = state.get_committee_assignment(epoch, idx.ValidatorIndex)
let ca = state.get_committee_assignment(
epoch, toHashSet([idx.ValidatorIndex]))
if ca.isSome:
result.add((public_key: pubkey,
committee_index: ca.get.b,

vendor/nim-eth (vendored)

@@ -1 +1 @@
Subproject commit 484fbcab1b25072b4c45f496a88d361fc9479be4
Subproject commit 8d2614a1ad503952cf8a101ad1a9042107f33d6c