remove get_epoch_start_slot(...) kludge and update to work exactly as 0.7.1 specifies; remove unused get_attestation_participants_cached(...); update AttestationData to 0.7.1 (being, I believe, the last data structure remaining for such); remove potentially spurious, certainly not-in-spec assertion from compute_committee(...); fix state sim to work with the new get_epoch_start_slot/AttestationData/etc setup, where it can't stuff all shards' attestations from the same slot into the same MIN_ATTESTATION_INCLUSION_DELAY rotating/circular buffer of seq[Attestation]s without more involved shuffling of shard/slot calculation order; fix attestation pool testing to be consistent with get_epoch_start_slot(...) (#302)
commit 7356905f95
parent 214860cb88
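For orientation, a minimal sketch of the 0.7.1-style shard-to-slot mapping that replaces the data.slot kludge below. Illustration only: the constants are placeholder values, the epoch's start slot and start shard are passed in as plain uint64s rather than derived from a BeaconState, and committeeCount is assumed to be at least SLOTS_PER_EPOCH (the real code uses the spec helpers shown in the second hunk).

# Sketch, not the project's get_attestation_data_slot: all inputs are plain uint64s.
const
  SHARD_COUNT = 8'u64       # placeholder; the preset defines the real value
  SLOTS_PER_EPOCH = 8'u64   # placeholder

func sketchAttestationDataSlot(epochStartSlot, epochStartShard, attestationShard,
                               committeeCount: uint64): uint64 =
  # Distance of the attested shard from the epoch's start shard, wrapping around
  # the shard ring.
  let offset = (attestationShard + SHARD_COUNT - epochStartShard) mod SHARD_COUNT
  # committeeCount div SLOTS_PER_EPOCH committees (hence shards) are handled per
  # slot, so dividing the offset by that per-slot count picks the slot in the epoch.
  epochStartSlot + offset div (committeeCount div SLOTS_PER_EPOCH)

For example, with committeeCount = 16 and SLOTS_PER_EPOCH = 8, two shards map to each slot, so a shard offset of 5 lands at epochStartSlot + 2.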
@@ -264,8 +264,6 @@ func get_genesis_beacon_state*(
   state

 # TODO candidate for spec?
 # https://github.com/ethereum/eth2.0-specs/blob/0.5.1/specs/core/0_beacon-chain.md#on-genesis
 func get_initial_beacon_block*(state: BeaconState): BeaconBlock =
   BeaconBlock(
     slot: GENESIS_SLOT,

@@ -281,9 +279,8 @@ func get_attestation_data_slot*(state: BeaconState,
     offset = (data.crosslink.shard + SHARD_COUNT -
       get_epoch_start_shard(state, data.target_epoch)) mod SHARD_COUNT

-  # TODO re-instate once double-check correct conditions in attestation pool
-  #get_epoch_start_slot(data.target_epoch) + offset div (committee_count div SLOTS_PER_EPOCH)
-  data.slot
+  get_epoch_start_slot(data.target_epoch) + offset div
+    (committee_count div SLOTS_PER_EPOCH)

 # This is the slower (O(n)), spec-compatible signature.
 func get_attestation_data_slot*(state: BeaconState,

@@ -433,14 +430,6 @@ func get_attesting_indices_seq*(
   toSeq(items(get_attesting_indices(
     state, attestation_data, bitfield, cache)))

-# TODO legacy function name; rename, reimplement caching if useful, blob/v0.6.2
-iterator get_attestation_participants_cached*(
-    state: BeaconState, attestation_data: AttestationData, bitfield: BitField,
-    cache: var StateCache): ValidatorIndex =
-  for participant in get_attesting_indices(
-      state, attestation_data, bitfield, cache):
-    yield participant
-
 # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#convert_to_indexed
 func convert_to_indexed(state: BeaconState, attestation: Attestation,
     stateCache: var StateCache): IndexedAttestation =

@@ -473,8 +462,7 @@ func convert_to_indexed(state: BeaconState, attestation: Attestation,
   ## the conversion here otherwise needs sorting is due to the
   ## usage of HashSet -- order only matters in one place (that
   ## 0.6.3 highlights and explicates) except in that the spec,
-  ## for no obvious reason, verifies it. So, here goes, sort a
-  ## list just so a called function can verify it's sorted.
+  ## for no obvious reason, verifies it.
   IndexedAttestation(
     custody_bit_0_indices: sorted(
       mapIt(custody_bit_0_indices, it.uint64), system.cmp),

@@ -595,7 +583,6 @@ proc makeAttestationData*(
     else: get_block_root_at_slot(state, epoch_start_slot)

   AttestationData(
-    slot: state.slot,
     beacon_block_root: beacon_block_root,
     target_root: target_root,
     source_epoch: state.current_justified_epoch,

@@ -117,9 +117,8 @@ type
     signature*: ValidatorSig ##\
     ## BLS aggregate signature

-  # https://github.com/ethereum/eth2.0-specs/blob/v0.6.2/specs/core/0_beacon-chain.md#attestationdata
+  # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#attestationdata
   AttestationData* = object
-    slot*: Slot # TODO remove this, once figure out attestation pool issues
     # LMD GHOST vote
     beacon_block_root*: Eth2Digest

@@ -118,7 +118,6 @@ func compute_committee(indices: seq[ValidatorIndex], seed: Eth2Digest,
     start = (len(indices).uint64 * index) div count
     endIdx = (len(indices).uint64 * (index + 1)) div count
     key = (indices.len, seed)
-  doAssert endIdx.int - start.int > 0

   if key notin stateCache.crosslink_committee_cache:
     stateCache.crosslink_committee_cache[key] =

@@ -1,7 +1,7 @@
 import
   confutils, stats, times,
   json, strformat,
-  options, sequtils, random,
+  options, sequtils, random, tables,
   ../tests/[testutil],
   ../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
   ../beacon_chain/[attestation_pool, extras, ssz, state_transition, fork_choice]

@@ -45,8 +45,8 @@ proc writeJson*(prefix, slot, v: auto) =
   discard open(f, fmt"{prefix:04}-{humaneSlotNum(slot):08}.json", fmWrite)
   write(f, pretty(%*(v)))

-cli do(slots = 1945,
-       validators = SLOTS_PER_EPOCH * 8, # One per shard is minimum
+cli do(slots = 448,
+       validators = SLOTS_PER_EPOCH * 9, # One per shard is minimum
        json_interval = SLOTS_PER_EPOCH,
        prefix = 0,
        attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.9,

@@ -58,7 +58,7 @@ cli do(slots = 1945,
     genesisBlock = get_initial_beacon_block(genesisState)

   var
-    attestations: array[MIN_ATTESTATION_INCLUSION_DELAY, seq[Attestation]]
+    attestations = initTable[Slot, seq[Attestation]]()
     state = genesisState
     latest_block_root = signing_root(genesisBlock)
     timers: array[Timers, RunningStat]

@@ -78,10 +78,13 @@ cli do(slots = 1945,
     maybeWrite()

     let
-      attestations_idx = state.slot mod MIN_ATTESTATION_INCLUSION_DELAY
-      body = BeaconBlockBody(attestations: attestations[attestations_idx])
+      attestations_idx = state.slot
+      body = BeaconBlockBody(
+        attestations: attestations.getOrDefault(attestations_idx))

-    attestations[attestations_idx] = @[]
+    attestations.del attestations_idx
+    doAssert len(attestations) <=
+      (SLOTS_PER_EPOCH.int + MIN_ATTESTATION_INCLUSION_DELAY.int)

     let t =
       if (state.slot > GENESIS_SLOT and

@@ -127,9 +130,19 @@ cli do(slots = 1945,
         # add the attestation if any of the validators attested, as given
         # by the randomness. We have to delay when the attestation is
         # actually added to the block per the attestation delay rule!
-        attestations[
-          (state.slot + MIN_ATTESTATION_INCLUSION_DELAY - 1) mod
-            MIN_ATTESTATION_INCLUSION_DELAY].add attestation
+        let target_slot =
+          get_attestation_data_slot(state, attestation.data) +
+            MIN_ATTESTATION_INCLUSION_DELAY - 1
+
+        ## In principle, should enumerate possible shard/slot combinations by
+        ## inverting get_attestation_data_slot(...), but this works. Could be
+        ## filtering earlier if we know that this attestation's being created
+        ## too late to be useful, as well.
+        if target_slot > attestations_idx:
+          var target_slot_attestations =
+            getOrDefault(attestations, target_slot)
+          target_slot_attestations.add attestation
+          attestations[target_slot] = target_slot_attestations

     flushFile(stdout)

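The state-sim hunks above replace the fixed MIN_ATTESTATION_INCLUSION_DELAY circular buffer with a Table keyed by the slot at which each attestation becomes includable. A self-contained sketch of that pattern, illustration only: plain int slots and string payloads stand in for the sim's Slot and Attestation types, and the sim's actual offset is MIN_ATTESTATION_INCLUSION_DELAY - 1 because of where in its per-slot loop the scheduling happens.

import tables

# Placeholder value; the beacon-chain preset defines the real constant.
const MIN_ATTESTATION_INCLUSION_DELAY = 1

proc schedule(pending: var Table[int, seq[string]], dataSlot: int, att: string) =
  # File the attestation under the earliest slot whose block may include it.
  let inclusionSlot = dataSlot + MIN_ATTESTATION_INCLUSION_DELAY
  pending.mgetOrPut(inclusionSlot, @[]).add att

proc drain(pending: var Table[int, seq[string]], slot: int): seq[string] =
  # Everything filed for this slot goes into the proposed block; the bucket is
  # then deleted so the table stays bounded.
  result = pending.getOrDefault(slot)
  pending.del slot

when isMainModule:
  var pending = initTable[int, seq[string]]()
  schedule(pending, 3, "attestation for data slot 3")
  doAssert drain(pending, 3 + MIN_ATTESTATION_INCLUSION_DELAY) ==
    @["attestation for data slot 3"]
  doAssert pending.len == 0

Keying by inclusion slot lets attestations for different shards of the same data slot land in later blocks without circular-buffer index juggling, at the cost of a table whose size stays bounded by roughly an epoch's worth of slots plus the inclusion delay (hence the doAssert in the earlier hunk).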
@@ -40,7 +40,7 @@ suite "Attestation pool processing" & preset():
     let
       # Create an attestation for slot 1!
       crosslink_committee = get_crosslink_committee(state.data.data,
-        slot_to_epoch(state.data.data.slot), 0, cache)
+        slot_to_epoch(state.data.data.slot), 1, cache)
       attestation = makeAttestation(
         state.data.data, state.blck.root, crosslink_committee[0])

@@ -61,7 +61,7 @@ suite "Attestation pool processing" & preset():
     let
       # Create an attestation for slot 1!
       cc0 = get_crosslink_committee(state.data.data,
-        slot_to_epoch(state.data.data.slot), 0, cache)
+        slot_to_epoch(state.data.data.slot), 1, cache)
       attestation0 = makeAttestation(
         state.data.data, state.blck.root, cc0[0])

@@ -69,7 +69,7 @@ suite "Attestation pool processing" & preset():

     let
       cc1 = get_crosslink_committee(state.data.data,
-        slot_to_epoch(state.data.data.slot), 0, cache)
+        slot_to_epoch(state.data.data.slot), 2, cache)
       attestation1 = makeAttestation(
         state.data.data, state.blck.root, cc1[0])

@@ -77,7 +77,8 @@ suite "Attestation pool processing" & preset():
     pool.add(state.data.data, attestation1)
     pool.add(state.data.data, attestation0)

-    for i in 0..<MIN_ATTESTATION_INCLUSION_DELAY.int - 1: advanceState(state.data)
+    for i in 0..<MIN_ATTESTATION_INCLUSION_DELAY.int - 1:
+      advanceState(state.data)

     let attestations = pool.getAttestationsForBlock(
       state.data.data, state.data.data.slot + 1)

@@ -91,7 +92,7 @@ suite "Attestation pool processing" & preset():
     let
       # Create an attestation for slot 1!
       cc0 = get_crosslink_committee(state.data.data,
-        slot_to_epoch(state.data.data.slot), 0, cache)
+        slot_to_epoch(state.data.data.slot), 1, cache)
       attestation0 = makeAttestation(
         state.data.data, state.blck.root, cc0[0])
       attestation1 = makeAttestation(

@@ -115,7 +116,7 @@ suite "Attestation pool processing" & preset():
     var
       # Create an attestation for slot 1!
       cc0 = get_crosslink_committee(state.data.data,
-        slot_to_epoch(state.data.data.slot), 0, cache)
+        slot_to_epoch(state.data.data.slot), 1, cache)
       attestation0 = makeAttestation(
         state.data.data, state.blck.root, cc0[0])
       attestation1 = makeAttestation(

@@ -140,7 +141,7 @@ suite "Attestation pool processing" & preset():
     var
       # Create an attestation for slot 1!
       cc0 = get_crosslink_committee(state.data.data,
-        slot_to_epoch(state.data.data.slot), 0, cache)
+        slot_to_epoch(state.data.data.slot), 1, cache)
       attestation0 = makeAttestation(
         state.data.data, state.blck.root, cc0[0])
       attestation1 = makeAttestation(