use StateData in place of BeaconState outside state transition code (#2551)
* use StateData in place of BeaconState outside state transition code
* propagate more StateData usage
* remove withStateVars().state
* wrap get_beacon_committee(BeaconState, ...) as gbc(StateData, ...)
* switch makeAttestation() to use StateData
* use StateData wrapper/dispatcher for get_committee_count_per_slot()
* convert AttestationCache.init(), weak subjectivity functions, and updateValidatorMetrics()
* add get_shuffled_active_validator_indices(StateData) and get_block_root_at_slot(StateData)
* switch makeAttestationData() to StateData
* sync AllTests-mainnet.md after rebase
This commit is contained in:
parent be5661eebc
commit 0b0bfd1de0
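The change described above boils down to one indirection: code outside the state transition stops taking a `BeaconState` and instead takes the `StateData` wrapper, reading fields through `getStateField` or through per-function dispatchers such as `get_beacon_committee(StateData, ...)` and `get_committee_count_per_slot(StateData, ...)`. A minimal, self-contained sketch of that pattern follows; the `Mini*` types are illustrative stand-ins, not the real `HashedBeaconState`/`StateData` definitions from `spec/datatypes` and `block_pools_types`.

```nim
# Sketch only: simplified stand-ins for the wrapper types touched by this
# commit; field names are illustrative, not the real definitions.
type
  MiniState = object
    slot: uint64
    genesis_time: uint64

  MiniHashedState = object
    data: MiniState          # raw state, like HashedBeaconState.data
    root: array[32, byte]    # cached hash_tree_root, like HashedBeaconState.root

  MiniStateData = object
    data: MiniHashedState    # mirrors StateData.data
    # (the real StateData also carries blck*: BlockRef)

# Same shape as the getStateField template in this commit: call sites read
# state fields through the wrapper instead of unwrapping it themselves.
template getStateField(stateData, fieldName: untyped): untyped =
  stateData.data.data.fieldName

# Dispatcher in the style of statedata_helpers: accept the wrapper and
# unwrap it in exactly one place.
func currentSlot(state: MiniStateData): uint64 =
  getStateField(state, slot)

when isMainModule:
  let sd = MiniStateData(data: MiniHashedState(data: MiniState(slot: 42)))
  doAssert currentSlot(sd) == 42
  doAssert getStateField(sd, genesis_time) == 0
```

The point of funnelling every read through `getStateField` and the dispatchers is that a later change to the underlying state representation only has to touch these helpers rather than every call site.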
@@ -266,9 +266,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
+ [SyncQueue] checkResponse() test OK
+ [SyncQueue] contains() test OK
+ [SyncQueue] getLastNonEmptySlot() test OK
+ [SyncQueue] getRewindPoint() test OK
+ [SyncQueue] hasEndGap() test OK
```
-OK: 13/13 Fail: 0/13 Skip: 0/13
+OK: 14/14 Fail: 0/14 Skip: 0/14
## Zero signature sanity checks
```diff
+ SSZ serialization roundtrip of SignedBeaconBlockHeader OK

@@ -322,4 +323,4 @@ OK: 3/3 Fail: 0/3 Skip: 0/3
OK: 1/1 Fail: 0/1 Skip: 0/1

---TOTAL---
-OK: 179/188 Fail: 0/188 Skip: 9/188
+OK: 180/189 Fail: 0/189 Skip: 9/189

@@ -14,9 +14,9 @@ import
  metrics,
  chronicles, stew/byteutils, json_serialization/std/sets as jsonSets,
  # Internal
-  ../spec/[beaconstate, datatypes, crypto, digest, validator],
+  ../spec/[beaconstate, datatypes, crypto, digest],
  ../ssz/merkleization,
-  "."/[spec_cache, blockchain_dag, block_quarantine],
+  "."/[spec_cache, blockchain_dag, block_quarantine, statedata_helpers],
  ".."/[beacon_clock, beacon_node_types, extras],
  ../fork_choice/fork_choice

@@ -371,16 +371,16 @@ func add(
  do:
    attCache[key] = aggregation_bits

-func init(T: type AttestationCache, state: BeaconState): T =
+func init(T: type AttestationCache, state: StateData): T =
  # Load attestations that are scheduled for being given rewards for
-  for i in 0..<state.previous_epoch_attestations.len():
+  for i in 0..<getStateField(state, previous_epoch_attestations).len():
    result.add(
-      state.previous_epoch_attestations[i].data,
-      state.previous_epoch_attestations[i].aggregation_bits)
-  for i in 0..<state.current_epoch_attestations.len():
+      getStateField(state, previous_epoch_attestations)[i].data,
+      getStateField(state, previous_epoch_attestations)[i].aggregation_bits)
+  for i in 0..<getStateField(state, current_epoch_attestations).len():
    result.add(
-      state.current_epoch_attestations[i].data,
-      state.current_epoch_attestations[i].aggregation_bits)
+      getStateField(state, current_epoch_attestations)[i].data,
+      getStateField(state, current_epoch_attestations)[i].aggregation_bits)

proc score(
    attCache: var AttestationCache, data: AttestationData,

@@ -404,13 +404,12 @@ proc score(
  bitsScore

proc getAttestationsForBlock*(pool: var AttestationPool,
-                              state: BeaconState,
+                              state: StateData,
                              cache: var StateCache): seq[Attestation] =
  ## Retrieve attestations that may be added to a new block at the slot of the
  ## given state
  ## https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#attestations
-  let
-    newBlockSlot = state.slot.uint64
+  let newBlockSlot = getStateField(state, slot).uint64

  if newBlockSlot < MIN_ATTESTATION_INCLUSION_DELAY:
    return # Too close to genesis

@@ -450,7 +449,7 @@ proc getAttestationsForBlock*(pool: var AttestationPool,
      # attestation to - there might have been a fork between when we first
      # saw the attestation and the time that we added it
      if not check_attestation(
-          state, attestation, {skipBlsValidation}, cache).isOk():
+          state.data.data, attestation, {skipBlsValidation}, cache).isOk():
        continue

      let score = attCache.score(

@@ -476,7 +475,8 @@ proc getAttestationsForBlock*(pool: var AttestationPool,
  var
    prevEpoch = state.get_previous_epoch()
    prevEpochSpace =
-      state.previous_epoch_attestations.maxLen - state.previous_epoch_attestations.len()
+      getStateField(state, previous_epoch_attestations).maxLen -
+        getStateField(state, previous_epoch_attestations).len()

  var res: seq[Attestation]
  let totalCandidates = candidates.len()

@@ -98,7 +98,7 @@ proc addResolvedBlock(
    let prevEpochRef =
      if blockEpoch < 1: nil else: dag.findEpochRef(parent, blockEpoch - 1)

-    epochRef = EpochRef.init(state.data.data, cache, prevEpochRef)
+    epochRef = EpochRef.init(state, cache, prevEpochRef)
    dag.addEpochRef(blockRef, epochRef)

  dag.blocks.incl(KeyedBlockRef.init(blockRef))

@@ -229,7 +229,7 @@ proc addRawBlockKnownParent(
  # TODO: remove skipBLSValidation

  var sigs: seq[SignatureSet]
-  if not sigs.collectSignatureSets(signedBlock, dag.clearanceState.data.data, cache):
+  if not sigs.collectSignatureSets(signedBlock, dag.clearanceState, cache):
    # A PublicKey or Signature isn't on the BLS12-381 curve
    return err((ValidationResult.Reject, Invalid))
  if not quarantine.batchVerify(sigs):

@@ -176,31 +176,12 @@ type
    # balances, as used in fork choice
    effective_balances_bytes*: seq[byte]

-  BlockRef* = ref object
-    ## Node in object graph guaranteed to lead back to tail block, and to have
-    ## a corresponding entry in database.
-    ## Block graph should form a tree - in particular, there are no cycles.
-
-    root*: Eth2Digest ##\
-      ## Root that can be used to retrieve block data from database
-
-    parent*: BlockRef ##\
-      ## Not nil, except for the tail
-
-    slot*: Slot # could calculate this by walking to root, but..
-
  BlockData* = object
    ## Body and graph in one

    data*: TrustedSignedBeaconBlock # We trust all blocks we have a ref for
    refs*: BlockRef

-  StateData* = object
-    data*: HashedBeaconState
-
-    blck*: BlockRef ##\
-      ## The block associated with the state found in data
-
  BlockSlot* = object
    ## Unique identifier for a particular fork and time in the block chain -
    ## normally, there's a block for every slot, but in the case a block is not

@@ -16,7 +16,7 @@ import
    crypto, datatypes, digest, helpers, validator, state_transition,
    beaconstate],
  ../beacon_clock,
-  "."/[block_pools_types, block_quarantine]
+  "."/[block_pools_types, block_quarantine, statedata_helpers]

export block_pools_types, helpers, datatypes

@@ -58,7 +58,6 @@ template withStateVars*(
  template stateData(): StateData {.inject, used.} = stateDataInternal
  template hashedState(): HashedBeaconState {.inject, used.} =
    stateDataInternal.data
-  template state(): BeaconState {.inject, used.} = stateDataInternal.data.data
  template blck(): BlockRef {.inject, used.} = stateDataInternal.blck
  template root(): Eth2Digest {.inject, used.} = stateDataInternal.data.root

@@ -112,24 +111,25 @@ func get_effective_balances*(state: BeaconState): seq[Gwei] =
    result[i] = validator[].effective_balance

proc init*(
-    T: type EpochRef, state: BeaconState, cache: var StateCache,
+    T: type EpochRef, state: StateData, cache: var StateCache,
    prevEpoch: EpochRef): T =
  let
    epoch = state.get_current_epoch()
    epochRef = EpochRef(
      epoch: epoch,
-      eth1_data: state.eth1_data,
-      eth1_deposit_index: state.eth1_deposit_index,
-      current_justified_checkpoint: state.current_justified_checkpoint,
-      finalized_checkpoint: state.finalized_checkpoint,
+      eth1_data: getStateField(state, eth1_data),
+      eth1_deposit_index: getStateField(state, eth1_deposit_index),
+      current_justified_checkpoint:
+        getStateField(state, current_justified_checkpoint),
+      finalized_checkpoint: getStateField(state, finalized_checkpoint),
      shuffled_active_validator_indices:
        cache.get_shuffled_active_validator_indices(state, epoch))
  for i in 0'u64..<SLOTS_PER_EPOCH:
    let idx = get_beacon_proposer_index(
-      state, cache, epoch.compute_start_slot_at_epoch() + i)
+      state.data.data, cache, epoch.compute_start_slot_at_epoch() + i)
    if idx.isSome():
      epochRef.beacon_proposers[i] =
-        some((idx.get(), state.validators[idx.get].pubkey))
+        some((idx.get(), getStateField(state, validators)[idx.get].pubkey))

  # Validator sets typically don't change between epochs - a more efficient
  # scheme could be devised where parts of the validator key set is reused

@@ -141,7 +141,7 @@ proc init*(
  # information may however result in a different root, even if the public
  # keys are the same

-  let validators_root = hash_tree_root(state.validators)
+  let validators_root = hash_tree_root(getStateField(state, validators))

  template sameKeys(a: openArray[ValidatorPubKey], b: openArray[Validator]): bool =
    if a.len != b.len:

@@ -157,13 +157,15 @@ proc init*(

  if prevEpoch != nil and (
      prevEpoch.validator_key_store[0] == validators_root or
-      sameKeys(prevEpoch.validator_key_store[1][], state.validators.asSeq)):
+      sameKeys(
+        prevEpoch.validator_key_store[1][],
+        getStateField(state, validators).asSeq)):
    epochRef.validator_key_store =
      (validators_root, prevEpoch.validator_key_store[1])
  else:
    epochRef.validator_key_store = (
      validators_root,
-      newClone(mapIt(state.validators.toSeq, it.pubkey)))
+      newClone(mapIt(getStateField(state, validators).toSeq, it.pubkey)))

  # When fork choice runs, it will need the effective balance of the justified
  # checkpoint - we pre-load the balances here to avoid rewinding the justified

@@ -177,7 +179,8 @@ proc init*(

  epochRef.effective_balances_bytes =
    snappyEncode(SSZ.encode(
-      List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](get_effective_balances(state))))
+      List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](
+        get_effective_balances(state.data.data))))

  epochRef

@@ -304,9 +307,6 @@ proc loadStateCache*(
    if epoch > 0:
      load(epoch - 1)

-template getStateField*(stateData, fieldName: untyped): untyped =
-  stateData.data.data.fieldName
-
func init(T: type BlockRef, root: Eth2Digest, slot: Slot): BlockRef =
  BlockRef(
    root: root,

@@ -500,7 +500,7 @@ proc getEpochRef*(dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef =
    let
      prevEpochRef = if epoch < 1: nil
                     else: dag.findEpochRef(blck, epoch - 1)
-      newEpochRef = EpochRef.init(state, cache, prevEpochRef)
+      newEpochRef = EpochRef.init(stateData, cache, prevEpochRef)

    if epoch >= dag.finalizedHead.slot.epoch():
      # Only cache epoch information for unfinalized blocks - earlier states

@@ -8,17 +8,55 @@
{.push raises: [Defect].}

import
-  ../spec/[datatypes, digest, helpers, presets],
-  ./block_pools_types, ./blockchain_dag
+  ../spec/[beaconstate, datatypes, digest, helpers, presets, validator],
+  ./block_pools_types

-# State-related functions implemented based on StateData instead of BeaconState
+# State-related functionality based on StateData instead of BeaconState

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_current_epoch
func get_current_epoch*(stateData: StateData): Epoch =
  ## Return the current epoch.
  getStateField(stateData, slot).epoch

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_previous_epoch
func get_previous_epoch*(stateData: StateData): Epoch =
  ## Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
  let current_epoch = get_current_epoch(stateData)
  if current_epoch == GENESIS_EPOCH:
    GENESIS_EPOCH
  else:
    current_epoch - 1

# Dispatch functions
func get_beacon_committee*(
    state: StateData, slot: Slot, index: CommitteeIndex,
    cache: var StateCache): seq[ValidatorIndex] =
  # This one is used by tests/, ncli/, and a couple of places in RPC
  # TODO use the iterator version alone, to remove the risk of using
  # diverging get_beacon_committee() in tests and beacon_chain/ by a
  # wrapper approach (e.g., toSeq). This is a perf tradeoff for test
  # correctness/consistency.
  get_beacon_committee(state.data.data, slot, index, cache)

func get_committee_count_per_slot*(state: StateData,
                                   epoch: Epoch,
                                   cache: var StateCache): uint64 =
  # Return the number of committees at ``epoch``.
  get_committee_count_per_slot(state.data.data, epoch, cache)

template hash_tree_root*(stateData: StateData): Eth2Digest =
  # Dispatch here based on type/fork of state. Since StateData is a ref object
  # type, if Nim chooses the wrong overload, it will simply fail to compile.
  stateData.data.root

func get_shuffled_active_validator_indices*(
    cache: var StateCache, state: StateData, epoch: Epoch):
    var seq[ValidatorIndex] =
  cache.get_shuffled_active_validator_indices(state.data.data, epoch)

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_block_root_at_slot
func get_block_root_at_slot*(state: StateData,
                             slot: Slot): Eth2Digest =
  ## Return the block root at a recent ``slot``.

  get_block_root_at_slot(state.data.data, slot)
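Since the helpers above are the only place where `StateData` is unwrapped, their behaviour is easy to illustrate in isolation. Below is a hedged, self-contained sketch of the epoch arithmetic behind `get_current_epoch`/`get_previous_epoch`, using plain integers instead of the real `Slot`/`Epoch` distinct types and a mainnet-style `SLOTS_PER_EPOCH`; both simplifications are assumptions for illustration only.

```nim
# Minimal illustration of the epoch helpers layered on top of StateData.
const
  SLOTS_PER_EPOCH = 32'u64   # mainnet-style value, assumed for the sketch
  GENESIS_EPOCH = 0'u64

func epochAt(slot: uint64): uint64 =
  # slot.epoch in the real code: integer division by SLOTS_PER_EPOCH
  slot div SLOTS_PER_EPOCH

func previousEpochAt(slot: uint64): uint64 =
  # Same clamp as get_previous_epoch(StateData): never go below GENESIS_EPOCH.
  let current = epochAt(slot)
  if current == GENESIS_EPOCH: GENESIS_EPOCH
  else: current - 1

when isMainModule:
  doAssert previousEpochAt(0) == 0    # genesis epoch clamps to itself
  doAssert previousEpochAt(31) == 0   # still in epoch 0
  doAssert previousEpochAt(32) == 0   # epoch 1 -> previous is 0
  doAssert previousEpochAt(96) == 2   # epoch 3 -> previous is 2
```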
@ -17,6 +17,7 @@ import
|
|||
# Local modules:
|
||||
../spec/[datatypes, digest, crypto, helpers],
|
||||
../networking/network_metadata,
|
||||
../consensus_object_pools/block_pools_types,
|
||||
../ssz,
|
||||
../rpc/eth_merge_web3,
|
||||
".."/[beacon_chain_db, beacon_node_status],
|
||||
|
@ -283,13 +284,14 @@ template toGaugeValue(x: Quantity): int64 =
|
|||
# "Invalid configuration: GENESIS_DELAY is set too low"
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#get_eth1_data
|
||||
func compute_time_at_slot(state: BeaconState, slot: Slot): uint64 =
|
||||
state.genesis_time + slot * SECONDS_PER_SLOT
|
||||
func compute_time_at_slot(state: StateData, slot: Slot): uint64 =
|
||||
getStateField(state, genesis_time) + slot * SECONDS_PER_SLOT
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#get_eth1_data
|
||||
func voting_period_start_time*(state: BeaconState): uint64 =
|
||||
func voting_period_start_time*(state: StateData): uint64 =
|
||||
let eth1_voting_period_start_slot =
|
||||
state.slot - state.slot mod SLOTS_PER_ETH1_VOTING_PERIOD.uint64
|
||||
getStateField(state, slot) - getStateField(state, slot) mod
|
||||
SLOTS_PER_ETH1_VOTING_PERIOD.uint64
|
||||
compute_time_at_slot(state, eth1_voting_period_start_slot)
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#get_eth1_data
|
||||
|
@ -697,7 +699,7 @@ template trackFinalizedState*(m: Eth1Monitor,
|
|||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#get_eth1_data
|
||||
proc getBlockProposalData*(chain: var Eth1Chain,
|
||||
state: BeaconState,
|
||||
state: StateData,
|
||||
finalizedEth1Data: Eth1Data,
|
||||
finalizedStateDepositIndex: uint64): BlockProposalEth1Data =
|
||||
let
|
||||
|
@ -706,11 +708,11 @@ proc getBlockProposalData*(chain: var Eth1Chain,
|
|||
finalizedStateDepositIndex)
|
||||
|
||||
var otherVotesCountTable = initCountTable[Eth1Data]()
|
||||
for vote in state.eth1_data_votes:
|
||||
for vote in getStateField(state, eth1_data_votes):
|
||||
let eth1Block = chain.findBlock(vote)
|
||||
if eth1Block != nil and
|
||||
eth1Block.voteData.deposit_root == vote.deposit_root and
|
||||
vote.deposit_count >= state.eth1_data.deposit_count and
|
||||
vote.deposit_count >= getStateField(state, eth1_data).deposit_count and
|
||||
is_candidate_block(chain.preset, eth1Block, periodStart):
|
||||
otherVotesCountTable.inc vote
|
||||
else:
|
||||
|
@ -718,20 +720,23 @@ proc getBlockProposalData*(chain: var Eth1Chain,
|
|||
root = vote.block_hash,
|
||||
deposits = vote.deposit_count,
|
||||
depositsRoot = vote.deposit_root,
|
||||
localDeposits = state.eth1_data.deposit_count
|
||||
localDeposits = getStateField(state, eth1_data).deposit_count
|
||||
|
||||
var pendingDepositsCount = state.eth1_data.deposit_count - state.eth1_deposit_index
|
||||
var pendingDepositsCount =
|
||||
getStateField(state, eth1_data).deposit_count -
|
||||
getStateField(state, eth1_deposit_index)
|
||||
if otherVotesCountTable.len > 0:
|
||||
let (winningVote, votes) = otherVotesCountTable.largest
|
||||
debug "Voting on eth1 head with majority", votes
|
||||
result.vote = winningVote
|
||||
if uint64((votes + 1) * 2) > SLOTS_PER_ETH1_VOTING_PERIOD:
|
||||
pendingDepositsCount = winningVote.deposit_count - state.eth1_deposit_index
|
||||
pendingDepositsCount = winningVote.deposit_count -
|
||||
getStateField(state, eth1_deposit_index)
|
||||
else:
|
||||
let latestBlock = chain.latestCandidateBlock(periodStart)
|
||||
if latestBlock == nil:
|
||||
debug "No acceptable eth1 votes and no recent candidates. Voting no change"
|
||||
result.vote = state.eth1_data
|
||||
result.vote = getStateField(state, eth1_data)
|
||||
else:
|
||||
debug "No acceptable eth1 votes. Voting for latest candidate"
|
||||
result.vote = latestBlock.voteData
|
||||
|
@ -741,12 +746,13 @@ proc getBlockProposalData*(chain: var Eth1Chain,
|
|||
let
|
||||
totalDepositsInNewBlock = min(MAX_DEPOSITS, pendingDepositsCount)
|
||||
deposits = chain.getDepositsRange(
|
||||
state.eth1_deposit_index,
|
||||
state.eth1_deposit_index + pendingDepositsCount)
|
||||
getStateField(state, eth1_deposit_index),
|
||||
getStateField(state, eth1_deposit_index) + pendingDepositsCount)
|
||||
depositRoots = mapIt(deposits, hash_tree_root(it))
|
||||
|
||||
var scratchMerkleizer = copy chain.finalizedDepositsMerkleizer
|
||||
if chain.advanceMerkleizer(scratchMerkleizer, state.eth1_deposit_index):
|
||||
if chain.advanceMerkleizer(
|
||||
scratchMerkleizer, getStateField(state, eth1_deposit_index)):
|
||||
let proofs = scratchMerkleizer.addChunksAndGenMerkleProofs(depositRoots)
|
||||
for i in 0 ..< totalDepositsInNewBlock:
|
||||
var proof: array[33, Eth2Digest]
|
||||
|
@ -761,7 +767,7 @@ proc getBlockProposalData*(chain: var Eth1Chain,
|
|||
result.hasMissingDeposits = true
|
||||
|
||||
template getBlockProposalData*(m: Eth1Monitor,
|
||||
state: BeaconState,
|
||||
state: StateData,
|
||||
finalizedEth1Data: Eth1Data,
|
||||
finalizedStateDepositIndex: uint64): BlockProposalEth1Data =
|
||||
getBlockProposalData(m.eth1Chain, state, finalizedEth1Data, finalizedStateDepositIndex)
|
||||
|
|
|
@ -261,7 +261,7 @@ proc init*(T: type BeaconNode,
|
|||
currentSlot = beaconClock.now.slotOrZero
|
||||
isCheckpointStale = not is_within_weak_subjectivity_period(
|
||||
currentSlot,
|
||||
chainDag.headState.data.data,
|
||||
chainDag.headState,
|
||||
config.weakSubjectivityCheckpoint.get)
|
||||
|
||||
if isCheckpointStale:
|
||||
|
@ -548,8 +548,8 @@ proc cycleAttestationSubnetsPerEpoch(
|
|||
# calculating future attestation subnets.
|
||||
|
||||
# Only know RANDAO mix, which determines shuffling seed, one epoch in
|
||||
# advance. When node.chainDag.headState.data.data.slot.epoch is ahead
|
||||
# of wallSlot, the clock's just incorrect. If the state slot's behind
|
||||
# advance. When getStateField(node.chainDag.headState, slot).epoch is
|
||||
# ahead of wallSlot, the clock's just incorrect. If the slot's behind
|
||||
# wallSlot, it would have to look more than MIN_SEED_LOOKAHEAD epochs
|
||||
# ahead to compute the shuffling determining the beacon committees.
|
||||
static: doAssert MIN_SEED_LOOKAHEAD == 1
|
||||
|
|
|
@ -17,8 +17,8 @@ import
|
|||
../networking/eth2_network,
|
||||
../validators/validator_duties,
|
||||
../gossip_processing/gossip_validation,
|
||||
../consensus_object_pools/blockchain_dag,
|
||||
../spec/[crypto, digest, datatypes, validator, network],
|
||||
../consensus_object_pools/[blockchain_dag, statedata_helpers],
|
||||
../spec/[crypto, digest, datatypes, network],
|
||||
../spec/eth2_apis/callsigs_types,
|
||||
../ssz/merkleization,
|
||||
./rpc_utils, ./eth2_json_rpc_serialization
|
||||
|
@ -345,12 +345,13 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
withStateForStateId(stateId):
|
||||
proc getCommittee(slot: Slot,
|
||||
index: CommitteeIndex): BeaconStatesCommitteesTuple =
|
||||
let vals = get_beacon_committee(state, slot, index, cache).mapIt(it.uint64)
|
||||
let vals = get_beacon_committee(
|
||||
stateData, slot, index, cache).mapIt(it.uint64)
|
||||
return (index: index.uint64, slot: slot.uint64, validators: vals)
|
||||
|
||||
proc forSlot(slot: Slot, res: var seq[BeaconStatesCommitteesTuple]) =
|
||||
let committees_per_slot =
|
||||
get_committee_count_per_slot(state, slot.epoch, cache)
|
||||
get_committee_count_per_slot(stateData, slot.epoch, cache)
|
||||
|
||||
if index.isNone:
|
||||
for committee_index in 0'u64..<committees_per_slot:
|
||||
|
|
|
@ -12,7 +12,7 @@ import
|
|||
../consensus_object_pools/[blockchain_dag, exit_pool, statedata_helpers],
|
||||
../gossip_processing/gossip_validation,
|
||||
../validators/validator_duties,
|
||||
../spec/[crypto, digest, validator, datatypes, network],
|
||||
../spec/[crypto, digest, datatypes, network],
|
||||
../ssz/merkleization,
|
||||
./eth2_json_rest_serialization, ./rest_utils
|
||||
|
||||
|
@ -485,7 +485,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
node.withStateForBlockSlot(bslot):
|
||||
proc getCommittee(slot: Slot,
|
||||
index: CommitteeIndex): RestBeaconStatesCommittees =
|
||||
let validators = get_beacon_committee(state, slot, index,
|
||||
let validators = get_beacon_committee(stateData, slot, index,
|
||||
cache).mapIt(it)
|
||||
RestBeaconStatesCommittees(index: index, slot: slot,
|
||||
validators: validators)
|
||||
|
@ -493,7 +493,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
proc forSlot(slot: Slot, cindex: Option[CommitteeIndex],
|
||||
res: var seq[RestBeaconStatesCommittees]) =
|
||||
let committees_per_slot =
|
||||
get_committee_count_per_slot(state, Epoch(slot), cache)
|
||||
get_committee_count_per_slot(stateData, Epoch(slot), cache)
|
||||
|
||||
if cindex.isNone:
|
||||
for committee_index in 0'u64 ..< committees_per_slot:
|
||||
|
|
|
@ -26,7 +26,7 @@ proc installDebugApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
rpcServer.rpc("get_v1_debug_beacon_states_stateId") do (
|
||||
stateId: string) -> BeaconState:
|
||||
withStateForStateId(stateId):
|
||||
return state
|
||||
return stateData.data.data
|
||||
|
||||
rpcServer.rpc("get_v1_debug_beacon_heads") do () -> seq[tuple[root: Eth2Digest, slot: Slot]]:
|
||||
return node.chainDag.heads.mapIt((it.root, it.slot))
|
||||
|
|
|
@ -23,7 +23,7 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
$bres.error())
|
||||
bres.get()
|
||||
node.withStateForBlockSlot(bslot):
|
||||
return RestApiResponse.jsonResponse(state())
|
||||
return RestApiResponse.jsonResponse(stateData.data.data)
|
||||
return RestApiResponse.jsonError(Http500, InternalServerError)
|
||||
|
||||
router.api(MethodGet,
|
||||
|
|
|
@ -633,34 +633,3 @@ proc process_attestation*(
|
|||
addPendingAttestation(state.previous_epoch_attestations)
|
||||
|
||||
ok()
|
||||
|
||||
func makeAttestationData*(
|
||||
state: BeaconState, slot: Slot, committee_index: CommitteeIndex,
|
||||
beacon_block_root: Eth2Digest): AttestationData =
|
||||
## Create an attestation / vote for the block `beacon_block_root` using the
|
||||
## data in `state` to fill in the rest of the fields.
|
||||
## `state` is the state corresponding to the `beacon_block_root` advanced to
|
||||
## the slot we're attesting to.
|
||||
|
||||
let
|
||||
current_epoch = get_current_epoch(state)
|
||||
start_slot = compute_start_slot_at_epoch(current_epoch)
|
||||
epoch_boundary_block_root =
|
||||
if start_slot == state.slot: beacon_block_root
|
||||
else: get_block_root_at_slot(state, start_slot)
|
||||
|
||||
doAssert slot.compute_epoch_at_slot == current_epoch,
|
||||
"Computed epoch was " & $slot.compute_epoch_at_slot &
|
||||
" while the state current_epoch was " & $current_epoch
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#attestation-data
|
||||
AttestationData(
|
||||
slot: slot,
|
||||
index: committee_index.uint64,
|
||||
beacon_block_root: beacon_block_root,
|
||||
source: state.current_justified_checkpoint,
|
||||
target: Checkpoint(
|
||||
epoch: current_epoch,
|
||||
root: epoch_boundary_block_root
|
||||
)
|
||||
)
|
||||
|
|
|
@ -755,6 +755,25 @@ type
|
|||
statuses*: seq[RewardStatus]
|
||||
total_balances*: TotalBalances
|
||||
|
||||
BlockRef* = ref object
|
||||
## Node in object graph guaranteed to lead back to tail block, and to have
|
||||
## a corresponding entry in database.
|
||||
## Block graph should form a tree - in particular, there are no cycles.
|
||||
|
||||
root*: Eth2Digest ##\
|
||||
## Root that can be used to retrieve block data from database
|
||||
|
||||
parent*: BlockRef ##\
|
||||
## Not nil, except for the tail
|
||||
|
||||
slot*: Slot # could calculate this by walking to root, but..
|
||||
|
||||
StateData* = object
|
||||
data*: HashedBeaconState
|
||||
|
||||
blck*: BlockRef ##\
|
||||
## The block associated with the state found in data
|
||||
|
||||
func getImmutableValidatorData*(validator: Validator): ImmutableValidatorData =
|
||||
ImmutableValidatorData(
|
||||
pubkey: validator.pubkey,
|
||||
|
@ -1154,6 +1173,9 @@ proc readValue*(r: var JsonReader, T: type GraffitiBytes): T
|
|||
except ValueError as err:
|
||||
r.raiseUnexpectedValue err.msg
|
||||
|
||||
template getStateField*(stateData, fieldName: untyped): untyped =
|
||||
stateData.data.data.fieldName
|
||||
|
||||
static:
|
||||
# Sanity checks - these types should be trivial enough to copy with memcpy
|
||||
doAssert supportsCopyMem(Validator)
|
||||
|
|
|
@ -77,14 +77,14 @@ func addSignatureSet[T](
|
|||
proc aggregateAttesters(
|
||||
aggPK: var blscurve.PublicKey,
|
||||
attestation: IndexedAttestation,
|
||||
state: BeaconState
|
||||
validators: seq[Validator],
|
||||
): bool =
|
||||
doAssert attestation.attesting_indices.len > 0
|
||||
var attestersAgg{.noInit.}: AggregatePublicKey
|
||||
attestersAgg.init(state.validators[attestation.attesting_indices[0]]
|
||||
attestersAgg.init(validators[attestation.attesting_indices[0]]
|
||||
.pubkey.loadWithCacheOrExit(false))
|
||||
for i in 1 ..< attestation.attesting_indices.len:
|
||||
attestersAgg.aggregate(state.validators[attestation.attesting_indices[i]]
|
||||
attestersAgg.aggregate(validators[attestation.attesting_indices[i]]
|
||||
.pubkey.loadWithCacheOrExit(false))
|
||||
aggPK.finish(attestersAgg)
|
||||
return true
|
||||
|
@ -109,7 +109,7 @@ proc aggregateAttesters(
|
|||
proc addIndexedAttestation(
|
||||
sigs: var seq[SignatureSet],
|
||||
attestation: IndexedAttestation,
|
||||
state: BeaconState
|
||||
state: StateData
|
||||
): bool =
|
||||
## Add an indexed attestation for batched BLS verification
|
||||
## purposes
|
||||
|
@ -126,15 +126,16 @@ proc addIndexedAttestation(
|
|||
return false
|
||||
|
||||
var aggPK {.noInit.}: blscurve.PublicKey
|
||||
if not aggPK.aggregateAttesters(attestation, state):
|
||||
if not aggPK.aggregateAttesters(
|
||||
attestation, getStateField(state, validators).asSeq):
|
||||
return false
|
||||
|
||||
sigs.addSignatureSet(
|
||||
aggPK,
|
||||
attestation.data,
|
||||
attestation.signature.loadOrExit(false),
|
||||
state.genesis_validators_root,
|
||||
state.fork,
|
||||
getStateField(state, genesis_validators_root),
|
||||
getStateField(state, fork),
|
||||
attestation.data.target.epoch,
|
||||
DOMAIN_BEACON_ATTESTER)
|
||||
return true
|
||||
|
@ -142,22 +143,22 @@ proc addIndexedAttestation(
|
|||
proc addAttestation(
|
||||
sigs: var seq[SignatureSet],
|
||||
attestation: Attestation,
|
||||
state: BeaconState,
|
||||
state: StateData,
|
||||
cache: var StateCache
|
||||
): bool =
|
||||
var inited = false
|
||||
var attestersAgg{.noInit.}: AggregatePublicKey
|
||||
for valIndex in state.get_attesting_indices(
|
||||
for valIndex in state.data.data.get_attesting_indices(
|
||||
attestation.data,
|
||||
attestation.aggregation_bits,
|
||||
cache
|
||||
):
|
||||
if not inited: # first iteration
|
||||
attestersAgg.init(state.validators[valIndex]
|
||||
attestersAgg.init(getStateField(state, validators)[valIndex]
|
||||
.pubkey.loadWithCacheOrExit(false))
|
||||
inited = true
|
||||
else:
|
||||
attestersAgg.aggregate(state.validators[valIndex]
|
||||
attestersAgg.aggregate(getStateField(state, validators)[valIndex]
|
||||
.pubkey.loadWithCacheOrExit(false))
|
||||
|
||||
if not inited:
|
||||
|
@ -171,8 +172,8 @@ proc addAttestation(
|
|||
attesters,
|
||||
attestation.data,
|
||||
attestation.signature.loadOrExit(false),
|
||||
state.genesis_validators_root,
|
||||
state.fork,
|
||||
getStateField(state, genesis_validators_root),
|
||||
getStateField(state, fork),
|
||||
attestation.data.target.epoch,
|
||||
DOMAIN_BEACON_ATTESTER)
|
||||
|
||||
|
@ -272,7 +273,7 @@ proc addAggregateAndProofSignature*(
|
|||
proc collectSignatureSets*(
|
||||
sigs: var seq[SignatureSet],
|
||||
signed_block: SignedBeaconBlock,
|
||||
state: BeaconState,
|
||||
state: StateData,
|
||||
cache: var StateCache): bool =
|
||||
## Collect all signatures in a single signed block.
|
||||
## This includes
|
||||
|
@ -291,10 +292,10 @@ proc collectSignatureSets*(
|
|||
|
||||
let
|
||||
proposer_index = signed_block.message.proposer_index
|
||||
if proposer_index >= state.validators.lenu64:
|
||||
if proposer_index >= getStateField(state, validators).lenu64:
|
||||
return false
|
||||
|
||||
let pubkey = state.validators[proposer_index]
|
||||
let pubkey = getStateField(state, validators)[proposer_index]
|
||||
.pubkey.loadWithCacheOrExit(false)
|
||||
let epoch = signed_block.message.slot.compute_epoch_at_slot()
|
||||
|
||||
|
@ -304,8 +305,8 @@ proc collectSignatureSets*(
|
|||
pubkey,
|
||||
signed_block.message,
|
||||
signed_block.signature.loadOrExit(false),
|
||||
state.genesis_validators_root,
|
||||
state.fork,
|
||||
getStateField(state, genesis_validators_root),
|
||||
getStateField(state, fork),
|
||||
epoch,
|
||||
DOMAIN_BEACON_PROPOSER)
|
||||
|
||||
|
@ -315,8 +316,8 @@ proc collectSignatureSets*(
|
|||
pubkey,
|
||||
epoch,
|
||||
signed_block.message.body.randao_reveal.loadOrExit(false),
|
||||
state.genesis_validators_root,
|
||||
state.fork,
|
||||
getStateField(state, genesis_validators_root),
|
||||
getStateField(state, fork),
|
||||
epoch,
|
||||
DOMAIN_RANDAO)
|
||||
|
||||
|
@ -337,14 +338,15 @@ proc collectSignatureSets*(
|
|||
# Proposed block 1
|
||||
block:
|
||||
let header_1 = slashing.signed_header_1
|
||||
let proposer1 = state.validators[header_1.message.proposer_index]
|
||||
let proposer1 =
|
||||
getStateField(state, validators)[header_1.message.proposer_index]
|
||||
let epoch1 = header_1.message.slot.compute_epoch_at_slot()
|
||||
sigs.addSignatureSet(
|
||||
proposer1.pubkey.loadWithCacheOrExit(false),
|
||||
header_1.message,
|
||||
header_1.signature.loadOrExit(false),
|
||||
state.genesis_validators_root,
|
||||
state.fork,
|
||||
getStateField(state, genesis_validators_root),
|
||||
getStateField(state, fork),
|
||||
epoch1,
|
||||
DOMAIN_BEACON_PROPOSER
|
||||
)
|
||||
|
@ -352,14 +354,15 @@ proc collectSignatureSets*(
|
|||
# Conflicting block 2
|
||||
block:
|
||||
let header_2 = slashing.signed_header_2
|
||||
let proposer2 = state.validators[header_2.message.proposer_index]
|
||||
let proposer2 =
|
||||
getStateField(state, validators)[header_2.message.proposer_index]
|
||||
let epoch2 = header_2.message.slot.compute_epoch_at_slot()
|
||||
sigs.addSignatureSet(
|
||||
proposer2.pubkey.loadWithCacheOrExit(false),
|
||||
header_2.message,
|
||||
header_2.signature.loadOrExit(false),
|
||||
state.genesis_validators_root,
|
||||
state.fork,
|
||||
getStateField(state, genesis_validators_root),
|
||||
getStateField(state, fork),
|
||||
epoch2,
|
||||
DOMAIN_BEACON_PROPOSER
|
||||
)
|
||||
|
@ -418,12 +421,12 @@ proc collectSignatureSets*(
|
|||
template volex: untyped = signed_block.message.body.voluntary_exits[i]
|
||||
|
||||
sigs.addSignatureSet(
|
||||
state.validators[volex.message.validator_index]
|
||||
getStateField(state, validators)[volex.message.validator_index]
|
||||
.pubkey.loadWithCacheOrExit(false),
|
||||
volex.message,
|
||||
volex.signature.loadOrExit(false),
|
||||
state.genesis_validators_root,
|
||||
state.fork,
|
||||
getStateField(state, genesis_validators_root),
|
||||
getStateField(state, fork),
|
||||
volex.message.epoch,
|
||||
DOMAIN_VOLUNTARY_EXIT)
|
||||
|
||||
|
|
|
@@ -8,15 +8,17 @@
{.push raises: [Defect].}

import
-  ./datatypes, ./digest, ./helpers
+  ./datatypes, ./digest, ./helpers,
+  ../consensus_object_pools/statedata_helpers

const
  SAFETY_DECAY* = 10'u64

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/weak-subjectivity.md#calculating-the-weak-subjectivity-period
-func compute_weak_subjectivity_period*(state: BeaconState): uint64 =
+func compute_weak_subjectivity_period(state: StateData): uint64 =
  var weak_subjectivity_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
-  let validator_count = get_active_validator_indices_len(state, get_current_epoch(state))
+  let validator_count =
+    get_active_validator_indices_len(state.data.data, get_current_epoch(state))
  if validator_count >= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT:
    weak_subjectivity_period += SAFETY_DECAY * CHURN_LIMIT_QUOTIENT div (2 * 100)
  else:

@@ -25,15 +27,17 @@ func compute_weak_subjectivity_period*(state: BeaconState): uint64 =

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/weak-subjectivity.md#checking-for-stale-weak-subjectivity-checkpoint
func is_within_weak_subjectivity_period*(current_slot: Slot,
-                                         ws_state: BeaconState,
+                                         ws_state: StateData,
                                         ws_checkpoint: Checkpoint): bool =
  # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
-  doAssert ws_state.latest_block_header.state_root == ws_checkpoint.root
-  doAssert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch
+  doAssert getStateField(ws_state, latest_block_header).state_root ==
+    ws_checkpoint.root
+  doAssert compute_epoch_at_slot(getStateField(ws_state, slot)) ==
+    ws_checkpoint.epoch

  let
    ws_period = compute_weak_subjectivity_period(ws_state)
-    ws_state_epoch = compute_epoch_at_slot(ws_state.slot)
+    ws_state_epoch = compute_epoch_at_slot(getStateField(ws_state, slot))
    current_epoch = compute_epoch_at_slot(current_slot)

  current_epoch <= ws_state_epoch + ws_period
|
|
@ -10,7 +10,6 @@
|
|||
import
|
||||
os, strformat, chronicles,
|
||||
./ssz_serialization,
|
||||
../beacon_node_types,
|
||||
../spec/[crypto, datatypes, digest]
|
||||
|
||||
# Dump errors are generally not fatal where used currently - the code calling
|
||||
|
|
|
@ -263,7 +263,7 @@ proc getBlockProposalEth1Data*(node: BeaconNode,
|
|||
else:
|
||||
let finalizedEpochRef = node.chainDag.getFinalizedEpochRef()
|
||||
result = node.eth1Monitor.getBlockProposalData(
|
||||
stateData.data.data, finalizedEpochRef.eth1_data,
|
||||
stateData, finalizedEpochRef.eth1_data,
|
||||
finalizedEpochRef.eth1_deposit_index)
|
||||
|
||||
func getOpaqueTransaction(s: string): OpaqueTransaction =
|
||||
|
@ -321,7 +321,7 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
|
|||
randao_reveal,
|
||||
eth1Proposal.vote,
|
||||
graffiti,
|
||||
node.attestationPool[].getAttestationsForBlock(state, cache),
|
||||
node.attestationPool[].getAttestationsForBlock(stateData, cache),
|
||||
eth1Proposal.deposits,
|
||||
node.exitPool[].getProposerSlashingsForBlock(),
|
||||
node.exitPool[].getAttesterSlashingsForBlock(),
|
||||
|
@ -579,22 +579,21 @@ proc updateValidatorMetrics*(node: BeaconNode) =
|
|||
# We'll limit labelled metrics to the first 64, so that we don't overload
|
||||
# prom
|
||||
|
||||
template state: untyped = node.chainDag.headState.data.data
|
||||
|
||||
var total: Gwei
|
||||
var i = 0
|
||||
for _, v in node.attachedValidators[].validators:
|
||||
let balance =
|
||||
if v.index.isNone():
|
||||
0.Gwei
|
||||
elif v.index.get().uint64 >= state.balances.lenu64:
|
||||
elif v.index.get().uint64 >=
|
||||
getStateField(node.chainDag.headState, balances).lenu64:
|
||||
debug "Cannot get validator balance, index out of bounds",
|
||||
pubkey = shortLog(v.pubkey), index = v.index.get(),
|
||||
balances = state.balances.len,
|
||||
balances = getStateField(node.chainDag.headState, balances).len,
|
||||
stateRoot = node.chainDag.headState.data.root
|
||||
0.Gwei
|
||||
else:
|
||||
state.balances[v.index.get()]
|
||||
getStateField(node.chainDag.headState, balances)[v.index.get()]
|
||||
|
||||
if i < 64:
|
||||
attached_validator_balance.set(
|
||||
|
|
|
@ -3,9 +3,9 @@ import
|
|||
chronicles, confutils, stew/byteutils, eth/db/kvstore_sqlite3,
|
||||
../beacon_chain/networking/network_metadata,
|
||||
../beacon_chain/[beacon_chain_db, extras],
|
||||
../beacon_chain/consensus_object_pools/blockchain_dag,
|
||||
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers,
|
||||
state_transition, presets, validator],
|
||||
../beacon_chain/consensus_object_pools/[blockchain_dag, statedata_helpers],
|
||||
../beacon_chain/spec/[crypto, datatypes, digest, helpers, state_transition,
|
||||
presets],
|
||||
../beacon_chain/ssz, ../beacon_chain/ssz/sszdump,
|
||||
../research/simutils, ./e2store
|
||||
|
||||
|
@ -217,7 +217,7 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
|
|||
doAssert dbBenchmark.getState(state[].data.root, loadedState[], noRollback)
|
||||
|
||||
if getStateField(state[], slot).epoch mod 16 == 0:
|
||||
doAssert hash_tree_root(state[].data.data) == hash_tree_root(loadedState[])
|
||||
doAssert hash_tree_root(state[]) == hash_tree_root(loadedState[])
|
||||
|
||||
printTimers(false, timers)
|
||||
|
||||
|
@ -392,7 +392,7 @@ proc cmdExportEra(conf: DbConf, preset: RuntimePreset) =
|
|||
defer: e2s.close()
|
||||
|
||||
dag.withState(tmpState[], canonical):
|
||||
e2s.appendRecord(state).get()
|
||||
e2s.appendRecord(stateData.data.data).get()
|
||||
|
||||
var
|
||||
ancestors: seq[BlockRef]
|
||||
|
@ -440,7 +440,7 @@ proc cmdValidatorPerf(conf: DbConf, runtimePreset: RuntimePreset) =
|
|||
var
|
||||
blockRefs = dag.getBlockRange(conf.perfSlot, conf.perfSlots)
|
||||
perfs = newSeq[ValidatorPerformance](
|
||||
dag.headState.data.data.validators.len())
|
||||
getStateField(dag.headState, validators).len())
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
blck: TrustedSignedBeaconBlock
|
||||
|
@ -457,20 +457,20 @@ proc cmdValidatorPerf(conf: DbConf, runtimePreset: RuntimePreset) =
|
|||
proc processEpoch() =
|
||||
let
|
||||
prev_epoch_target_slot =
|
||||
state[].data.data.get_previous_epoch().compute_start_slot_at_epoch()
|
||||
state[].get_previous_epoch().compute_start_slot_at_epoch()
|
||||
penultimate_epoch_end_slot =
|
||||
if prev_epoch_target_slot == 0: Slot(0)
|
||||
else: prev_epoch_target_slot - 1
|
||||
first_slot_empty =
|
||||
state[].data.data.get_block_root_at_slot(prev_epoch_target_slot) ==
|
||||
state[].data.data.get_block_root_at_slot(penultimate_epoch_end_slot)
|
||||
state[].get_block_root_at_slot(prev_epoch_target_slot) ==
|
||||
state[].get_block_root_at_slot(penultimate_epoch_end_slot)
|
||||
|
||||
let first_slot_attesters = block:
|
||||
let committee_count = state[].data.data.get_committee_count_per_slot(
|
||||
prev_epoch_target_slot, cache)
|
||||
let committee_count = state[].get_committee_count_per_slot(
|
||||
prev_epoch_target_slot.epoch, cache)
|
||||
var indices = HashSet[ValidatorIndex]()
|
||||
for committee_index in 0..<committee_count:
|
||||
for validator_index in state[].data.data.get_beacon_committee(
|
||||
for validator_index in state[].get_beacon_committee(
|
||||
prev_epoch_target_slot, committee_index.CommitteeIndex, cache):
|
||||
indices.incl(validator_index)
|
||||
indices
|
||||
|
|
|
@ -25,7 +25,9 @@ import
|
|||
../beacon_chain/[beacon_node_types, beacon_chain_db, extras],
|
||||
../beacon_chain/eth1/eth1_monitor,
|
||||
../beacon_chain/validators/validator_pool,
|
||||
../beacon_chain/consensus_object_pools/[blockchain_dag, block_quarantine, block_clearance, attestation_pool],
|
||||
../beacon_chain/consensus_object_pools/[blockchain_dag, block_quarantine,
|
||||
block_clearance, attestation_pool,
|
||||
statedata_helpers],
|
||||
../beacon_chain/ssz/[merkleization, ssz_serialization],
|
||||
./simutils
|
||||
|
||||
|
@ -95,21 +97,22 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
|
|||
|
||||
chainDag.withState(tmpState[], attestationHead):
|
||||
let committees_per_slot =
|
||||
get_committee_count_per_slot(state, slot.epoch, cache)
|
||||
get_committee_count_per_slot(stateData, slot.epoch, cache)
|
||||
|
||||
for committee_index in 0'u64..<committees_per_slot:
|
||||
let committee = get_beacon_committee(
|
||||
state, slot, committee_index.CommitteeIndex, cache)
|
||||
stateData, slot, committee_index.CommitteeIndex, cache)
|
||||
|
||||
for index_in_committee, validatorIdx in committee:
|
||||
if rand(r, 1.0) <= attesterRatio:
|
||||
let
|
||||
data = makeAttestationData(
|
||||
state, slot, committee_index.CommitteeIndex, blck.root)
|
||||
stateData, slot, committee_index.CommitteeIndex, blck.root)
|
||||
sig =
|
||||
get_attestation_signature(state.fork,
|
||||
state.genesis_validators_root,
|
||||
data, hackPrivKey(state.validators[validatorIdx]))
|
||||
get_attestation_signature(getStateField(stateData, fork),
|
||||
getStateField(stateData, genesis_validators_root),
|
||||
data, hackPrivKey(
|
||||
getStateField(stateData, validators)[validatorIdx]))
|
||||
var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
|
||||
aggregation_bits.setBit index_in_committee
|
||||
|
||||
|
@ -130,10 +133,12 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
|
|||
chainDag.withState(tmpState[], head.atSlot(slot)):
|
||||
let
|
||||
finalizedEpochRef = chainDag.getFinalizedEpochRef()
|
||||
proposerIdx = get_beacon_proposer_index(state, cache).get()
|
||||
privKey = hackPrivKey(state.validators[proposerIdx])
|
||||
proposerIdx = get_beacon_proposer_index(
|
||||
stateData.data.data, cache).get()
|
||||
privKey = hackPrivKey(
|
||||
getStateField(stateData, validators)[proposerIdx])
|
||||
eth1ProposalData = eth1Chain.getBlockProposalData(
|
||||
state,
|
||||
stateData,
|
||||
finalizedEpochRef.eth1_data,
|
||||
finalizedEpochRef.eth1_deposit_index)
|
||||
message = makeBeaconBlock(
|
||||
|
@ -141,11 +146,13 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
|
|||
hashedState,
|
||||
proposerIdx,
|
||||
head.root,
|
||||
privKey.genRandaoReveal(state.fork, state.genesis_validators_root,
|
||||
privKey.genRandaoReveal(
|
||||
getStateField(stateData, fork),
|
||||
getStateField(stateData, genesis_validators_root),
|
||||
slot).toValidatorSig(),
|
||||
eth1ProposalData.vote,
|
||||
default(GraffitiBytes),
|
||||
attPool.getAttestationsForBlock(state, cache),
|
||||
attPool.getAttestationsForBlock(stateData, cache),
|
||||
eth1ProposalData.deposits,
|
||||
@[],
|
||||
@[],
|
||||
|
@ -165,7 +172,9 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
|
|||
# Careful, state no longer valid after here because of the await..
|
||||
newBlock.signature = withTimerRet(timers[tSignBlock]):
|
||||
get_block_signature(
|
||||
state.fork, state.genesis_validators_root, newBlock.message.slot,
|
||||
getStateField(stateData, fork),
|
||||
getStateField(stateData, genesis_validators_root),
|
||||
newBlock.message.slot,
|
||||
blockRoot, privKey).toValidatorSig()
|
||||
|
||||
let added = chainDag.addRawBlock(quarantine, newBlock) do (
|
||||
|
|
|
@ -50,6 +50,7 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
|
|||
var
|
||||
attestations = initTable[Slot, seq[Attestation]]()
|
||||
latest_block_root = hash_tree_root(genesisBlock.message)
|
||||
blockrefs = @[BlockRef(root: latest_block_root, slot: 0.Slot)]
|
||||
timers: array[Timers, RunningStat]
|
||||
attesters: RunningStat
|
||||
r = initRand(1)
|
||||
|
@ -110,12 +111,17 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
|
|||
committees_per_slot =
|
||||
get_committee_count_per_slot(state[].data, target_slot.epoch, cache)
|
||||
|
||||
blockrefs.add BlockRef(
|
||||
root: latest_block_root, parent: blockrefs[^1], slot: target_slot)
|
||||
|
||||
let
|
||||
scass = withTimerRet(timers[tShuffle]):
|
||||
mapIt(
|
||||
0 ..< committees_per_slot.int,
|
||||
get_beacon_committee(state[].data, target_slot, it.CommitteeIndex, cache))
|
||||
|
||||
stateData = (ref StateData)(data: state[], blck: blockrefs[^1])
|
||||
|
||||
for i, scas in scass:
|
||||
var
|
||||
attestation: Attestation
|
||||
|
@ -129,13 +135,13 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
|
|||
if (rand(r, high(int)).float * attesterRatio).int <= high(int):
|
||||
if first:
|
||||
attestation =
|
||||
makeAttestation(state[].data, latest_block_root, scas, target_slot,
|
||||
makeAttestation(stateData[], latest_block_root, scas, target_slot,
|
||||
i.CommitteeIndex, v, cache, flags)
|
||||
agg.init(attestation.signature.load.get())
|
||||
first = false
|
||||
else:
|
||||
let att2 =
|
||||
makeAttestation(state[].data, latest_block_root, scas, target_slot,
|
||||
makeAttestation(stateData[], latest_block_root, scas, target_slot,
|
||||
i.CommitteeIndex, v, cache, flags)
|
||||
if not att2.aggregation_bits.overlaps(attestation.aggregation_bits):
|
||||
attestation.aggregation_bits.incl(att2.aggregation_bits)
|
||||
|
|
|
@ -19,10 +19,11 @@ import
|
|||
../beacon_chain/gossip_processing/[gossip_validation],
|
||||
../beacon_chain/fork_choice/[fork_choice_types, fork_choice],
|
||||
../beacon_chain/consensus_object_pools/[
|
||||
block_quarantine, blockchain_dag, block_clearance, attestation_pool],
|
||||
block_quarantine, blockchain_dag, block_clearance, attestation_pool,
|
||||
statedata_helpers],
|
||||
../beacon_chain/ssz/merkleization,
|
||||
../beacon_chain/spec/[crypto, datatypes, digest, validator, state_transition,
|
||||
helpers, beaconstate, presets],
|
||||
../beacon_chain/spec/[crypto, datatypes, digest, state_transition, helpers,
|
||||
presets],
|
||||
# Test utilities
|
||||
./testutil, ./testdbutil, ./testblockutil
|
||||
|
||||
|
@ -74,9 +75,8 @@ suite "Attestation pool processing" & preset():
|
|||
let
|
||||
# Create an attestation for slot 1!
|
||||
bc0 = get_beacon_committee(
|
||||
state.data.data, getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
attestation = makeAttestation(
|
||||
state.data.data, state.blck.root, bc0[0], cache)
|
||||
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
attestation = makeAttestation(state[], state.blck.root, bc0[0], cache)
|
||||
|
||||
pool[].addAttestation(
|
||||
attestation, @[bc0[0]], attestation.loadSig,
|
||||
|
@ -103,7 +103,7 @@ suite "Attestation pool processing" & preset():
|
|||
getStateField(state, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
|
||||
rewards)
|
||||
|
||||
let attestations = pool[].getAttestationsForBlock(state.data.data, cache)
|
||||
let attestations = pool[].getAttestationsForBlock(state[], cache)
|
||||
|
||||
check:
|
||||
attestations.len == 1
|
||||
|
@ -114,9 +114,8 @@ suite "Attestation pool processing" & preset():
|
|||
state.data, state.blck.root,
|
||||
cache, attestations = attestations, nextSlot = false).root
|
||||
bc1 = get_beacon_committee(
|
||||
state.data.data, getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
att1 = makeAttestation(
|
||||
state.data.data, root1, bc1[0], cache)
|
||||
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
att1 = makeAttestation(state[], root1, bc1[0], cache)
|
||||
|
||||
check:
|
||||
process_slots(
|
||||
|
@ -126,23 +125,22 @@ suite "Attestation pool processing" & preset():
|
|||
|
||||
check:
|
||||
# shouldn't include already-included attestations
|
||||
pool[].getAttestationsForBlock(state.data.data, cache) == []
|
||||
pool[].getAttestationsForBlock(state[], cache) == []
|
||||
|
||||
pool[].addAttestation(
|
||||
att1, @[bc1[0]], att1.loadSig, att1.data.slot)
|
||||
|
||||
check:
|
||||
# but new ones should go in
|
||||
pool[].getAttestationsForBlock(state.data.data, cache).len() == 1
|
||||
pool[].getAttestationsForBlock(state[], cache).len() == 1
|
||||
|
||||
let
|
||||
att2 = makeAttestation(
|
||||
state.data.data, root1, bc1[1], cache)
|
||||
att2 = makeAttestation(state[], root1, bc1[1], cache)
|
||||
pool[].addAttestation(
|
||||
att2, @[bc1[1]], att2.loadSig, att2.data.slot)
|
||||
|
||||
let
|
||||
combined = pool[].getAttestationsForBlock(state.data.data, cache)
|
||||
combined = pool[].getAttestationsForBlock(state[], cache)
|
||||
|
||||
check:
|
||||
# New attestations should be combined with old attestations
|
||||
|
@ -154,18 +152,18 @@ suite "Attestation pool processing" & preset():
|
|||
|
||||
check:
|
||||
# readding the combined attestation shouldn't have an effect
|
||||
pool[].getAttestationsForBlock(state.data.data, cache).len() == 1
|
||||
pool[].getAttestationsForBlock(state[], cache).len() == 1
|
||||
|
||||
let
|
||||
# Someone votes for a different root
|
||||
att3 = makeAttestation(state.data.data, Eth2Digest(), bc1[2], cache)
|
||||
att3 = makeAttestation(state[], Eth2Digest(), bc1[2], cache)
|
||||
pool[].addAttestation(
|
||||
att3, @[bc1[2]], att3.loadSig, att3.data.slot)
|
||||
|
||||
check:
|
||||
# We should now get both attestations for the block, but the aggregate
|
||||
# should be the one with the most votes
|
||||
pool[].getAttestationsForBlock(state.data.data, cache).len() == 2
|
||||
pool[].getAttestationsForBlock(state[], cache).len() == 2
|
||||
pool[].getAggregatedAttestation(2.Slot, 0.CommitteeIndex).
|
||||
get().aggregation_bits.countOnes() == 2
|
||||
pool[].getAggregatedAttestation(2.Slot, hash_tree_root(att2.data)).
|
||||
|
@ -173,7 +171,7 @@ suite "Attestation pool processing" & preset():
|
|||
|
||||
let
|
||||
# Someone votes for a different root
|
||||
att4 = makeAttestation(state.data.data, Eth2Digest(), bc1[2], cache)
|
||||
att4 = makeAttestation(state[], Eth2Digest(), bc1[2], cache)
|
||||
pool[].addAttestation(
|
||||
att4, @[bc1[2]], att3.loadSig, att3.data.slot)
|
||||
|
||||
|
@ -181,14 +179,14 @@ suite "Attestation pool processing" & preset():
|
|||
let
|
||||
# Create an attestation for slot 1!
|
||||
bc0 = get_beacon_committee(
|
||||
state.data.data, getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
|
||||
var
|
||||
att0 = makeAttestation(state.data.data, state.blck.root, bc0[0], cache)
|
||||
att0 = makeAttestation(state[], state.blck.root, bc0[0], cache)
|
||||
att0x = att0
|
||||
att1 = makeAttestation(state.data.data, state.blck.root, bc0[1], cache)
|
||||
att2 = makeAttestation(state.data.data, state.blck.root, bc0[2], cache)
|
||||
att3 = makeAttestation(state.data.data, state.blck.root, bc0[3], cache)
|
||||
att1 = makeAttestation(state[], state.blck.root, bc0[1], cache)
|
||||
att2 = makeAttestation(state[], state.blck.root, bc0[2], cache)
|
||||
att3 = makeAttestation(state[], state.blck.root, bc0[3], cache)
|
||||
|
||||
# Both attestations include member 2 but neither is a subset of the other
|
||||
att0.combine(att2)
|
||||
|
@ -204,7 +202,7 @@ suite "Attestation pool processing" & preset():
|
|||
rewards)
|
||||
|
||||
check:
|
||||
pool[].getAttestationsForBlock(state.data.data, cache).len() == 2
|
||||
pool[].getAttestationsForBlock(state[], cache).len() == 2
|
||||
# Can get either aggregate here, random!
|
||||
pool[].getAggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome()
|
||||
|
||||
|
@ -212,7 +210,7 @@ suite "Attestation pool processing" & preset():
|
|||
pool[].addAttestation(att3, @[bc0[3]], att3.loadSig, att3.data.slot)
|
||||
|
||||
block:
|
||||
let attestations = pool[].getAttestationsForBlock(state.data.data, cache)
|
||||
let attestations = pool[].getAttestationsForBlock(state[], cache)
|
||||
check:
|
||||
attestations.len() == 2
|
||||
attestations[0].aggregation_bits.countOnes() == 3
|
||||
|
@ -224,7 +222,7 @@ suite "Attestation pool processing" & preset():
|
|||
pool[].addAttestation(att0x, @[bc0[0]], att0x.loadSig, att0x.data.slot)
|
||||
|
||||
block:
|
||||
let attestations = pool[].getAttestationsForBlock(state.data.data, cache)
|
||||
let attestations = pool[].getAttestationsForBlock(state[], cache)
|
||||
check:
|
||||
attestations.len() == 1
|
||||
attestations[0].aggregation_bits.countOnes() == 4
|
||||
|
@ -237,11 +235,11 @@ suite "Attestation pool processing" & preset():
|
|||
root.data[0..<8] = toBytesBE(i.uint64)
|
||||
let
|
||||
bc0 = get_beacon_committee(
|
||||
state.data.data, getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
|
||||
for j in 0..<bc0.len():
|
||||
root.data[8..<16] = toBytesBE(j.uint64)
|
||||
var att = makeAttestation(state.data.data, root, bc0[j], cache)
|
||||
var att = makeAttestation(state[], root, bc0[j], cache)
|
||||
pool[].addAttestation(att, @[bc0[j]], att.loadSig, att.data.slot)
|
||||
inc attestations
|
||||
|
||||
|
@ -253,7 +251,7 @@ suite "Attestation pool processing" & preset():
|
|||
"6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS"
|
||||
check:
|
||||
# Fill block with attestations
|
||||
pool[].getAttestationsForBlock(state.data.data, cache).lenu64() ==
|
||||
pool[].getAttestationsForBlock(state[], cache).lenu64() ==
|
||||
MAX_ATTESTATIONS
|
||||
pool[].getAggregatedAttestation(
|
||||
getStateField(state, slot) - 1, 0.CommitteeIndex).isSome()
|
||||
|
@ -263,18 +261,16 @@ suite "Attestation pool processing" & preset():
|
|||
let
|
||||
# Create an attestation for slot 1!
|
||||
bc0 = get_beacon_committee(
|
||||
state.data.data, getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
attestation0 = makeAttestation(
|
||||
state.data.data, state.blck.root, bc0[0], cache)
|
||||
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
attestation0 = makeAttestation(state[], state.blck.root, bc0[0], cache)
|
||||
|
||||
check:
|
||||
process_slots(state.data, getStateField(state, slot) + 1, cache, rewards)
|
||||
|
||||
let
|
||||
bc1 = get_beacon_committee(state.data.data,
|
||||
bc1 = get_beacon_committee(state[],
|
||||
getStateField(state, slot), 0.CommitteeIndex, cache)
|
||||
attestation1 = makeAttestation(
|
||||
state.data.data, state.blck.root, bc1[0], cache)
|
||||
attestation1 = makeAttestation(state[], state.blck.root, bc1[0], cache)
|
||||
|
||||
# test reverse order
|
||||
pool[].addAttestation(
|
||||
|
@ -285,7 +281,7 @@ suite "Attestation pool processing" & preset():
discard process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)

let attestations = pool[].getAttestationsForBlock(state.data.data, cache)
let attestations = pool[].getAttestationsForBlock(state[], cache)

check:
attestations.len == 1

@ -295,11 +291,9 @@ suite "Attestation pool processing" & preset():
let
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state.data.data, getStateField(state, slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1], cache)
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(state[], state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(state[], state.blck.root, bc0[1], cache)

pool[].addAttestation(
attestation0, @[bc0[0]], attestation0.loadSig, attestation0.data.slot)
@ -310,7 +304,7 @@ suite "Attestation pool processing" & preset():
process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)

let attestations = pool[].getAttestationsForBlock(state.data.data, cache)
let attestations = pool[].getAttestationsForBlock(state[], cache)

check:
attestations.len == 1

@ -321,11 +315,9 @@ suite "Attestation pool processing" & preset():
var
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state.data.data, getStateField(state, slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1], cache)
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(state[], state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(state[], state.blck.root, bc0[1], cache)

attestation0.combine(attestation1)

@ -338,7 +330,7 @@ suite "Attestation pool processing" & preset():
process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)

let attestations = pool[].getAttestationsForBlock(state.data.data, cache)
let attestations = pool[].getAttestationsForBlock(state[], cache)

check:
attestations.len == 1

@ -347,12 +339,10 @@ suite "Attestation pool processing" & preset():
var cache = StateCache()
var
# Create an attestation for slot 1!
bc0 = get_beacon_committee(state.data.data,
bc0 = get_beacon_committee(state[],
getStateField(state, slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data, state.blck.root, bc0[1], cache)
attestation0 = makeAttestation(state[], state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(state[], state.blck.root, bc0[1], cache)

attestation0.combine(attestation1)

@ -365,7 +355,7 @@ suite "Attestation pool processing" & preset():
process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)

let attestations = pool[].getAttestationsForBlock(state.data.data, cache)
let attestations = pool[].getAttestationsForBlock(state[], cache)

check:
attestations.len == 1
@ -424,8 +414,8 @@ suite "Attestation pool processing" & preset():
pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

bc1 = get_beacon_committee(
state.data.data, getStateField(state, slot) - 1, 1.CommitteeIndex, cache)
attestation0 = makeAttestation(state.data.data, b10.root, bc1[0], cache)
state[], getStateField(state, slot) - 1, 1.CommitteeIndex, cache)
attestation0 = makeAttestation(state[], b10.root, bc1[0], cache)

pool[].addAttestation(
attestation0, @[bc1[0]], attestation0.loadSig, attestation0.data.slot)

@ -437,8 +427,8 @@ suite "Attestation pool processing" & preset():
head2 == b10Add[]

let
attestation1 = makeAttestation(state.data.data, b11.root, bc1[1], cache)
attestation2 = makeAttestation(state.data.data, b11.root, bc1[2], cache)
attestation1 = makeAttestation(state[], b11.root, bc1[1], cache)
attestation2 = makeAttestation(state[], b11.root, bc1[2], cache)
pool[].addAttestation(
attestation1, @[bc1[1]], attestation1.loadSig, attestation1.data.slot)

@ -513,7 +503,7 @@ suite "Attestation pool processing" & preset():
for epoch in 0 ..< 5:
let start_slot = compute_start_slot_at_epoch(Epoch epoch)
let committees_per_slot =
get_committee_count_per_slot(state.data.data, Epoch epoch, cache)
get_committee_count_per_slot(state[], Epoch epoch, cache)
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
let new_block = addTestBlock(
state.data, block_root, cache, attestations = attestations)

@ -533,7 +523,7 @@ suite "Attestation pool processing" & preset():
attestations.setlen(0)
for index in 0'u64 ..< committees_per_slot:
let committee = get_beacon_committee(
state.data.data, getStateField(state, slot), index.CommitteeIndex,
state[], getStateField(state, slot), index.CommitteeIndex,
cache)

# Create a bitfield filled with the given count per attestation,

@ -545,7 +535,7 @@ suite "Attestation pool processing" & preset():
attestations.add Attestation(
aggregation_bits: aggregation_bits,
data: makeAttestationData(
state.data.data, getStateField(state, slot),
state[], getStateField(state, slot),
index.CommitteeIndex, blockroot)
# signature: ValidatorSig()
)
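The `getStateField(state, slot)` calls that replace direct field access come from the new `statedata_helpers` module, which is only imported in this diff, not shown. A plausible minimal shape for that accessor is a template that reaches through the two wrappers; this is a sketch of the idea, not the module's actual code:

```nim
# Sketch only: the real statedata_helpers may differ in detail.
template getStateField(stateData, fieldName: untyped): untyped =
  stateData.data.data.fieldName

# e.g. getStateField(state, slot) then expands to state.data.data.slot,
# the field of the BeaconState inside the HashedBeaconState wrapper.
```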
@ -122,13 +122,12 @@ suite "Block pool processing" & preset():
db = makeTestDB(SLOTS_PER_EPOCH)
dag = init(ChainDAGRef, defaultRuntimePreset, db)
quarantine = QuarantineRef.init(keys.newRng())
stateData = newClone(dag.headState)
state = newClone(dag.headState.data)
cache = StateCache()
rewards = RewardInfo()
att0 = makeFullAttestations(
stateData.data.data, dag.tail.root, 0.Slot, cache)
b1 = addTestBlock(stateData.data, dag.tail.root, cache, attestations = att0)
b2 = addTestBlock(stateData.data, b1.root, cache)
att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
b1 = addTestBlock(state[], dag.tail.root, cache, attestations = att0)
b2 = addTestBlock(state[], b1.root, cache)
test "getRef returns nil for missing blocks":
check:
dag.getRef(default Eth2Digest) == nil

@ -169,11 +168,10 @@ suite "Block pool processing" & preset():

# Skip one slot to get a gap
check:
process_slots(
stateData.data, getStateField(stateData, slot) + 1, cache, rewards)
process_slots(state[], state.data.slot + 1, cache, rewards)

let
b4 = addTestBlock(stateData.data, b2.root, cache)
b4 = addTestBlock(state[], b2.root, cache)
b4Add = dag.addRawBlock(quarantine, b4, nil)

check:

@ -349,8 +347,8 @@ suite "chain DAG finalization tests" & preset():
tmpState = assignClone(dag.headState.data)
check:
process_slots(
tmpState[], tmpState.data.slot + (5 * SLOTS_PER_EPOCH).uint64, cache,
rewards)
tmpState[], tmpState.data.slot + (5 * SLOTS_PER_EPOCH).uint64,
cache, rewards)

let lateBlock = addTestBlock(tmpState[], dag.head.root, cache)
block:

@ -371,7 +369,7 @@ suite "chain DAG finalization tests" & preset():
blck = addTestBlock(
tmpState[], dag.head.root, cache,
attestations = makeFullAttestations(
tmpState[].data, dag.head.root, tmpState[].data.slot, cache, {}))
tmpState[], dag.head.root, tmpState.data.slot, cache, {}))
let added = dag.addRawBlock(quarantine, blck, nil)
check: added.isOk()
dag.updateHead(added[], quarantine)

@ -484,8 +482,8 @@ suite "chain DAG finalization tests" & preset():
var blck = makeTestBlock(
dag.headState.data, dag.head.root, cache,
attestations = makeFullAttestations(
dag.headState.data.data, dag.head.root,
getStateField(dag.headState, slot), cache, {}))
dag.headState, dag.head.root, getStateField(dag.headState, slot),
cache, {}))

let added = dag.addRawBlock(quarantine, blck, nil)
check: added.isOk()
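Condensing the hunk above: the chain-DAG finalization test now hands `dag.headState` (a StateData) straight to `makeFullAttestations`, while `makeTestBlock` still takes the inner HashedBeaconState. A sketch reusing the test's own identifiers (`dag`, `quarantine`, and `cache` are assumed to be in scope):

```nim
# Build a fully attested block on the current head and feed it back to the
# DAG; identifiers as used in the surrounding test, so this is a sketch
# rather than standalone code.
var blck = makeTestBlock(
  dag.headState.data, dag.head.root, cache,
  attestations = makeFullAttestations(
    dag.headState, dag.head.root, getStateField(dag.headState, slot),
    cache, {}))
let added = dag.addRawBlock(quarantine, blck, nil)
doAssert added.isOk()
dag.updateHead(added[], quarantine)
```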
@ -503,7 +501,7 @@ suite "chain DAG finalization tests" & preset():
check:
dag.get(cur).data.message.state_root ==
tmpStateData[].data.root
tmpStateData[].data.root == hash_tree_root(tmpSTateData[].data.data)
tmpStateData[].data.root == hash_tree_root(tmpSTateData[])
cur = cur.parent

let

@ -17,10 +17,11 @@ import
../beacon_chain/gossip_processing/[gossip_validation, batch_validation],
../beacon_chain/fork_choice/[fork_choice_types, fork_choice],
../beacon_chain/consensus_object_pools/[
block_quarantine, blockchain_dag, block_clearance, attestation_pool],
block_quarantine, blockchain_dag, block_clearance, attestation_pool,
statedata_helpers],
../beacon_chain/ssz/merkleization,
../beacon_chain/spec/[crypto, datatypes, digest, validator, state_transition,
helpers, presets, network],
../beacon_chain/spec/[crypto, datatypes, digest, state_transition, helpers,
presets, network],
# Test utilities
./testutil, ./testdbutil, ./testblockutil

@ -66,15 +67,15 @@ suite "Gossip validation " & preset():
var
# Create attestations for slot 1
beacon_committee = get_beacon_committee(
chainDag.headState.data.data, chainDag.head.slot, 0.CommitteeIndex, cache)
chainDag.headState, chainDag.head.slot, 0.CommitteeIndex, cache)
att_1_0 = makeAttestation(
chainDag.headState.data.data, chainDag.head.root, beacon_committee[0], cache)
chainDag.headState, chainDag.head.root, beacon_committee[0], cache)
att_1_1 = makeAttestation(
chainDag.headState.data.data, chainDag.head.root, beacon_committee[1], cache)
chainDag.headState, chainDag.head.root, beacon_committee[1], cache)

committees_per_slot =
get_committee_count_per_slot(chainDag.headState.data.data,
att_1_0.data.slot.epoch, cache)
get_committee_count_per_slot(chainDag.headState,
att_1_0.data.slot.epoch, cache)

subnet = compute_subnet_for_attestation(
committees_per_slot,
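The hunk above is cut off by the diff context, but the pattern is clear: `get_beacon_committee` and `get_committee_count_per_slot` now accept `chainDag.headState` (a StateData) directly. Per the commit message these are thin wrappers over the BeaconState spec functions; a plausible shape, offered only as a sketch:

```nim
# Plausible wrappers dispatching to the spec-level BeaconState overloads;
# the real versions live in statedata_helpers / spec_cache and may differ.
func get_beacon_committee(
    state: StateData, slot: Slot, index: CommitteeIndex,
    cache: var StateCache): seq[ValidatorIndex] =
  get_beacon_committee(state.data.data, slot, index, cache)

func get_committee_count_per_slot(
    state: StateData, epoch: Epoch, cache: var StateCache): uint64 =
  get_committee_count_per_slot(state.data.data, epoch, cache)
```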
@ -15,6 +15,19 @@ import
validator, state_transition, presets],
../beacon_chain/ssz

proc makeAttestation(
state: BeaconState, beacon_block_root: Eth2Digest,
validator_index: ValidatorIndex, cache: var StateCache): Attestation =
# The called functions don't use the extra-BeaconState parts of StateData.
let
stateData = (ref StateData)(
data: HashedBeaconState(data: state),
blck: BlockRef(root: beacon_block_root, slot: state.slot))
(committee, slot, index) =
find_beacon_committee(stateData[], validator_index, cache)
makeAttestation(stateData[], beacon_block_root, committee, slot, index,
validator_index, cache)

suite "Block processing" & preset():
## For now just test that we can compile and execute block processing with
## mock data.
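The shim above keeps this suite's older BeaconState-based call sites compiling: it wraps the bare state in a throwaway StateData whose BlockRef carries only the root and slot, then defers to the StateData overload. A call site stays a one-liner, as in this sketch (assuming `state`, `blck`, `validator_index`, and `cache` from the surrounding test):

```nim
# Unchanged call shape for tests that only have a BeaconState at hand.
let att = makeAttestation(state, blck.root, validator_index, cache)
doAssert att.data.beacon_block_root == blck.root
```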
@ -10,8 +10,9 @@ import
../beacon_chain/extras,
../beacon_chain/validators/validator_pool,
../beacon_chain/ssz/merkleization,
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, presets,
helpers, validator, signatures, state_transition]
../beacon_chain/spec/[crypto, datatypes, digest, presets, helpers, validator,
signatures, state_transition],
../beacon_chain/consensus_object_pools/statedata_helpers

func makeFakeValidatorPrivKey(i: int): ValidatorPrivKey =
# 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,

@ -53,7 +54,7 @@ func makeDeposit*(i: int, flags: UpdateFlags = {}): DepositData =
result.signature = get_deposit_signature(
defaultRuntimePreset, result, privkey).toValidatorSig()

proc makeInitialDeposits*(
func makeInitialDeposits*(
n = SLOTS_PER_EPOCH, flags: UpdateFlags = {}): seq[DepositData] =
for i in 0..<n.int:
result.add makeDeposit(i, flags)

@ -147,8 +148,39 @@ proc makeTestBlock*(
tmpState[], parent_root, cache, eth1_data, attestations, deposits,
graffiti)

proc makeAttestation*(
state: BeaconState, beacon_block_root: Eth2Digest,
func makeAttestationData*(
state: StateData, slot: Slot, committee_index: CommitteeIndex,
beacon_block_root: Eth2Digest): AttestationData =
## Create an attestation / vote for the block `beacon_block_root` using the
## data in `state` to fill in the rest of the fields.
## `state` is the state corresponding to the `beacon_block_root` advanced to
## the slot we're attesting to.

let
current_epoch = get_current_epoch(state)
start_slot = compute_start_slot_at_epoch(current_epoch)
epoch_boundary_block_root =
if start_slot == getStateField(state, slot): beacon_block_root
else: get_block_root_at_slot(state, start_slot)

doAssert slot.compute_epoch_at_slot == current_epoch,
"Computed epoch was " & $slot.compute_epoch_at_slot &
" while the state current_epoch was " & $current_epoch

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#attestation-data
AttestationData(
slot: slot,
index: committee_index.uint64,
beacon_block_root: beacon_block_root,
source: getStateField(state, current_justified_checkpoint),
target: Checkpoint(
epoch: current_epoch,
root: epoch_boundary_block_root
)
)
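A usage example of the new StateData-based helper, mirroring the attestation-pool test earlier in this diff (a sketch; `state` is a ref StateData and `blockroot` the root being voted for, both taken from that test rather than defined here):

```nim
# Mirrors the attestation-pool test above; not standalone code.
let data = makeAttestationData(
  state[], getStateField(state, slot), 0.CommitteeIndex, blockroot)
doAssert data.slot == getStateField(state, slot)
doAssert data.index == 0'u64
```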
func makeAttestation*(
state: StateData, beacon_block_root: Eth2Digest,
committee: seq[ValidatorIndex], slot: Slot, index: CommitteeIndex,
validator_index: ValidatorIndex, cache: var StateCache,
flags: UpdateFlags = {}): Attestation =

@ -157,7 +189,7 @@ proc makeAttestation*(
# monotonic enumerable index, is wasteful and slow. Most test callers
# want ValidatorIndex, so that's supported too.
let
validator = state.validators[validator_index]
validator = getStateField(state, validators)[validator_index]
sac_index = committee.find(validator_index)
data = makeAttestationData(state, slot, index, beacon_block_root)
@ -169,7 +201,9 @@ proc makeAttestation*(
let
sig =
if skipBLSValidation notin flags:
get_attestation_signature(state.fork, state.genesis_validators_root,
get_attestation_signature(
getStateField(state, fork),
getStateField(state, genesis_validators_root),
data, hackPrivKey(validator)).toValidatorSig()
else:
ValidatorSig()

@ -180,10 +214,10 @@ proc makeAttestation*(
signature: sig
)

proc find_beacon_committee(
state: BeaconState, validator_index: ValidatorIndex,
func find_beacon_committee*(
state: StateData, validator_index: ValidatorIndex,
cache: var StateCache): auto =
let epoch = compute_epoch_at_slot(state.slot)
let epoch = compute_epoch_at_slot(getStateField(state, slot))
for epoch_committee_index in 0'u64 ..< get_committee_count_per_slot(
state, epoch, cache) * SLOTS_PER_EPOCH:
let

@ -195,16 +229,16 @@ proc find_beacon_committee(
return (committee, slot, index)
doAssert false

proc makeAttestation*(
state: BeaconState, beacon_block_root: Eth2Digest,
func makeAttestation*(
state: StateData, beacon_block_root: Eth2Digest,
validator_index: ValidatorIndex, cache: var StateCache): Attestation =
let (committee, slot, index) =
find_beacon_committee(state, validator_index, cache)
makeAttestation(state, beacon_block_root, committee, slot, index,
validator_index, cache)

proc makeFullAttestations*(
state: BeaconState, beacon_block_root: Eth2Digest, slot: Slot,
func makeFullAttestations*(
state: StateData, beacon_block_root: Eth2Digest, slot: Slot,
cache: var StateCache,
flags: UpdateFlags = {}): seq[Attestation] =
# Create attestations in which the full committee participates for each shard

@ -216,7 +250,8 @@ proc makeFullAttestations*(
let
committee = get_beacon_committee(
state, slot, index.CommitteeIndex, cache)
data = makeAttestationData(state, slot, index.CommitteeIndex, beacon_block_root)
data = makeAttestationData(
state, slot, index.CommitteeIndex, beacon_block_root)

doAssert committee.len() >= 1
# Initial attestation

@ -226,8 +261,9 @@ proc makeFullAttestations*(

var agg {.noInit.}: AggregateSignature
agg.init(get_attestation_signature(
state.fork, state.genesis_validators_root, data,
hackPrivKey(state.validators[committee[0]])))
getStateField(state, fork),
getStateField(state, genesis_validators_root), data,
hackPrivKey(getStateField(state, validators)[committee[0]])))

# Aggregate the remainder
attestation.aggregation_bits.setBit 0

@ -235,13 +271,23 @@ proc makeFullAttestations*(
attestation.aggregation_bits.setBit j
if skipBLSValidation notin flags:
agg.aggregate(get_attestation_signature(
state.fork, state.genesis_validators_root, data,
hackPrivKey(state.validators[committee[j]])
getStateField(state, fork),
getStateField(state, genesis_validators_root), data,
hackPrivKey(getStateField(state, validators)[committee[j]])
))

attestation.signature = agg.finish().toValidatorSig()
result.add attestation
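The signing flow above is the usual init/aggregate/finish pattern of the AggregateSignature API used in this diff: seed the aggregate with the first committee member's signature, fold in the rest, then serialize. In isolation, as a sketch (`sig0`, `sig1`, and `attestation` stand in for values produced as above):

```nim
var agg {.noInit.}: AggregateSignature
agg.init(sig0)                 # first member's signature seeds the aggregate
agg.aggregate(sig1)            # remaining members are folded in one by one
attestation.signature = agg.finish().toValidatorSig()
```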
func makeFullAttestations*(
state: HashedBeaconState, beacon_block_root: Eth2Digest, slot: Slot,
cache: var StateCache,
flags: UpdateFlags = {}): seq[Attestation] =
makeFullAttestations(
(ref StateData)(data: state, blck: BlockRef(
root: beacon_block_root, slot: slot))[], beacon_block_root, slot, cache,
flags)
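Design-wise, the overload above adapts HashedBeaconState-only callers (such as the makeTestBlocks iterator below) by allocating a temporary StateData whose BlockRef carries just the root and slot, then reusing the StateData implementation. Spelled out for a hashed state `hs`, as a sketch (`beacon_block_root`, `slot`, and `cache` assumed in scope):

```nim
# Equivalent expansion of the adapter for a HashedBeaconState `hs`.
let tmp = (ref StateData)(
  data: hs,
  blck: BlockRef(root: beacon_block_root, slot: slot))
let atts = makeFullAttestations(tmp[], beacon_block_root, slot, cache)
```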
iterator makeTestBlocks*(
state: HashedBeaconState,
parent_root: Eth2Digest,

@ -253,9 +299,7 @@ iterator makeTestBlocks*(
parent_root = parent_root
for _ in 0..<blocks:
let attestations = if attested:
makeFullAttestations(
state[].data, parent_root,
state[].data.slot, cache)
makeFullAttestations(state[], parent_root, state[].data.slot, cache)
else:
@[]