Mirror of https://github.com/status-im/nimbus-eth2.git (synced 2025-01-26 22:39:49 +00:00)
send and validate with v0.12.1 attestations (#1213)
* send and validate with v0.12.1 attestations
* use EpochRef instead of empty cache in attestation validation
This commit is contained in:
parent f60235b3e9
commit a683656238
@@ -10,8 +10,10 @@
 import
   options, chronicles,
   ./spec/[
-    beaconstate, datatypes, crypto, digest, helpers, validator, signatures],
-  ./block_pool, ./attestation_pool, ./beacon_node_types, ./ssz
+    beaconstate, datatypes, crypto, digest, helpers, network, validator,
+    signatures],
+  ./block_pool, ./block_pools/candidate_chains, ./attestation_pool,
+  ./beacon_node_types, ./ssz
 
 logScope:
   topics = "att_aggr"
@@ -73,7 +75,6 @@ proc aggregate_attestations*(
 
   none(AggregateAndProof)
 
-
 # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#attestation-subnets
 proc isValidAttestation*(
     pool: var AttestationPool, attestation: Attestation, current_slot: Slot,
@@ -82,13 +83,6 @@ proc isValidAttestation*(
     topics = "att_aggr valid_att"
     received_attestation = shortLog(attestation)
 
-  # The attestation's committee index (attestation.data.index) is for the
-  # correct subnet.
-  if attestation.data.index != topicCommitteeIndex:
-    debug "attestation's committee index not for the correct subnet",
-      topicCommitteeIndex = topicCommitteeIndex
-    return false
-
   if not (attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >=
       current_slot and current_slot >= attestation.data.slot):
     debug "attestation.data.slot not within ATTESTATION_PROPAGATION_SLOT_RANGE"
@@ -133,20 +127,45 @@ proc isValidAttestation*(
   # TODO: consider a "slush pool" of attestations whose blocks have not yet
   # propagated - i.e. imagine that attestations are smaller than blocks and
   # therefore propagate faster, thus reordering their arrival in some nodes
-  if pool.blockPool.get(attestation.data.beacon_block_root).isNone():
+  let attestationBlck = pool.blockPool.getRef(attestation.data.beacon_block_root)
+  if attestationBlck.isNil:
     debug "block doesn't exist in block pool"
     pool.blockPool.addMissing(attestation.data.beacon_block_root)
     return false
 
-  # The signature of attestation is valid.
-  # TODO need to know above which validator anyway, and this is too general
-  # as it supports aggregated attestations (which this can't be)
-  var cache = get_empty_per_epoch_cache()
-  if not is_valid_indexed_attestation(
-      pool.blockPool.headState.data.data,
-      get_indexed_attestation(
-        pool.blockPool.headState.data.data, attestation, cache), {}):
-    debug "signature verification failed"
-    return false
+  pool.blockPool.withEpochState(
+      pool.blockPool.tmpState,
+      BlockSlot(blck: attestationBlck, slot: attestation.data.slot)):
+    when ETH2_SPEC == "v0.12.1":
+      # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#attestation-subnets
+      # [REJECT] The attestation is for the correct subnet (i.e.
+      # compute_subnet_for_attestation(state, attestation) == subnet_id).
+      let
+        epochInfo = blck.getEpochInfo(state)
+        requiredSubnetIndex =
+          compute_subnet_for_attestation(
+            epochInfo.shuffled_active_validator_indices.len.uint64, attestation)
+
+      if requiredSubnetIndex != topicCommitteeIndex:
+        debug "isValidAttestation: attestation's committee index not for the correct subnet",
+          topicCommitteeIndex = topicCommitteeIndex,
+          attestation_data_index = attestation.data.index,
+          requiredSubnetIndex = requiredSubnetIndex
+        return false
+    else:
+      # The attestation's committee index (attestation.data.index) is for the
+      # correct subnet.
+      if attestation.data.index != topicCommitteeIndex:
+        debug "isValidAttestation: attestation's committee index not for the correct subnet",
+          topicCommitteeIndex = topicCommitteeIndex,
+          attestation_data_index = attestation.data.index
+        return false
+
+    # The signature of attestation is valid.
+    var cache = getEpochCache(blck, state)
+    if not is_valid_indexed_attestation(
+        state, get_indexed_attestation(state, attestation, cache), {}):
+      debug "signature verification failed"
+      return false
 
   true
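Aside on the v0.12.1 subnet check added above: the required subnet is derived from the attestation's slot, its committee index and the size of the active validator set, not from attestation.data.index alone. The following is a small, self-contained sketch of that mapping using mainnet-style constants; the names committeeCountPerSlot and computeSubnetForAttestation, and the exact clamp, are illustrative assumptions rather than the project's compute_subnet_for_attestation:

  # Standalone illustration only - not the project's code.
  const
    SLOTS_PER_EPOCH = 32'u64
    MAX_COMMITTEES_PER_SLOT = 64'u64
    TARGET_COMMITTEE_SIZE = 128'u64
    ATTESTATION_SUBNET_COUNT = 64'u64

  func committeeCountPerSlot(numActiveValidators: uint64): uint64 =
    # Clamp active/SLOTS_PER_EPOCH/TARGET_COMMITTEE_SIZE to [1, MAX_COMMITTEES_PER_SLOT].
    max(1'u64, min(MAX_COMMITTEES_PER_SLOT,
      numActiveValidators div SLOTS_PER_EPOCH div TARGET_COMMITTEE_SIZE))

  func computeSubnetForAttestation(
      numActiveValidators, slot, committeeIndex: uint64): uint64 =
    # subnet = (committees elapsed since the start of the epoch + committee index)
    #          mod ATTESTATION_SUBNET_COUNT
    let
      slotsSinceEpochStart = slot mod SLOTS_PER_EPOCH
      committeesSinceEpochStart =
        committeeCountPerSlot(numActiveValidators) * slotsSinceEpochStart
    (committeesSinceEpochStart + committeeIndex) mod ATTESTATION_SUBNET_COUNT

  when isMainModule:
    # ~100k active validators -> 24 committees per slot; an attestation at
    # slot 5, committee index 3 lands on subnet (24*5 + 3) mod 64 = 59.
    echo computeSubnetForAttestation(100_000'u64, 5'u64, 3'u64)

This is why the gossip-side checks and sendAttestation now need the shuffling size (shuffled_active_validator_indices.len) and therefore an epoch-matching state.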
@@ -155,7 +155,8 @@ template justifiedState*(pool: BlockPool): StateData =
   pool.dag.justifiedState
 
 template withState*(
-    pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped): untyped =
+    pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped):
+    untyped =
   ## Helper template that updates state to a particular BlockSlot - usage of
   ## cache is unsafe outside of block.
   ## TODO async transformations will lead to a race where cache gets updated
@@ -163,6 +164,19 @@ template withState*(
 
   withState(pool.dag, cache, blockSlot, body)
 
+template withEpochState*(
+    pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped):
+    untyped =
+  ## Helper template that updates state to a state with an epoch matching the
+  ## epoch of blockSlot. This aims to be at least as fast as withState, quick
+  ## enough to expose to unauthenticated, remote use, but trades off that it's
+  ## possible for it to decide that finding a state from a matching epoch may
+  ## prove too expensive for such use cases.
+  ##
+  ## cache is unsafe outside of block.
+
+  withEpochState(pool.dag, cache, blockSlot, body)
+
 proc updateStateData*(pool: BlockPool, state: var StateData, bs: BlockSlot) =
   ## Rewind or advance state such that it matches the given block and slot -
   ## this may include replaying from an earlier snapshot if blck is on a
@@ -129,6 +129,22 @@ func get_ancestor*(blck: BlockRef, slot: Slot): BlockRef =
 
     blck = blck.parent
 
+iterator get_ancestors_in_epoch(blockSlot: BlockSlot): BlockSlot =
+  let min_slot =
+    blockSlot.slot.compute_epoch_at_slot.compute_start_slot_at_epoch
+  var blockSlot = blockSlot
+
+  while true:
+    for slot in countdown(blockSlot.slot, max(blockSlot.blck.slot, min_slot)):
+      yield BlockSlot(blck: blockSlot.blck, slot: slot)
+
+    if blockSlot.blck.parent.isNil or blockSlot.blck.slot <= min_slot:
+      break
+
+    doAssert blockSlot.blck.slot > blockSlot.blck.parent.slot
+    blockSlot =
+      BlockSlot(blck: blockSlot.blck.parent, slot: blockSlot.blck.slot - 1)
+
 func atSlot*(blck: BlockRef, slot: Slot): BlockSlot =
   ## Return a BlockSlot at a given slot, with the block set to the closest block
   ## available. If slot comes from before the block, a suitable block ancestor
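The get_ancestors_in_epoch iterator added above walks block/slot pairs backwards through empty slots and parent blocks, but never past the first slot of the starting slot's epoch. A toy, self-contained sketch of the same walk (ToyBlock, ancestorsInEpoch and the constant are simplified stand-ins, not the project's BlockRef/BlockSlot types):

  type
    ToyBlock = ref object
      slot: uint64
      parent: ToyBlock

  const SLOTS_PER_EPOCH = 32'u64

  iterator ancestorsInEpoch(blck: ToyBlock, slot: uint64): (ToyBlock, uint64) =
    # Yield (block, slot) pairs from `slot` down to the first slot of its epoch,
    # switching to the parent block once we step past a block's own slot.
    let minSlot = (slot div SLOTS_PER_EPOCH) * SLOTS_PER_EPOCH
    var cur = blck
    var curSlot = slot
    while true:
      for s in countdown(curSlot, max(cur.slot, minSlot)):
        yield (cur, s)
      if cur.parent.isNil or cur.slot <= minSlot:
        break
      curSlot = cur.slot - 1
      cur = cur.parent

  when isMainModule:
    # A short chain with empty slots: genesis(0) <- a(30) <- b(33)
    let
      genesis = ToyBlock(slot: 0)
      a = ToyBlock(slot: 30, parent: genesis)
      b = ToyBlock(slot: 33, parent: a)
    for blk, s in ancestorsInEpoch(b, 35):
      echo blk.slot, " covers slot ", s  # 33 covers 35, 34, 33; then 30 covers 32

withEpochState below walks these candidates until it finds one for which a cached state is already available.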
@@ -618,6 +634,24 @@ proc getStateDataCached(dag: CandidateChains, state: var StateData, bs: BlockSlot
 
   false
 
+template withEpochState*(
+    dag: CandidateChains, cache: var StateData, blockSlot: BlockSlot, body: untyped): untyped =
+  ## Helper template that updates state to a particular BlockSlot - usage of
+  ## cache is unsafe outside of block.
+  ## TODO async transformations will lead to a race where cache gets updated
+  ## while waiting for future to complete - catch this here somehow?
+
+  for ancestor in get_ancestors_in_epoch(blockSlot):
+    if getStateDataCached(dag, cache, ancestor):
+      break
+
+  template hashedState(): HashedBeaconState {.inject, used.} = cache.data
+  template state(): BeaconState {.inject, used.} = cache.data.data
+  template blck(): BlockRef {.inject, used.} = cache.blck
+  template root(): Eth2Digest {.inject, used.} = cache.data.root
+
+  body
+
 proc updateStateData*(dag: CandidateChains, state: var StateData, bs: BlockSlot) =
   ## Rewind or advance state such that it matches the given block and slot -
   ## this may include replaying from an earlier snapshot if blck is on a
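The withEpochState template above follows a common Nim idiom: refresh a caller-supplied cache, then evaluate body with a few {.inject.}ed templates that alias the cache's fields (state, blck, root). A minimal self-contained sketch of that idiom, with made-up names (Cache, withCachedValue) rather than the project's API:

  type Cache = object
    value: int
    epoch: int

  proc update(cache: var Cache, epoch: int) =
    # Stand-in for "find or compute a state whose epoch matches the request".
    if cache.epoch != epoch:
      cache = Cache(value: epoch * 100, epoch: epoch)

  template withCachedValue(cache: var Cache, wantedEpoch: int, body: untyped): untyped =
    update(cache, wantedEpoch)
    # Injected templates make `value` and `cachedEpoch` visible inside `body`,
    # much like `state`/`blck`/`root` above.
    template value(): int {.inject, used.} = cache.value
    template cachedEpoch(): int {.inject, used.} = cache.epoch
    body

  when isMainModule:
    var c: Cache
    withCachedValue(c, 3):
      echo "epoch ", cachedEpoch, " -> ", value  # prints: epoch 3 -> 300

As in the real template, the injected symbols alias the cache, so they must not be used outside the body.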
@@ -20,9 +20,9 @@ import
   # Local modules
   spec/[datatypes, digest, crypto, beaconstate, helpers, validator, network],
   conf, time, validator_pool, state_transition,
-  attestation_pool, block_pool, eth2_network, keystore_management,
-  beacon_node_common, beacon_node_types, nimbus_binary_common,
-  mainchain_monitor, version, ssz/merkleization, interop,
+  attestation_pool, block_pool, block_pools/candidate_chains, eth2_network,
+  keystore_management, beacon_node_common, beacon_node_types,
+  nimbus_binary_common, mainchain_monitor, version, ssz/merkleization, interop,
   attestation_aggregation, sync_manager, sszdump
 
 # Metrics for tracking attestation and beacon block loss
@@ -98,28 +98,51 @@ proc isSynced(node: BeaconNode, head: BlockRef): bool =
   else:
     true
 
-proc sendAttestation*(node: BeaconNode, attestation: Attestation) =
+proc sendAttestation*(
+    node: BeaconNode, attestation: Attestation, num_active_validators: uint64) =
   logScope: pcs = "send_attestation"
 
-  # https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/validator.md#broadcast-attestation
-  node.network.broadcast(
-    getMainnetAttestationTopic(node.forkDigest, attestation.data.index),
-    attestation)
+  when ETH2_SPEC == "v0.12.1":
+    # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
+    node.network.broadcast(
+      getAttestationTopic(node.forkDigest, attestation, num_active_validators),
+      attestation)
+  else:
+    # https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/validator.md#broadcast-attestation
+    node.network.broadcast(
+      getMainnetAttestationTopic(node.forkDigest, attestation.data.index),
+      attestation)
 
   beacon_attestations_sent.inc()
+
+proc sendAttestation*(node: BeaconNode, attestation: Attestation) =
+  # For the validator API, which doesn't supply num_active_validators.
+  let attestationBlck =
+    node.blockPool.getRef(attestation.data.beacon_block_root)
+  if attestationBlck.isNil:
+    debug "Attempt to send attestation without corresponding block"
+    return
+
+  node.blockPool.withEpochState(
+      node.blockPool.tmpState,
+      BlockSlot(blck: attestationBlck, slot: attestation.data.slot)):
+    node.sendAttestation(
+      attestation,
+      blck.getEpochInfo(state).shuffled_active_validator_indices.len.uint64)
 
 proc createAndSendAttestation(node: BeaconNode,
                               fork: Fork,
                               genesis_validators_root: Eth2Digest,
                               validator: AttachedValidator,
                               attestationData: AttestationData,
                               committeeLen: int,
-                              indexInCommittee: int) {.async.} =
+                              indexInCommittee: int,
+                              num_active_validators: uint64) {.async.} =
   logScope: pcs = "send_attestation"
 
   var attestation = await validator.produceAndSignAttestation(attestationData, committeeLen, indexInCommittee, fork, genesis_validators_root)
 
-  node.sendAttestation(attestation)
+  node.sendAttestation(attestation, num_active_validators)
 
   if node.config.dumpEnabled:
     dump(node.config.dumpDirOutgoing, attestation.data, validator.pubKey)
@@ -311,8 +334,15 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
   # the complexity of handling forks correctly - instead, we use an adapted
   # version here that calculates the committee for a single slot only
   node.blockPool.withState(node.blockPool.tmpState, attestationHead):
-    var cache = get_empty_per_epoch_cache()
-    let committees_per_slot = get_committee_count_at_slot(state, slot)
+    var cache = getEpochCache(attestationHead.blck, state)
+    let
+      committees_per_slot = get_committee_count_at_slot(state, slot)
+      num_active_validators =
+        try:
+          cache.shuffled_active_validator_indices[
+            slot.compute_epoch_at_slot].len.uint64
+        except KeyError:
+          raiseAssert "getEpochCache(...) didn't fill cache"
 
     for committee_index in 0'u64..<committees_per_slot:
       let committee = get_beacon_committee(
@@ -327,7 +357,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
     for a in attestations:
       traceAsyncErrors createAndSendAttestation(
         node, state.fork, state.genesis_validators_root, a.validator, a.data,
-        a.committeeLen, a.indexInCommittee)
+        a.committeeLen, a.indexInCommittee, num_active_validators)
 
 proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
     Future[BlockRef] {.async.} =