fix attestation sending schedules to avoid timing attack (#1995)

tersec 2020-11-16 10:44:18 +01:00 committed by GitHub
parent 430bac81dc
commit e2f161dbf7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 67 additions and 11 deletions

View File

@@ -63,6 +63,7 @@ type
     attestationPool*: ref AttestationPool
     exitPool: ref ExitPool
     quarantine*: QuarantineRef
+    blockReceivedDuringSlot*: Future[void]
     blocksQueue*: AsyncQueue[BlockEntry]
     attestationsQueue*: AsyncQueue[AttestationEntry]
@@ -136,6 +137,10 @@ proc storeBlock(
     attestationPool[].addForkChoice(
       epochRef, blckRef, signedBlock.message, wallSlot)

+  # Trigger attestation sending
+  if blck.isOk and not self.blockReceivedDuringSlot.finished:
+    self.blockReceivedDuringSlot.complete()
+
   self.dumpBlock(signedBlock, blck)

   # There can be a scenario where we receive a block we already received.
@@ -146,7 +151,7 @@ proc storeBlock(
   let duration = (Moment.now() - start).toFloatSeconds()
   beacon_store_block_duration_seconds.observe(duration)

-  return ok()
+  ok()

 proc processAttestation(
     self: var Eth2Processor, entry: AttestationEntry) =
@@ -462,6 +467,7 @@ proc new*(T: type Eth2Processor,
     attestationPool: attestationPool,
     exitPool: exitPool,
     quarantine: quarantine,
+    blockReceivedDuringSlot: newFuture[void](),
     blocksQueue: newAsyncQueue[BlockEntry](1),
     aggregatesQueue: newAsyncQueue[AggregateEntry](MAX_ATTESTATIONS.int),
     attestationsQueue: newAsyncQueue[AttestationEntry](TARGET_COMMITTEE_SIZE.int * 4),

View File

@@ -462,6 +462,10 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.async.} =
     finalizedEpoch =
       node.chainDag.finalizedHead.blck.slot.compute_epoch_at_slot()

+  if not node.processor[].blockReceivedDuringSlot.finished:
+    node.processor[].blockReceivedDuringSlot.complete()
+  node.processor[].blockReceivedDuringSlot = newFuture[void]()
+
   info "Slot start",
     lastSlot = shortLog(lastSlot),
     scheduledSlot = shortLog(scheduledSlot),

View File

@@ -74,6 +74,10 @@ const
   DEPOSIT_CONTRACT_TREE_DEPTH* = 32
   BASE_REWARDS_PER_EPOCH* = 4

+  # https://github.com/ethereum/eth2.0-specs/pull/2101
+  ATTESTATION_PRODUCTION_DIVISOR* = 3
+  ATTESTATION_ENTROPY_DIVISOR* = 12
+
 template maxSize*(n: int) {.pragma.}

 type
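For concreteness (editorial note, not part of the commit): with the mainnet preset's SECONDS_PER_SLOT = 12, these divisors put the fallback attestation point at 4 s into the slot, which coincides with the previous "one third of the slot" point, and bound the per-slot jitter at one second. A minimal compile-time sketch of that arithmetic, assuming the mainnet slot time:

const
  SECONDS_PER_SLOT = 12                  # assumed mainnet preset value
  ATTESTATION_PRODUCTION_DIVISOR = 3
  ATTESTATION_ENTROPY_DIVISOR = 12

  productionOffsetMs = SECONDS_PER_SLOT * 1000 div ATTESTATION_PRODUCTION_DIVISOR
  entropyBoundMs = SECONDS_PER_SLOT * 1000 div ATTESTATION_ENTROPY_DIVISOR

static:
  doAssert productionOffsetMs == 4000    # fallback send point: 4 s after slot start
  doAssert entropyBoundMs == 1000        # jitter drawn from the open interval (-1 s, 1 s)

Because 12 div 3 is still 4 s, the average send time on mainnet stays where the old thirds-point rule put it; only the per-slot variation is new.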

View File

@@ -7,7 +7,7 @@

 import
   # Standard library
-  std/[os, tables, sequtils, osproc, streams],
+  std/[os, osproc, random, sequtils, streams, tables],

   # Nimble packages
   stew/[objects], stew/shims/macros,
@@ -23,7 +23,7 @@ import
   conf, time, validator_pool,
   attestation_pool, exit_pool, block_pools/[spec_cache, chain_dag, clearance],
   eth2_network, keystore_management, beacon_node_common, beacon_node_types,
-  nimbus_binary_common, eth1_monitor, version, ssz/merkleization,
+  eth1_monitor, version, ssz/merkleization,
   attestation_aggregation, sync_manager, sszdump,
   validator_slashing_protection
@@ -487,6 +487,28 @@ proc broadcastAggregatedAttestations(
       attestation = shortLog(signedAP.message.aggregate),
       validator = shortLog(curr[0].v)

+proc getSlotTimingEntropy(): int64 =
+  # Ensure SECONDS_PER_SLOT / ATTESTATION_PRODUCTION_DIVISOR >
+  # SECONDS_PER_SLOT / ATTESTATION_ENTROPY_DIVISOR, which will
+  # ensure that the second condition can't go negative.
+  static: doAssert ATTESTATION_ENTROPY_DIVISOR > ATTESTATION_PRODUCTION_DIVISOR
+
+  # For each `slot`, a validator must generate a uniform random variable
+  # `slot_timing_entropy` between `(-SECONDS_PER_SLOT /
+  # ATTESTATION_ENTROPY_DIVISOR, SECONDS_PER_SLOT /
+  # ATTESTATION_ENTROPY_DIVISOR)` with millisecond resolution and using local
+  # entropy.
+  #
+  # Per the issue discussion, "validators served by the same beacon node can
+  # have the same attestation production time, i.e., they can share the
+  # source of the entropy and the actual slot_timing_entropy value."
+  const
+    slot_timing_entropy_upper_bound =
+      SECONDS_PER_SLOT.int64 * 1000 div ATTESTATION_ENTROPY_DIVISOR
+    slot_timing_entropy_lower_bound = 0 - slot_timing_entropy_upper_bound
+  rand(range[(slot_timing_entropy_lower_bound + 1) ..
+    (slot_timing_entropy_upper_bound - 1)])
+
 proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
   ## Perform validator duties - create blocks, vote and aggregate existing votes
   if node.attachedValidators.count == 0:
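Editorial note, not part of the commit: the rand call above draws from the range type range[lower_bound + 1 .. upper_bound - 1], so the open interval from the comment becomes an inclusive millisecond range, [-999, +999] ms with a 12-second slot. A standalone sketch of the same draw written with an explicit slice, assuming std/random and the mainnet slot time:

import std/random

const entropyBoundMs = 12 * 1000 div 12   # assumed mainnet SECONDS_PER_SLOT = 12

randomize()                               # local, per-process entropy source
# Equivalent to rand(range[-999 .. 999]): both endpoints of the open
# (-1000 ms, 1000 ms) interval are excluded.
let slotTimingEntropy = rand(-(entropyBoundMs - 1) .. (entropyBoundMs - 1))
doAssert slotTimingEntropy >= -999 and slotTimingEntropy <= 999

As the quoted discussion allows, all validators attached to the same node can share a single draw per slot.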
@@ -526,6 +548,19 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =

   head = await handleProposal(node, head, slot)

+  # Fix timing attack: https://github.com/ethereum/eth2.0-specs/pull/2101
+  # A validator must create and broadcast the `attestation` to the associated
+  # attestation subnet when the earlier of the following two events occurs:
+  #
+  # - The validator has received a valid block from the expected block
+  #   proposer for the assigned `slot`. In this case, the validator must set a
+  #   timer for `abs(slot_timing_entropy)`. The end of this timer will be the
+  #   trigger for attestation production.
+  #
+  # - `SECONDS_PER_SLOT / ATTESTATION_PRODUCTION_DIVISOR +
+  #   slot_timing_entropy` seconds have elapsed since the start of the `slot`
+  #   (using the `slot_timing_entropy` generated for this slot).
+
   # We've been doing lots of work up until now which took time. Normally, we
   # send out attestations at the slot thirds-point, so we go back to the clock
   # to see how much time we need to wait.
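Editorial note, not part of the commit: a small self-contained model of the schedule described in the comment block above, with an illustrative helper name and timings and a 12-second slot assumed. It returns the attestation send time in milliseconds after slot start, given when a valid block arrived (negative meaning none was seen) and this slot's entropy:

# Hypothetical helper, not from the commit: models the two triggers above.
proc attestationSendOffsetMs(blockArrivalMs, slotTimingEntropyMs: int): int =
  const fallbackMs = 12 * 1000 div 3                 # SECONDS_PER_SLOT / ATTESTATION_PRODUCTION_DIVISOR
  let deadlineMs = fallbackMs + slotTimingEntropyMs  # second trigger, jittered per slot
  if blockArrivalMs >= 0 and blockArrivalMs < deadlineMs:
    # First trigger: a block was seen before the deadline - wait abs(entropy) after it.
    blockArrivalMs + abs(slotTimingEntropyMs)
  else:
    # Second trigger: no block in time - send at the jittered fallback point.
    deadlineMs

# With entropy of -400 ms: a block at 1.8 s means attesting at 2.2 s, while
# seeing no block means attesting at 4.0 s - 0.4 s = 3.6 s.
doAssert attestationSendOffsetMs(1800, -400) == 2200
doAssert attestationSendOffsetMs(-1, -400) == 3600

Since both the block arrival time and the per-slot jitter vary, attestations no longer appear at a fixed offset into the slot, which is what the commit title's timing-attack fix refers to.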
@@ -533,21 +568,28 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
   # the work for the whole slot using a monotonic clock instead, then deal
   # with any clock discrepancies once only, at the start of slot timer
   # processing..
+  let slotTimingEntropy = getSlotTimingEntropy()

-  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#attesting
-  # A validator should create and broadcast the attestation to the associated
-  # attestation subnet when either (a) the validator has received a valid
-  # block from the expected block proposer for the assigned slot or
-  # (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds
-  # after the start of slot) -- whichever comes first.

   template sleepToSlotOffsetWithHeadUpdate(extra: chronos.Duration, msg: static string) =
-    if await node.beaconClock.sleepToSlotOffset(extra, slot, msg):
+    let waitTime = node.beaconClock.fromNow(slot.toBeaconTime(extra))
+    if waitTime.inFuture:
+      discard await withTimeout(
+        node.processor[].blockReceivedDuringSlot, waitTime.offset)
+
+      # Might have gotten a valid beacon block this slot, which triggers the
+      # first case, in which we wait for another abs(slotTimingEntropy).
+      if node.processor[].blockReceivedDuringSlot.finished:
+        await sleepAsync(
+          milliseconds(max(slotTimingEntropy, 0 - slotTimingEntropy)))
+
       # Time passed - we might need to select a new head in that case
       node.processor[].updateHead(slot)
       head = node.chainDag.head

   sleepToSlotOffsetWithHeadUpdate(
-    seconds(int64(SECONDS_PER_SLOT)) div 3, "Waiting to send attestations")
+    milliseconds(SECONDS_PER_SLOT.int64 * 1000 div ATTESTATION_PRODUCTION_DIVISOR +
+      slotTimingEntropy),
+    "Waiting to send attestations")

   handleAttestations(node, head, slot)
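Editorial note, not part of the commit: a simplified, runnable model of the wait the rewritten template performs, using chronos and illustrative timings. It waits until either the block-arrival signal fires or the jittered fallback offset expires, then adds abs(entropy) only if a block did arrive, mirroring the two branches above:

import chronos

proc waitToAttest(blockReceived: Future[void],
                  fallbackOffset: Duration,
                  slotTimingEntropyMs: int) {.async.} =
  # Wake at the jittered fallback point, or as soon as the signal completes.
  discard await withTimeout(blockReceived, fallbackOffset)
  if blockReceived.finished:
    # A block arrived first: delay a further abs(entropy) before attesting.
    await sleepAsync(milliseconds(abs(slotTimingEntropyMs)))
  echo "send attestations now"

proc simulateBlockArrival(signal: Future[void]) {.async.} =
  await sleepAsync(milliseconds(1800))   # a valid block shows up 1.8 s in
  if not signal.finished:
    signal.complete()

let blockReceived = newFuture[void]("sketch.blockReceived")
let arrival = simulateBlockArrival(blockReceived)
# Fallback point at 3600 ms (4000 ms - 400 ms); the block at 1800 ms wins,
# so this returns roughly 1800 ms + 400 ms = 2200 ms after the start.
waitFor waitToAttest(blockReceived, milliseconds(3600), -400)
waitFor arrival

The real template additionally re-selects the head after the wait, as the hunk above shows.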