nimbus-eth2/beacon_chain/fork_choice.nim

250 lines
11 KiB
Nim
Raw Normal View History

2018-11-26 13:33:06 +00:00
import
  bitops, deques, options, tables,
  ./spec/[datatypes, crypto, digest, helpers, validator], extras,
  ./beacon_chain_db
2018-11-26 13:33:06 +00:00
type
  AttestationCandidate* = object
    ## A single validator's attestation as tracked by the fork choice.
    validator*: int          # index of the attesting validator
    data*: AttestationData   # what was attested to
    signature*: ValidatorSig # the validator's signature over `data`

  AttestationPool* = object
    ## Pool of outstanding (not yet included) attestations.
    # The Deque below stores all outstanding attestations per slot.
    # In each slot, we have an array of all attestations indexed by their
    # shard number. When we haven't received an attestation for a particular
    # shard yet, the Option value will be `none`
    attestations: Deque[array[SHARD_COUNT, Option[Attestation]]]
    startingSlot: uint64 # slot that attestations[0] corresponds to
2018-11-26 13:33:06 +00:00
2019-01-09 01:01:07 +00:00
# TODO:
# The compilicated Deque above is not needed.
#
# In fact, we can use a simple array with length SHARD_COUNT because
# in each epoch, each shard is going to receive attestations exactly once.
# Once the epoch is over, we can discard all attestations and start all
# over again (no need for `discardHistoryToSlot` too).
#
# Per Danny as of 2018-12-21:
# Yeah, you can do any linear combination of signatures. but you have to
# remember the linear combination of pubkeys that constructed
# if you have two instances of a signature from pubkey p, then you need 2*p
# in the group pubkey because the attestation bitfield is only 1 bit per
# pubkey right now, attestations do not support this it could be extended to
# support N overlaps up to N times per pubkey if we had N bits per validator
# instead of 1
# We are shying away from this for the time being. If there end up being
# substantial difficulties in network layer aggregation, then adding bits to
# aid in supporting overlaps is one potential solution
2019-01-09 01:01:07 +00:00
proc init*(T: type AttestationPool, startingSlot: uint64): T =
  ## Create an empty attestation pool whose first tracked slot is
  ## `startingSlot`.
  T(
    attestations: initDeque[array[SHARD_COUNT, Option[Attestation]]](),
    startingSlot: startingSlot)
2018-11-29 01:08:34 +00:00
proc setLen*[T](d: var Deque[T], len: int) =
  ## Resize `d` to exactly `len` elements: grow by appending
  ## default-initialized values, shrink by dropping elements from the back.
  # TODO: The upstream `Deque` type should gain a proper resize API
  let delta = len - d.len
  if delta > 0:
    for i in 0 ..< delta:
      var defaultVal: T
      d.addLast(defaultVal)
  elif delta < 0:
    # BUG FIX: `shrink` expects a non-negative count; the original passed the
    # negative `delta` itself, which made shrinking a silent no-op.
    d.shrink(fromLast = -delta)
proc combine*(tgt: var Attestation, src: Attestation, flags: UpdateFlags) =
  ## Merge `src` into `tgt`: OR together the participation bitfields and,
  ## unless `skipValidation` is in `flags`, aggregate the signatures.
  # Aggregation only makes sense when both sides signed the same data.
  # TODO similar code in work_pool, clean up
  assert tgt.data == src.data

  # TODO:
  # when BLS signatures are combined, we must ensure that
  # the same participant key is not included on both sides
  for idx, b in tgt.aggregation_bitfield.mpairs:
    b = b or src.aggregation_bitfield[idx]

  if skipValidation notin flags:
    tgt.aggregate_signature.combine(src.aggregate_signature)
2018-11-26 13:33:06 +00:00
proc add*(pool: var AttestationPool,
          attestation: Attestation,
          beaconState: BeaconState) =
  ## Insert `attestation` into the pool, aggregating it with any attestation
  ## already stored for the same slot and shard.
  # The caller of this function is responsible for ensuring that
  # the attestations will be given in a strictly slot increasing order:
  doAssert attestation.data.slot >= pool.startingSlot

  # TODO:
  # Validate that the attestation is authentic (it's properly signed)
  # and make sure that the validator is supposed to make an attestation
  # for the specific shard/slot

  let poolIdx = int(attestation.data.slot - pool.startingSlot)

  # Grow the per-slot storage on demand so the index is always valid.
  if poolIdx >= pool.attestations.len:
    pool.attestations.setLen(poolIdx + 1)

  let shardIdx = attestation.data.shard
  if pool.attestations[poolIdx][shardIdx].isNone:
    pool.attestations[poolIdx][shardIdx] = some(attestation)
  else:
    combine(pool.attestations[poolIdx][shardIdx].get, attestation, {})
proc getAttestationsForBlock*(pool: AttestationPool,
                              lastState: BeaconState,
                              newBlockSlot: uint64): seq[Attestation] =
  ## Gather the pooled attestations that may be included in a block proposed
  ## at `newBlockSlot`, given chain state `lastState`.
  # Nothing is includable before the minimum inclusion delay has elapsed,
  # and an empty pool trivially yields nothing.
  if newBlockSlot < MIN_ATTESTATION_INCLUSION_DELAY or pool.attestations.len == 0:
    return

  doAssert newBlockSlot > lastState.slot

  var
    firstSlot = 0.uint64
    # Only attestations at least MIN_ATTESTATION_INCLUSION_DELAY slots old
    # can go into the new block.
    lastSlot = newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY

  # NOTE(review): presumably this advances the lower bound past slots that are
  # already too old relative to the current state — confirm the intended
  # window; also note `firstSlot` may remain below `pool.startingSlot`, making
  # the uint64 subtraction below wrap before the int conversion — verify.
  if pool.startingSlot + MIN_ATTESTATION_INCLUSION_DELAY <= lastState.slot:
    firstSlot = lastState.slot - MIN_ATTESTATION_INCLUSION_DELAY

  for slot in firstSlot .. lastSlot:
    let slotDequeIdx = int(slot - pool.startingSlot)
    # Past the end of the pool: no attestations for later slots exist yet.
    if slotDequeIdx >= pool.attestations.len: return
    let shardAndComittees = get_crosslink_committees_at_slot(lastState, slot)
    for s in shardAndComittees:
      if pool.attestations[slotDequeIdx][s.shard].isSome:
        result.add pool.attestations[slotDequeIdx][s.shard].get
2018-11-26 13:33:06 +00:00
proc discardHistoryToSlot*(pool: var AttestationPool, slot: uint64) =
  ## Forget pooled attestations up to and including `slot`, keeping the
  ## trailing MIN_ATTESTATION_INCLUSION_DELAY slots of history around.
  ## The index is treated inclusively
  # BUG FIX: guard the uint64 subtraction — for `slot` smaller than the delay
  # it used to wrap around, bypassing the `< startingSlot` check and producing
  # a bogus (huge) drop index.
  if slot < MIN_ATTESTATION_INCLUSION_DELAY:
    return
  let keepFrom = slot - MIN_ATTESTATION_INCLUSION_DELAY
  if keepFrom < pool.startingSlot:
    return
  # Clamp so we never ask the deque to drop more entries than it holds.
  let dropCount = min(int(keepFrom - pool.startingSlot) + 1, pool.attestations.len)
  pool.attestations.shrink(fromFirst = dropCount)
  # NOTE(review): `startingSlot` is not advanced here even though
  # attestations[0] now corresponds to a later slot — this looks like it
  # desynchronizes pool indexing; confirm against callers before changing.
2018-11-26 13:33:06 +00:00
func getAttestationCandidate*(attestation: Attestation): AttestationCandidate =
  ## Project an `Attestation` onto an `AttestationCandidate`.
  # TODO: not complete AttestationCandidate object
  AttestationCandidate(
    data: attestation.data,
    signature: attestation.aggregate_signature)
2019-01-08 14:41:47 +00:00
# ##################################################################
# Specs
2019-01-08 16:07:37 +00:00
#
# The beacon chain fork choice rule is a hybrid that combines justification and finality with Latest Message Driven (LMD) Greediest Heaviest Observed SubTree (GHOST). At any point in time a [validator](#dfn-validator) `v` subjectively calculates the beacon chain head as follows.
#
# * Let `store` be the set of attestations and blocks
# that the validator `v` has observed and verified
# (in particular, block ancestors must be recursively verified).
# Attestations not part of any chain are still included in `store`.
# * Let `finalized_head` be the finalized block with the highest slot number.
# (A block `B` is finalized if there is a descendant of `B` in `store`
# the processing of which sets `B` as finalized.)
# * Let `justified_head` be the descendant of `finalized_head`
# with the highest slot number that has been justified
# for at least `EPOCH_LENGTH` slots.
# (A block `B` is justified if there is a descendant of `B` in `store`
# the processing of which sets `B` as justified.)
# If no such descendant exists set `justified_head` to `finalized_head`.
# * Let `get_ancestor(store, block, slot)` be the ancestor of `block` with slot number `slot`.
# The `get_ancestor` function can be defined recursively
#
# def get_ancestor(store, block, slot):
# return block if block.slot == slot
# else get_ancestor(store, store.get_parent(block), slot)`.
#
# * Let `get_latest_attestation(store, validator)`
# be the attestation with the highest slot number in `store` from `validator`.
# If several such attestations exist,
# use the one the validator `v` observed first.
# * Let `get_latest_attestation_target(store, validator)`
# be the target block in the attestation `get_latest_attestation(store, validator)`.
# * The head is `lmd_ghost(store, justified_head)`. (See specs)
#
# Departing from specs:
# - We use a simple fork choice rule without finalized and justified head
# - We don't implement "get_latest_attestation(store, validator) -> Attestation"
# nor get_latest_attestation_target
2019-01-08 17:28:21 +00:00
# - We use block hashes (Eth2Digest) instead of raw blocks where possible
2019-01-14 12:19:44 +00:00
proc get_parent(db: BeaconChainDB, blck: Eth2Digest): Eth2Digest =
  ## Look up `blck` in the database and return its parent's block root.
  let blockData = db.getBlock(blck)
  blockData.parent_root
2019-01-08 17:28:21 +00:00
2019-01-14 12:19:44 +00:00
proc get_ancestor(store: BeaconChainDB, blck: Eth2Digest, slot: uint64): Eth2Digest =
  ## Find the ancestor of `blck` with slot number `slot` by walking the
  ## parent links recorded in the database.
  # Iterative rewrite of the spec's recursive `get_ancestor`; the original
  # carried a "TODO: Eliminate recursion" which this addresses. Same behavior:
  # if `slot` is never reached, the walk does not terminate on its own.
  # TODO: what if the slot was never observed/verified?
  var cur = blck
  while true:
    let blk = store.getBlock(cur)
    if blk.slot == slot:
      return cur
    cur = blk.parent_root
2019-01-08 16:07:37 +00:00
func getVoteCount(aggregation_bitfield: openarray[byte]): int =
  ## Get the number of votes, i.e. the number of set bits across the whole
  ## participation bitfield.
  # TODO: A bitfield type that tracks that information
  # https://github.com/status-im/nim-beacon-chain/issues/19
  # The total population count is independent of per-byte bit ordering, so a
  # stdlib popcount per byte replaces the original per-bit
  # `get_bitfield_bit` loop (8 calls per byte).
  for b in aggregation_bitfield:
    result += countSetBits(b)
2019-01-08 16:07:37 +00:00
func getAttestationVoteCount(pool: AttestationPool, current_slot: uint64): CountTable[Eth2Digest] =
  ## Returns all blocks more recent that the current slot
  ## that were attested and their vote count
  # This replaces:
  # - get_latest_attestation,
  # - get_latest_attestation_targets
  # that are used in lmd_ghost for
  # ```
  # attestation_targets = [get_latest_attestation_target(store, validator)
  #                        for validator in active_validators]
  # ```
  # Note that attestation_targets in the Eth2 specs can have duplicates
  # while the following implementation will count such blockhash multiple times instead.
  result = initCountTable[Eth2Digest]()

  # NOTE(review): if `current_slot < pool.startingSlot`, the uint64
  # subtraction below wraps around and the loop bounds become meaningless —
  # confirm callers guarantee current_slot >= startingSlot.
  for slot in current_slot - pool.startingSlot ..< pool.attestations.len.uint64:
    for attestation in pool.attestations[slot]:
      if attestation.isSome:
        # Increase the block attestation counts by the number of validators aggregated
        let voteCount = attestation.get.aggregation_bitfield.getVoteCount()
        result.inc(attestation.get.data.beacon_block_root, voteCount)
2019-01-14 12:19:44 +00:00
proc lmdGhost*(
      store: BeaconChainDB,
      pool: AttestationPool,
      state: BeaconState,
      blocksChildren: Table[Eth2Digest, seq[Eth2Digest]]): BeaconBlock =
  ## Walk the block tree from the state's most recent block root, at each
  ## level descending into the child subtree with the most attestation votes,
  ## until a leaf (the head) is reached.
  # Recompute the new head of the beacon chain according to
  # LMD GHOST (Latest Message Driven - Greediest Heaviest Observed SubTree)

  # Raw vote count from all attestations
  let rawVoteCount = pool.getAttestationVoteCount(state.slot)

  # The real vote count for a block also takes into account votes for its children

  # TODO: a Fenwick Tree datastructure to keep track of cumulated votes
  #       in O(log N) complexity
  #       https://en.wikipedia.org/wiki/Fenwick_tree
  #       Nim implementation for cumulative frequencies at
  #       https://github.com/numforge/laser/blob/990e59fffe50779cdef33aa0b8f22da19e1eb328/benchmarks/random_sampling/fenwicktree.nim

  var head = state.latest_block_roots[state.slot mod LATEST_BLOCK_ROOTS_LENGTH]
  var childVotes = initCountTable[Eth2Digest]()

  while true: # TODO use a O(log N) implementation instead of O(N^2)
    let children = blocksChildren[head]
    if children.len == 0:
      # No children: `head` is a leaf and therefore the chain head.
      return store.getBlock(head)

    # For now we assume that all children are direct descendant of the current head
    let next_slot = store.getBlock(head).slot + 1
    for child in children:
      doAssert store.getBlock(child).slot == next_slot

    # Credit each attested target's votes to its ancestor at `next_slot`,
    # i.e. to the child subtree the target belongs to.
    childVotes.clear()
    for target, votes in rawVoteCount.pairs:
      if store.getBlock(target).slot >= next_slot:
        childVotes.inc(store.get_ancestor(target, next_slot), votes)

    # Greedily descend into the heaviest subtree.
    head = childVotes.largest().key