# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  extras, beacon_chain_db,
  stew/results,
  spec/[crypto, datatypes, digest]

import
  block_pools/[block_pools_types, clearance, candidate_chains, quarantine]

export results, block_pools_types

# Block_Pools
# --------------------------------------------
#
# Compatibility shims to minimize PR breakage
# during block_pool refactor

type
  BlockPools* = object
    # TODO: Rename BlockPools
    quarantine: Quarantine
    dag: CandidateChains

  BlockPool* = BlockPools

{.push raises: [Defect], inline.}

# Quarantine dispatch
# --------------------------------------------

func checkMissing*(pool: var BlockPool): seq[FetchRecord] {.noInit.} =
  checkMissing(pool.quarantine)

# CandidateChains
# --------------------------------------------

template tail*(pool: BlockPool): BlockRef =
  pool.dag.tail

template heads*(pool: BlockPool): seq[Head] =
  pool.dag.heads

template head*(pool: BlockPool): Head =
  pool.dag.head

template finalizedHead*(pool: BlockPool): BlockSlot =
  pool.dag.finalizedHead

proc add*(pool: var BlockPool, blockRoot: Eth2Digest,
          signedBlock: SignedBeaconBlock): Result[BlockRef, BlockError] {.gcsafe.} =
  add(pool.dag, pool.quarantine, blockRoot, signedBlock)
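
# Usage sketch (illustrative only; `root` and `signedBlock` are assumed to
# come from the caller, e.g. from gossip or sync):
#
#   let added = pool.add(root, signedBlock)
#   if added.isOk:
#     pool.updateHead(added.get())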

export parent        # func parent*(bs: BlockSlot): BlockSlot
export isAncestorOf  # func isAncestorOf*(a, b: BlockRef): bool
export getAncestorAt # func getAncestorAt*(blck: BlockRef, slot: Slot): BlockRef
export get_ancestor  # func get_ancestor*(blck: BlockRef, slot: Slot): BlockRef
export atSlot        # func atSlot*(blck: BlockRef, slot: Slot): BlockSlot

proc init*(T: type BlockPools, db: BeaconChainDB,
    updateFlags: UpdateFlags = {}): BlockPools =
  result.dag = init(CandidateChains, db, updateFlags)

export init # func init*(T: type BlockRef, root: Eth2Digest, blck: BeaconBlock): BlockRef

func getRef*(pool: BlockPool, root: Eth2Digest): BlockRef =
  ## Retrieve a resolved block reference, if available
  pool.dag.getRef(root)

func getBlockRange*(
    pool: BlockPool, startSlot: Slot, skipStep: Natural,
    output: var openArray[BlockRef]): Natural =
  ## This function populates an `output` buffer of blocks, covering slots
  ## ranging from `startSlot` up to, but not including,
  ## `startSlot + skipStep * output.len`, skipping any slots that don't have
  ## a block.
  ##
  ## Blocks will be written to `output` from the end without gaps, even if
  ## a block is missing in a particular slot. The return value shows how
  ## many slots were missing blocks - to iterate over the result, start
  ## at this index.
  ##
  ## If there were no blocks in the range, `output.len` will be returned.
  pool.dag.getBlockRange(startSlot, skipStep, output)
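
# Usage sketch (illustrative only; the buffer size, `startSlot` and the
# `process` handler are assumptions made for the example):
#
#   var blocks: array[64, BlockRef]
#   let firstIdx = pool.getBlockRange(startSlot, 1, blocks)
#   for i in firstIdx ..< blocks.len:
#     process(blocks[i])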

func getBlockBySlot*(pool: BlockPool, slot: Slot): BlockRef =
  ## Retrieves the first block in the current canonical chain
  ## with slot number less than or equal to `slot`.
  pool.dag.getBlockBySlot(slot)

func getBlockByPreciseSlot*(pool: BlockPool, slot: Slot): BlockRef =
  ## Retrieves a block from the canonical chain with a slot
  ## number equal to `slot`.
  pool.dag.getBlockByPreciseSlot(slot)

proc get*(pool: BlockPool, blck: BlockRef): BlockData =
  ## Retrieve the associated block body of a block reference
  pool.dag.get(blck)

proc get*(pool: BlockPool, root: Eth2Digest): Option[BlockData] =
  ## Retrieve a resolved block reference and its associated body, if available
  pool.dag.get(root)

func getOrResolve*(pool: var BlockPool, root: Eth2Digest): BlockRef =
  ## Fetch a block ref, or nil if not found (will be added to list of
  ## blocks-to-resolve)
  getOrResolve(pool.dag, pool.quarantine, root)

proc updateHead*(pool: BlockPool, newHead: BlockRef) =
  ## Update what we consider to be the current head, as given by the fork
  ## choice.
  ## The choice of head affects the choice of finalization point - the order
  ## of operations naturally becomes important here - after updating the head,
  ## blocks that were once considered potential candidates for a tree will
  ## now fall from grace, or no longer be considered resolved.
  updateHead(pool.dag, newHead)

proc latestJustifiedBlock*(pool: BlockPool): BlockSlot =
  ## Return the most recent block that is justified and at least as recent
  ## as the latest finalized block
  latestJustifiedBlock(pool.dag)

proc addMissing*(pool: var BlockPool, broot: Eth2Digest) {.inline.} =
  pool.quarantine.addMissing(broot)
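
# Missing-block flow sketch (illustrative only; `parentRoot` and the
# `requestBlock` network call are assumptions standing in for whatever the
# caller uses to fetch blocks by root):
#
#   pool.addMissing(parentRoot)        # noted when a block's parent is unknown
#   for fetch in pool.checkMissing():  # periodically drain the fetch queue
#     requestBlock(fetch.root)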

proc isInitialized*(T: type BlockPool, db: BeaconChainDB): bool =
  isInitialized(CandidateChains, db)

proc preInit*(
    T: type BlockPool, db: BeaconChainDB, state: BeaconState,
    signedBlock: SignedBeaconBlock) =
  preInit(CandidateChains, db, state, signedBlock)
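
# Startup flow sketch (illustrative only; `db`, `genesisState` and
# `genesisBlock` are assumed to be supplied by the caller):
#
#   if not BlockPool.isInitialized(db):
#     BlockPool.preInit(db, genesisState, genesisBlock)
#   var pool = BlockPool.init(db)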

proc getProposer*(pool: BlockPool, head: BlockRef, slot: Slot):
    Option[(ValidatorIndex, ValidatorPubKey)] =
  getProposer(pool.dag, head, slot)

# Rewinder / State transitions
# --------------------------------------------

template headState*(pool: BlockPool): StateData =
  pool.dag.headState

template tmpState*(pool: BlockPool): StateData =
  pool.dag.tmpState

template justifiedState*(pool: BlockPool): StateData =
  pool.dag.justifiedState

template withState*(
    pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped):
    untyped =
  ## Helper template that updates state to a particular BlockSlot - usage of
  ## cache is unsafe outside of block.
  ## TODO async transformations will lead to a race where cache gets updated
  ##      while waiting for future to complete - catch this here somehow?
  withState(pool.dag, cache, blockSlot, body)
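
# Usage sketch (illustrative only; `targetSlot` is an assumption, and the
# identifiers available inside the body are those injected by the underlying
# template in candidate_chains):
#
#   pool.withState(pool.tmpState, pool.head.blck.atSlot(targetSlot)):
#     discard # inspect the rewound/advanced state here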

template withEpochState*(
    pool: BlockPool, cache: var StateData, blockSlot: BlockSlot, body: untyped):
    untyped =
  ## Helper template that updates state to a state with an epoch matching the
  ## epoch of blockSlot. This aims to be at least as fast as withState, quick
  ## enough to expose to unauthenticated, remote use, but the trade-off is
  ## that it may decide that finding a state from a matching epoch would prove
  ## too expensive for such use cases.
  ##
  ## cache is unsafe outside of block.
  withEpochState(pool.dag, cache, blockSlot, body)

proc updateStateData*(pool: BlockPool, state: var StateData, bs: BlockSlot) =
  ## Rewind or advance state such that it matches the given block and slot -
  ## this may include replaying from an earlier snapshot if blck is on a
  ## different branch or has advanced to a higher slot number than slot.
  ## If slot is higher than blck.slot, replay will fill in with empty/non-block
  ## slots, else it is ignored.
  updateStateData(pool.dag, state, bs)

proc loadTailState*(pool: BlockPool): StateData =
  loadTailState(pool.dag)

proc isValidBeaconBlock*(
    pool: var BlockPool, signed_beacon_block: SignedBeaconBlock,
    current_slot: Slot, flags: UpdateFlags): Result[void, BlockError] =
  isValidBeaconBlock(
    pool.dag, pool.quarantine, signed_beacon_block, current_slot, flags)
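
# Gossip-validation sketch (illustrative only; `signedBlock` and `wallSlot`
# are assumed to come from the network layer, and `hash_tree_root` from the
# caller's ssz imports):
#
#   if pool.isValidBeaconBlock(signedBlock, wallSlot, {}).isOk:
#     discard pool.add(hash_tree_root(signedBlock.message), signedBlock)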