Mirror of https://github.com/status-im/nimbus-eth2.git (synced 2025-02-25)
add Altair support to block quarantine/clearance and block_sim (#2662)
* add Altair support to the block quarantine
* switch some spec/datatypes imports to spec/datatypes/base
* add Altair support to block_clearance
* allow runtime configuration of Altair transition slot
* enable Altair in block_sim, including in CI
Parent: 8dc4db51c0
Commit: ae1abf24af
Makefile (4 changed lines)
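The central mechanism in this commit is making the Altair fork slot a runtime parameter: ChainDAGRef.init gains an altairTransitionSlot argument (defaulting to FAR_FUTURE_SLOT, i.e. never fork), which the DAG then passes on to process_slots and state_transition. A minimal usage sketch, not part of the diff, based on the ChainDAGRef.init and block_sim hunks below; the slot value 96 is an arbitrary example and the relative import paths assume the caller sits next to block_sim:

  import
    ../beacon_chain/spec/datatypes/base,
    ../beacon_chain/spec/presets,
    ../beacon_chain/beacon_chain_db,
    ../beacon_chain/consensus_object_pools/blockchain_dag

  proc initAltairCapableDag(preset: RuntimePreset, db: BeaconChainDB): ChainDAGRef =
    # Upgrade from phase 0 to Altair at slot 96; omitting the argument keeps
    # the default FAR_FUTURE_SLOT, i.e. the chain stays on phase 0 forever.
    ChainDAGRef.init(preset, db, {}, altairTransitionSlot = 96.Slot)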
@@ -283,8 +283,8 @@ endif
rm -rf 0000-*.json t_slashprot_migration.* *.log block_sim_db
for TEST_BINARY in $(TEST_BINARIES); do \
PARAMS=""; \
if [[ "$${TEST_BINARY}" == "state_sim" ]]; then PARAMS="--validators=6000 --slots=128"; \
elif [[ "$${TEST_BINARY}" == "block_sim" ]]; then PARAMS="--validators=6000 --slots=128"; \
if [[ "$${TEST_BINARY}" == "state_sim" ]]; then PARAMS="--validators=8000 --slots=160"; \
elif [[ "$${TEST_BINARY}" == "block_sim" ]]; then PARAMS="--validators=8000 --slots=160"; \
fi; \
echo -e "\nRunning $${TEST_BINARY} $${PARAMS}\n"; \
build/$${TEST_BINARY} $${PARAMS} || { echo -e "\n$${TEST_BINARY} $${PARAMS} failed; Aborting."; exit 1; }; \
@@ -86,17 +86,60 @@ func batchVerify(quarantine: QuarantineRef, sigs: openArray[SignatureSet]): bool

proc addRawBlock*(
dag: ChainDAGRef, quarantine: QuarantineRef,
signedBlock: phase0.SignedBeaconBlock, onBlockAdded: OnBlockAdded
signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock,
onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded
): Result[BlockRef, (ValidationResult, BlockError)] {.gcsafe.}

# Now that we have the new block, we should see if any of the previously
# unresolved blocks magically become resolved
# TODO This code is convoluted because when there are more than ~1.5k
# blocks being synced, there's a stack overflow as `add` gets called
# for the whole chain of blocks. Instead we use this ugly field in `dag`
# which could be avoided by refactoring the code
# TODO unit test the logic, in particular interaction with fork choice block parents
proc resolveQuarantinedBlocks(
dag: ChainDAGRef, quarantine: QuarantineRef,
onBlockAdded: OnPhase0BlockAdded) =
if not quarantine.inAdd:
quarantine.inAdd = true
defer: quarantine.inAdd = false
var entries = 0
while entries != quarantine.orphansPhase0.len:
entries = quarantine.orphansPhase0.len # keep going while quarantine is shrinking
var resolved: seq[phase0.SignedBeaconBlock]
for _, v in quarantine.orphansPhase0:
if v.message.parent_root in dag:
resolved.add(v)

for v in resolved:
discard addRawBlock(dag, quarantine, v, onBlockAdded)

proc resolveQuarantinedBlocks(
dag: ChainDAGRef, quarantine: QuarantineRef,
onBlockAdded: OnAltairBlockAdded) =
if not quarantine.inAdd:
quarantine.inAdd = true
defer: quarantine.inAdd = false
var entries = 0
while entries != quarantine.orphansAltair.len:
entries = quarantine.orphansAltair.len # keep going while quarantine is shrinking
var resolved: seq[altair.SignedBeaconBlock]
for _, v in quarantine.orphansAltair:
if v.message.parent_root in dag:
resolved.add(v)

for v in resolved:
discard addRawBlock(dag, quarantine, v, onBlockAdded)

proc addResolvedBlock(
dag: ChainDAGRef, quarantine: QuarantineRef,
state: var StateData, trustedBlock: phase0.TrustedSignedBeaconBlock,
state: var StateData,
trustedBlock: phase0.TrustedSignedBeaconBlock | altair.TrustedSignedBeaconBlock,
parent: BlockRef, cache: var StateCache,
onBlockAdded: OnBlockAdded, stateDataDur, sigVerifyDur,
onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded,
stateDataDur, sigVerifyDur,
stateVerifyDur: Duration
) =
# TODO move quarantine processing out of here
doAssert getStateField(state.data, slot) == trustedBlock.message.slot,
"state must match block"
doAssert state.blck.root == trustedBlock.message.parent_root,

@@ -112,7 +155,9 @@ proc addResolvedBlock(
dag.blocks.incl(KeyedBlockRef.init(blockRef))

# Resolved blocks should be stored in database
dag.putBlock(trustedBlock)
when not (trustedBlock is altair.TrustedSignedBeaconBlock):
# TODO implement this for altair
dag.putBlock(trustedBlock)
let putBlockTick = Moment.now()

var foundHead: bool

@@ -153,34 +198,16 @@ proc addResolvedBlock(
if onBlockAdded != nil:
onBlockAdded(blockRef, trustedBlock, epochRef)

# Now that we have the new block, we should see if any of the previously
# unresolved blocks magically become resolved
# TODO This code is convoluted because when there are more than ~1.5k
# blocks being synced, there's a stack overflow as `add` gets called
# for the whole chain of blocks. Instead we use this ugly field in `dag`
# which could be avoided by refactoring the code
# TODO unit test the logic, in particular interaction with fork choice block parents
if not quarantine.inAdd:
quarantine.inAdd = true
defer: quarantine.inAdd = false
var entries = 0
while entries != quarantine.orphans.len:
entries = quarantine.orphans.len # keep going while quarantine is shrinking
var resolved: seq[phase0.SignedBeaconBlock]
for _, v in quarantine.orphans:
if v.message.parent_root in dag:
resolved.add(v)

for v in resolved:
discard addRawBlock(dag, quarantine, v, onBlockAdded)
resolveQuarantinedBlocks(dag, quarantine, onBlockAdded)

# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
# copy of phase0.SomeSignedBeaconBlock from datatypes/phase0.nim
type SomeSignedPhase0Block =
type SomeSignedBlock =
phase0.SignedBeaconBlock | phase0.SigVerifiedSignedBeaconBlock |
phase0.TrustedSignedBeaconBlock
phase0.TrustedSignedBeaconBlock |
altair.SignedBeaconBlock | altair.SigVerifiedSignedBeaconBlock |
altair.TrustedSignedBeaconBlock
proc checkStateTransition(
dag: ChainDAGRef, signedBlock: SomeSignedPhase0Block,
dag: ChainDAGRef, signedBlock: SomeSignedBlock,
cache: var StateCache): (ValidationResult, BlockError) =
## Ensure block can be applied on a state
func restore(v: var ForkedHashedBeaconState) =

@@ -194,12 +221,9 @@ proc checkStateTransition(
blck = shortLog(signedBlock.message)
blockRoot = shortLog(signedBlock.root)

# TODO this won't transition because FAR_FUTURE_SLOT, so it's
# fine, for now, but in general, blockchain_dag.addBlock must
# match the transition here.
if not state_transition_block(
dag.runtimePreset, dag.clearanceState.data, signedBlock,
cache, dag.updateFlags, restore, FAR_FUTURE_SLOT):
cache, dag.updateFlags, restore, dag.altairTransitionSlot):
info "Invalid block"

return (ValidationResult.Reject, Invalid)

@@ -225,9 +249,9 @@ proc advanceClearanceState*(dag: ChainDagRef) =

proc addRawBlockKnownParent(
dag: ChainDAGRef, quarantine: QuarantineRef,
signedBlock: phase0.SignedBeaconBlock,
signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock,
parent: BlockRef,
onBlockAdded: OnBlockAdded
onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded
): Result[BlockRef, (ValidationResult, BlockError)] =
## Add a block whose parent is known, after performing validity checks

@@ -303,7 +327,7 @@ proc addRawBlockKnownParent(
proc addRawBlockUnresolved(
dag: ChainDAGRef,
quarantine: QuarantineRef,
signedBlock: phase0.SignedBeaconBlock
signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock,
): Result[BlockRef, (ValidationResult, BlockError)] =
## addRawBlock - Block is unresolved / has no parent

@@ -319,7 +343,8 @@ proc addRawBlockUnresolved(
if signedBlock.message.parent_root in quarantine.missing or
containsOrphan(quarantine, signedBlock):
debug "Unresolved block (parent missing or orphaned)",
orphans = quarantine.orphans.len,
orphansPhase0 = quarantine.orphansPhase0.len,
orphansAltair = quarantine.orphansAltair.len,
missing = quarantine.missing.len

return err((ValidationResult.Ignore, MissingParent))

@@ -334,15 +359,16 @@ proc addRawBlockUnresolved(
# a risk of being slashed, making attestations a more valuable spam
# filter.
debug "Unresolved block (parent missing)",
orphans = quarantine.orphans.len,
orphansPhase0 = quarantine.orphansPhase0.len,
orphansAltair = quarantine.orphansAltair.len,
missing = quarantine.missing.len

return err((ValidationResult.Ignore, MissingParent))

proc addRawBlock(
dag: ChainDAGRef, quarantine: QuarantineRef,
signedBlock: phase0.SignedBeaconBlock,
onBlockAdded: OnBlockAdded
signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock,
onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded
): Result[BlockRef, (ValidationResult, BlockError)] =
## Try adding a block to the chain, verifying first that it passes the state
## transition function and contains correct cryptographic signature.
@@ -11,11 +11,11 @@ import
# Standard library
std/[sets, tables, hashes],
# Status libraries
stew/[endians2], chronicles,
stew/endians2, chronicles,
eth/keys,
# Internals
../spec/[
datatypes, crypto, digest, signatures_batch, forkedbeaconstate_helpers],
../spec/[crypto, digest, signatures_batch, forkedbeaconstate_helpers],
../spec/datatypes/[phase0, altair],
../beacon_chain_db, ../extras

export sets, tables

@@ -57,10 +57,15 @@ type
##
## Invalid blocks are dropped immediately.

orphans*: Table[(Eth2Digest, ValidatorSig), SignedBeaconBlock] ##\
## Blocks that have passed validation but that we lack a link back to tail
## for - when we receive a "missing link", we can use this data to build
## an entire branch
orphansPhase0*: Table[(Eth2Digest, ValidatorSig), phase0.SignedBeaconBlock] ##\
## Phase 0 Blocks that have passed validation but that we lack a link back
## to tail for - when we receive a "missing link", we can use this data to
## build an entire branch

orphansAltair*: Table[(Eth2Digest, ValidatorSig), altair.SignedBeaconBlock] ##\
## Altair Blocks that have passed validation, but that we lack a link back
## to tail for - when we receive a "missing link", we can use this data to
## build an entire branch

missing*: Table[Eth2Digest, MissingBlock] ##\
## Roots of blocks that we would like to have (either parent_root of

@@ -160,6 +165,9 @@ type
## block - we limit the number of held EpochRefs to put a cap on
## memory usage

altairTransitionSlot*: Slot ##\
## Slot at which to upgrade from phase 0 to Altair forks

EpochKey* = object
## The epoch key fully determines the shuffling for proposers and
## committees in a beacon state - the epoch level information in the state

@@ -199,7 +207,7 @@ type
BlockData* = object
## Body and graph in one

data*: TrustedSignedBeaconBlock # We trust all blocks we have a ref for
data*: phase0.TrustedSignedBeaconBlock # We trust all blocks we have a ref for
refs*: BlockRef

StateData* = object

@@ -218,8 +226,14 @@ type
## Slot time for this BlockSlot which may differ from blck.slot when time
## has advanced without blocks

OnBlockAdded* = proc(
blckRef: BlockRef, blck: TrustedSignedBeaconBlock,
OnPhase0BlockAdded* = proc(
blckRef: BlockRef,
blck: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef) {.gcsafe, raises: [Defect].}

OnAltairBlockAdded* = proc(
blckRef: BlockRef,
blck: altair.TrustedSignedBeaconBlock,
epochRef: EpochRef) {.gcsafe, raises: [Defect].}

template head*(dag: ChainDagRef): BlockRef = dag.headState.blck
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -12,7 +12,8 @@ import
chronicles,
stew/bitops2,
eth/keys,
../spec/[crypto, datatypes, digest],
../spec/[crypto, digest],
../spec/datatypes/[phase0, altair],
./block_pools_types

export options, block_pools_types

@@ -55,21 +56,42 @@ template anyIt(s, pred: untyped): bool =
result

func containsOrphan*(
quarantine: QuarantineRef, signedBlock: SignedBeaconBlock): bool =
(signedBlock.root, signedBlock.signature) in quarantine.orphans
quarantine: QuarantineRef, signedBlock: phase0.SignedBeaconBlock): bool =
(signedBlock.root, signedBlock.signature) in quarantine.orphansPhase0

func containsOrphan*(
quarantine: QuarantineRef, signedBlock: altair.SignedBeaconBlock): bool =
(signedBlock.root, signedBlock.signature) in quarantine.orphansAltair

func addMissing*(quarantine: QuarantineRef, root: Eth2Digest) =
## Schedule the download of the given block
# Can only request by root, not by signature, so partial match suffices
if not anyIt(quarantine.orphans.keys, it[0] == root):
if (not anyIt(quarantine.orphansPhase0.keys, it[0] == root)) and
(not anyIt(quarantine.orphansAltair.keys, it[0] == root)):
# If the block is in orphans, we no longer need it
discard quarantine.missing.hasKeyOrPut(root, MissingBlock())

# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
# copy of phase0.SomeSignedBeaconBlock from datatypes/phase0.nim
type SomeSignedPhase0Block =
phase0.SignedBeaconBlock | phase0.SigVerifiedSignedBeaconBlock |
phase0.TrustedSignedBeaconBlock
func removeOrphan*(
quarantine: QuarantineRef, signedBlock: SignedBeaconBlock) =
quarantine.orphans.del((signedBlock.root, signedBlock.signature))
quarantine: QuarantineRef, signedBlock: SomeSignedPhase0Block) =
quarantine.orphansPhase0.del((signedBlock.root, signedBlock.signature))

func isViableOrphan(dag: ChainDAGRef, signedBlock: SignedBeaconBlock): bool =
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
# copy of altair.SomeSignedBeaconBlock from datatypes/altair.nim
type SomeSignedAltairBlock =
altair.SignedBeaconBlock | altair.SigVerifiedSignedBeaconBlock |
altair.TrustedSignedBeaconBlock
func removeOrphan*(
quarantine: QuarantineRef, signedBlock: SomeSignedAltairBlock) =
quarantine.orphansAltair.del((signedBlock.root, signedBlock.signature))

func isViableOrphan(
dag: ChainDAGRef,
signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock): bool =
# The orphan must be newer than the finalization point so that its parent
# either is the finalized block or more recent
signedBlock.message.slot > dag.finalizedHead.slot

@@ -77,39 +99,43 @@ func isViableOrphan(dag: ChainDAGRef, signedBlock: SignedBeaconBlock): bool =
func removeOldBlocks(quarantine: QuarantineRef, dag: ChainDAGRef) =
var oldBlocks: seq[(Eth2Digest, ValidatorSig)]

for k, v in quarantine.orphans.pairs():
if not isViableOrphan(dag, v):
oldBlocks.add k
template removeNonviableOrphans(orphans: untyped) =
for k, v in orphans.pairs():
if not isViableOrphan(dag, v):
oldBlocks.add k

for k in oldBlocks:
quarantine.orphans.del k
for k in oldBlocks:
orphans.del k

removeNonviableOrphans(quarantine.orphansPhase0)
removeNonviableOrphans(quarantine.orphansAltair)

func clearQuarantine*(quarantine: QuarantineRef) =
quarantine.orphans.clear()
quarantine.orphansPhase0.clear()
quarantine.orphansAltair.clear()
quarantine.missing.clear()

# Typically, blocks will arrive in mostly topological order, with some
# out-of-order block pairs. Therefore, it is unhelpful to use either a
# FIFO or LIFO discipline, and since by definition each block gets used
# either 0 or 1 times it's not a cache either. Instead, stop accepting
# new blocks, and rely on syncing to cache up again if necessary. When
# using forward sync, blocks only arrive in an order not requiring the
# quarantine.
#
# For typical use cases, this need not be large, as they're two or three
# blocks arriving out of order due to variable network delays. As blocks
# for future slots are rejected before reaching quarantine, this usually
# will be a block for the last couple of slots for which the parent is a
# likely imminent arrival.

# Since we start forward sync when about one epoch is missing, that's as
# good a number as any.
const MAX_QUARANTINE_ORPHANS = SLOTS_PER_EPOCH

func add*(quarantine: QuarantineRef, dag: ChainDAGRef,
signedBlock: SignedBeaconBlock): bool =
signedBlock: phase0.SignedBeaconBlock): bool =
## Adds block to quarantine's `orphans` and `missing` lists.

# Typically, blocks will arrive in mostly topological order, with some
# out-of-order block pairs. Therefore, it is unhelpful to use either a
# FIFO or LIFO discipline, and since by definition each block gets used
# either 0 or 1 times it's not a cache either. Instead, stop accepting
# new blocks, and rely on syncing to cache up again if necessary. When
# using forward sync, blocks only arrive in an order not requiring the
# quarantine.
#
# For typical use cases, this need not be large, as they're two or three
# blocks arriving out of order due to variable network delays. As blocks
# for future slots are rejected before reaching quarantine, this usually
# will be a block for the last couple of slots for which the parent is a
# likely imminent arrival.

# Since we start forward sync when about one epoch is missing, that's as
# good a number as any.
const MAX_QUARANTINE_ORPHANS = SLOTS_PER_EPOCH

if not isViableOrphan(dag, signedBlock):
return false

@@ -119,10 +145,32 @@ func add*(quarantine: QuarantineRef, dag: ChainDAGRef,
# downloading or we'll never get to the bottom of things
quarantine.addMissing(signedBlock.message.parent_root)

if quarantine.orphans.lenu64 >= MAX_QUARANTINE_ORPHANS:
if quarantine.orphansPhase0.lenu64 >= MAX_QUARANTINE_ORPHANS:
return false

quarantine.orphans[(signedBlock.root, signedBlock.signature)] = signedBlock
quarantine.orphansPhase0[(signedBlock.root, signedBlock.signature)] =
signedBlock
quarantine.missing.del(signedBlock.root)

true

func add*(quarantine: QuarantineRef, dag: ChainDAGRef,
signedBlock: altair.SignedBeaconBlock): bool =
## Adds block to quarantine's `orphans` and `missing` lists.
if not isViableOrphan(dag, signedBlock):
return false

quarantine.removeOldBlocks(dag)

# Even if the quarantine is full, we need to schedule its parent for
# downloading or we'll never get to the bottom of things
quarantine.addMissing(signedBlock.message.parent_root)

if quarantine.orphansAltair.lenu64 >= MAX_QUARANTINE_ORPHANS:
return false

quarantine.orphansAltair[(signedBlock.root, signedBlock.signature)] =
signedBlock
quarantine.missing.del(signedBlock.root)

true
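With phase 0 and Altair orphans now held in separate tables, add, containsOrphan and removeOrphan are resolved per block type by overloading rather than by a runtime tag. A small usage sketch, not part of the diff; the import paths assume the sketch sits next to block_quarantine, and the identifiers match the hunks above:

  import
    ../spec/datatypes/[phase0, altair],
    ./block_quarantine

  proc quarantineBoth(quarantine: QuarantineRef, dag: ChainDAGRef,
                      p0Block: phase0.SignedBeaconBlock,
                      altairBlock: altair.SignedBeaconBlock) =
    # Overload resolution picks the table from the static block type:
    # the first call fills orphansPhase0, the second orphansAltair.
    discard quarantine.add(dag, p0Block)
    discard quarantine.add(dag, altairBlock)
    # The missing table is shared: parent roots are tracked regardless of fork.
    quarantine.addMissing(p0Block.message.parent_root)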
@@ -319,7 +319,8 @@ func isStateCheckpoint(bs: BlockSlot): bool =
proc init*(T: type ChainDAGRef,
preset: RuntimePreset,
db: BeaconChainDB,
updateFlags: UpdateFlags = {}): ChainDAGRef =
updateFlags: UpdateFlags = {},
altairTransitionSlot: Slot = FAR_FUTURE_SLOT): ChainDAGRef =
# TODO we require that the db contains both a head and a tail block -
# asserting here doesn't seem like the right way to go about it however..

@@ -417,6 +418,7 @@ proc init*(T: type ChainDAGRef,
# allow skipping some validation.
updateFlags: {verifyFinalization} * updateFlags,
runtimePreset: preset,
altairTransitionSlot: altairTransitionSlot
)

doAssert dag.updateFlags in [{}, {verifyFinalization}]

@@ -557,7 +559,9 @@ proc putState(dag: ChainDAGRef, state: var StateData) =
# Ideally we would save the state and the root lookup cache in a single
# transaction to prevent database inconsistencies, but the state loading code
# is resilient against one or the other going missing
dag.db.putState(getStateRoot(state.data), state.data.hbsPhase0.data)
if state.data.beaconStateFork != forkAltair:
# TODO re-enable for Altair
dag.db.putState(getStateRoot(state.data), state.data.hbsPhase0.data)

dag.db.putStateRoot(
state.blck.root, getStateField(state.data, slot), getStateRoot(state.data))

@@ -664,7 +668,7 @@ proc advanceSlots(

doAssert process_slots(
state.data, getStateField(state.data, slot) + 1, cache, rewards,
dag.updateFlags, FAR_FUTURE_SLOT),
dag.updateFlags, dag.altairTransitionSlot),
"process_slots shouldn't fail when state slot is correct"
if save:
dag.putState(state)

@@ -688,7 +692,8 @@ proc applyBlock(

let ok = state_transition(
dag.runtimePreset, state.data, blck.data,
cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore,
dag.altairTransitionSlot)
if ok:
state.blck = blck.refs
@@ -12,7 +12,8 @@ import
../consensus_object_pools/[blockchain_dag, exit_pool],
../gossip_processing/gossip_validation,
../validators/validator_duties,
../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, network],
../spec/[crypto, digest, forkedbeaconstate_helpers, network],
../spec/datatypes/base,
../ssz/merkleization,
./eth2_json_rest_serialization, ./rest_utils

@@ -11,7 +11,8 @@ import
chronicles,
nimcrypto/utils as ncrutils,
../beacon_node_common, ../eth1/eth1_monitor,
../spec/[datatypes, digest, forkedbeaconstate_helpers, presets],
../spec/datatypes/base,
../spec/[digest, forkedbeaconstate_helpers, presets],
./eth2_json_rest_serialization, ./rest_utils

logScope: topics = "rest_config"

@@ -13,7 +13,8 @@ import
chronicles,
../version, ../beacon_node_common,
../networking/[eth2_network, peer_pool],
../spec/[datatypes, digest, presets],
../spec/datatypes/base,
../spec/[digest, presets],
./rpc_utils, ./eth2_json_rpc_serialization

logScope: topics = "debugapi"

@@ -1,7 +1,8 @@
import
strutils,
json_serialization/std/[sets, net], serialization/errors,
../spec/[datatypes, digest, crypto, eth2_apis/beacon_rpc_client],
../spec/datatypes/base,
../spec/[crypto, digest, eth2_apis/beacon_rpc_client],
json_rpc/[client, jsonmarshal]

from os import DirSep, AltSep

@@ -18,7 +18,8 @@ import
".."/[
beacon_node_common, nimbus_binary_common, networking/eth2_network,
eth1/eth1_monitor, validators/validator_duties],
../spec/[digest, datatypes, forkedbeaconstate_helpers, presets]
../spec/datatypes/base,
../spec/[digest, forkedbeaconstate_helpers, presets]

logScope: topics = "nimbusapi"

@@ -17,7 +17,8 @@ import std/options,
../beacon_node_common, ../version,
../networking/[eth2_network, peer_pool],
../sync/sync_manager,
../spec/[datatypes, digest, presets],
../spec/datatypes/base,
../spec/[digest, presets],
../spec/eth2_apis/callsigs_types

logScope: topics = "nodeapi"

@@ -7,7 +7,8 @@ import
nimcrypto/utils as ncrutils,
../version, ../beacon_node_common, ../sync/sync_manager,
../networking/[eth2_network, peer_pool],
../spec/[datatypes, digest, presets],
../spec/datatypes/base,
../spec/[digest, presets],
../spec/eth2_apis/callsigs_types,
./eth2_json_rest_serialization, ./rest_utils

@@ -12,7 +12,8 @@ import
stew/byteutils,
../beacon_node_common, ../validators/validator_duties,
../consensus_object_pools/[block_pools_types, blockchain_dag],
../spec/[datatypes, digest, forkedbeaconstate_helpers, helpers]
../spec/datatypes/base,
../spec/[digest, forkedbeaconstate_helpers, helpers]

export blockchain_dag

@@ -9,15 +9,16 @@

import
# Standard library
std/[tables],
std/tables,

# Nimble packages
stew/[objects],
stew/objects,
json_rpc/servers/httpserver,
chronicles,

# Local modules
../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, helpers, network, signatures],
../spec/[crypto, digest, forkedbeaconstate_helpers, helpers, network, signatures],
../spec/datatypes/base,
../spec/eth2_apis/callsigs_types,
../consensus_object_pools/[blockchain_dag, spec_cache, attestation_pool], ../ssz/merkleization,
../beacon_node_common, ../beacon_node_types,

@@ -12,7 +12,8 @@ import
../consensus_object_pools/[blockchain_dag, spec_cache, attestation_pool],
../gossip_processing/gossip_validation,
../validators/validator_duties,
../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, network],
../spec/[crypto, digest, forkedbeaconstate_helpers, network],
../spec/datatypes/base,
../ssz/merkleization,
./eth2_json_rest_serialization, ./rest_utils
@@ -306,7 +306,7 @@ proc state_transition*(
phase0.TrustedSignedBeaconBlock | altair.SignedBeaconBlock,
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags,
rollback: RollbackForkedHashedProc,
altairForkSlot: Slot = FAR_FUTURE_SLOT): bool {.nbench.} =
altairForkSlot: Slot): bool {.nbench.} =
## Apply a block to the state, advancing the slot counter as necessary. The
## given state must be of a lower slot, or, in case the `slotProcessed` flag
## is set, can be the slot state of the same slot as the block (where the
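Because the altairForkSlot default is dropped here, every phase-0-only caller now has to name the fork slot explicitly, which is what the ncli, nfuzz and test hunks further below do. The call shape, sketched with the identifiers used in those hunks (FAR_FUTURE_SLOT simply means "never switch to Altair"):

  # before: state_transition(defaultRuntimePreset, fhPreState[], blck, cache,
  #                          rewards, flags = {}, noRollback)
  # after: the fork slot is spelled out
  let success = state_transition(
    defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
    noRollback, FAR_FUTURE_SLOT)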
@@ -15,7 +15,8 @@ import
stew/[results, byteutils],
chronicles, chronicles/timings,
# Internal
../spec/[datatypes, digest, crypto],
../spec/datatypes/base,
../spec/[digest, crypto],
./slashing_protection_common,
./slashing_protection_v1,
./slashing_protection_v2

@@ -18,7 +18,8 @@ import
json_serialization,
chronicles,
# Internal
../spec/[datatypes, digest, crypto]
../spec/datatypes/base,
../spec/[digest, crypto]

export serialization, json_serialization # Generic sandwich https://github.com/nim-lang/Nim/issues/11225

@@ -17,7 +17,8 @@ import
serialization,
json_serialization,
# Internal
../spec/[datatypes, digest, crypto],
../spec/datatypes/base,
../spec/[digest, crypto],
../ssz,
./slashing_protection_common
@@ -164,7 +164,7 @@ proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, ski
else: {}
let success = state_transition(
defaultRuntimePreset, state[], signedBlock, cache, rewards, flags,
noRollback)
noRollback, FAR_FUTURE_SLOT)
echo "State transition status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"

proc runProcessSlots*(dir, preState: string, numSlots: uint64) =

@@ -90,7 +90,8 @@ proc doTransition(conf: NcliConf) =
cache = StateCache()
rewards = RewardInfo()
if not state_transition(getRuntimePresetForNetwork(conf.eth2Network),
stateY[], blckX, cache, rewards, flags, noRollback):
stateY[], blckX, cache, rewards, flags, noRollback,
FAR_FUTURE_SLOT):
error "State transition failed"
quit 1
else:

@@ -122,7 +122,8 @@ proc nfuzz_block(input: openArray[byte], xoutput: ptr byte,
rewards = RewardInfo()
result =
state_transition(
preset, fhState[], blck, cache, rewards, flags, rollback)
preset, fhState[], blck, cache, rewards, flags, rollback,
FAR_FUTURE_SLOT)
data.state = fhState.hbsPhase0.data

decodeAndProcess(BlockInput):
@@ -20,9 +20,10 @@ import
confutils, chronicles, eth/db/kvstore_sqlite3,
eth/keys,
../tests/testblockutil,
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest,
../beacon_chain/spec/[beaconstate, crypto, digest,
forkedbeaconstate_helpers, presets,
helpers, signatures, state_transition],
../beacon_chain/spec/datatypes/[phase0, altair],
../beacon_chain/[beacon_node_types, beacon_chain_db, extras],
../beacon_chain/eth1/eth1_monitor,
../beacon_chain/validators/validator_pool,

@@ -52,7 +53,7 @@ proc gauss(r: var Rand; mu = 0.0; sigma = 1.0): float =
result = mu + sigma * (b / a)

# TODO confutils is an impenetrable black box. how can a help text be added here?
cli do(slots = SLOTS_PER_EPOCH * 5,
cli do(slots = SLOTS_PER_EPOCH * 6,
validators = SLOTS_PER_EPOCH * 400, # One per shard is minimum
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.82,
blockRatio {.desc: "ratio of slots with blocks"} = 1.0,

@@ -63,6 +64,8 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
runtimePreset = defaultRuntimePreset
genesisTime = float state[].data.genesis_time

const altairTransitionSlot = 96.Slot

echo "Starting simulation..."

let db = BeaconChainDB.new(runtimePreset, "block_sim_db")

@@ -72,7 +75,7 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
putInitialDepositContractSnapshot(db, depositContractSnapshot)

var
dag = ChainDAGRef.init(runtimePreset, db)
dag = ChainDAGRef.init(runtimePreset, db, {}, altairTransitionSlot)
eth1Chain = Eth1Chain.init(runtimePreset, db)
merkleizer = depositContractSnapshot.createMerkleizer
quarantine = QuarantineRef.init(keys.newRng())

@@ -123,65 +126,96 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
signature: sig.toValidatorSig()
), [validatorIdx], sig, data.slot)

proc proposeBlock(slot: Slot) =
proc getNewBlock[T](
stateData: var StateData, slot: Slot, cache: var StateCache): T =
let
finalizedEpochRef = dag.getFinalizedEpochRef()
proposerIdx = get_beacon_proposer_index(
stateData.data, cache, getStateField(stateData.data, slot)).get()
privKey = hackPrivKey(
getStateField(stateData.data, validators)[proposerIdx])
eth1ProposalData = eth1Chain.getBlockProposalData(
stateData.data,
finalizedEpochRef.eth1_data,
finalizedEpochRef.eth1_deposit_index)
hashedState =
when T is phase0.SignedBeaconBlock:
addr stateData.data.hbsPhase0
elif T is altair.SignedBeaconBlock:
addr stateData.data.hbsAltair
else:
static: doAssert false
message = makeBeaconBlock(
runtimePreset,
hashedState[],
proposerIdx,
dag.head.root,
privKey.genRandaoReveal(
getStateField(stateData.data, fork),
getStateField(stateData.data, genesis_validators_root),
slot).toValidatorSig(),
eth1ProposalData.vote,
default(GraffitiBytes),
attPool.getAttestationsForTestBlock(stateData, cache),
eth1ProposalData.deposits,
@[],
@[],
@[],
ExecutionPayload(),
noRollback,
cache)

var
newBlock = T(
message: message.get()
)

let blockRoot = withTimerRet(timers[tHashBlock]):
hash_tree_root(newBlock.message)
newBlock.root = blockRoot
# Careful, state no longer valid after here because of the await..
newBlock.signature = withTimerRet(timers[tSignBlock]):
get_block_signature(
getStateField(stateData.data, fork),
getStateField(stateData.data, genesis_validators_root),
newBlock.message.slot,
blockRoot, privKey).toValidatorSig()

newBlock

proc proposePhase0Block(slot: Slot) =
if rand(r, 1.0) > blockRatio:
return

let
head = dag.head

dag.withState(tmpState[], head.atSlot(slot)):
dag.withState(tmpState[], dag.head.atSlot(slot)):
let
finalizedEpochRef = dag.getFinalizedEpochRef()
proposerIdx = get_beacon_proposer_index(
stateData.data, cache, getStateField(stateData.data, slot)).get()
privKey = hackPrivKey(
getStateField(stateData.data, validators)[proposerIdx])
eth1ProposalData = eth1Chain.getBlockProposalData(
stateData.data,
finalizedEpochRef.eth1_data,
finalizedEpochRef.eth1_deposit_index)
message = makeBeaconBlock(
runtimePreset,
stateData.data.hbsPhase0,
proposerIdx,
head.root,
privKey.genRandaoReveal(
getStateField(stateData.data, fork),
getStateField(stateData.data, genesis_validators_root),
slot).toValidatorSig(),
eth1ProposalData.vote,
default(GraffitiBytes),
attPool.getAttestationsForTestBlock(stateData, cache),
eth1ProposalData.deposits,
@[],
@[],
@[],
ExecutionPayload(),
noRollback,
cache)
newBlock = getNewBlock[phase0.SignedBeaconBlock](stateData, slot, cache)
added = dag.addRawBlock(quarantine, newBlock) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef):
# Callback add to fork choice if valid
attPool.addForkChoice(
epochRef, blckRef, signedBlock.message, blckRef.slot)

var
newBlock = SignedBeaconBlock(
message: message.get()
)
blck() = added[]
dag.updateHead(added[], quarantine)
if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG()
attPool.prune()

let blockRoot = withTimerRet(timers[tHashBlock]):
hash_tree_root(newBlock.message)
newBlock.root = blockRoot
# Careful, state no longer valid after here because of the await..
newBlock.signature = withTimerRet(timers[tSignBlock]):
get_block_signature(
getStateField(stateData.data, fork),
getStateField(stateData.data, genesis_validators_root),
newBlock.message.slot,
blockRoot, privKey).toValidatorSig()
proc proposeAltairBlock(slot: Slot) =
if rand(r, 1.0) > blockRatio:
return

let added = dag.addRawBlock(quarantine, newBlock) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef):
# Callback add to fork choice if valid
attPool.addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)
dag.withState(tmpState[], dag.head.atSlot(slot)):
let
newBlock = getNewBlock[altair.SignedBeaconBlock](stateData, slot, cache)
added = dag.addRawBlock(quarantine, newBlock) do (
blckRef: BlockRef, signedBlock: altair.TrustedSignedBeaconBlock,
epochRef: EpochRef):
# Callback add to fork choice if valid
attPool.addForkChoice(
epochRef, blckRef, signedBlock.message, blckRef.slot)

blck() = added[]
dag.updateHead(added[], quarantine)

@@ -228,7 +262,10 @@ cli do(slots = SLOTS_PER_EPOCH * 5,

if blockRatio > 0.0:
withTimer(timers[t]):
proposeBlock(slot)
if slot < altairTransitionSlot:
proposePhase0Block(slot)
else:
proposeAltairBlock(slot)
if attesterRatio > 0.0:
withTimer(timers[tAttest]):
handleAttestations(slot)
@@ -53,12 +53,12 @@ proc runTest(testName, testDir, unitTestName: string) =
if hasPostState:
let success = state_transition(
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
noRollback)
noRollback, FAR_FUTURE_SLOT)
doAssert success, "Failure when applying block " & $i
else:
let success = state_transition(
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
noRollback)
noRollback, FAR_FUTURE_SLOT)
doAssert (i + 1 < numBlocks) or not success,
"We didn't expect these invalid blocks to be processed"

@@ -53,12 +53,12 @@ proc runTest(testName, testDir, unitTestName: string) =
if hasPostState:
let success = state_transition(
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
noRollback)
noRollback, FAR_FUTURE_SLOT)
doAssert success, "Failure when applying block " & $i
else:
let success = state_transition(
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
noRollback)
noRollback, FAR_FUTURE_SLOT)
doAssert (i + 1 < numBlocks) or not success,
"We didn't expect these invalid blocks to be processed"

@@ -13,7 +13,8 @@ import
unittest2,
stew/[byteutils, endians2],
# Internals
../../beacon_chain/spec/[datatypes, presets],
../../beacon_chain/spec/datatypes/base,
../../beacon_chain/spec/presets,
# Test utilities
../testutil, ./fixtures_utils

@@ -21,7 +21,8 @@ import
slashing_protection,
slashing_protection_v1
],
../../beacon_chain/spec/[datatypes, digest, crypto, presets],
../../beacon_chain/spec/datatypes/base,
../../beacon_chain/spec/[digest, crypto, presets],
# Test utilities
../testutil

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)

@@ -14,7 +14,8 @@ import
chronicles,
# Internal
../../beacon_chain/validators/[slashing_protection, slashing_protection_v2],
../../beacon_chain/spec/[datatypes, digest, crypto, presets],
../../beacon_chain/spec/datatypes/base,
../../beacon_chain/spec/[digest, crypto, presets],
# Test utilities
../testutil, ../testdbutil,
../official/fixtures_utils

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)

@@ -16,7 +16,8 @@ import
nimcrypto/utils,
# Internal
../../beacon_chain/validators/[slashing_protection, slashing_protection_v2],
../../beacon_chain/spec/[datatypes, digest, crypto, presets],
../../beacon_chain/spec/datatypes/base,
../../beacon_chain/spec/[digest, crypto, presets],
# Test utilities
../testutil

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)

@@ -15,7 +15,8 @@ import
stew/results,
# Internal
../../beacon_chain/validators/slashing_protection,
../../beacon_chain/spec/[datatypes, digest, crypto, presets, helpers],
../../beacon_chain/spec/[crypto, digest, helpers, presets],
../../beacon_chain/spec/datatypes/base,
# Test utilities
../testutil
@@ -8,6 +8,7 @@
{.used.}

import
chronicles,
std/[options, sequtils],
unittest2,
stew/assign2,

@@ -21,9 +22,6 @@ import
blockchain_dag, block_quarantine, block_clearance],
./testutil, ./testdbutil, ./testblockutil

when isMainModule:
import chronicles # or some random compile error happens...

proc `$`(x: BlockRef): string =
$x.root

@@ -124,6 +122,7 @@ suite "Block pool processing" & preset():
db = makeTestDB(SLOTS_PER_EPOCH)
dag = init(ChainDAGRef, defaultRuntimePreset, db)
quarantine = QuarantineRef.init(keys.newRng())
nilPhase0Callback: OnPhase0BlockAdded
state = newClone(dag.headState.data)
cache = StateCache()
rewards = RewardInfo()

@@ -143,7 +142,7 @@ suite "Block pool processing" & preset():

test "Simple block add&get" & preset():
let
b1Add = dag.addRawBlock(quarantine, b1, nil)
b1Add = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
b1Get = dag.get(b1.root)

check:

@@ -154,7 +153,7 @@ suite "Block pool processing" & preset():
dag.heads[0] == b1Add[]

let
b2Add = dag.addRawBlock(quarantine, b2, nil)
b2Add = dag.addRawBlock(quarantine, b2, nilPhase0Callback)
b2Get = dag.get(b2.root)
er = dag.findEpochRef(b1Add[], b1Add[].slot.epoch)
validators = getStateField(dag.headState.data, validators).lenu64()

@@ -183,7 +182,7 @@ suite "Block pool processing" & preset():

let
b4 = addTestBlock(state[], b2.root, cache)
b4Add = dag.addRawBlock(quarantine, b4, nil)
b4Add = dag.addRawBlock(quarantine, b4, nilPhase0Callback)

check:
b4Add[].parent == b2Add[]

@@ -231,14 +230,14 @@ suite "Block pool processing" & preset():
blocks[2..<2].len == 0

test "Reverse order block add & get" & preset():
let missing = dag.addRawBlock(quarantine, b2, nil)
let missing = dag.addRawBlock(quarantine, b2, nilPhase0Callback)
check: missing.error == (ValidationResult.Ignore, MissingParent)

check:
dag.get(b2.root).isNone() # Unresolved, shouldn't show up
FetchRecord(root: b1.root) in quarantine.checkMissing()

let status = dag.addRawBlock(quarantine, b1, nil)
let status = dag.addRawBlock(quarantine, b1, nilPhase0Callback)

check: status.isOk

@@ -275,8 +274,8 @@ suite "Block pool processing" & preset():

test "Adding the same block twice returns a Duplicate error" & preset():
let
b10 = dag.addRawBlock(quarantine, b1, nil)
b11 = dag.addRawBlock(quarantine, b1, nil)
b10 = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
b11 = dag.addRawBlock(quarantine, b1, nilPhase0Callback)

check:
b11.error == (ValidationResult.Ignore, Duplicate)

@@ -284,7 +283,7 @@ suite "Block pool processing" & preset():

test "updateHead updates head and headState" & preset():
let
b1Add = dag.addRawBlock(quarantine, b1, nil)
b1Add = dag.addRawBlock(quarantine, b1, nilPhase0Callback)

dag.updateHead(b1Add[], quarantine)
dag.pruneAtFinalization()

@@ -295,8 +294,8 @@ suite "Block pool processing" & preset():

test "updateStateData sanity" & preset():
let
b1Add = dag.addRawBlock(quarantine, b1, nil)
b2Add = dag.addRawBlock(quarantine, b2, nil)
b1Add = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
b2Add = dag.addRawBlock(quarantine, b2, nilPhase0Callback)
bs1 = BlockSlot(blck: b1Add[], slot: b1.message.slot)
bs1_3 = b1Add[].atSlot(3.Slot)
bs2_3 = b2Add[].atSlot(3.Slot)

@@ -348,6 +347,7 @@ suite "chain DAG finalization tests" & preset():
db = makeTestDB(SLOTS_PER_EPOCH)
dag = init(ChainDAGRef, defaultRuntimePreset, db)
quarantine = QuarantineRef.init(keys.newRng())
nilPhase0Callback: OnPhase0BlockAdded
cache = StateCache()
rewards = RewardInfo()

@@ -363,7 +363,7 @@ suite "chain DAG finalization tests" & preset():

let lateBlock = addTestBlock(tmpState[], dag.head.root, cache)
block:
let status = dag.addRawBlock(quarantine, blck, nil)
let status = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
check: status.isOk()

assign(tmpState[], dag.headState.data)

@@ -378,7 +378,7 @@ suite "chain DAG finalization tests" & preset():
tmpState[], dag.head.root, cache,
attestations = makeFullAttestations(
tmpState[], dag.head.root, getStateField(tmpState[], slot), cache, {}))
let added = dag.addRawBlock(quarantine, blck, nil)
let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
check: added.isOk()
dag.updateHead(added[], quarantine)
dag.pruneAtFinalization()

@@ -420,7 +420,7 @@ suite "chain DAG finalization tests" & preset():
block:
# The late block is a block whose parent was finalized long ago and thus
# is no longer a viable head candidate
let status = dag.addRawBlock(quarantine, lateBlock, nil)
let status = dag.addRawBlock(quarantine, lateBlock, nilPhase0Callback)
check: status.error == (ValidationResult.Ignore, Unviable)

block:

@@ -449,7 +449,7 @@ suite "chain DAG finalization tests" & preset():
assign(prestate[], dag.headState.data)

let blck = makeTestBlock(dag.headState.data, dag.head.root, cache)
let added = dag.addRawBlock(quarantine, blck, nil)
let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
check: added.isOk()
dag.updateHead(added[], quarantine)
dag.pruneAtFinalization()

@@ -468,21 +468,21 @@ suite "chain DAG finalization tests" & preset():
let blck = makeTestBlock(prestate[], dag.head.parent.root, cache)

# Add block, but don't update head
let added = dag.addRawBlock(quarantine, blck, nil)
let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
check: added.isOk()

var
dag2 = init(ChainDAGRef, defaultRuntimePreset, db)

# check that we can apply the block after the orphaning
let added2 = dag2.addRawBlock(quarantine, blck, nil)
let added2 = dag2.addRawBlock(quarantine, blck, nilPhase0Callback)
check: added2.isOk()

test "init with gaps" & preset():
for blck in makeTestBlocks(
dag.headState.data, dag.head.root, cache, int(SLOTS_PER_EPOCH * 6 - 2),
true):
let added = dag.addRawBlock(quarantine, blck, nil)
let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
check: added.isOk()
dag.updateHead(added[], quarantine)
dag.pruneAtFinalization()

@@ -499,7 +499,7 @@ suite "chain DAG finalization tests" & preset():
dag.headState.data, dag.head.root, getStateField(dag.headState.data, slot),
cache, {}))

let added = dag.addRawBlock(quarantine, blck, nil)
let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
check: added.isOk()
dag.updateHead(added[], quarantine)
dag.pruneAtFinalization()