Mirror of https://github.com/status-im/nimbus-eth2.git
add Altair support to block quarantine/clearance and block_sim (#2662)
* add Altair support to the block quarantine
* switch some spec/datatypes imports to spec/datatypes/base
* add Altair support to block_clearance
* allow runtime configuration of Altair transition slot
* enable Altair in block_sim, including in CI
parent 8dc4db51c0
commit ae1abf24af
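For orientation before the diff itself: the Altair transition slot becomes a runtime parameter of the chain DAG rather than a compile-time constant. A minimal, hedged sketch of how a caller opts in, using only names that appear in the diff below (the database directory is a placeholder):

  # sketch, not part of the diff
  let
    db = BeaconChainDB.new(defaultRuntimePreset, "some_db_dir")
    # FAR_FUTURE_SLOT (the default) keeps pure phase 0 behaviour;
    # any earlier slot makes state transitions switch to Altair there
    dag = ChainDAGRef.init(defaultRuntimePreset, db, {}, 96.Slot)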
Makefile (4 changed lines)
@@ -283,8 +283,8 @@ endif
rm -rf 0000-*.json t_slashprot_migration.* *.log block_sim_db
for TEST_BINARY in $(TEST_BINARIES); do \
PARAMS=""; \
-if [[ "$${TEST_BINARY}" == "state_sim" ]]; then PARAMS="--validators=6000 --slots=128"; \
+if [[ "$${TEST_BINARY}" == "state_sim" ]]; then PARAMS="--validators=8000 --slots=160"; \
-elif [[ "$${TEST_BINARY}" == "block_sim" ]]; then PARAMS="--validators=6000 --slots=128"; \
+elif [[ "$${TEST_BINARY}" == "block_sim" ]]; then PARAMS="--validators=8000 --slots=160"; \
fi; \
echo -e "\nRunning $${TEST_BINARY} $${PARAMS}\n"; \
build/$${TEST_BINARY} $${PARAMS} || { echo -e "\n$${TEST_BINARY} $${PARAMS} failed; Aborting."; exit 1; }; \
@@ -86,17 +86,60 @@ func batchVerify(quarantine: QuarantineRef, sigs: openArray[SignatureSet]): bool

proc addRawBlock*(
dag: ChainDAGRef, quarantine: QuarantineRef,
-signedBlock: phase0.SignedBeaconBlock, onBlockAdded: OnBlockAdded
+signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock,
+onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded
): Result[BlockRef, (ValidationResult, BlockError)] {.gcsafe.}

+# Now that we have the new block, we should see if any of the previously
+# unresolved blocks magically become resolved
+# TODO This code is convoluted because when there are more than ~1.5k
+# blocks being synced, there's a stack overflow as `add` gets called
+# for the whole chain of blocks. Instead we use this ugly field in `dag`
+# which could be avoided by refactoring the code
+# TODO unit test the logic, in particular interaction with fork choice block parents
+proc resolveQuarantinedBlocks(
+dag: ChainDAGRef, quarantine: QuarantineRef,
+onBlockAdded: OnPhase0BlockAdded) =
+if not quarantine.inAdd:
+quarantine.inAdd = true
+defer: quarantine.inAdd = false
+var entries = 0
+while entries != quarantine.orphansPhase0.len:
+entries = quarantine.orphansPhase0.len # keep going while quarantine is shrinking
+var resolved: seq[phase0.SignedBeaconBlock]
+for _, v in quarantine.orphansPhase0:
+if v.message.parent_root in dag:
+resolved.add(v)

+for v in resolved:
+discard addRawBlock(dag, quarantine, v, onBlockAdded)

+proc resolveQuarantinedBlocks(
+dag: ChainDAGRef, quarantine: QuarantineRef,
+onBlockAdded: OnAltairBlockAdded) =
+if not quarantine.inAdd:
+quarantine.inAdd = true
+defer: quarantine.inAdd = false
+var entries = 0
+while entries != quarantine.orphansAltair.len:
+entries = quarantine.orphansAltair.len # keep going while quarantine is shrinking
+var resolved: seq[altair.SignedBeaconBlock]
+for _, v in quarantine.orphansAltair:
+if v.message.parent_root in dag:
+resolved.add(v)

+for v in resolved:
+discard addRawBlock(dag, quarantine, v, onBlockAdded)

proc addResolvedBlock(
dag: ChainDAGRef, quarantine: QuarantineRef,
-state: var StateData, trustedBlock: phase0.TrustedSignedBeaconBlock,
+state: var StateData,
+trustedBlock: phase0.TrustedSignedBeaconBlock | altair.TrustedSignedBeaconBlock,
parent: BlockRef, cache: var StateCache,
-onBlockAdded: OnBlockAdded, stateDataDur, sigVerifyDur,
+onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded,
+stateDataDur, sigVerifyDur,
stateVerifyDur: Duration
) =
-# TODO move quarantine processing out of here
doAssert getStateField(state.data, slot) == trustedBlock.message.slot,
"state must match block"
doAssert state.blck.root == trustedBlock.message.parent_root,
@@ -112,7 +155,9 @@ proc addResolvedBlock(
dag.blocks.incl(KeyedBlockRef.init(blockRef))

# Resolved blocks should be stored in database
-dag.putBlock(trustedBlock)
+when not (trustedBlock is altair.TrustedSignedBeaconBlock):
+# TODO implement this for altair
+dag.putBlock(trustedBlock)
let putBlockTick = Moment.now()

var foundHead: bool
@@ -153,34 +198,16 @@ proc addResolvedBlock(
if onBlockAdded != nil:
onBlockAdded(blockRef, trustedBlock, epochRef)

-# Now that we have the new block, we should see if any of the previously
-# unresolved blocks magically become resolved
-# TODO This code is convoluted because when there are more than ~1.5k
-# blocks being synced, there's a stack overflow as `add` gets called
-# for the whole chain of blocks. Instead we use this ugly field in `dag`
-# which could be avoided by refactoring the code
-# TODO unit test the logic, in particular interaction with fork choice block parents
-if not quarantine.inAdd:
-quarantine.inAdd = true
-defer: quarantine.inAdd = false
-var entries = 0
-while entries != quarantine.orphans.len:
-entries = quarantine.orphans.len # keep going while quarantine is shrinking
-var resolved: seq[phase0.SignedBeaconBlock]
-for _, v in quarantine.orphans:
-if v.message.parent_root in dag:
-resolved.add(v)
-
-for v in resolved:
-discard addRawBlock(dag, quarantine, v, onBlockAdded)
+resolveQuarantinedBlocks(dag, quarantine, onBlockAdded)

# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
-# copy of phase0.SomeSignedBeaconBlock from datatypes/phase0.nim
-type SomeSignedPhase0Block =
+type SomeSignedBlock =
phase0.SignedBeaconBlock | phase0.SigVerifiedSignedBeaconBlock |
-phase0.TrustedSignedBeaconBlock
+phase0.TrustedSignedBeaconBlock |
+altair.SignedBeaconBlock | altair.SigVerifiedSignedBeaconBlock |
+altair.TrustedSignedBeaconBlock
proc checkStateTransition(
-dag: ChainDAGRef, signedBlock: SomeSignedPhase0Block,
+dag: ChainDAGRef, signedBlock: SomeSignedBlock,
cache: var StateCache): (ValidationResult, BlockError) =
## Ensure block can be applied on a state
func restore(v: var ForkedHashedBeaconState) =
@@ -194,12 +221,9 @@ proc checkStateTransition(
blck = shortLog(signedBlock.message)
blockRoot = shortLog(signedBlock.root)

-# TODO this won't transition because FAR_FUTURE_SLOT, so it's
-# fine, for now, but in general, blockchain_dag.addBlock must
-# match the transition here.
if not state_transition_block(
dag.runtimePreset, dag.clearanceState.data, signedBlock,
-cache, dag.updateFlags, restore, FAR_FUTURE_SLOT):
+cache, dag.updateFlags, restore, dag.altairTransitionSlot):
info "Invalid block"

return (ValidationResult.Reject, Invalid)
@@ -225,9 +249,9 @@ proc advanceClearanceState*(dag: ChainDagRef) =

proc addRawBlockKnownParent(
dag: ChainDAGRef, quarantine: QuarantineRef,
-signedBlock: phase0.SignedBeaconBlock,
+signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock,
parent: BlockRef,
-onBlockAdded: OnBlockAdded
+onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded
): Result[BlockRef, (ValidationResult, BlockError)] =
## Add a block whose parent is known, after performing validity checks
@@ -303,7 +327,7 @@ proc addRawBlockKnownParent(
proc addRawBlockUnresolved(
dag: ChainDAGRef,
quarantine: QuarantineRef,
-signedBlock: phase0.SignedBeaconBlock
+signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock,
): Result[BlockRef, (ValidationResult, BlockError)] =
## addRawBlock - Block is unresolved / has no parent
@@ -319,7 +343,8 @@ proc addRawBlockUnresolved(
if signedBlock.message.parent_root in quarantine.missing or
containsOrphan(quarantine, signedBlock):
debug "Unresolved block (parent missing or orphaned)",
-orphans = quarantine.orphans.len,
+orphansPhase0 = quarantine.orphansPhase0.len,
+orphansAltair = quarantine.orphansAltair.len,
missing = quarantine.missing.len

return err((ValidationResult.Ignore, MissingParent))
@@ -334,15 +359,16 @@ proc addRawBlockUnresolved(
# a risk of being slashed, making attestations a more valuable spam
# filter.
debug "Unresolved block (parent missing)",
-orphans = quarantine.orphans.len,
+orphansPhase0 = quarantine.orphansPhase0.len,
+orphansAltair = quarantine.orphansAltair.len,
missing = quarantine.missing.len

return err((ValidationResult.Ignore, MissingParent))

proc addRawBlock(
dag: ChainDAGRef, quarantine: QuarantineRef,
-signedBlock: phase0.SignedBeaconBlock,
+signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock,
-onBlockAdded: OnBlockAdded
+onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded
): Result[BlockRef, (ValidationResult, BlockError)] =
## Try adding a block to the chain, verifying first that it passes the state
## transition function and contains correct cryptographic signature.
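The net effect of the block_clearance changes above is that `addRawBlock` now accepts either fork's `SignedBeaconBlock` together with the matching callback type. A condensed usage sketch assembled from call sites elsewhere in this commit (the `dag`, `quarantine` and `myBlock` values are assumed to exist already):

  # hedged sketch, mirroring the tests/block_sim call sites
  let added = dag.addRawBlock(quarantine, myBlock) do (
      blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
      epochRef: EpochRef):
    # runs only if the block passed state transition and signature checks
    discard
  if added.isErr:
    echo "rejected: ", added.error

Passing an `altair.SignedBeaconBlock` instead requires a callback taking `altair.TrustedSignedBeaconBlock`; mixing the two is a compile-time error.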
@@ -11,11 +11,11 @@ import
# Standard library
std/[sets, tables, hashes],
# Status libraries
-stew/[endians2], chronicles,
+stew/endians2, chronicles,
eth/keys,
# Internals
-../spec/[
-datatypes, crypto, digest, signatures_batch, forkedbeaconstate_helpers],
+../spec/[crypto, digest, signatures_batch, forkedbeaconstate_helpers],
+../spec/datatypes/[phase0, altair],
../beacon_chain_db, ../extras

export sets, tables
@@ -57,10 +57,15 @@ type
##
## Invalid blocks are dropped immediately.

-orphans*: Table[(Eth2Digest, ValidatorSig), SignedBeaconBlock] ##\
-## Blocks that have passed validation but that we lack a link back to tail
-## for - when we receive a "missing link", we can use this data to build
-## an entire branch
+orphansPhase0*: Table[(Eth2Digest, ValidatorSig), phase0.SignedBeaconBlock] ##\
+## Phase 0 Blocks that have passed validation but that we lack a link back
+## to tail for - when we receive a "missing link", we can use this data to
+## build an entire branch

+orphansAltair*: Table[(Eth2Digest, ValidatorSig), altair.SignedBeaconBlock] ##\
+## Altair Blocks that have passed validation, but that we lack a link back
+## to tail for - when we receive a "missing link", we can use this data to
+## build an entire branch

missing*: Table[Eth2Digest, MissingBlock] ##\
## Roots of blocks that we would like to have (either parent_root of
@@ -160,6 +165,9 @@ type
## block - we limit the number of held EpochRefs to put a cap on
## memory usage

+altairTransitionSlot*: Slot ##\
+## Slot at which to upgrade from phase 0 to Altair forks
+
EpochKey* = object
## The epoch key fully determines the shuffling for proposers and
## committees in a beacon state - the epoch level information in the state
@@ -199,7 +207,7 @@ type
BlockData* = object
## Body and graph in one

-data*: TrustedSignedBeaconBlock # We trust all blocks we have a ref for
+data*: phase0.TrustedSignedBeaconBlock # We trust all blocks we have a ref for
refs*: BlockRef

StateData* = object
@@ -218,8 +226,14 @@ type
## Slot time for this BlockSlot which may differ from blck.slot when time
## has advanced without blocks

-OnBlockAdded* = proc(
-blckRef: BlockRef, blck: TrustedSignedBeaconBlock,
+OnPhase0BlockAdded* = proc(
+blckRef: BlockRef,
+blck: phase0.TrustedSignedBeaconBlock,
+epochRef: EpochRef) {.gcsafe, raises: [Defect].}
+
+OnAltairBlockAdded* = proc(
+blckRef: BlockRef,
+blck: altair.TrustedSignedBeaconBlock,
epochRef: EpochRef) {.gcsafe, raises: [Defect].}

template head*(dag: ChainDagRef): BlockRef = dag.headState.blck
@@ -1,5 +1,5 @@
# beacon_chain
-# Copyright (c) 2018-2020 Status Research & Development GmbH
+# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -12,7 +12,8 @@ import
chronicles,
stew/bitops2,
eth/keys,
-../spec/[crypto, datatypes, digest],
+../spec/[crypto, digest],
+../spec/datatypes/[phase0, altair],
./block_pools_types

export options, block_pools_types
@@ -55,21 +56,42 @@ template anyIt(s, pred: untyped): bool =
result

func containsOrphan*(
-quarantine: QuarantineRef, signedBlock: SignedBeaconBlock): bool =
-(signedBlock.root, signedBlock.signature) in quarantine.orphans
+quarantine: QuarantineRef, signedBlock: phase0.SignedBeaconBlock): bool =
+(signedBlock.root, signedBlock.signature) in quarantine.orphansPhase0
+
+func containsOrphan*(
+quarantine: QuarantineRef, signedBlock: altair.SignedBeaconBlock): bool =
+(signedBlock.root, signedBlock.signature) in quarantine.orphansAltair

func addMissing*(quarantine: QuarantineRef, root: Eth2Digest) =
## Schedule the download a the given block
# Can only request by root, not by signature, so partial match suffices
-if not anyIt(quarantine.orphans.keys, it[0] == root):
+if (not anyIt(quarantine.orphansPhase0.keys, it[0] == root)) and
+(not anyIt(quarantine.orphansAltair.keys, it[0] == root)):
# If the block is in orphans, we no longer need it
discard quarantine.missing.hasKeyOrPut(root, MissingBlock())

+# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
+# copy of phase0.SomeSignedBeaconBlock from datatypes/phase0.nim
+type SomeSignedPhase0Block =
+phase0.SignedBeaconBlock | phase0.SigVerifiedSignedBeaconBlock |
+phase0.TrustedSignedBeaconBlock
func removeOrphan*(
-quarantine: QuarantineRef, signedBlock: SignedBeaconBlock) =
-quarantine.orphans.del((signedBlock.root, signedBlock.signature))
+quarantine: QuarantineRef, signedBlock: SomeSignedPhase0Block) =
+quarantine.orphansPhase0.del((signedBlock.root, signedBlock.signature))

-func isViableOrphan(dag: ChainDAGRef, signedBlock: SignedBeaconBlock): bool =
+# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
+# copy of altair.SomeSignedBeaconBlock from datatypes/altair.nim
+type SomeSignedAltairBlock =
+altair.SignedBeaconBlock | altair.SigVerifiedSignedBeaconBlock |
+altair.TrustedSignedBeaconBlock
+func removeOrphan*(
+quarantine: QuarantineRef, signedBlock: SomeSignedAltairBlock) =
+quarantine.orphansAltair.del((signedBlock.root, signedBlock.signature))
+
+func isViableOrphan(
+dag: ChainDAGRef,
+signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock): bool =
# The orphan must be newer than the finalization point so that its parent
# either is the finalized block or more recent
signedBlock.message.slot > dag.finalizedHead.slot
@@ -77,39 +99,43 @@ func isViableOrphan(dag: ChainDAGRef, signedBlock: SignedBeaconBlock): bool =
func removeOldBlocks(quarantine: QuarantineRef, dag: ChainDAGRef) =
var oldBlocks: seq[(Eth2Digest, ValidatorSig)]

-for k, v in quarantine.orphans.pairs():
-if not isViableOrphan(dag, v):
-oldBlocks.add k
+template removeNonviableOrphans(orphans: untyped) =
+for k, v in orphans.pairs():
+if not isViableOrphan(dag, v):
+oldBlocks.add k

for k in oldBlocks:
-quarantine.orphans.del k
+orphans.del k
+
+removeNonviableOrphans(quarantine.orphansPhase0)
+removeNonviableOrphans(quarantine.orphansAltair)

func clearQuarantine*(quarantine: QuarantineRef) =
-quarantine.orphans.clear()
+quarantine.orphansPhase0.clear()
+quarantine.orphansAltair.clear()
quarantine.missing.clear()

+# Typically, blocks will arrive in mostly topological order, with some
+# out-of-order block pairs. Therefore, it is unhelpful to use either a
+# FIFO or LIFO discpline, and since by definition each block gets used
+# either 0 or 1 times it's not a cache either. Instead, stop accepting
+# new blocks, and rely on syncing to cache up again if necessary. When
+# using forward sync, blocks only arrive in an order not requiring the
+# quarantine.
+#
+# For typical use cases, this need not be large, as they're two or three
+# blocks arriving out of order due to variable network delays. As blocks
+# for future slots are rejected before reaching quarantine, this usually
+# will be a block for the last couple of slots for which the parent is a
+# likely imminent arrival.
+
+# Since we start forward sync when about one epoch is missing, that's as
+# good a number as any.
+const MAX_QUARANTINE_ORPHANS = SLOTS_PER_EPOCH
+
func add*(quarantine: QuarantineRef, dag: ChainDAGRef,
-signedBlock: SignedBeaconBlock): bool =
+signedBlock: phase0.SignedBeaconBlock): bool =
## Adds block to quarantine's `orphans` and `missing` lists.

-# Typically, blocks will arrive in mostly topological order, with some
-# out-of-order block pairs. Therefore, it is unhelpful to use either a
-# FIFO or LIFO discpline, and since by definition each block gets used
-# either 0 or 1 times it's not a cache either. Instead, stop accepting
-# new blocks, and rely on syncing to cache up again if necessary. When
-# using forward sync, blocks only arrive in an order not requiring the
-# quarantine.
-#
-# For typical use cases, this need not be large, as they're two or three
-# blocks arriving out of order due to variable network delays. As blocks
-# for future slots are rejected before reaching quarantine, this usually
-# will be a block for the last couple of slots for which the parent is a
-# likely imminent arrival.
-
-# Since we start forward sync when about one epoch is missing, that's as
-# good a number as any.
-const MAX_QUARANTINE_ORPHANS = SLOTS_PER_EPOCH
-
if not isViableOrphan(dag, signedBlock):
return false

@@ -119,10 +145,32 @@ func add*(quarantine: QuarantineRef, dag: ChainDAGRef,
# downloading or we'll never get to the bottom of things
quarantine.addMissing(signedBlock.message.parent_root)

-if quarantine.orphans.lenu64 >= MAX_QUARANTINE_ORPHANS:
+if quarantine.orphansPhase0.lenu64 >= MAX_QUARANTINE_ORPHANS:
return false

-quarantine.orphans[(signedBlock.root, signedBlock.signature)] = signedBlock
+quarantine.orphansPhase0[(signedBlock.root, signedBlock.signature)] =
+signedBlock
+quarantine.missing.del(signedBlock.root)
+
+true
+
+func add*(quarantine: QuarantineRef, dag: ChainDAGRef,
+signedBlock: altair.SignedBeaconBlock): bool =
+## Adds block to quarantine's `orphans` and `missing` lists.
+if not isViableOrphan(dag, signedBlock):
+return false
+
+quarantine.removeOldBlocks(dag)
+
+# Even if the quarantine is full, we need to schedule its parent for
+# downloading or we'll never get to the bottom of things
+quarantine.addMissing(signedBlock.message.parent_root)
+
+if quarantine.orphansAltair.lenu64 >= MAX_QUARANTINE_ORPHANS:
+return false
+
+quarantine.orphansAltair[(signedBlock.root, signedBlock.signature)] =
+signedBlock
quarantine.missing.del(signedBlock.root)

true
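Since the quarantine now keeps separate phase 0 and Altair orphan tables, callers keep using the same `add`/`containsOrphan` names and the compiler picks the table from the block's static type. A small, hedged sketch under that assumption (`blkPhase0`/`blkAltair` are assumed to be signed blocks of each fork):

  # sketch, not part of the diff
  if quarantine.add(dag, blkPhase0):   # stored in orphansPhase0
    doAssert quarantine.containsOrphan(blkPhase0)
  if quarantine.add(dag, blkAltair):   # stored in orphansAltair
    doAssert quarantine.containsOrphan(blkAltair)
  # addMissing consults both tables before scheduling a parent download
  quarantine.addMissing(blkAltair.message.parent_root)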
@@ -319,7 +319,8 @@ func isStateCheckpoint(bs: BlockSlot): bool =
proc init*(T: type ChainDAGRef,
preset: RuntimePreset,
db: BeaconChainDB,
-updateFlags: UpdateFlags = {}): ChainDAGRef =
+updateFlags: UpdateFlags = {},
+altairTransitionSlot: Slot = FAR_FUTURE_SLOT): ChainDAGRef =
# TODO we require that the db contains both a head and a tail block -
# asserting here doesn't seem like the right way to go about it however..

@@ -417,6 +418,7 @@ proc init*(T: type ChainDAGRef,
# allow skipping some validation.
updateFlags: {verifyFinalization} * updateFlags,
runtimePreset: preset,
+altairTransitionSlot: altairTransitionSlot
)

doAssert dag.updateFlags in [{}, {verifyFinalization}]
@@ -557,7 +559,9 @@ proc putState(dag: ChainDAGRef, state: var StateData) =
# Ideally we would save the state and the root lookup cache in a single
# transaction to prevent database inconsistencies, but the state loading code
# is resilient against one or the other going missing
-dag.db.putState(getStateRoot(state.data), state.data.hbsPhase0.data)
+if state.data.beaconStateFork != forkAltair:
+# TODO re-enable for Altair
+dag.db.putState(getStateRoot(state.data), state.data.hbsPhase0.data)

dag.db.putStateRoot(
state.blck.root, getStateField(state.data, slot), getStateRoot(state.data))
@@ -664,7 +668,7 @@ proc advanceSlots(

doAssert process_slots(
state.data, getStateField(state.data, slot) + 1, cache, rewards,
-dag.updateFlags, FAR_FUTURE_SLOT),
+dag.updateFlags, dag.altairTransitionSlot),
"process_slots shouldn't fail when state slot is correct"
if save:
dag.putState(state)
@@ -688,7 +692,8 @@ proc applyBlock(

let ok = state_transition(
dag.runtimePreset, state.data, blck.data,
-cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
+cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore,
+dag.altairTransitionSlot)
if ok:
state.blck = blck.refs

@@ -12,7 +12,8 @@ import
../consensus_object_pools/[blockchain_dag, exit_pool],
../gossip_processing/gossip_validation,
../validators/validator_duties,
-../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, network],
+../spec/[crypto, digest, forkedbeaconstate_helpers, network],
+../spec/datatypes/base,
../ssz/merkleization,
./eth2_json_rest_serialization, ./rest_utils

@@ -11,7 +11,8 @@ import
chronicles,
nimcrypto/utils as ncrutils,
../beacon_node_common, ../eth1/eth1_monitor,
-../spec/[datatypes, digest, forkedbeaconstate_helpers, presets],
+../spec/datatypes/base,
+../spec/[digest, forkedbeaconstate_helpers, presets],
./eth2_json_rest_serialization, ./rest_utils

logScope: topics = "rest_config"
@@ -13,7 +13,8 @@ import
chronicles,
../version, ../beacon_node_common,
../networking/[eth2_network, peer_pool],
-../spec/[datatypes, digest, presets],
+../spec/datatypes/base,
+../spec/[digest, presets],
./rpc_utils, ./eth2_json_rpc_serialization

logScope: topics = "debugapi"
@@ -1,7 +1,8 @@
import
strutils,
json_serialization/std/[sets, net], serialization/errors,
-../spec/[datatypes, digest, crypto, eth2_apis/beacon_rpc_client],
+../spec/datatypes/base,
+../spec/[crypto, digest, eth2_apis/beacon_rpc_client],
json_rpc/[client, jsonmarshal]

from os import DirSep, AltSep
@@ -18,7 +18,8 @@ import
".."/[
beacon_node_common, nimbus_binary_common, networking/eth2_network,
eth1/eth1_monitor, validators/validator_duties],
-../spec/[digest, datatypes, forkedbeaconstate_helpers, presets]
+../spec/datatypes/base,
+../spec/[digest, forkedbeaconstate_helpers, presets]


logScope: topics = "nimbusapi"
@@ -17,7 +17,8 @@ import std/options,
../beacon_node_common, ../version,
../networking/[eth2_network, peer_pool],
../sync/sync_manager,
-../spec/[datatypes, digest, presets],
+../spec/datatypes/base,
+../spec/[digest, presets],
../spec/eth2_apis/callsigs_types

logScope: topics = "nodeapi"
@@ -7,7 +7,8 @@ import
nimcrypto/utils as ncrutils,
../version, ../beacon_node_common, ../sync/sync_manager,
../networking/[eth2_network, peer_pool],
-../spec/[datatypes, digest, presets],
+../spec/datatypes/base,
+../spec/[digest, presets],
../spec/eth2_apis/callsigs_types,
./eth2_json_rest_serialization, ./rest_utils

@@ -12,7 +12,8 @@ import
stew/byteutils,
../beacon_node_common, ../validators/validator_duties,
../consensus_object_pools/[block_pools_types, blockchain_dag],
-../spec/[datatypes, digest, forkedbeaconstate_helpers, helpers]
+../spec/datatypes/base,
+../spec/[digest, forkedbeaconstate_helpers, helpers]

export blockchain_dag

@@ -9,15 +9,16 @@

import
# Standard library
-std/[tables],
+std/tables,

# Nimble packages
-stew/[objects],
+stew/objects,
json_rpc/servers/httpserver,
chronicles,

# Local modules
-../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, helpers, network, signatures],
+../spec/[crypto, digest, forkedbeaconstate_helpers, helpers, network, signatures],
+../spec/datatypes/base,
../spec/eth2_apis/callsigs_types,
../consensus_object_pools/[blockchain_dag, spec_cache, attestation_pool], ../ssz/merkleization,
../beacon_node_common, ../beacon_node_types,
@@ -12,7 +12,8 @@ import
../consensus_object_pools/[blockchain_dag, spec_cache, attestation_pool],
../gossip_processing/gossip_validation,
../validators/validator_duties,
-../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, network],
+../spec/[crypto, digest, forkedbeaconstate_helpers, network],
+../spec/datatypes/base,
../ssz/merkleization,
./eth2_json_rest_serialization, ./rest_utils

@@ -306,7 +306,7 @@ proc state_transition*(
phase0.TrustedSignedBeaconBlock | altair.SignedBeaconBlock,
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags,
rollback: RollbackForkedHashedProc,
-altairForkSlot: Slot = FAR_FUTURE_SLOT): bool {.nbench.} =
+altairForkSlot: Slot): bool {.nbench.} =
## Apply a block to the state, advancing the slot counter as necessary. The
## given state must be of a lower slot, or, in case the `slotProcessed` flag
## is set, can be the slot state of the same slot as the block (where the
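Because `altairForkSlot` no longer has a default of `FAR_FUTURE_SLOT`, every caller now has to pass the fork slot explicitly; the ncli, fuzzing and test call sites below simply pass `FAR_FUTURE_SLOT` to keep phase 0 semantics. A condensed sketch of such a call site, using only names that appear in this diff:

  # sketch of a phase-0-only caller
  let ok = state_transition(
    defaultRuntimePreset, state[], signedBlock, cache, rewards, flags,
    noRollback, FAR_FUTURE_SLOT)  # pass the real Altair fork slot to transition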
@@ -15,7 +15,8 @@ import
stew/[results, byteutils],
chronicles, chronicles/timings,
# Internal
-../spec/[datatypes, digest, crypto],
+../spec/datatypes/base,
+../spec/[digest, crypto],
./slashing_protection_common,
./slashing_protection_v1,
./slashing_protection_v2
@@ -18,7 +18,8 @@ import
json_serialization,
chronicles,
# Internal
-../spec/[datatypes, digest, crypto]
+../spec/datatypes/base,
+../spec/[digest, crypto]

export serialization, json_serialization # Generic sandwich https://github.com/nim-lang/Nim/issues/11225

@@ -17,7 +17,8 @@ import
serialization,
json_serialization,
# Internal
-../spec/[datatypes, digest, crypto],
+../spec/datatypes/base,
+../spec/[digest, crypto],
../ssz,
./slashing_protection_common

@@ -164,7 +164,7 @@ proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, ski
else: {}
let success = state_transition(
defaultRuntimePreset, state[], signedBlock, cache, rewards, flags,
-noRollback)
+noRollback, FAR_FUTURE_SLOT)
echo "State transition status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"

proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
@@ -90,7 +90,8 @@ proc doTransition(conf: NcliConf) =
cache = StateCache()
rewards = RewardInfo()
if not state_transition(getRuntimePresetForNetwork(conf.eth2Network),
-stateY[], blckX, cache, rewards, flags, noRollback):
+stateY[], blckX, cache, rewards, flags, noRollback,
+FAR_FUTURE_SLOT):
error "State transition failed"
quit 1
else:
@@ -122,7 +122,8 @@ proc nfuzz_block(input: openArray[byte], xoutput: ptr byte,
rewards = RewardInfo()
result =
state_transition(
-preset, fhState[], blck, cache, rewards, flags, rollback)
+preset, fhState[], blck, cache, rewards, flags, rollback,
+FAR_FUTURE_SLOT)
data.state = fhState.hbsPhase0.data

decodeAndProcess(BlockInput):
@@ -20,9 +20,10 @@ import
confutils, chronicles, eth/db/kvstore_sqlite3,
eth/keys,
../tests/testblockutil,
-../beacon_chain/spec/[beaconstate, crypto, datatypes, digest,
+../beacon_chain/spec/[beaconstate, crypto, digest,
forkedbeaconstate_helpers, presets,
helpers, signatures, state_transition],
+../beacon_chain/spec/datatypes/[phase0, altair],
../beacon_chain/[beacon_node_types, beacon_chain_db, extras],
../beacon_chain/eth1/eth1_monitor,
../beacon_chain/validators/validator_pool,
@@ -52,7 +53,7 @@ proc gauss(r: var Rand; mu = 0.0; sigma = 1.0): float =
result = mu + sigma * (b / a)

# TODO confutils is an impenetrable black box. how can a help text be added here?
-cli do(slots = SLOTS_PER_EPOCH * 5,
+cli do(slots = SLOTS_PER_EPOCH * 6,
validators = SLOTS_PER_EPOCH * 400, # One per shard is minimum
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.82,
blockRatio {.desc: "ratio of slots with blocks"} = 1.0,
@@ -63,6 +64,8 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
runtimePreset = defaultRuntimePreset
genesisTime = float state[].data.genesis_time

+const altairTransitionSlot = 96.Slot
+
echo "Starting simulation..."

let db = BeaconChainDB.new(runtimePreset, "block_sim_db")
@@ -72,7 +75,7 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
putInitialDepositContractSnapshot(db, depositContractSnapshot)

var
-dag = ChainDAGRef.init(runtimePreset, db)
+dag = ChainDAGRef.init(runtimePreset, db, {}, altairTransitionSlot)
eth1Chain = Eth1Chain.init(runtimePreset, db)
merkleizer = depositContractSnapshot.createMerkleizer
quarantine = QuarantineRef.init(keys.newRng())
@@ -123,65 +126,96 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
signature: sig.toValidatorSig()
), [validatorIdx], sig, data.slot)

-proc proposeBlock(slot: Slot) =
-if rand(r, 1.0) > blockRatio:
-return
-
-let
-head = dag.head
-
-dag.withState(tmpState[], head.atSlot(slot)):
-let
-finalizedEpochRef = dag.getFinalizedEpochRef()
-proposerIdx = get_beacon_proposer_index(
-stateData.data, cache, getStateField(stateData.data, slot)).get()
-privKey = hackPrivKey(
-getStateField(stateData.data, validators)[proposerIdx])
-eth1ProposalData = eth1Chain.getBlockProposalData(
-stateData.data,
-finalizedEpochRef.eth1_data,
-finalizedEpochRef.eth1_deposit_index)
-message = makeBeaconBlock(
-runtimePreset,
-stateData.data.hbsPhase0,
-proposerIdx,
-head.root,
-privKey.genRandaoReveal(
-getStateField(stateData.data, fork),
-getStateField(stateData.data, genesis_validators_root),
-slot).toValidatorSig(),
-eth1ProposalData.vote,
-default(GraffitiBytes),
-attPool.getAttestationsForTestBlock(stateData, cache),
-eth1ProposalData.deposits,
-@[],
-@[],
-@[],
-ExecutionPayload(),
-noRollback,
-cache)
-
-var
-newBlock = SignedBeaconBlock(
-message: message.get()
-)
-
-let blockRoot = withTimerRet(timers[tHashBlock]):
-hash_tree_root(newBlock.message)
-newBlock.root = blockRoot
-# Careful, state no longer valid after here because of the await..
-newBlock.signature = withTimerRet(timers[tSignBlock]):
-get_block_signature(
-getStateField(stateData.data, fork),
-getStateField(stateData.data, genesis_validators_root),
-newBlock.message.slot,
-blockRoot, privKey).toValidatorSig()
-
-let added = dag.addRawBlock(quarantine, newBlock) do (
-blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
-epochRef: EpochRef):
-# Callback add to fork choice if valid
-attPool.addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)
+proc getNewBlock[T](
+stateData: var StateData, slot: Slot, cache: var StateCache): T =
+let
+finalizedEpochRef = dag.getFinalizedEpochRef()
+proposerIdx = get_beacon_proposer_index(
+stateData.data, cache, getStateField(stateData.data, slot)).get()
+privKey = hackPrivKey(
+getStateField(stateData.data, validators)[proposerIdx])
+eth1ProposalData = eth1Chain.getBlockProposalData(
+stateData.data,
+finalizedEpochRef.eth1_data,
+finalizedEpochRef.eth1_deposit_index)
+hashedState =
+when T is phase0.SignedBeaconBlock:
+addr stateData.data.hbsPhase0
+elif T is altair.SignedBeaconBlock:
+addr stateData.data.hbsAltair
+else:
+static: doAssert false
+message = makeBeaconBlock(
+runtimePreset,
+hashedState[],
+proposerIdx,
+dag.head.root,
+privKey.genRandaoReveal(
+getStateField(stateData.data, fork),
+getStateField(stateData.data, genesis_validators_root),
+slot).toValidatorSig(),
+eth1ProposalData.vote,
+default(GraffitiBytes),
+attPool.getAttestationsForTestBlock(stateData, cache),
+eth1ProposalData.deposits,
+@[],
+@[],
+@[],
+ExecutionPayload(),
+noRollback,
+cache)
+
+var
+newBlock = T(
+message: message.get()
+)
+
+let blockRoot = withTimerRet(timers[tHashBlock]):
+hash_tree_root(newBlock.message)
+newBlock.root = blockRoot
+# Careful, state no longer valid after here because of the await..
+newBlock.signature = withTimerRet(timers[tSignBlock]):
+get_block_signature(
+getStateField(stateData.data, fork),
+getStateField(stateData.data, genesis_validators_root),
+newBlock.message.slot,
+blockRoot, privKey).toValidatorSig()
+
+newBlock
+
+proc proposePhase0Block(slot: Slot) =
+if rand(r, 1.0) > blockRatio:
+return
+
+dag.withState(tmpState[], dag.head.atSlot(slot)):
+let
+newBlock = getNewBlock[phase0.SignedBeaconBlock](stateData, slot, cache)
+added = dag.addRawBlock(quarantine, newBlock) do (
+blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
+epochRef: EpochRef):
+# Callback add to fork choice if valid
+attPool.addForkChoice(
+epochRef, blckRef, signedBlock.message, blckRef.slot)
+
+blck() = added[]
+dag.updateHead(added[], quarantine)
+if dag.needStateCachesAndForkChoicePruning():
+dag.pruneStateCachesDAG()
+attPool.prune()
+
+proc proposeAltairBlock(slot: Slot) =
+if rand(r, 1.0) > blockRatio:
+return
+
+dag.withState(tmpState[], dag.head.atSlot(slot)):
+let
+newBlock = getNewBlock[altair.SignedBeaconBlock](stateData, slot, cache)
+added = dag.addRawBlock(quarantine, newBlock) do (
+blckRef: BlockRef, signedBlock: altair.TrustedSignedBeaconBlock,
+epochRef: EpochRef):
+# Callback add to fork choice if valid
+attPool.addForkChoice(
+epochRef, blckRef, signedBlock.message, blckRef.slot)
+
blck() = added[]
dag.updateHead(added[], quarantine)
@@ -228,7 +262,10 @@ cli do(slots = SLOTS_PER_EPOCH * 5,

if blockRatio > 0.0:
withTimer(timers[t]):
-proposeBlock(slot)
+if slot < altairTransitionSlot:
+proposePhase0Block(slot)
+else:
+proposeAltairBlock(slot)
if attesterRatio > 0.0:
withTimer(timers[tAttest]):
handleAttestations(slot)
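The simulation's `getNewBlock` above relies on Nim's compile-time `when` dispatch inside a generic proc, the same pattern used for `hashedState`. A minimal, self-contained illustration of that pattern (all names here are made up for illustration, not from the codebase):

  type
    Phase0Block = object
      slot: uint64
    AltairBlock = object
      slot: uint64
      syncBits: uint8

  proc forkName[T](blck: T): string =
    # the branch is chosen at compile time, per instantiation
    when T is Phase0Block:
      result = "phase0"
    elif T is AltairBlock:
      result = "altair"
    else:
      {.error: "unsupported block type".}

  echo forkName(Phase0Block(slot: 1))   # prints: phase0
  echo forkName(AltairBlock(slot: 96))  # prints: altair

With the hard-coded transition at slot 96 above, any simulation run longer than 96 slots (the new default of six epochs, or the CI invocation's --slots=160) exercises both the phase 0 and the Altair proposal paths.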
||||||
|
@ -53,12 +53,12 @@ proc runTest(testName, testDir, unitTestName: string) =
|
|||||||
if hasPostState:
|
if hasPostState:
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
|
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
|
||||||
noRollback)
|
noRollback, FAR_FUTURE_SLOT)
|
||||||
doAssert success, "Failure when applying block " & $i
|
doAssert success, "Failure when applying block " & $i
|
||||||
else:
|
else:
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
|
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
|
||||||
noRollback)
|
noRollback, FAR_FUTURE_SLOT)
|
||||||
doAssert (i + 1 < numBlocks) or not success,
|
doAssert (i + 1 < numBlocks) or not success,
|
||||||
"We didn't expect these invalid blocks to be processed"
|
"We didn't expect these invalid blocks to be processed"
|
||||||
|
|
||||||
|
@ -53,12 +53,12 @@ proc runTest(testName, testDir, unitTestName: string) =
|
|||||||
if hasPostState:
|
if hasPostState:
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
|
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
|
||||||
noRollback)
|
noRollback, FAR_FUTURE_SLOT)
|
||||||
doAssert success, "Failure when applying block " & $i
|
doAssert success, "Failure when applying block " & $i
|
||||||
else:
|
else:
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
|
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
|
||||||
noRollback)
|
noRollback, FAR_FUTURE_SLOT)
|
||||||
doAssert (i + 1 < numBlocks) or not success,
|
doAssert (i + 1 < numBlocks) or not success,
|
||||||
"We didn't expect these invalid blocks to be processed"
|
"We didn't expect these invalid blocks to be processed"
|
||||||
|
|
||||||
|
@ -13,7 +13,8 @@ import
|
|||||||
unittest2,
|
unittest2,
|
||||||
stew/[byteutils, endians2],
|
stew/[byteutils, endians2],
|
||||||
# Internals
|
# Internals
|
||||||
../../beacon_chain/spec/[datatypes, presets],
|
../../beacon_chain/spec/datatypes/base,
|
||||||
|
../../beacon_chain/spec/presets,
|
||||||
# Test utilities
|
# Test utilities
|
||||||
../testutil, ./fixtures_utils
|
../testutil, ./fixtures_utils
|
||||||
|
|
||||||
|
@ -21,7 +21,8 @@ import
|
|||||||
slashing_protection,
|
slashing_protection,
|
||||||
slashing_protection_v1
|
slashing_protection_v1
|
||||||
],
|
],
|
||||||
../../beacon_chain/spec/[datatypes, digest, crypto, presets],
|
../../beacon_chain/spec/datatypes/base,
|
||||||
|
../../beacon_chain/spec/[digest, crypto, presets],
|
||||||
# Test utilies
|
# Test utilies
|
||||||
../testutil
|
../testutil
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
# Nimbus
|
# Nimbus
|
||||||
# Copyright (c) 2018 Status Research & Development GmbH
|
# Copyright (c) 2018-2021 Status Research & Development GmbH
|
||||||
# Licensed under either of
|
# Licensed under either of
|
||||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
|
||||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
|
||||||
@ -14,7 +14,8 @@ import
|
|||||||
chronicles,
|
chronicles,
|
||||||
# Internal
|
# Internal
|
||||||
../../beacon_chain/validators/[slashing_protection, slashing_protection_v2],
|
../../beacon_chain/validators/[slashing_protection, slashing_protection_v2],
|
||||||
../../beacon_chain/spec/[datatypes, digest, crypto, presets],
|
../../beacon_chain/spec/datatypes/base,
|
||||||
|
../../beacon_chain/spec/[digest, crypto, presets],
|
||||||
# Test utilies
|
# Test utilies
|
||||||
../testutil, ../testdbutil,
|
../testutil, ../testdbutil,
|
||||||
../official/fixtures_utils
|
../official/fixtures_utils
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
# Nimbus
|
# Nimbus
|
||||||
# Copyright (c) 2018 Status Research & Development GmbH
|
# Copyright (c) 2018-2021 Status Research & Development GmbH
|
||||||
# Licensed under either of
|
# Licensed under either of
|
||||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
|
||||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
|
||||||
@ -16,7 +16,8 @@ import
|
|||||||
nimcrypto/utils,
|
nimcrypto/utils,
|
||||||
# Internal
|
# Internal
|
||||||
../../beacon_chain/validators/[slashing_protection, slashing_protection_v2],
|
../../beacon_chain/validators/[slashing_protection, slashing_protection_v2],
|
||||||
../../beacon_chain/spec/[datatypes, digest, crypto, presets],
|
../../beacon_chain/spec/datatypes/base,
|
||||||
|
../../beacon_chain/spec/[digest, crypto, presets],
|
||||||
# Test utilies
|
# Test utilies
|
||||||
../testutil
|
../testutil
|
||||||
|
|
||||||
|
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2021 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
@@ -15,7 +15,8 @@ import
   stew/results,
   # Internal
   ../../beacon_chain/validators/slashing_protection,
-  ../../beacon_chain/spec/[datatypes, digest, crypto, presets, helpers],
+  ../../beacon_chain/spec/[crypto, digest, helpers, presets],
+  ../../beacon_chain/spec/datatypes/base,
   # Test utilies
   ../testutil
 
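All three import hunks above make the same adjustment: the tests stop importing the monolithic spec/datatypes module and instead take the fork-independent definitions from spec/datatypes/base, keeping crypto, digest, presets (and helpers where used) as separate imports, presumably because the fork-specific datatypes now live in their own modules. A minimal sketch of the resulting import block in such a test module; only the base/spec split is taken from the diff, the surrounding module is hypothetical:

  # Post-refactor import style for a slashing-protection test (illustrative).
  import
    # Internal
    ../../beacon_chain/validators/slashing_protection,
    ../../beacon_chain/spec/[crypto, digest, presets],
    ../../beacon_chain/spec/datatypes/base,  # fork-independent types only
    # Test utilities
    ../testutil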
@@ -8,6 +8,7 @@
 {.used.}
 
 import
+  chronicles,
   std/[options, sequtils],
   unittest2,
   stew/assign2,
@@ -21,9 +22,6 @@ import
     blockchain_dag, block_quarantine, block_clearance],
   ./testutil, ./testdbutil, ./testblockutil
 
-when isMainModule:
-  import chronicles # or some random compile error happens...
-
 proc `$`(x: BlockRef): string =
   $x.root
 
@@ -124,6 +122,7 @@ suite "Block pool processing" & preset():
       db = makeTestDB(SLOTS_PER_EPOCH)
       dag = init(ChainDAGRef, defaultRuntimePreset, db)
       quarantine = QuarantineRef.init(keys.newRng())
+      nilPhase0Callback: OnPhase0BlockAdded
       state = newClone(dag.headState.data)
       cache = StateCache()
       rewards = RewardInfo()
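The new nilPhase0Callback variable replaces the bare nil literal previously passed to addRawBlock throughout these tests, presumably because the callback parameter now accepts more than one callback type and a plain nil no longer tells the compiler which instantiation to pick, whereas a variable that merely defaults to nil carries a concrete type. A minimal sketch of that pattern; the Phase0Callback/AltairCallback names below are made up for illustration and are not the project's actual definitions:

  # Illustrative only -- hypothetical callback types, not the project's API.
  type
    Phase0Callback = proc(slot: int) {.gcsafe.}
    AltairCallback = proc(slot: int, extra: string) {.gcsafe.}

  proc addBlock(cb: Phase0Callback | AltairCallback) =
    # The generic parameter is fixed per instantiation, so the body can
    # branch at compile time on which callback flavour it received.
    if not cb.isNil:
      when cb is Phase0Callback:
        cb(1)
      else:
        cb(1, "altair")

  var nilPhase0Callback: Phase0Callback  # defaults to nil, but carries a type

  # addBlock(nil)             # rejected: a bare nil doesn't pick a callback type
  addBlock(nilPhase0Callback) # unambiguous: instantiates the phase0 variant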
@@ -143,7 +142,7 @@ suite "Block pool processing" & preset():
 
   test "Simple block add&get" & preset():
     let
-      b1Add = dag.addRawBlock(quarantine, b1, nil)
+      b1Add = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
       b1Get = dag.get(b1.root)
 
     check:
@@ -154,7 +153,7 @@ suite "Block pool processing" & preset():
       dag.heads[0] == b1Add[]
 
     let
-      b2Add = dag.addRawBlock(quarantine, b2, nil)
+      b2Add = dag.addRawBlock(quarantine, b2, nilPhase0Callback)
       b2Get = dag.get(b2.root)
       er = dag.findEpochRef(b1Add[], b1Add[].slot.epoch)
       validators = getStateField(dag.headState.data, validators).lenu64()
@@ -183,7 +182,7 @@ suite "Block pool processing" & preset():
 
     let
       b4 = addTestBlock(state[], b2.root, cache)
-      b4Add = dag.addRawBlock(quarantine, b4, nil)
+      b4Add = dag.addRawBlock(quarantine, b4, nilPhase0Callback)
 
     check:
       b4Add[].parent == b2Add[]
@@ -231,14 +230,14 @@ suite "Block pool processing" & preset():
       blocks[2..<2].len == 0
 
   test "Reverse order block add & get" & preset():
-    let missing = dag.addRawBlock(quarantine, b2, nil)
+    let missing = dag.addRawBlock(quarantine, b2, nilPhase0Callback)
     check: missing.error == (ValidationResult.Ignore, MissingParent)
 
     check:
       dag.get(b2.root).isNone() # Unresolved, shouldn't show up
       FetchRecord(root: b1.root) in quarantine.checkMissing()
 
-    let status = dag.addRawBlock(quarantine, b1, nil)
+    let status = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
 
     check: status.isOk
 
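The "Reverse order block add & get" test above exercises the quarantine round trip: a block whose parent is unknown is ignored with MissingParent, the missing root is surfaced through quarantine.checkMissing() as a FetchRecord, and adding the parent afterwards succeeds. A compressed sketch of that flow, assuming the same suite fixtures (dag, quarantine, b1, b2, nilPhase0Callback) are in scope:

  # Sketch of the flow exercised above; assumes the suite's fixtures exist.
  let missing = dag.addRawBlock(quarantine, b2, nilPhase0Callback)
  doAssert missing.error == (ValidationResult.Ignore, MissingParent)

  # The unknown parent is now queued for fetching...
  doAssert FetchRecord(root: b1.root) in quarantine.checkMissing()

  # ...and adding the parent afterwards succeeds.
  doAssert dag.addRawBlock(quarantine, b1, nilPhase0Callback).isOk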
@@ -275,8 +274,8 @@ suite "Block pool processing" & preset():
 
   test "Adding the same block twice returns a Duplicate error" & preset():
     let
-      b10 = dag.addRawBlock(quarantine, b1, nil)
-      b11 = dag.addRawBlock(quarantine, b1, nil)
+      b10 = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
+      b11 = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
 
     check:
       b11.error == (ValidationResult.Ignore, Duplicate)
@@ -284,7 +283,7 @@ suite "Block pool processing" & preset():
 
   test "updateHead updates head and headState" & preset():
     let
-      b1Add = dag.addRawBlock(quarantine, b1, nil)
+      b1Add = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
 
     dag.updateHead(b1Add[], quarantine)
     dag.pruneAtFinalization()
@@ -295,8 +294,8 @@ suite "Block pool processing" & preset():
 
   test "updateStateData sanity" & preset():
     let
-      b1Add = dag.addRawBlock(quarantine, b1, nil)
-      b2Add = dag.addRawBlock(quarantine, b2, nil)
+      b1Add = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
+      b2Add = dag.addRawBlock(quarantine, b2, nilPhase0Callback)
       bs1 = BlockSlot(blck: b1Add[], slot: b1.message.slot)
       bs1_3 = b1Add[].atSlot(3.Slot)
       bs2_3 = b2Add[].atSlot(3.Slot)
@@ -348,6 +347,7 @@ suite "chain DAG finalization tests" & preset():
       db = makeTestDB(SLOTS_PER_EPOCH)
       dag = init(ChainDAGRef, defaultRuntimePreset, db)
       quarantine = QuarantineRef.init(keys.newRng())
+      nilPhase0Callback: OnPhase0BlockAdded
       cache = StateCache()
       rewards = RewardInfo()
 
@@ -363,7 +363,7 @@ suite "chain DAG finalization tests" & preset():
 
     let lateBlock = addTestBlock(tmpState[], dag.head.root, cache)
     block:
-      let status = dag.addRawBlock(quarantine, blck, nil)
+      let status = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
       check: status.isOk()
 
     assign(tmpState[], dag.headState.data)
@@ -378,7 +378,7 @@ suite "chain DAG finalization tests" & preset():
         tmpState[], dag.head.root, cache,
         attestations = makeFullAttestations(
           tmpState[], dag.head.root, getStateField(tmpState[], slot), cache, {}))
-      let added = dag.addRawBlock(quarantine, blck, nil)
+      let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
       check: added.isOk()
       dag.updateHead(added[], quarantine)
       dag.pruneAtFinalization()
@@ -420,7 +420,7 @@ suite "chain DAG finalization tests" & preset():
     block:
       # The late block is a block whose parent was finalized long ago and thus
       # is no longer a viable head candidate
-      let status = dag.addRawBlock(quarantine, lateBlock, nil)
+      let status = dag.addRawBlock(quarantine, lateBlock, nilPhase0Callback)
       check: status.error == (ValidationResult.Ignore, Unviable)
 
     block:
@@ -449,7 +449,7 @@ suite "chain DAG finalization tests" & preset():
     assign(prestate[], dag.headState.data)
 
     let blck = makeTestBlock(dag.headState.data, dag.head.root, cache)
-    let added = dag.addRawBlock(quarantine, blck, nil)
+    let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
     check: added.isOk()
     dag.updateHead(added[], quarantine)
     dag.pruneAtFinalization()
@@ -468,21 +468,21 @@ suite "chain DAG finalization tests" & preset():
     let blck = makeTestBlock(prestate[], dag.head.parent.root, cache)
 
     # Add block, but don't update head
-    let added = dag.addRawBlock(quarantine, blck, nil)
+    let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
     check: added.isOk()
 
     var
       dag2 = init(ChainDAGRef, defaultRuntimePreset, db)
 
     # check that we can apply the block after the orphaning
-    let added2 = dag2.addRawBlock(quarantine, blck, nil)
+    let added2 = dag2.addRawBlock(quarantine, blck, nilPhase0Callback)
     check: added2.isOk()
 
   test "init with gaps" & preset():
     for blck in makeTestBlocks(
         dag.headState.data, dag.head.root, cache, int(SLOTS_PER_EPOCH * 6 - 2),
         true):
-      let added = dag.addRawBlock(quarantine, blck, nil)
+      let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
       check: added.isOk()
       dag.updateHead(added[], quarantine)
       dag.pruneAtFinalization()
@@ -499,7 +499,7 @@ suite "chain DAG finalization tests" & preset():
         dag.headState.data, dag.head.root, getStateField(dag.headState.data, slot),
         cache, {}))
 
-    let added = dag.addRawBlock(quarantine, blck, nil)
+    let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
     check: added.isOk()
     dag.updateHead(added[], quarantine)
     dag.pruneAtFinalization()