keep track of latest blocks for optimistic sync (#3715)

When launched with `--light-client-enable` the latest blocks are fetched
and optimistic candidate blocks are passed to a callback (log for now).
This helps accelerate syncing in the future (optimistic sync).
This commit is contained in:
Etan Kissling 2022-06-10 16:16:37 +02:00 committed by GitHub
parent cc5f95dbbb
commit 15967c4076
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 1446 additions and 47 deletions

View File

@ -55,6 +55,17 @@ OK: 5/5 Fail: 0/5 Skip: 0/5
+ basics OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Block clearance (light client) [Preset: mainnet]
```diff
+ Delayed finality update OK
+ Error conditions OK
+ Incremental sync OK
+ Initial sync OK
+ Low slot numbers OK
+ Reorg OK
+ Reverse incremental sync OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
## Block pool altair processing [Preset: mainnet]
```diff
+ Invalid signatures [Preset: mainnet] OK
@ -544,4 +555,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 1/1 Fail: 0/1 Skip: 0/1
---TOTAL---
OK: 301/306 Fail: 0/306 Skip: 5/306
OK: 308/313 Fail: 0/313 Skip: 5/313

View File

@ -309,7 +309,9 @@ define CONNECT_TO_NETWORK_IN_DEV_MODE
--network=$(1) $(3) $(GOERLI_TESTNETS_PARAMS) \
--log-level="DEBUG; TRACE:discv5,networking; REQUIRED:none; DISABLED:none" \
--data-dir=build/data/shared_$(1)_$(NODE_ID) \
--serve-light-client-data=1 --import-light-client-data=only-new \
--light-client-enable=on \
--serve-light-client-data=on \
--import-light-client-data=only-new \
--dump $(NODE_PARAMS)
endef

View File

@ -22,7 +22,7 @@ import
blockchain_dag, block_quarantine, exit_pool, attestation_pool,
sync_committee_msg_pool],
./spec/datatypes/base,
./sync/[sync_manager, request_manager],
./sync/[optimistic_sync_light_client, sync_manager, request_manager],
./validators/[action_tracker, validator_monitor, validator_pool],
./rpc/state_ttl_cache
@ -30,9 +30,9 @@ export
osproc, chronos, httpserver, presto, action_tracker,
beacon_clock, beacon_chain_db, conf, light_client,
attestation_pool, sync_committee_msg_pool, validator_pool,
eth2_network, eth1_monitor, request_manager, sync_manager,
eth2_processor, blockchain_dag, block_quarantine, base, exit_pool,
validator_monitor, consensus_manager
eth2_network, eth1_monitor, optimistic_sync_light_client,
request_manager, sync_manager, eth2_processor, blockchain_dag,
block_quarantine, base, exit_pool, validator_monitor, consensus_manager
type
RpcServer* = RpcHttpServer
@ -45,6 +45,7 @@ type
db*: BeaconChainDB
config*: BeaconNodeConf
attachedValidators*: ref ValidatorPool
lcOptSync*: LCOptimisticSync
lightClient*: LightClient
dag*: ChainDAGRef
quarantine*: ref Quarantine

View File

@ -29,11 +29,56 @@ proc initLightClient*(
# because the light client module also handles gossip subscriptions
# for broadcasting light client data as a server.
let lightClient = createLightClient(
node.network, rng, config, cfg,
forkDigests, getBeaconTime, genesis_validators_root)
let
optimisticProcessor = proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock):
Future[void] {.async.} =
debug "New LC optimistic block",
opt = signedBlock.toBlockId(),
dag = node.dag.head.bid,
wallSlot = node.currentSlot
return
optSync = initLCOptimisticSync(
node.network, getBeaconTime, optimisticProcessor,
config.safeSlotsToImportOptimistically)
lightClient = createLightClient(
node.network, rng, config, cfg,
forkDigests, getBeaconTime, genesis_validators_root)
if config.lightClientEnable.get:
proc shouldSyncOptimistically(slot: Slot): bool =
const
# Minimum number of slots to be ahead of DAG to use optimistic sync
minProgress = 8 * SLOTS_PER_EPOCH
# Maximum age of light client optimistic header to use optimistic sync
maxAge = 2 * SLOTS_PER_EPOCH
if slot < getStateField(node.dag.headState, slot) + minProgress:
false
elif getBeaconTime().slotOrZero > slot + maxAge:
false
else:
true
proc onFinalizedHeader(lightClient: LightClient) =
let optimisticHeader = lightClient.optimisticHeader.valueOr:
return
if not shouldSyncOptimistically(optimisticHeader.slot):
return
let finalizedHeader = lightClient.finalizedHeader.valueOr:
return
optSync.setOptimisticHeader(optimisticHeader)
optSync.setFinalizedHeader(finalizedHeader)
proc onOptimisticHeader(lightClient: LightClient) =
let optimisticHeader = lightClient.optimisticHeader.valueOr:
return
if not shouldSyncOptimistically(optimisticHeader.slot):
return
optSync.setOptimisticHeader(optimisticHeader)
lightClient.onFinalizedHeader = onFinalizedHeader
lightClient.onOptimisticHeader = onOptimisticHeader
lightClient.trustedBlockRoot = config.lightClientTrustedBlockRoot
elif config.lightClientTrustedBlockRoot.isSome:
@ -41,12 +86,14 @@ proc initLightClient*(
lightClientEnable = config.lightClientEnable.get,
lightClientTrustedBlockRoot = config.lightClientTrustedBlockRoot
node.lcOptSync = optSync
node.lightClient = lightClient
proc startLightClient*(node: BeaconNode) =
if not node.config.lightClientEnable.get:
return
node.lcOptSync.start()
node.lightClient.start()
proc installLightClientMessageValidators*(node: BeaconNode) =

View File

@ -517,7 +517,7 @@ type
desc: "Modify SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY"
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/sync/optimistic.md#constants
defaultValue: 128
name: "safe-slots-to-import-optimistically" }: uint64
name: "safe-slots-to-import-optimistically" }: uint16
# Same option as appears in Lighthouse and Prysm
# https://lighthouse-book.sigmaprime.io/suggested-fee-recipient.html

View File

@ -0,0 +1,306 @@
# beacon_chain
# Copyright (c) 2019-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
import
std/[deques, math],
chronicles,
../spec/forks,
../beacon_chain_db,
./block_pools_types
export forks, block_pools_types
logScope:
topics = "clearance"
# Clearance (light client)
# ---------------------------------------------
#
# This module validates blocks obtained using the light client sync protocol.
# Those blocks are considered trusted by delegating the full verification to a
# supermajority (> 2/3) of the corresponding sync committee (512 members).
# The validated blocks are downloaded in backwards order into a `deque`.
#
# If the sync committee is trusted, expensive verification already done by the
# sync committee may be skipped:
# - BLS signatures (except the outer block signature not covered by `root`)
# - Verifying whether the state transition function applies
# - `ExecutionPayload` verification
# - `state_root` computation and verification
# Cache of light-client-verified blocks for a sliding window of recent slots.
# Blocks are stored by slot in descending order (index 0 = head slot) and are
# downloaded backwards from the head via the `backfill` expectation.
type LCBlocks* = object
maxSlots: int # max cache.len
cache: Deque[ref ForkedMsgTrustedSignedBeaconBlock] # by slots descending
headSlot: Slot # matches cache[0].slot once block is downloaded
backfill: BeaconBlockSummary # next expected block
finalizedBid: BlockId # latest block id passed to `setFinalizedBid`
func initLCBlocks*(maxSlots: int): LCBlocks =
  ## Create an empty block cache that retains at most `maxSlots` slots.
  ## The deque capacity is rounded up to the next power of two, and the
  ## head slot starts out unset (`FAR_FUTURE_SLOT`).
  let capacity = nextPowerOfTwo(maxSlots)
  LCBlocks(
    maxSlots: maxSlots,
    headSlot: FAR_FUTURE_SLOT,
    cache: initDeque[ref ForkedMsgTrustedSignedBeaconBlock](capacity))
func getHeadSlot*(lcBlocks: LCBlocks): Slot =
## Slot of the current head block (`FAR_FUTURE_SLOT` while unset).
lcBlocks.headSlot
func getFinalizedSlot*(lcBlocks: LCBlocks): Slot =
## Slot of the most recently configured finalized block id.
lcBlocks.finalizedBid.slot
func getFrontfillSlot*(lcBlocks: LCBlocks): Slot =
## Earliest slot covered by the cache window (head slot - cache length + 1).
lcBlocks.headSlot + 1 - lcBlocks.cache.lenu64
func getBackfillSlot*(lcBlocks: LCBlocks): Slot =
  ## Slot of the next block expected during backfill. While no block has
  ## been processed yet (`backfill.slot == FAR_FUTURE_SLOT`) this is the
  ## slot just past the head; afterwards it is clamped to the cache window.
  if lcBlocks.backfill.slot == FAR_FUTURE_SLOT:
    lcBlocks.headSlot + 1
  else:
    max(lcBlocks.backfill.slot, lcBlocks.getFrontfillSlot())
func getBackfillRoot*(lcBlocks: LCBlocks): Option[Eth2Digest] =
  ## Parent root of the next block to request during backfill, or `none`
  ## when no head is set or backfill already reached the window's front.
  if lcBlocks.headSlot != FAR_FUTURE_SLOT and
      lcBlocks.backfill.slot >= lcBlocks.getFrontfillSlot():
    some lcBlocks.backfill.parent_root
  else:
    none(Eth2Digest)
func getCacheIndex(lcBlocks: LCBlocks, slot: Slot): uint64 =
  ## Deque index for `slot`: index 0 is the head slot and indices grow as
  ## slots decrease. Slots at or beyond the head (or with no head set)
  ## map to index 0.
  if lcBlocks.headSlot == FAR_FUTURE_SLOT or slot >= lcBlocks.headSlot:
    0'u64
  else:
    lcBlocks.headSlot - slot
func getBlockAtSlot*(
    lcBlocks: LCBlocks, slot: Slot): Opt[ForkedMsgTrustedSignedBeaconBlock] =
  ## Look up the cached block at exactly `slot`.
  ## Fails for slots below the backfill progress, outside the cache window,
  ## or for slots where no block was cached.
  if slot >= lcBlocks.backfill.slot:
    let idx = lcBlocks.getCacheIndex(slot)
    if idx < lcBlocks.cache.lenu64:
      let cached = lcBlocks.cache[idx]
      if cached != nil:
        return ok cached[]
  err()
func getLatestBlockThroughSlot*(
    lcBlocks: LCBlocks, maxSlot: Slot): Opt[ForkedMsgTrustedSignedBeaconBlock] =
  ## Return the most recent cached block with slot <= `maxSlot`, scanning
  ## the deque from `maxSlot` downwards. Fails when `maxSlot` precedes the
  ## backfill progress or no block is cached in range.
  if maxSlot >= lcBlocks.backfill.slot:
    var idx = lcBlocks.getCacheIndex(maxSlot)
    while idx < lcBlocks.cache.lenu64:
      let cached = lcBlocks.cache[idx]
      if cached != nil:
        return ok cached[]
      inc idx
  err()
proc processBlock(
lcBlocks: var LCBlocks,
signedBlock: ForkySignedBeaconBlock,
isNewBlock = true): Result[void, BlockError] =
## Insert `signedBlock` into the cache. Blocks arrive in backwards order:
## first the head block, then repeatedly the block whose root equals the
## current `backfill.parent_root`. `isNewBlock = false` marks re-processing
## of already-cached blocks (after the head moved) and suppresses some logs.
## Errors: `Duplicate` for known/too-new blocks, `MissingParent` when the
## block does not match the expected backfill root, `UnviableFork` when it
## conflicts with already-accepted chain content.
logScope:
headSlot = lcBlocks.headSlot
backfill = (lcBlocks.backfill.slot, shortLog(lcBlocks.backfill.parent_root))
blck = shortLog(signedBlock.toBlockId())
let startTick = Moment.now()
template blck(): untyped = signedBlock.message
template blockRoot(): untyped = signedBlock.root
# Blocks beyond the head slot are never stored
if blck.slot > lcBlocks.headSlot:
debug "LC block too new"
return err(BlockError.Duplicate)
# Handle head block
if lcBlocks.backfill.slot == FAR_FUTURE_SLOT:
if blck.slot < lcBlocks.headSlot:
if isNewBlock:
debug "Head LC block skipped"
return err(BlockError.MissingParent)
if blockRoot != lcBlocks.backfill.parent_root:
if isNewBlock:
debug "Head LC block from unviable fork"
return err(BlockError.UnviableFork)
const index = 0'u64 # Head block is always mapped to index 0 (never empty)
if index >= lcBlocks.cache.lenu64:
lcBlocks.backfill.slot = blck.slot
debug "Final head LC block"
return ok()
# Advance the backfill expectation to this block's parent
lcBlocks.backfill = blck.toBeaconBlockSummary()
let existing = lcBlocks.cache[index]
if existing != nil:
if blockRoot == existing[].root:
if isNewBlock:
debug "Head LC block already known"
return ok()
warn "Head LC block reorg", existing = existing[]
lcBlocks.cache[index] =
newClone ForkedMsgTrustedSignedBeaconBlock.init(
signedBlock.asMsgTrusted())
debug "Head LC block cached", cacheDur = Moment.now() - startTick
return ok()
# Handle duplicate block
if blck.slot >= lcBlocks.getBackfillSlot():
let index = lcBlocks.getCacheIndex(blck.slot)
doAssert index < lcBlocks.cache.lenu64
let existing = lcBlocks.cache[index]
if existing == nil:
debug "Duplicate LC block for empty slot"
return err(BlockError.UnviableFork)
doAssert blck.slot == existing[].slot
if blockRoot != existing[].root:
debug "Duplicate LC block from unviable fork"
return err(BlockError.UnviableFork)
debug "Duplicate LC block"
return err(BlockError.Duplicate)
# Handle new block
if blck.slot > lcBlocks.backfill.slot:
debug "LC block for empty slot"
return err(BlockError.UnviableFork)
if blockRoot != lcBlocks.backfill.parent_root:
if blck.slot == lcBlocks.backfill.slot:
debug "Final LC block from unviable fork"
return err(BlockError.UnviableFork)
if isNewBlock:
debug "LC block does not match expected backfill root"
return err(BlockError.MissingParent)
if blck.slot == lcBlocks.backfill.slot:
debug "Duplicate final LC block"
return err(BlockError.Duplicate)
let
previousIndex = lcBlocks.getCacheIndex(lcBlocks.backfill.slot)
index = lcBlocks.getCacheIndex(blck.slot)
# Clear cached blocks on slots that the new parent chain skips over
for i in previousIndex + 1 ..< min(index, lcBlocks.cache.lenu64):
let existing = lcBlocks.cache[i]
if existing != nil:
warn "LC block reorg to empty", existing = existing[]
lcBlocks.cache[i] = nil
if index >= lcBlocks.cache.lenu64:
lcBlocks.backfill.slot = blck.slot
debug "Final LC block"
return ok()
# Advance the backfill expectation to this block's parent
lcBlocks.backfill = blck.toBeaconBlockSummary()
let existing = lcBlocks.cache[index]
if existing != nil:
if blockRoot == existing[].root:
if isNewBlock:
debug "LC block already known"
return ok()
warn "LC block reorg", existing = existing[]
lcBlocks.cache[index] =
newClone ForkedMsgTrustedSignedBeaconBlock.init(
signedBlock.asMsgTrusted())
debug "LC block cached", cacheDur = Moment.now() - startTick
ok()
proc setHeadBid*(lcBlocks: var LCBlocks, headBid: BlockId) =
## Move the cache's head to `headBid`, resizing/sliding the slot window and
## resetting the backfill expectation to the new head's root. Cached blocks
## that remain in the window are re-processed against the new expectation.
debug "New LC head block", headBid
if lcBlocks.maxSlots == 0:
discard
elif lcBlocks.headSlot == FAR_FUTURE_SLOT or
headBid.slot >= lcBlocks.headSlot + lcBlocks.maxSlots.uint64 or (
lcBlocks.headSlot - lcBlocks.cache.lenu64 != FAR_FUTURE_SLOT and
headBid.slot <= lcBlocks.headSlot - lcBlocks.cache.lenu64):
# No overlap with the current window (or no window yet): rebuild empty
lcBlocks.cache.clear()
for i in 0 ..< min(headBid.slot + 1, lcBlocks.maxSlots.Slot).int:
lcBlocks.cache.addLast(nil)
elif headBid.slot > lcBlocks.headSlot:
# Head moved forward: prepend empty slots, dropping oldest if at capacity
let numNewSlots = headBid.slot - lcBlocks.headSlot
doAssert numNewSlots <= lcBlocks.maxSlots.uint64
if numNewSlots > lcBlocks.maxSlots.uint64 - lcBlocks.cache.lenu64:
lcBlocks.cache.shrink(
fromLast = numNewSlots.int + lcBlocks.cache.len - lcBlocks.maxSlots)
for i in 0 ..< numNewSlots:
lcBlocks.cache.addFirst(nil)
else:
# Head moved backward (or stayed): trim newer slots, extend at the back
lcBlocks.cache.shrink(fromFirst = (lcBlocks.headSlot - headBid.slot).int)
let startLen = lcBlocks.cache.len
for i in startLen ..< min(headBid.slot + 1, lcBlocks.maxSlots.Slot).int:
lcBlocks.cache.addLast(nil)
lcBlocks.headSlot = headBid.slot
lcBlocks.backfill.slot = FAR_FUTURE_SLOT
lcBlocks.backfill.parent_root = headBid.root
# Re-run retained blocks through processBlock until the chain breaks
for i in 0 ..< lcBlocks.cache.len:
let existing = lcBlocks.cache[i]
if existing != nil:
let res =
withBlck(existing[]):
lcBlocks.processBlock(blck.asSigned(), isNewBlock = false)
if res.isErr:
break
proc setFinalizedBid*(lcBlocks: var LCBlocks, finalizedBid: BlockId) =
## Record `finalizedBid` as finalized. A finalized slot beyond the head
## first moves the head. If a conflicting block is already cached at the
## finalized slot, the entire cache is reset onto the finalized block.
if finalizedBid.slot > lcBlocks.headSlot:
lcBlocks.setHeadBid(finalizedBid)
if finalizedBid != lcBlocks.finalizedBid:
debug "New LC finalized block", finalizedBid
lcBlocks.finalizedBid = finalizedBid
if finalizedBid.slot <= lcBlocks.headSlot and
finalizedBid.slot >= lcBlocks.getBackfillSlot:
let index = lcBlocks.getCacheIndex(finalizedBid.slot)
doAssert index < lcBlocks.cache.lenu64
let existing = lcBlocks.cache[index]
if existing == nil or finalizedBid.root != existing[].root:
# Cached chain disagrees with finality: discard and restart from it
if existing != nil:
error "Finalized LC block reorg", existing = existing[]
else:
error "Finalized LC block reorg"
lcBlocks.cache.clear()
lcBlocks.backfill.reset()
lcBlocks.headSlot.reset()
lcBlocks.setHeadBid(finalizedBid)
proc addBlock*(
lcBlocks: var LCBlocks,
signedBlock: ForkedSignedBeaconBlock): Result[void, BlockError] =
## Add a downloaded block to the cache via `processBlock`. When backfill
## crosses the finalized slot, the block must match `finalizedBid`;
## otherwise the cache is reset onto the finalized block and
## `UnviableFork` is returned. On success, older cached blocks below the
## new block are re-processed to extend the verified chain.
let oldBackfillSlot = lcBlocks.getBackfillSlot()
withBlck(signedBlock):
? lcBlocks.processBlock(blck)
if oldBackfillSlot > lcBlocks.finalizedBid.slot and
lcBlocks.getBackfillSlot() <= lcBlocks.finalizedBid.slot:
if signedBlock.slot != lcBlocks.finalizedBid.slot or
signedBlock.root != lcBlocks.finalizedBid.root:
error "LC finalized block from unviable fork"
lcBlocks.setFinalizedBid(lcBlocks.finalizedBid)
return err(BlockError.UnviableFork)
let slot = signedBlock.slot
# Chain previously-cached older blocks onto the newly accepted block
for i in lcBlocks.getCacheIndex(slot) + 1 ..< lcBlocks.cache.lenu64:
let existing = lcBlocks.cache[i]
if existing != nil:
let res =
withBlck(existing[]):
lcBlocks.processBlock(blck.asSigned(), isNewBlock = false)
if res.isErr:
break
ok()

View File

@ -480,15 +480,31 @@ type
root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block
MsgTrustedSignedBeaconBlock* = object
message*: TrustedBeaconBlock
signature*: ValidatorSig
root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block
TrustedSignedBeaconBlock* = object
message*: TrustedBeaconBlock
signature*: TrustedSig
root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block
SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock
SomeBeaconBlock* = BeaconBlock | SigVerifiedBeaconBlock | TrustedBeaconBlock
SomeBeaconBlockBody* = BeaconBlockBody | SigVerifiedBeaconBlockBody | TrustedBeaconBlockBody
SomeSignedBeaconBlock* =
SignedBeaconBlock |
SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock |
TrustedSignedBeaconBlock
SomeBeaconBlock* =
BeaconBlock |
SigVerifiedBeaconBlock |
TrustedBeaconBlock
SomeBeaconBlockBody* =
BeaconBlockBody |
SigVerifiedBeaconBlockBody |
TrustedBeaconBlockBody
SomeSyncAggregate* = SyncAggregate | TrustedSyncAggregate
@ -729,13 +745,26 @@ func clear*(info: var EpochInfo) =
info.validators.setLen(0)
info.balances = UnslashedParticipatingBalances()
template asSigned*(x: SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock):
SignedBeaconBlock =
template asSigned*(
x: SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock |
TrustedSignedBeaconBlock): SignedBeaconBlock =
isomorphicCast[SignedBeaconBlock](x)
template asSigVerified*(x: SignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock =
template asSigVerified*(
x: SignedBeaconBlock |
MsgTrustedSignedBeaconBlock |
TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock =
isomorphicCast[SigVerifiedSignedBeaconBlock](x)
template asMsgTrusted*(
x: SignedBeaconBlock |
SigVerifiedSignedBeaconBlock |
TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock =
isomorphicCast[MsgTrustedSignedBeaconBlock](x)
template asTrusted*(
x: SignedBeaconBlock | SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock =
x: SignedBeaconBlock |
SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock =
isomorphicCast[TrustedSignedBeaconBlock](x)

View File

@ -313,15 +313,31 @@ type
root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block
MsgTrustedSignedBeaconBlock* = object
message*: TrustedBeaconBlock
signature*: ValidatorSig
root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block
TrustedSignedBeaconBlock* = object
message*: TrustedBeaconBlock
signature*: TrustedSig
root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block
SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock
SomeBeaconBlock* = BeaconBlock | SigVerifiedBeaconBlock | TrustedBeaconBlock
SomeBeaconBlockBody* = BeaconBlockBody | SigVerifiedBeaconBlockBody | TrustedBeaconBlockBody
SomeSignedBeaconBlock* =
SignedBeaconBlock |
SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock |
TrustedSignedBeaconBlock
SomeBeaconBlock* =
BeaconBlock |
SigVerifiedBeaconBlock |
TrustedBeaconBlock
SomeBeaconBlockBody* =
BeaconBlockBody |
SigVerifiedBeaconBlockBody |
TrustedBeaconBlockBody
BlockParams = object
parentHash*: string
@ -378,13 +394,26 @@ func shortLog*(v: SomeSignedBeaconBlock): auto =
signature: shortLog(v.signature)
)
template asSigned*(x: SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock):
SignedBeaconBlock =
template asSigned*(
x: SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock |
TrustedSignedBeaconBlock): SignedBeaconBlock =
isomorphicCast[SignedBeaconBlock](x)
template asSigVerified*(x: SignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock =
template asSigVerified*(
x: SignedBeaconBlock |
MsgTrustedSignedBeaconBlock |
TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock =
isomorphicCast[SigVerifiedSignedBeaconBlock](x)
template asMsgTrusted*(
x: SignedBeaconBlock |
SigVerifiedSignedBeaconBlock |
TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock =
isomorphicCast[MsgTrustedSignedBeaconBlock](x)
template asTrusted*(
x: SignedBeaconBlock | SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock =
x: SignedBeaconBlock |
SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock =
isomorphicCast[TrustedSignedBeaconBlock](x)

View File

@ -218,15 +218,31 @@ type
root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block
MsgTrustedSignedBeaconBlock* = object
message*: TrustedBeaconBlock
signature*: ValidatorSig
root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block
TrustedSignedBeaconBlock* = object
message*: TrustedBeaconBlock
signature*: TrustedSig
root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block
SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock
SomeBeaconBlock* = BeaconBlock | SigVerifiedBeaconBlock | TrustedBeaconBlock
SomeBeaconBlockBody* = BeaconBlockBody | SigVerifiedBeaconBlockBody | TrustedBeaconBlockBody
SomeSignedBeaconBlock* =
SignedBeaconBlock |
SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock |
TrustedSignedBeaconBlock
SomeBeaconBlock* =
BeaconBlock |
SigVerifiedBeaconBlock |
TrustedBeaconBlock
SomeBeaconBlockBody* =
BeaconBlockBody |
SigVerifiedBeaconBlockBody |
TrustedBeaconBlockBody
EpochInfo* = object
## Information about the outcome of epoch processing
@ -261,13 +277,26 @@ func shortLog*(v: SomeSignedBeaconBlock): auto =
signature: shortLog(v.signature)
)
template asSigned*(x: SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock):
SignedBeaconBlock =
template asSigned*(
x: SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock |
TrustedSignedBeaconBlock): SignedBeaconBlock =
isomorphicCast[SignedBeaconBlock](x)
template asSigVerified*(x: SignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock =
template asSigVerified*(
x: SignedBeaconBlock |
MsgTrustedSignedBeaconBlock |
TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock =
isomorphicCast[SigVerifiedSignedBeaconBlock](x)
template asMsgTrusted*(
x: SignedBeaconBlock |
SigVerifiedSignedBeaconBlock |
TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock =
isomorphicCast[MsgTrustedSignedBeaconBlock](x)
template asTrusted*(
x: SignedBeaconBlock | SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock =
x: SignedBeaconBlock |
SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock =
isomorphicCast[TrustedSignedBeaconBlock](x)

View File

@ -131,11 +131,22 @@ type
altair.SigVerifiedSignedBeaconBlock |
bellatrix.SigVerifiedSignedBeaconBlock
ForkyMsgTrustedSignedBeaconBlock* =
phase0.MsgTrustedSignedBeaconBlock |
altair.MsgTrustedSignedBeaconBlock |
bellatrix.MsgTrustedSignedBeaconBlock
ForkyTrustedSignedBeaconBlock* =
phase0.TrustedSignedBeaconBlock |
altair.TrustedSignedBeaconBlock |
bellatrix.TrustedSignedBeaconBlock
ForkedMsgTrustedSignedBeaconBlock* = object
case kind*: BeaconBlockFork
of BeaconBlockFork.Phase0: phase0Data*: phase0.MsgTrustedSignedBeaconBlock
of BeaconBlockFork.Altair: altairData*: altair.MsgTrustedSignedBeaconBlock
of BeaconBlockFork.Bellatrix: bellatrixData*: bellatrix.MsgTrustedSignedBeaconBlock
ForkedTrustedSignedBeaconBlock* = object
case kind*: BeaconBlockFork
of BeaconBlockFork.Phase0: phase0Data*: phase0.TrustedSignedBeaconBlock
@ -145,6 +156,7 @@ type
SomeForkySignedBeaconBlock* =
ForkySignedBeaconBlock |
ForkySigVerifiedSignedBeaconBlock |
ForkyMsgTrustedSignedBeaconBlock |
ForkyTrustedSignedBeaconBlock
EpochInfoFork* {.pure.} = enum
@ -223,6 +235,13 @@ template init*(T: type ForkedSignedBeaconBlock, forked: ForkedBeaconBlock,
root: blockRoot,
signature: signature))
# Wrap a fork-specific msg-trusted block into the forked variant object,
# tagging it with the matching `BeaconBlockFork` discriminator.
template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: phase0.MsgTrustedSignedBeaconBlock): T =
T(kind: BeaconBlockFork.Phase0, phase0Data: blck)
template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: altair.MsgTrustedSignedBeaconBlock): T =
T(kind: BeaconBlockFork.Altair, altairData: blck)
template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: bellatrix.MsgTrustedSignedBeaconBlock): T =
T(kind: BeaconBlockFork.Bellatrix, bellatrixData: blck)
template init*(T: type ForkedTrustedSignedBeaconBlock, blck: phase0.TrustedSignedBeaconBlock): T =
T(kind: BeaconBlockFork.Phase0, phase0Data: blck)
template init*(T: type ForkedTrustedSignedBeaconBlock, blck: altair.TrustedSignedBeaconBlock): T =
@ -233,18 +252,21 @@ template init*(T: type ForkedTrustedSignedBeaconBlock, blck: bellatrix.TrustedSi
template toFork*[T:
phase0.SignedBeaconBlock |
phase0.SigVerifiedSignedBeaconBlock |
phase0.MsgTrustedSignedBeaconBlock |
phase0.TrustedSignedBeaconBlock](
t: type T): BeaconBlockFork =
BeaconBlockFork.Phase0
template toFork*[T:
altair.SignedBeaconBlock |
altair.SigVerifiedSignedBeaconBlock |
altair.MsgTrustedSignedBeaconBlock |
altair.TrustedSignedBeaconBlock](
t: type T): BeaconBlockFork =
BeaconBlockFork.Altair
template toFork*[T:
bellatrix.SignedBeaconBlock |
bellatrix.SigVerifiedSignedBeaconBlock |
bellatrix.MsgTrustedSignedBeaconBlock |
bellatrix.TrustedSignedBeaconBlock](
t: type T): BeaconBlockFork =
BeaconBlockFork.Bellatrix
@ -366,15 +388,25 @@ template atEpoch*(
forkDigests: ForkDigests, epoch: Epoch, cfg: RuntimeConfig): ForkDigest =
forkDigests.atStateFork(cfg.stateForkAtEpoch(epoch))
template asSigned*(x: ForkedTrustedSignedBeaconBlock): ForkedSignedBeaconBlock =
template asSigned*(
x: ForkedMsgTrustedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock): ForkedSignedBeaconBlock =
isomorphicCast[ForkedSignedBeaconBlock](x)
template asTrusted*(x: ForkedSignedBeaconBlock): ForkedTrustedSignedBeaconBlock =
template asMsgTrusted*(
x: ForkedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock): ForkedMsgTrustedSignedBeaconBlock =
isomorphicCast[ForkedMsgTrustedSignedBeaconBlock](x)
template asTrusted*(
x: ForkedSignedBeaconBlock |
ForkedMsgTrustedSignedBeaconBlock): ForkedTrustedSignedBeaconBlock =
isomorphicCast[ForkedTrustedSignedBeaconBlock](x)
template withBlck*(
x: ForkedBeaconBlock | Web3SignerForkedBeaconBlock |
ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock,
ForkedSignedBeaconBlock | ForkedMsgTrustedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock,
body: untyped): untyped =
case x.kind
of BeaconBlockFork.Phase0:
@ -398,38 +430,51 @@ func hash_tree_root*(x: ForkedBeaconBlock): Eth2Digest =
func hash_tree_root*(x: Web3SignerForkedBeaconBlock): Eth2Digest {.borrow.}
template getForkedBlockField*(x: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock, y: untyped): untyped =
template getForkedBlockField*(
x: ForkedSignedBeaconBlock |
ForkedMsgTrustedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock,
y: untyped): untyped =
# unsafeAddr avoids a copy of the field in some cases
(case x.kind
of BeaconBlockFork.Phase0: unsafeAddr x.phase0Data.message.y
of BeaconBlockFork.Altair: unsafeAddr x.altairData.message.y
of BeaconBlockFork.Bellatrix: unsafeAddr x.bellatrixData.message.y)[]
template signature*(x: ForkedSignedBeaconBlock): ValidatorSig =
template signature*(x: ForkedSignedBeaconBlock |
ForkedMsgTrustedSignedBeaconBlock): ValidatorSig =
withBlck(x): blck.signature
template signature*(x: ForkedTrustedSignedBeaconBlock): TrustedSig =
withBlck(x): blck.signature
template root*(x: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): Eth2Digest =
template root*(x: ForkedSignedBeaconBlock |
ForkedMsgTrustedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock): Eth2Digest =
withBlck(x): blck.root
template slot*(x: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): Slot =
template slot*(x: ForkedSignedBeaconBlock |
ForkedMsgTrustedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock): Slot =
withBlck(x): blck.message.slot
template shortLog*(x: ForkedBeaconBlock): auto =
withBlck(x): shortLog(blck)
template shortLog*(x: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): auto =
template shortLog*(x: ForkedSignedBeaconBlock |
ForkedMsgTrustedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock): auto =
withBlck(x): shortLog(blck)
chronicles.formatIt ForkedBeaconBlock: it.shortLog
chronicles.formatIt ForkedSignedBeaconBlock: it.shortLog
chronicles.formatIt ForkedMsgTrustedSignedBeaconBlock: it.shortLog
chronicles.formatIt ForkedTrustedSignedBeaconBlock: it.shortLog
template withStateAndBlck*(
s: ForkedHashedBeaconState,
b: ForkedBeaconBlock | ForkedSignedBeaconBlock |
ForkedMsgTrustedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock,
body: untyped): untyped =
case s.kind
@ -465,10 +510,10 @@ template toBeaconBlockHeader*(
blck.message.toBeaconBlockHeader
template toBeaconBlockHeader*(
blckParam: ForkedTrustedSignedBeaconBlock): BeaconBlockHeader =
## Reduce a given `ForkedTrustedSignedBeaconBlock` to its `BeaconBlockHeader`.
withBlck(blckParam):
blck.toBeaconBlockHeader()
blckParam: ForkedMsgTrustedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock): BeaconBlockHeader =
## Reduce a given signed beacon block to its `BeaconBlockHeader`.
withBlck(blckParam): blck.toBeaconBlockHeader()
func genesisFork*(cfg: RuntimeConfig): Fork =
Fork(
@ -611,8 +656,13 @@ func init*(T: type ForkDigests,
compute_fork_digest(cfg.SHARDING_FORK_VERSION, genesis_validators_root),
)
func toBlockId*(header: BeaconBlockHeader): BlockId =
## Derive a `BlockId` (root + slot) from a block header; the root is the
## header's hash_tree_root, which equals the block root.
BlockId(root: header.hash_tree_root(), slot: header.slot)
func toBlockId*(blck: SomeForkySignedBeaconBlock): BlockId =
## Derive a `BlockId` from a signed block, reusing its cached `root`.
BlockId(root: blck.root, slot: blck.message.slot)
func toBlockId*(blck: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): BlockId =
func toBlockId*(blck: ForkedSignedBeaconBlock |
ForkedMsgTrustedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock): BlockId =
withBlck(blck): BlockId(root: blck.root, slot: blck.message.slot)

View File

@ -597,6 +597,13 @@ func is_merge_transition_complete*(state: bellatrix.BeaconState): bool =
const defaultExecutionPayloadHeader = default(ExecutionPayloadHeader)
state.latest_execution_payload_header != defaultExecutionPayloadHeader
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/sync/optimistic.md#helpers
func is_execution_block*(
body: bellatrix.BeaconBlockBody | bellatrix.TrustedBeaconBlockBody |
bellatrix.SigVerifiedBeaconBlockBody): bool =
## `is_execution_block` helper from the optimistic sync spec (see link
## above): true when the body's execution payload differs from the
## default (empty) payload.
const defaultBellatrixExecutionPayload = default(bellatrix.ExecutionPayload)
body.execution_payload != defaultBellatrixExecutionPayload
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/bellatrix/beacon-chain.md#is_merge_transition_block
func is_merge_transition_block(
state: bellatrix.BeaconState,

View File

@ -0,0 +1,286 @@
# beacon_chain
# Copyright (c) 2019-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
import
chronos,
../consensus_object_pools/block_clearance_light_client,
../networking/eth2_network,
../beacon_clock,
./request_manager
logScope:
topics = "optsync"
type
# Callback receiving each newly selected optimistic candidate block.
MsgTrustedBlockProcessor* =
proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock): Future[void] {.
gcsafe, raises: [Defect].}
# How blocks are currently being fetched (none, via RequestManager, or
# via SyncManager).
SyncStrategy {.pure.} = enum
None,
RequestManager,
SyncManager
# State for light-client-driven optimistic sync: downloads latest blocks
# into `lcBlocks` and reports candidates to `optimisticProcessor`.
LCOptimisticSync* = ref object
network: Eth2Node
getBeaconTime: GetBeaconTimeFn
optimisticProcessor: MsgTrustedBlockProcessor
safeSlotsToImportOptimistically: uint16
lcBlocks: LCBlocks
blockVerifier: request_manager.BlockVerifier
requestManager: RequestManager
finalizedBid, optimisticBid, optimisticCandidateBid: BlockId
finalizedIsExecutionBlock: Option[bool] # cached; none until determinable
syncStrategy: SyncStrategy
syncFut, processFut: Future[void] # processFut != nil while callback runs
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/sync/optimistic.md
proc reportOptimisticCandidateBlock(optSync: LCOptimisticSync) {.gcsafe.} =
## Select the newest block eligible for optimistic import and pass it to
## `optimisticProcessor`. At most one callback runs at a time; when it
## finishes, this proc re-runs to report any newer candidate.
if optSync.processFut != nil:
return
# Check if finalized is execution block (implies that justified is, too)
if optSync.finalizedIsExecutionBlock.isNone:
let
finalizedSlot = optSync.lcBlocks.getFinalizedSlot()
finalizedBlock = optSync.lcBlocks.getBlockAtSlot(finalizedSlot)
if finalizedBlock.isOk:
optSync.finalizedIsExecutionBlock =
withBlck(finalizedBlock.get):
when stateFork >= BeaconStateFork.Bellatrix:
some blck.message.body.is_execution_block()
else:
some false
let
currentSlot = optSync.lcBlocks.getHeadSlot()
signedBlock =
if optSync.finalizedIsExecutionBlock.get(false):
# If finalized is execution block, can import any later block
optSync.lcBlocks.getLatestBlockThroughSlot(currentSlot)
else:
# Else, block must be deep (min `SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY`)
let
minAge = optSync.safeSlotsToImportOptimistically
maxSlot = max(currentSlot, minAge.Slot) - minAge.uint64
optSync.lcBlocks.getLatestBlockThroughSlot(maxSlot)
if signedBlock.isOk:
let bid = signedBlock.get.toBlockId()
# Only report forward progress (strictly newer than last candidate)
if bid.slot > optSync.optimisticCandidateBid.slot:
optSync.optimisticCandidateBid = bid
optSync.processFut = optSync.optimisticProcessor(signedBlock.get)
proc handleFinishedProcess(future: pointer) =
optSync.processFut = nil
optSync.reportOptimisticCandidateBlock()
optSync.processFut.addCallback(handleFinishedProcess)
proc initLCOptimisticSync*(
    network: Eth2Node,
    getBeaconTime: GetBeaconTimeFn,
    optimisticProcessor: MsgTrustedBlockProcessor,
    safeSlotsToImportOptimistically: uint16): LCOptimisticSync =
  ## Create the optimistic sync helper. `optimisticProcessor` receives
  ## candidate blocks; `safeSlotsToImportOptimistically` controls how deep a
  ## block must be before it may be imported while finalized has no
  ## execution payload.
  # Window must cover the safety depth plus some slack around epoch edges
  const numExtraSlots = 2 * SLOTS_PER_EPOCH.int + 1
  let maxSlots = safeSlotsToImportOptimistically.int + numExtraSlots

  let optSync = LCOptimisticSync(
    network: network,
    getBeaconTime: getBeaconTime,
    optimisticProcessor: optimisticProcessor,
    safeSlotsToImportOptimistically: safeSlotsToImportOptimistically,
    lcBlocks: initLCBlocks(maxSlots))

  # Shared by both sync strategies: feed blocks into `lcBlocks`, keep the
  # backfill request pipeline going, and report new candidates.
  proc blockVerifier(signedBlock: ForkedSignedBeaconBlock):
      Future[Result[void, BlockError]] =
    let res = optSync.lcBlocks.addBlock(signedBlock)
    if res.isOk:
      if optSync.syncStrategy == SyncStrategy.RequestManager:
        let root = optSync.lcBlocks.getBackfillRoot()
        if root.isSome:
          # Keep requesting the next missing ancestor
          optSync.requestManager.fetchAncestorBlocks(
            @[FetchRecord(root: root.get)])
        else:
          # Backfill complete; stop the request-manager driven sync loop
          if not optSync.syncFut.finished:
            optSync.syncFut.cancel()

      optSync.reportOptimisticCandidateBlock()

    let resfut = newFuture[Result[void, BlockError]]("lcOptSyncBlockVerifier")
    resfut.complete(res)
    resfut
  optSync.blockVerifier = blockVerifier

  optSync.requestManager = RequestManager.init(network, optSync.blockVerifier)

  optSync
proc start*(optSync: LCOptimisticSync) =
  ## Begin serving ancestor block requests; syncing itself starts once
  ## `setOptimisticHeader` / `setFinalizedHeader` provide a target.
  optSync.requestManager.start()
func supportsRetarget(syncStrategy: SyncStrategy): bool =
  ## Whether an in-flight sync with this strategy can be pointed at a new
  ## target without cancelling it first.
  case syncStrategy
  of SyncStrategy.SyncManager:
    false
  of SyncStrategy.None, SyncStrategy.RequestManager:
    true
proc syncUsingRequestManager(optSync: LCOptimisticSync) {.async.} =
  ## Backfill the missing range one ancestor at a time via `RequestManager`.
  ## Runs until the backfill root is exhausted or the future is cancelled
  ## (e.g., by `blockVerifier` on completion, or by `continueSync` on
  ## retarget).
  let startTick = Moment.now()

  var cancellationRequested = false
  while not cancellationRequested:
    let root = optSync.lcBlocks.getBackfillRoot()
    if root.isNone:
      # Nothing left to backfill
      break

    if optSync.requestManager.inpQueue.empty:
      # Re-issue the request in case a previous attempt stalled
      optSync.requestManager.fetchAncestorBlocks(@[FetchRecord(root: root.get)])

    try:
      await chronos.sleepAsync(chronos.seconds(10))
    except CancelledError:
      # Fix: exception was previously bound to an unused `exc` variable
      cancellationRequested = true

  debug "LC optimistic sync complete",
    headSlot = optSync.lcBlocks.getHeadSlot(),
    finalizedSlot = optSync.lcBlocks.getFinalizedSlot(),
    backfillSlot = optSync.lcBlocks.getBackfillSlot(),
    frontfillSlot = optSync.lcBlocks.getFrontfillSlot(),
    syncStrategy = optSync.syncStrategy,
    cancellationRequested,
    syncDur = Moment.now() - startTick
proc syncUsingSyncManager(optSync: LCOptimisticSync) {.async.} =
  ## Backfill a large missing range with a backward `SyncManager`.
  ## This strategy does not support retargeting; cancellation is signalled
  ## to the manager out-of-band by reporting the frontfill slot as progress.
  let startTick = Moment.now()

  func getLocalHeadSlot(): Slot =
    optSync.lcBlocks.getHeadSlot() + 1

  proc getLocalWallSlot(): Slot =
    optSync.getBeaconTime().slotOrZero

  var cancellationRequested = false

  func getProgressSlot(): Slot =
    if not cancellationRequested:
      optSync.lcBlocks.getBackfillSlot()
    else:
      # Report out-of-band completion of sync
      optSync.lcBlocks.getFrontfillSlot()

  func getFinalizedSlot(): Slot =
    getProgressSlot()

  func getBackfillSlot(): Slot =
    getProgressSlot()

  func getFrontfillSlot(): Slot =
    optSync.lcBlocks.getFrontfillSlot()

  let lcOptSyncManager = newSyncManager[Peer, PeerID](
    optSync.network.peerPool, SyncQueueKind.Backward, getLocalHeadSlot,
    getLocalWallSlot, getFinalizedSlot, getBackfillSlot, getFrontfillSlot,
    progressPivot = optSync.lcBlocks.getHeadSlot(), optSync.blockVerifier,
    maxHeadAge = 0, flags = {SyncManagerFlag.NoMonitor}, ident = "lcOptSync")
  lcOptSyncManager.start()
  while lcOptSyncManager.inProgress:
    try:
      await chronos.sleepAsync(chronos.seconds(10))
    except CancelledError:
      # Fix: exception was previously bound to an unused `exc` variable
      cancellationRequested = true

  debug "LC optimistic sync complete",
    headSlot = optSync.lcBlocks.getHeadSlot(),
    finalizedSlot = optSync.lcBlocks.getFinalizedSlot(),
    backfillSlot = optSync.lcBlocks.getBackfillSlot(),
    frontfillSlot = optSync.lcBlocks.getFrontfillSlot(),
    syncStrategy = optSync.syncStrategy,
    cancellationRequested,
    syncDur = Moment.now() - startTick
proc continueSync(optSync: LCOptimisticSync) {.gcsafe.} =
  ## Drive syncing toward the current `finalizedBid` / `optimisticBid`
  ## targets: cancel an obsolete sync, update the `lcBlocks` window, and
  ## (re)start a sync task with an appropriate strategy. Re-entered from
  ## the sync future's completion callback until fully caught up.
  let
    currentHeadSlot = optSync.lcBlocks.getHeadSlot()
    targetHeadSlot = optSync.optimisticBid.slot
    headDiff =
      if targetHeadSlot > currentHeadSlot:
        targetHeadSlot - currentHeadSlot
      else:
        currentHeadSlot - targetHeadSlot

    currentFinalizedSlot = optSync.lcBlocks.getFinalizedSlot()
    targetFinalizedSlot = optSync.finalizedBid.slot

    backfillSlot = optSync.lcBlocks.getBackfillSlot()
    frontfillSlot = optSync.lcBlocks.getFrontfillSlot()
    syncDistance =
      if backfillSlot > frontfillSlot:
        backfillSlot - frontfillSlot
      else:
        0

  # If sync is complete, work is done
  if currentHeadSlot == targetHeadSlot and
      currentFinalizedSlot == targetFinalizedSlot and
      syncDistance == 0:
    return

  # Cancel ongoing sync if sync target jumped
  if headDiff >= SLOTS_PER_EPOCH and optSync.syncFut != nil:
    if not optSync.syncFut.finished:
      optSync.syncFut.cancel()
    # Completion callback of the cancelled future will call back in here
    return

  # When retargeting ongoing sync is not possible, cancel on finality change
  if not optSync.syncStrategy.supportsRetarget:
    if currentFinalizedSlot != targetFinalizedSlot and optSync.syncFut != nil:
      if not optSync.syncFut.finished:
        optSync.syncFut.cancel()
      return

  # Set new sync target
  let
    finalizedBid = optSync.finalizedBid
    optimisticBid = optSync.optimisticBid
  doAssert optimisticBid.slot >= finalizedBid.slot  # enforced by setters
  if optSync.lcBlocks.getHeadSlot() != optimisticBid.slot:
    optSync.lcBlocks.setHeadBid(optimisticBid)
  if optSync.lcBlocks.getFinalizedSlot() != finalizedBid.slot:
    optSync.lcBlocks.setFinalizedBid(finalizedBid)
    # New finalized block: invalidate cached execution-block check and
    # re-evaluate which candidate may be reported
    optSync.finalizedIsExecutionBlock.reset()
    optSync.reportOptimisticCandidateBlock()

  if optSync.syncFut == nil:
    # Select sync strategy
    optSync.syncFut =
      if headDiff >= SLOTS_PER_EPOCH:
        optSync.syncStrategy = SyncStrategy.SyncManager
        optSync.syncUsingSyncManager()
      else:
        optSync.syncStrategy = SyncStrategy.RequestManager
        optSync.syncUsingRequestManager()

    # Continue syncing until complete
    proc handleFinishedSync(future: pointer) =
      optSync.syncStrategy.reset()
      optSync.syncFut = nil
      optSync.continueSync()
    optSync.syncFut.addCallback(handleFinishedSync)
proc setOptimisticHeader*(
    optSync: LCOptimisticSync, optimisticHeader: BeaconBlockHeader) =
  ## Record the newest optimistic header and retarget syncing toward it.
  let bid = optimisticHeader.toBlockId()
  optSync.optimisticBid = bid
  optSync.continueSync()
proc setFinalizedHeader*(
    optSync: LCOptimisticSync, finalizedHeader: BeaconBlockHeader) =
  ## Record the newest finalized header and retarget syncing.
  ## The optimistic target is never allowed to fall behind finality.
  let bid = finalizedHeader.toBlockId()
  optSync.finalizedBid = bid
  if bid.slot > optSync.optimisticBid.slot:
    optSync.optimisticBid = bid
  optSync.continueSync()

View File

@ -15,7 +15,7 @@ import
../networking/eth2_network,
../consensus_object_pools/block_quarantine,
"."/sync_protocol, "."/sync_manager
export sync_manager
export block_quarantine, sync_manager
logScope:
topics = "requman"

View File

@ -12,7 +12,7 @@ import stew/[results, base10], chronos, chronicles
import
../spec/datatypes/[phase0, altair],
../spec/eth2_apis/rest_types,
../spec/[helpers, forks],
../spec/[helpers, forks, network],
../networking/[peer_pool, peer_scores, eth2_network],
../beacon_clock,
"."/[sync_protocol, sync_queue]

View File

@ -676,7 +676,9 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
${STOP_AT_EPOCH_FLAG} \
--rest-port="$(( BASE_REST_PORT + NUM_NODE ))" \
--metrics-port="$(( BASE_METRICS_PORT + NUM_NODE ))" \
--serve-light-client-data=1 --import-light-client-data=only-new \
--light-client-enable=on \
--serve-light-client-data=on \
--import-light-client-data=only-new \
${EXTRA_ARGS} \
&> "${DATA_DIR}/log${NUM_NODE}.txt" &

View File

@ -15,6 +15,7 @@ import # Unit test
./test_attestation_pool,
./test_beacon_chain_db,
./test_beacon_time,
./test_block_clearance_light_client,
./test_block_dag,
./test_block_processor,
./test_block_quarantine,

View File

@ -0,0 +1,599 @@
# beacon_chain
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
{.used.}
import
# Status libraries
eth/keys, taskpools,
# Beacon chain internals
../beacon_chain/consensus_object_pools/
[block_clearance_light_client, block_clearance,
block_quarantine, blockchain_dag],
../beacon_chain/spec/state_transition,
# Test utilities
./testutil, ./testdbutil
suite "Block clearance (light client)" & preset():
  let
    # Fork at epoch 1 so tests cover both phase0 and Altair blocks
    cfg = block:
      var res = defaultRuntimeConfig
      res.ALTAIR_FORK_EPOCH = GENESIS_EPOCH + 1
      res
    taskpool = Taskpool.new()  # shared signature-verification thread pool
proc newTestDag(): ChainDAGRef =
const num_validators = SLOTS_PER_EPOCH
let
validatorMonitor = newClone(ValidatorMonitor.init())
dag = ChainDAGRef.init(
cfg, makeTestDB(num_validators), validatorMonitor, {})
dag
  proc addBlocks(
      dag: ChainDAGRef,
      numBlocks: int,
      finalizedCheckpoints: var seq[Checkpoint],
      syncCommitteeRatio = 0.0,
      numSkippedSlots = 0.uint64) =
    ## Extend `dag` by `numBlocks` blocks (optionally after `numSkippedSlots`
    ## empty slots), updating head after each one. Every newly reached
    ## finalized checkpoint is appended to `finalizedCheckpoints`.
    let quarantine = newClone(Quarantine.init())
    var
      cache: StateCache
      verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)
    if numSkippedSlots > 0:
      # Advance the head state across the empty slots first
      var info: ForkedEpochInfo
      let slot = getStateField(dag.headState, slot) + numSkippedSlots
      process_slots(
        cfg, dag.headState, slot, cache, info, flags = {}).expect("no failure")
    for blck in makeTestBlocks(dag.headState, cache, numBlocks,
                               attested = true, syncCommitteeRatio, cfg):
      let added =
        case blck.kind
        of BeaconBlockFork.Phase0:
          const nilCallback = OnPhase0BlockAdded(nil)
          dag.addHeadBlock(verifier, blck.phase0Data, nilCallback)
        of BeaconBlockFork.Altair:
          const nilCallback = OnAltairBlockAdded(nil)
          dag.addHeadBlock(verifier, blck.altairData, nilCallback)
        of BeaconBlockFork.Bellatrix:
          const nilCallback = OnBellatrixBlockAdded(nil)
          dag.addHeadBlock(verifier, blck.bellatrixData, nilCallback)
      check: added.isOk()
      dag.updateHead(added[], quarantine[])

      # Track each distinct finalized checkpoint as finality advances
      withState(dag.headState):
        if finalizedCheckpoints.len == 0 or
            state.data.finalized_checkpoint != finalizedCheckpoints[^1]:
          finalizedCheckpoints.add(state.data.finalized_checkpoint)
  proc checkBlocks(lcBlocks: LCBlocks, dag: ChainDAGRef, slots: Slice[Slot]) =
    ## Verify that `lcBlocks` agrees with `dag` for every slot in `slots`:
    ## block presence must match, and where a block exists the roots of the
    ## exact-slot and latest-through-slot lookups must match the DAG's.
    for slot in slots.a .. slots.b:
      let
        latestLcBlck = lcBlocks.getLatestBlockThroughSlot(slot)
        lcBlck = lcBlocks.getBlockAtSlot(slot)
        bsi = dag.getBlockIdAtSlot(slot)
        dagBlck =
          if bsi.isOk:
            dag.getForkedBlock(bsi.get.bid)
          else:
            Opt[ForkedTrustedSignedBeaconBlock].err()
      check:
        lcBlck.isOk == dagBlck.isOk
        lcBlck.isOk == latestLcBlck.isOk
      if lcBlck.isOk:
        check:
          lcBlck.get.root == dagBlck.get.root
          lcBlck.get.root == latestLcBlck.get.root
  setup:
    # Fresh DAG with 200 blocks for every test, plus the list of finalized
    # checkpoints reached while building it
    let dag = newTestDag()
    var finalizedCheckpoints: seq[Checkpoint] = @[]
    dag.addBlocks(200, finalizedCheckpoints)
  test "Initial sync":
    # Sync the full `maxSlots` window from scratch, verifying the head /
    # finalized / frontfill / backfill slot trackers at every stage.
    const maxSlots = 160
    var lcBlocks = initLCBlocks(maxSlots)
    let minSlot = dag.head.slot + 1 - maxSlots
    check:
      lcBlocks.getHeadSlot() == FAR_FUTURE_SLOT
      lcBlocks.getFinalizedSlot() == GENESIS_SLOT
      lcBlocks.getFrontfillSlot() == GENESIS_SLOT
      lcBlocks.getBackfillSlot() == GENESIS_SLOT

    # Setting the head defines the sliding window [minSlot, head]
    lcBlocks.setHeadBid(dag.head.bid)
    check:
      lcBlocks.getHeadSlot() == dag.head.slot
      lcBlocks.getFinalizedSlot() == GENESIS_SLOT
      lcBlocks.getFrontfillSlot() == minSlot
      lcBlocks.getBackfillSlot() == dag.head.slot + 1
    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
    check:
      lcBlocks.getHeadSlot() == dag.head.slot
      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() == minSlot
      lcBlocks.getBackfillSlot() == dag.head.slot + 1

    # Backfill from head toward frontfill; backfill slot tracks progress
    var bid = dag.head.bid
    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
      let
        bdata = dag.getForkedBlock(bid).valueOr:
          break
        res = lcBlocks.addBlock(bdata.asSigned())
      check:
        res.isOk
        lcBlocks.getHeadSlot() == dag.head.slot
        lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
        lcBlocks.getFrontfillSlot() == minSlot
        lcBlocks.getBackfillSlot() == max(bdata.slot, minSlot)
      bid = dag.parent(bid).valueOr:
        break

    check:
      lcBlocks.getHeadSlot() == dag.head.slot
      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() == minSlot
      lcBlocks.getBackfillSlot() == minSlot
    lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot)
  test "Delayed finality update":
    # Backfill first without any finality info, then replay all observed
    # finalized checkpoints afterwards; end state must match regular sync.
    const maxSlots = 160
    var lcBlocks = initLCBlocks(maxSlots)
    let minSlot = dag.head.slot + 1 - maxSlots
    lcBlocks.setHeadBid(dag.head.bid)
    var bid = dag.head.bid
    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
      let
        bdata = dag.getForkedBlock(bid).valueOr:
          break
        res = lcBlocks.addBlock(bdata.asSigned())
      check res.isOk
      bid = dag.parent(bid).valueOr:
        break

    # Deliver the finality updates late, in the order they occurred
    for finalizedCheckpoint in finalizedCheckpoints:
      let bsi = dag.getBlockIdAtSlot(finalizedCheckpoint.epoch.start_slot)
      check bsi.isOk
      lcBlocks.setFinalizedBid(bsi.get.bid)

    check:
      lcBlocks.getHeadSlot() == dag.head.slot
      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() == minSlot
      lcBlocks.getBackfillSlot() == minSlot
    lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot)
test "Incremental sync":
const maxSlots = 160
var lcBlocks = initLCBlocks(maxSlots)
let
oldHeadSlot = dag.head.slot
oldMinSlot = dag.head.slot + 1 - maxSlots
lcBlocks.setHeadBid(dag.head.bid)
lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
var bid = dag.head.bid
while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
let
bdata = dag.getForkedBlock(bid).valueOr:
break
res = lcBlocks.addBlock(bdata.asSigned())
check res.isOk
bid = dag.parent(bid).valueOr:
break
dag.addBlocks(20, finalizedCheckpoints)
lcBlocks.setHeadBid(dag.head.bid)
lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
let newMinSlot = dag.head.slot + 1 - maxSlots
check:
lcBlocks.getHeadSlot() == dag.head.slot
lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
lcBlocks.getFrontfillSlot() == newMinSlot
lcBlocks.getBackfillSlot() == dag.head.slot + 1
bid = dag.head.bid
while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
let
bdata = dag.getForkedBlock(bid).valueOr:
break
res = lcBlocks.addBlock(bdata.asSigned())
check res.isOk
bid = dag.parent(bid).valueOr:
break
check:
lcBlocks.getHeadSlot() == dag.head.slot
lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
lcBlocks.getFrontfillSlot() == newMinSlot
lcBlocks.getBackfillSlot() == newMinSlot
lcBlocks.checkBlocks(dag, newMinSlot .. dag.head.slot)
dag.addBlocks(200, finalizedCheckpoints)
lcBlocks.setHeadBid(dag.head.bid)
lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
let minSlot = dag.head.slot + 1 - maxSlots
check:
lcBlocks.getHeadSlot() == dag.head.slot
lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
lcBlocks.getFrontfillSlot() == minSlot
lcBlocks.getBackfillSlot() == dag.head.slot + 1
bid = dag.head.bid
while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
let
bdata = dag.getForkedBlock(bid).valueOr:
break
res = lcBlocks.addBlock(bdata.asSigned())
check res.isOk
bid = dag.parent(bid).valueOr:
break
check:
lcBlocks.getHeadSlot() == dag.head.slot
lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
lcBlocks.getFrontfillSlot() == minSlot
lcBlocks.getBackfillSlot() == minSlot
lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot)
  test "Reverse incremental sync":
    # Sync to a newer head first, then retarget backward to an older head;
    # backfill continues from where the previous pass stopped.
    const maxSlots = 160
    var lcBlocks = initLCBlocks(maxSlots)
    let
      # Remember the older head/finalized targets before extending the DAG
      newHeadBid = dag.head.bid
      newFinalizedBid = dag.finalizedHead.blck.bid

    dag.addBlocks(20, finalizedCheckpoints)
    lcBlocks.setHeadBid(dag.head.bid)
    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
    let oldMinSlot = dag.head.slot + 1 - maxSlots
    var bid = dag.head.bid
    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
      let
        bdata = dag.getForkedBlock(bid).valueOr:
          break
        res = lcBlocks.addBlock(bdata.asSigned())
      check res.isOk
      bid = dag.parent(bid).valueOr:
        break
    check:
      lcBlocks.getHeadSlot() == dag.head.slot
      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() == oldMinSlot
      lcBlocks.getBackfillSlot() == oldMinSlot
    lcBlocks.checkBlocks(dag, oldMinSlot .. dag.head.slot)

    # Retarget to the older head: window shifts back, existing data is kept
    # and backfill resumes below the previously reached slot
    lcBlocks.setHeadBid(newHeadBid)
    lcBlocks.setFinalizedBid(newFinalizedBid)
    let newMinSlot = newHeadBid.slot + 1 - maxSlots
    check:
      lcBlocks.getHeadSlot() == newHeadBid.slot
      lcBlocks.getFinalizedSlot() == newFinalizedBid.slot
      lcBlocks.getFrontfillSlot() == newMinSlot
      lcBlocks.getBackfillSlot() == oldMinSlot
    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
      let
        bdata = dag.getForkedBlock(bid).valueOr:
          break
        res = lcBlocks.addBlock(bdata.asSigned())
      check res.isOk
      bid = dag.parent(bid).valueOr:
        break
    check:
      lcBlocks.getHeadSlot() == newHeadBid.slot
      lcBlocks.getFinalizedSlot() == newFinalizedBid.slot
      lcBlocks.getFrontfillSlot() == newMinSlot
      lcBlocks.getBackfillSlot() == newMinSlot
    lcBlocks.checkBlocks(dag, newMinSlot .. newHeadBid.slot)
  test "Reorg":
    # Fully sync against `dag`, then switch to an unrelated chain (`dag2`)
    # and back again, verifying the window is rebuilt correctly each time.
    const maxSlots = 160
    var lcBlocks = initLCBlocks(maxSlots)
    let minSlot = dag.head.slot + 1 - maxSlots
    lcBlocks.setHeadBid(dag.head.bid)
    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
    var bid = dag.head.bid
    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
      let
        bdata = dag.getForkedBlock(bid).valueOr:
          break
        res = lcBlocks.addBlock(bdata.asSigned())
      check res.isOk
      bid = dag.parent(bid).valueOr:
        break
    check:
      lcBlocks.getHeadSlot() == dag.head.slot
      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() == minSlot
      lcBlocks.getBackfillSlot() == minSlot
    lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot)

    # Reorg to an independent chain of the same length
    let dag2 = newTestDag()
    var finalizedCheckpoints2: seq[Checkpoint] = @[]
    dag2.addBlocks(200, finalizedCheckpoints2, syncCommitteeRatio = 0.1)
    lcBlocks.setHeadBid(dag2.head.bid)
    lcBlocks.setFinalizedBid(dag2.finalizedHead.blck.bid)
    check:
      lcBlocks.getHeadSlot() == dag2.head.slot
      lcBlocks.getFinalizedSlot() == dag2.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() == minSlot
      lcBlocks.getBackfillSlot() == dag2.head.slot + 1
    bid = dag2.head.bid
    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
      let
        bdata = dag2.getForkedBlock(bid).valueOr:
          break
        res = lcBlocks.addBlock(bdata.asSigned())
      check res.isOk
      bid = dag2.parent(bid).valueOr:
        break
    check:
      lcBlocks.getHeadSlot() == dag2.head.slot
      lcBlocks.getFinalizedSlot() == dag2.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() == minSlot
      lcBlocks.getBackfillSlot() == minSlot
    lcBlocks.checkBlocks(dag2, minSlot .. dag2.head.slot)

    # Reorg back to the original chain via its finalized block; head is
    # clamped down to the new finalized slot
    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
    check:
      lcBlocks.getHeadSlot() == dag.finalizedHead.blck.slot
      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() ==
        max(dag.finalizedHead.slot, maxSlots.Slot) + 1 - maxSlots
      lcBlocks.getBackfillSlot() == dag.finalizedHead.blck.slot + 1
    bid = dag.finalizedHead.blck.bid
    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
      let
        bdata = dag.getForkedBlock(bid).valueOr:
          break
        res = lcBlocks.addBlock(bdata.asSigned())
      check res.isOk
      bid = dag.parent(bid).valueOr:
        break

    # Finally restore the original head and finish backfilling
    lcBlocks.setHeadBid(dag.head.bid)
    bid = dag.head.bid
    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
      let
        bdata = dag.getForkedBlock(bid).valueOr:
          break
        res = lcBlocks.addBlock(bdata.asSigned())
      check res.isOk
      bid = dag.parent(bid).valueOr:
        break
    check:
      lcBlocks.getHeadSlot() == dag.head.slot
      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() == minSlot
      lcBlocks.getBackfillSlot() == minSlot
    lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot)
  test "Low slot numbers":
    const maxSlots = 320 # DAG slot numbers are smaller than `maxSlots`
    # With a window larger than the chain, frontfill stays at genesis and
    # the whole chain can be backfilled.
    var lcBlocks = initLCBlocks(maxSlots)
    let
      oldHeadBid = dag.head.bid
      oldFinalizedBid = dag.finalizedHead.blck.bid
    lcBlocks.setHeadBid(dag.head.bid)
    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
    var bid = dag.head.bid
    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
      let
        bdata = dag.getForkedBlock(bid).valueOr:
          break
        res = lcBlocks.addBlock(bdata.asSigned())
      check res.isOk
      bid = dag.parent(bid).valueOr:
        break
    check:
      lcBlocks.getHeadSlot() == dag.head.slot
      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() == GENESIS_SLOT
      lcBlocks.getBackfillSlot() == GENESIS_SLOT
    lcBlocks.checkBlocks(dag, GENESIS_SLOT .. dag.head.slot)

    # Extend and resync; still everything fits within the window
    dag.addBlocks(20, finalizedCheckpoints)
    lcBlocks.setHeadBid(dag.head.bid)
    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
    bid = dag.head.bid
    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
      let
        bdata = dag.getForkedBlock(bid).valueOr:
          break
        res = lcBlocks.addBlock(bdata.asSigned())
      check res.isOk
      bid = dag.parent(bid).valueOr:
        break
    check:
      lcBlocks.getHeadSlot() == dag.head.slot
      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
      lcBlocks.getFrontfillSlot() == GENESIS_SLOT
      lcBlocks.getBackfillSlot() == GENESIS_SLOT

    # Moving targets back to the shorter chain keeps genesis frontfill
    lcBlocks.setHeadBid(oldHeadBid)
    lcBlocks.setFinalizedBid(oldFinalizedBid)
    check:
      lcBlocks.getHeadSlot() == oldHeadBid.slot
      lcBlocks.getFinalizedSlot() == oldFinalizedBid.slot
      lcBlocks.getFrontfillSlot() == GENESIS_SLOT
      lcBlocks.getBackfillSlot() == GENESIS_SLOT
test "Error conditions":
let dag2 = newTestDag()
var finalizedCheckpoints2: seq[Checkpoint] = @[]
dag2.addBlocks(200, finalizedCheckpoints2, syncCommitteeRatio = 0.1)
const maxSlots = 2
var lcBlocks = initLCBlocks(maxSlots)
check:
lcBlocks.getBlockAtSlot(GENESIS_SLOT).isErr
lcBlocks.getBlockAtSlot(FAR_FUTURE_SLOT).isErr
lcBlocks.getLatestBlockThroughSlot(GENESIS_SLOT).isErr
lcBlocks.getLatestBlockThroughSlot(FAR_FUTURE_SLOT).isErr
lcBlocks.setHeadBid(dag.head.bid)
lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
check:
lcBlocks.getBlockAtSlot(GENESIS_SLOT).isErr
lcBlocks.getBlockAtSlot(FAR_FUTURE_SLOT).isErr
lcBlocks.getBlockAtSlot(dag.head.slot).isErr
lcBlocks.getBlockAtSlot(dag.finalizedHead.blck.slot).isErr
lcBlocks.getLatestBlockThroughSlot(GENESIS_SLOT).isErr
lcBlocks.getLatestBlockThroughSlot(FAR_FUTURE_SLOT).isErr
lcBlocks.getLatestBlockThroughSlot(dag.head.slot).isErr
lcBlocks.getLatestBlockThroughSlot(dag.finalizedHead.blck.slot).isErr
let
parentBid = dag.parent(dag.head.bid).expect("Parent exists")
parentBdata = dag.getForkedBlock(parentBid).expect("Parent block exists")
var res = lcBlocks.addBlock(parentBdata.asSigned())
check:
res.isErr
res.error == BlockError.MissingParent
lcBlocks.getBackfillSlot() == dag.head.slot + 1
let bdata2 = dag2.getForkedBlock(dag2.head.bid).expect("DAG 2 block exists")
res = lcBlocks.addBlock(bdata2.asSigned())
check:
res.isErr
res.error == BlockError.UnviableFork
lcBlocks.getBackfillSlot() == dag.head.slot + 1
let bdata = dag.getForkedBlock(dag.head.bid).expect("DAG block exists")
res = lcBlocks.addBlock(bdata.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == dag.head.slot
res = lcBlocks.addBlock(bdata2.asSigned())
check:
res.isErr
res.error == BlockError.UnviableFork
lcBlocks.getBackfillSlot() == dag.head.slot
res = lcBlocks.addBlock(bdata.asSigned())
check:
res.isErr
res.error == BlockError.Duplicate
lcBlocks.getBackfillSlot() == dag.head.slot
let
onePastBid = dag.parent(parentBid).expect("Parent of parent exists")
onePastBdata = dag.getForkedBlock(onePastBid).expect("Block exists")
res = lcBlocks.addBlock(onePastBdata.asSigned())
check:
res.isErr
res.error == BlockError.MissingParent
lcBlocks.getBackfillSlot() == dag.head.slot
res = lcBlocks.addBlock(parentBdata.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == parentBdata.slot
lcBlocks.getBlockAtSlot(parentBdata.slot).isOk
lcBlocks.getLatestBlockThroughSlot(parentBdata.slot).isOk
res = lcBlocks.addBlock(onePastBdata.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == dag.head.slot + 1 - maxSlots
lcBlocks.getBlockAtSlot(onePastBdata.slot).isErr
lcBlocks.getLatestBlockThroughSlot(onePastBdata.slot).isErr
res = lcBlocks.addBlock(onePastBdata.asSigned())
check:
res.isErr
res.error == BlockError.Duplicate
lcBlocks.getBackfillSlot() == dag.head.slot + 1 - maxSlots
let oldHeadBid = dag.head.bid
dag.addBlocks(1, finalizedCheckpoints, numSkippedSlots = 3) # ---X
dag2.addBlocks(2, finalizedCheckpoints2, numSkippedSlots = 2) # --XX
lcBlocks.setHeadBid(dag.head.bid)
lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
let newBdata = dag.getForkedBlock(dag.head.bid).expect("New block ok")
res = lcBlocks.addBlock(newBdata.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == dag.head.slot
res = lcBlocks.addBlock(bdata.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == dag.head.slot + 1 - maxSlots
lcBlocks.getBlockAtSlot(dag.head.slot).isOk
lcBlocks.getBlockAtSlot(dag.head.slot - 1).isErr
lcBlocks.getBlockAtSlot(dag.head.slot - 2).isErr
let
newParentBid2 = dag2.parent(dag2.head.bid).expect("New parent 2 exists")
newParentBdata2 = dag2.getForkedBlock(newParentBid2).expect("Parent 2 ok")
res = lcBlocks.addBlock(newParentBdata2.asSigned())
check:
res.isErr
res.error == BlockError.UnviableFork
lcBlocks.getBackfillSlot() == dag.head.slot + 1 - maxSlots
lcBlocks.setHeadBid(dag2.head.bid)
lcBlocks.setFinalizedBid(newParentBid2)
let newBdata2 = dag2.getForkedBlock(dag2.head.bid).expect("New block 2 ok")
res = lcBlocks.addBlock(newBdata2.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == dag2.head.slot
res = lcBlocks.addBlock(newParentBdata2.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == dag2.head.slot + 1 - maxSlots
lcBlocks.setHeadBid(dag.head.bid)
res = lcBlocks.addBlock(newBdata.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == dag.head.slot
res = lcBlocks.addBlock(bdata.asSigned())
check:
res.isErr
res.error == BlockError.UnviableFork
lcBlocks.getHeadSlot() == newParentBid2.slot
lcBlocks.getFinalizedSlot() == newParentBid2.slot
lcBlocks.getFrontfillSlot() == newParentBid2.slot + 1 - maxSlots
lcBlocks.getBackfillSlot() == newParentBid2.slot + 1
res = lcBlocks.addBlock(newParentBdata2.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == newParentBid2.slot
res = lcBlocks.addBlock(bdata2.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == newParentBid2.slot + 1 - maxSlots
lcBlocks.setHeadBid(dag2.head.bid)
lcBlocks.setFinalizedBid(oldHeadBid)
res = lcBlocks.addBlock(newBdata2.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == dag2.head.slot
res = lcBlocks.addBlock(newParentBdata2.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == newParentBid2.slot
res = lcBlocks.addBlock(bdata.asSigned())
check:
res.isErr
res.error == BlockError.MissingParent
lcBlocks.getBackfillSlot() == newParentBid2.slot
lcBlocks = initLCBlocks(maxSlots = 0)
lcBlocks.setHeadBid(dag.head.bid)
lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
res = lcBlocks.addBlock(newBdata2.asSigned())
check:
res.isErr
res.error == BlockError.UnviableFork
lcBlocks.getBackfillSlot() == dag.head.slot + 1
res = lcBlocks.addBlock(newBdata.asSigned())
check:
res.isOk
lcBlocks.getBackfillSlot() == dag.head.slot + 1
res = lcBlocks.addBlock(newBdata2.asSigned())
check:
res.isErr
res.error == BlockError.UnviableFork
lcBlocks.getBackfillSlot() == dag.head.slot + 1
res = lcBlocks.addBlock(newBdata.asSigned())
check:
res.isErr
res.error == BlockError.Duplicate
lcBlocks.getBackfillSlot() == dag.head.slot + 1