implement v1.2.0 optimistic sync tests (#4174)

* implement v1.2.0 optimistic sync tests

* Update beacon_chain/consensus_object_pools/blockchain_dag.nim

Co-authored-by: Etan Kissling <etan@status.im>

* `lvh` -> `latestValidHash` and only invalidate one specific block

* `getEarliestInvalidRoot` -> `getEarliestInvalidBlockRoot`; `defaultEarliestInvalidRoot` -> `defaultEarliestInvalidBlockRoot`

Co-authored-by: Etan Kissling <etan@status.im>
tersec 2022-09-27 12:11:47 +00:00 committed by GitHub
parent 7f9af78ddb
commit 0f6d19b4b3
11 changed files with 257 additions and 31 deletions


@@ -278,6 +278,11 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
+ [SCRYPT] Network Keystore encryption OK
```
OK: 12/12 Fail: 0/12 Skip: 0/12
## Latest valid hash [Preset: mainnet]
```diff
+ LVH searching OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Light client [Preset: mainnet]
```diff
+ Init from checkpoint OK


@@ -224,6 +224,7 @@ ConsensusSpecPreset-mainnet
+ Slots - over_epoch_boundary OK
+ Slots - slots_1 OK
+ Slots - slots_2 OK
+ Sync - mainnet/bellatrix/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_same_proposer_slashings_same_block [Prese OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_similar_proposer_slashings_same_block [Pr OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_validator_exit_same_block [Preset: mainne OK
@@ -439,7 +440,7 @@ ConsensusSpecPreset-mainnet
+ fork_random_low_balances OK
+ fork_random_misc_balances OK
```
OK: 429/436 Fail: 0/436 Skip: 7/436
OK: 430/437 Fail: 0/437 Skip: 7/437
## Attestation
```diff
+ [Invalid] EF - Altair - Operations - Attestation - after_epoch_slots OK
@@ -1292,4 +1293,4 @@ OK: 44/44 Fail: 0/44 Skip: 0/44
OK: 33/33 Fail: 0/33 Skip: 0/33
---TOTAL---
OK: 1115/1122 Fail: 0/1122 Skip: 7/1122
OK: 1116/1123 Fail: 0/1123 Skip: 7/1123


@@ -264,6 +264,7 @@ ConsensusSpecPreset-minimal
+ Slots - over_epoch_boundary OK
+ Slots - slots_1 OK
+ Slots - slots_2 OK
+ Sync - minimal/bellatrix/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_same_proposer_slashings_same_block [Prese OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_similar_proposer_slashings_same_block [Pr OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_validator_exit_same_block [Preset: minima OK
@@ -497,7 +498,7 @@ ConsensusSpecPreset-minimal
+ fork_random_low_balances OK
+ fork_random_misc_balances OK
```
OK: 487/494 Fail: 0/494 Skip: 7/494
OK: 488/495 Fail: 0/495 Skip: 7/495
## Attestation
```diff
+ [Invalid] EF - Altair - Operations - Attestation - after_epoch_slots OK
@@ -1391,4 +1392,4 @@ OK: 48/48 Fail: 0/48 Skip: 0/48
OK: 36/36 Fail: 0/36 Skip: 0/36
---TOTAL---
OK: 1206/1213 Fail: 0/1213 Skip: 7/1213
OK: 1207/1214 Fail: 0/1214 Skip: 7/1214


@@ -1898,6 +1898,45 @@ proc updateHead*(
dag.finalizedHead.blck.root, stateRoot, dag.finalizedHead.slot.epoch)
dag.onFinHappened(dag, data)
proc getEarliestInvalidBlockRoot*(
dag: ChainDAGRef, initialSearchRoot: Eth2Digest,
latestValidHash: Eth2Digest, defaultEarliestInvalidBlockRoot: Eth2Digest):
Eth2Digest =
# Returns the earliest invalid block root within the chain/fork in question,
# per the LVH definition. Intended to be called with `initialSearchRoot` set
# to the parent of the block for which a newPayload or forkchoiceUpdated
# execution status was received, because the EF consensus spec sync tests
# effectively require access to this before the block's `BlockRef` exists.
# To accommodate those tests, and because the LVH might be an immediate
# parent or a more distant ancestor, the earliest invalid root is
# special-cased: it may come not from this function's search but from
# `defaultEarliestInvalidBlockRoot`, provided by a caller with access to the
# block itself.
var curBlck = dag.getBlockRef(initialSearchRoot).valueOr:
# Being asked to traverse a chain the DAG doesn't know about -- but that
# would imply the block is otherwise invalid for the CL as well as the EL.
return static(default(Eth2Digest))
# Allow this special case only outside the loop; it occurs when the LVH is
# the direct parent of the reported invalid block.
if curBlck.executionBlockRoot.isSome and
curBlck.executionBlockRoot.get == latestValidHash:
return defaultEarliestInvalidBlockRoot
while true:
# This should have been caught either by the pre-loop check or by the
# parent check.
if curBlck.executionBlockRoot.isSome and
curBlck.executionBlockRoot.get == latestValidHash:
doAssert false, "getEarliestInvalidBlockRoot: unexpected LVH in loop body"
if (curBlck.parent.isNil) or
curBlck.parent.executionBlockRoot.get(latestValidHash) ==
latestValidHash:
break
curBlck = curBlck.parent
curBlck.root
proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): Result[void, cstring] =
# Lightweight check to see if we have the minimal information needed to
# load up a database - we don't check head here - if something is wrong with
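
To make the search contract above concrete, here is a minimal, self-contained sketch of the same walk over a toy chain model; `ToyBlock` and `earliestInvalid` are illustrative stand-ins for `BlockRef` and `getEarliestInvalidBlockRoot`, not part of the codebase:

```nim
# Illustrative toy model: string hashes stand in for Eth2Digest.
import std/options

type
  ToyBlock = ref object
    root: string              # CL block root
    execHash: Option[string]  # executionBlockRoot
    parent: ToyBlock

proc earliestInvalid(start: ToyBlock, lvh, fallback: string): string =
  # Special case: the LVH is the search root itself (the direct parent of
  # the reported block), so the reported block -- the caller's fallback --
  # is the earliest invalid one.
  if start.execHash.isSome and start.execHash.get == lvh:
    return fallback
  var cur = start
  while true:
    # Stop when the parent is unknown or is the last valid block.
    if cur.parent.isNil or cur.parent.execHash.get("") == lvh:
      break
    cur = cur.parent
  cur.root

when isMainModule:
  let
    b1 = ToyBlock(root: "b1", execHash: some "e1")
    b2 = ToyBlock(root: "b2", execHash: some "e2", parent: b1)
    b3 = ToyBlock(root: "b3", execHash: some "e3", parent: b2)
  # b3's payload reported INVALID with LVH = e1: search starts at parent b2.
  doAssert earliestInvalid(b2, "e1", b3.root) == "b2"
  # LVH = e2 means the parent b2 is still valid, so the reported block
  # itself, supplied as the fallback, is the earliest invalid one.
  doAssert earliestInvalid(b2, "e2", b3.root) == "b3"
```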


@@ -167,7 +167,7 @@ func setOptimisticHead*(
proc runForkchoiceUpdated*(
eth1Monitor: Eth1Monitor,
headBlockRoot, safeBlockRoot, finalizedBlockRoot: Eth2Digest):
Future[PayloadExecutionStatus] {.async.} =
Future[(PayloadExecutionStatus, Option[BlockHash])] {.async.} =
# Allow finalizedBlockRoot to be 0 to avoid sync deadlocks.
#
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md#pos-events
@@ -199,11 +199,11 @@ proc runForkchoiceUpdated*(
latestValidHash = $fcuR.payloadStatus.latestValidHash,
validationError = $fcuR.payloadStatus.validationError
return fcuR.payloadStatus.status
return (fcuR.payloadStatus.status, fcuR.payloadStatus.latestValidHash)
except CatchableError as err:
error "runForkchoiceUpdated: forkchoiceUpdated failed",
err = err.msg
return PayloadExecutionStatus.syncing
return (PayloadExecutionStatus.syncing, none BlockHash)
proc runForkchoiceUpdatedDiscardResult*(
eth1Monitor: Eth1Monitor,
@@ -228,15 +228,26 @@ proc updateExecutionClientHead(
return Opt[void].ok()
# Can't use dag.head here because it hasn't been updated yet
let payloadExecutionStatus = await self.eth1Monitor.runForkchoiceUpdated(
headExecutionPayloadHash,
newHead.safeExecutionPayloadHash,
newHead.finalizedExecutionPayloadHash)
let (payloadExecutionStatus, latestValidHash) =
await self.eth1Monitor.runForkchoiceUpdated(
headExecutionPayloadHash,
newHead.safeExecutionPayloadHash,
newHead.finalizedExecutionPayloadHash)
case payloadExecutionStatus
of PayloadExecutionStatus.valid:
self.dag.markBlockVerified(self.quarantine[], newHead.blck.root)
of PayloadExecutionStatus.invalid, PayloadExecutionStatus.invalid_block_hash:
# This is a CL root, not EL hash
let earliestKnownInvalidRoot =
if latestValidHash.isSome:
self.dag.getEarliestInvalidBlockRoot(
newHead.blck.root, latestValidHash.get.asEth2Digest,
newHead.blck.root)
else:
newHead.blck.root
self.attestationPool[].forkChoice.mark_root_invalid(newHead.blck.root)
self.dag.markBlockInvalid(newHead.blck.root)
self.quarantine[].addUnviable(newHead.blck.root)
return Opt.none(void)
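
A minimal sketch of the new calling convention, where callers destructure a `(status, latestValidHash)` tuple instead of a bare status. `PayloadStatus` and `fakeForkchoiceUpdated` are toy stand-ins for the real chronos-based `PayloadExecutionStatus`/`runForkchoiceUpdated` pair, not the codebase's API:

```nim
import std/[asyncdispatch, options]

type PayloadStatus = enum psValid, psInvalid, psSyncing

proc fakeForkchoiceUpdated(): Future[(PayloadStatus, Option[string])] {.async.} =
  # Always reports INVALID with a known latestValidHash, for demonstration.
  return (psInvalid, some "lvh")

proc demo() {.async.} =
  let (status, latestValidHash) = await fakeForkchoiceUpdated()
  if status == psInvalid:
    # Prefer an LVH-derived earliest-invalid root; fall back to the reported
    # block's own root when the EL supplies no latestValidHash.
    let root = latestValidHash.get("reported-block-root")
    doAssert root == "lvh"

waitFor demo()
```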


@@ -442,6 +442,7 @@ func mark_root_invalid*(self: var ForkChoice, root: Eth2Digest) =
self.backend.proto_array.nodes.offset
if nodePhysicalIdx < self.backend.proto_array.nodes.buf.len:
self.backend.proto_array.nodes.buf[nodePhysicalIdx].invalid = true
self.backend.proto_array.propagateInvalidity(nodePhysicalIdx)
# Best-effort; attempts to mark unknown roots invalid harmlessly ignored
except KeyError:
discard


@@ -558,6 +558,28 @@ func nodeIsViableForHead(self: ProtoArray, node: ProtoNode): bool =
(self.checkpoints.finalized.epoch == GENESIS_EPOCH)
)
func propagateInvalidity*(
self: var ProtoArray, startPhysicalIdx: Index) =
# Called when the node at `startPhysicalIdx` has been updated in its role as
# a parent, so in each generated (parent, child) index pair both indices are
# >= `startPhysicalIdx`; seen from the child's perspective, the loop
# therefore starts one index higher.
for nodePhysicalIdx in startPhysicalIdx + 1 ..< self.nodes.len:
let nodeParent = self.nodes.buf[nodePhysicalIdx].parent
if nodeParent.isNone:
continue
let
parentLogicalIdx = nodeParent.unsafeGet()
parentPhysicalIdx = parentLogicalIdx - self.nodes.offset
# The former case is orphaned; the latter is invalid, but is caught during
# score updates.
if parentPhysicalIdx < 0 or parentPhysicalIdx >= self.nodes.len:
continue
# Invalidity transmits to all descendants
if self.nodes.buf[parentPhysicalIdx].invalid:
self.nodes.buf[nodePhysicalIdx].invalid = true
# Diagnostics
# ----------------------------------------------------------------------
# Helpers to dump internal state
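
The single forward pass works because proto-array stores nodes in insertion order, so every child sits at a higher physical index than its parent. A self-contained model of that sweep follows; the toy `Node` holds a physical index directly, omitting the logical-index/offset translation the real proto-array performs:

```nim
import std/options

type
  Node = object
    parent: Option[int]  # physical index of the parent, if known
    invalid: bool

proc propagate(nodes: var seq[Node], startIdx: int) =
  # One left-to-right sweep suffices: children always follow their parents.
  for i in startIdx + 1 ..< nodes.len:
    let p = nodes[i].parent
    if p.isSome and p.get >= 0 and p.get < nodes.len and nodes[p.get].invalid:
      nodes[i].invalid = true

when isMainModule:
  # Chain 0 <- 1 <- 2, plus node 3, a sibling of node 1 on the valid branch.
  var nodes = @[
    Node(parent: none int),
    Node(parent: some 0, invalid: true),  # freshly marked invalid
    Node(parent: some 1),
    Node(parent: some 0)]
  propagate(nodes, 1)
  doAssert nodes[2].invalid      # descendant of node 1 becomes invalid
  doAssert not nodes[3].invalid  # the valid branch is untouched
```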


@@ -178,7 +178,7 @@ proc expectValidForkchoiceUpdated(
eth1Monitor: Eth1Monitor,
headBlockRoot, safeBlockRoot, finalizedBlockRoot: Eth2Digest
): Future[void] {.async.} =
let payloadExecutionStatus =
let (payloadExecutionStatus, _) =
await eth1Monitor.runForkchoiceUpdated(
headBlockRoot, safeBlockRoot, finalizedBlockRoot)
if payloadExecutionStatus != PayloadExecutionStatus.valid:


@@ -39,6 +39,7 @@ type
opOnBlock
opOnMergeBlock
opOnAttesterSlashing
opInvalidateRoot
opChecks
Operation = object
@@ -55,6 +56,9 @@ type
powBlock: PowBlock
of opOnAttesterSlashing:
attesterSlashing: AttesterSlashing
of opInvalidateRoot:
invalidatedRoot: Eth2Digest
latestValidHash: Eth2Digest
of opChecks:
checks: JsonNode
@@ -156,6 +160,13 @@ proc loadOps(path: string, fork: BeaconStateFork): seq[Operation] =
)
result.add Operation(kind: opOnAttesterSlashing,
attesterSlashing: attesterSlashing)
elif step.hasKey"payload_status":
if step["payload_status"]["status"].getStr() == "INVALID":
result.add Operation(kind: opInvalidateRoot,
valid: true,
invalidatedRoot: Eth2Digest.fromHex(step["block_hash"].getStr()),
latestValidHash: Eth2Digest.fromHex(
step["payload_status"]["latest_valid_hash"].getStr()))
elif step.hasKey"checks":
result.add Operation(kind: opChecks,
checks: step["checks"])
@@ -165,7 +176,7 @@ proc loadOps(path: string, fork: BeaconStateFork): seq[Operation] =
if step.hasKey"valid":
doAssert step.len == 2
result[^1].valid = step["valid"].getBool()
elif not step.hasKey"checks":
elif not step.hasKey"checks" and not step.hasKey"payload_status":
doAssert step.len == 1
result[^1].valid = true
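
For reference, a step carrying a `payload_status` has roughly this shape; the field values below are made up, and real vectors come from the consensus-spec-tests `sync/optimistic` suite:

```nim
import std/json

let step = parseJson """{
  "block_hash": "0x22...",
  "payload_status": {
    "status": "INVALID",
    "latest_valid_hash": "0x11...",
    "validation_error": ""
  }
}"""
doAssert step.hasKey"payload_status"
doAssert step["payload_status"]["status"].getStr() == "INVALID"
```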
@@ -176,7 +187,9 @@ proc stepOnBlock(
state: var ForkedHashedBeaconState,
stateCache: var StateCache,
signedBlock: ForkySignedBeaconBlock,
time: BeaconTime): Result[BlockRef, BlockError] =
time: BeaconTime,
invalidatedRoots: Table[Eth2Digest, Eth2Digest]):
Result[BlockRef, BlockError] =
# 1. Move state to proper slot.
doAssert dag.updateState(
state,
@@ -193,6 +206,30 @@ proc stepOnBlock(
else:
type TrustedBlock = bellatrix.TrustedSignedBeaconBlock
# In the normal Nimbus flow, this (effectively) newPayload-based INVALID is
# checked by the block processor, even before the block enters the DAG. The
# optimistic sync tests currently don't include a later-fcU-INVALID case;
# while such a case wouldn't be part of this check, its FC test vector step
# would presumably also carry `true` validity, because the blocks would not
# yet be known to be invalid. Adding this mock of the block processor is
# therefore realistic and sufficient.
when not (
signedBlock is phase0.SignedBeaconBlock or
signedBlock is altair.SignedBeaconBlock):
let executionPayloadHash =
signedBlock.message.body.execution_payload.block_hash
if executionPayloadHash in invalidatedRoots:
# Mocks fork choice INVALID list application. These tests sequence this
# in a way the block processor does not, specifying each payload_status
# before the block itself, while Nimbus fork choice treats invalidating
# a non-existent block root as a no-op and does not remember it for the
# future.
let lvh = invalidatedRoots.getOrDefault(
executionPayloadHash, static(default(Eth2Digest)))
fkChoice[].mark_root_invalid(dag.getEarliestInvalidBlockRoot(
signedBlock.message.parent_root, lvh, executionPayloadHash))
return err BlockError.Invalid
let blockAdded = dag.addHeadBlock(verifier, signedBlock) do (
blckRef: BlockRef, signedBlock: TrustedBlock,
epochRef: EpochRef, unrealized: FinalityCheckpoints):
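
A minimal model of that bookkeeping, with toy string keys standing in for `Eth2Digest`: the `payload_status` step is only recorded at first, and the table is consulted when the block carrying that payload finally arrives:

```nim
import std/tables

var invalidatedRoots: Table[string, string]
# Step 1: a payload_status step arrives and is recorded...
invalidatedRoots["execHash2"] = "execHash1"
# Step 2: ...the block carrying execHash2 arrives later and is rejected
# before it would reach the DAG.
doAssert "execHash2" in invalidatedRoots
doAssert invalidatedRoots.getOrDefault("execHash2", "") == "execHash1"
```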
@@ -278,6 +315,7 @@ proc doRunTest(path: string, fork: BeaconStateFork) =
let steps = loadOps(path, fork)
var time = stores.fkChoice.checkpoints.time
var invalidatedRoots: Table[Eth2Digest, Eth2Digest]
let state = newClone(stores.dag.headState)
var stateCache = StateCache()
@@ -298,7 +336,7 @@ proc doRunTest(path: string, fork: BeaconStateFork) =
let status = stepOnBlock(
stores.dag, stores.fkChoice,
verifier, state[], stateCache,
blck, time)
blck, time, invalidatedRoots)
doAssert status.isOk == step.valid
of opOnAttesterSlashing:
let indices =
@@ -307,12 +345,14 @@ proc doRunTest(path: string, fork: BeaconStateFork) =
for idx in indices.get:
stores.fkChoice[].process_equivocation(idx)
doAssert indices.isOk == step.valid
of opInvalidateRoot:
invalidatedRoots[step.invalidatedRoot] = step.latestValidHash
of opChecks:
stepChecks(step.checks, stores.dag, stores.fkChoice, time)
else:
doAssert false, "Unsupported"
proc runTest(path: string, fork: BeaconStateFork) =
proc runTest(testType: static[string], path: string, fork: BeaconStateFork) =
const SKIP = [
# protoArray can handle blocks in the future gracefully
# spec: https://github.com/ethereum/consensus-specs/blame/v1.1.3/specs/phase0/fork-choice.md#L349
@@ -327,7 +367,7 @@ proc runTest(path: string, fork: BeaconStateFork) =
"all_valid",
]
test "ForkChoice - " & path.relativePath(SszTestsDir):
test testType & " - " & path.relativePath(SszTestsDir):
when defined(windows):
# Some test files have very long paths
skip()
@@ -337,17 +377,21 @@ proc runTest(path: string, fork: BeaconStateFork) =
else:
doRunTest(path, fork)
suite "EF - ForkChoice" & preset():
const presetPath = SszTestsDir/const_preset
for kind, path in walkDir(presetPath, relative = true, checkDir = true):
let testsPath = presetPath/path/"fork_choice"
if kind != pcDir or not dirExists(testsPath):
continue
let fork = forkForPathComponent(path).valueOr:
raiseAssert "Unknown test fork: " & testsPath
for kind, path in walkDir(testsPath, relative = true, checkDir = true):
let basePath = testsPath/path/"pyspec_tests"
if kind != pcDir:
template fcSuite(suiteName: static[string], testPathElem: static[string]) =
suite "EF - " & suiteName & preset():
const presetPath = SszTestsDir/const_preset
for kind, path in walkDir(presetPath, relative = true, checkDir = true):
let testsPath = presetPath/path/testPathElem
if kind != pcDir or not dirExists(testsPath):
continue
for kind, path in walkDir(basePath, relative = true, checkDir = true):
runTest(basePath/path, fork)
let fork = forkForPathComponent(path).valueOr:
raiseAssert "Unknown test fork: " & testsPath
for kind, path in walkDir(testsPath, relative = true, checkDir = true):
let basePath = testsPath/path/"pyspec_tests"
if kind != pcDir:
continue
for kind, path in walkDir(basePath, relative = true, checkDir = true):
runTest(suiteName, basePath/path, fork)
fcSuite("ForkChoice", "fork_choice")
fcSuite("Sync", "sync")


@@ -22,6 +22,7 @@ func `$`(x: BlockRef): string = shortLog(x)
const
nilPhase0Callback = OnPhase0BlockAdded(nil)
nilAltairCallback = OnAltairBlockAdded(nil)
nilBellatrixCallback = OnBellatrixBlockAdded(nil)
proc pruneAtFinalization(dag: ChainDAGRef) =
if dag.needStateCachesAndForkChoicePruning():
@@ -919,3 +920,58 @@ suite "Backfill":
blocks[^2].toBlockId().atSlot()
dag2.getBlockIdAtSlot(dag.tail.slot - 2).isNone
dag2.backfill == blocks[^2].phase0Data.message.toBeaconBlockSummary()
suite "Latest valid hash" & preset():
setup:
var runtimeConfig = defaultRuntimeConfig
runtimeConfig.ALTAIR_FORK_EPOCH = 1.Epoch
runtimeConfig.BELLATRIX_FORK_EPOCH = 2.Epoch
var
db = makeTestDB(SLOTS_PER_EPOCH)
validatorMonitor = newClone(ValidatorMonitor.init())
dag = init(ChainDAGRef, runtimeConfig, db, validatorMonitor, {})
verifier = BatchVerifier(rng: keys.newRng(), taskpool: Taskpool.new())
quarantine = newClone(Quarantine.init())
cache = StateCache()
info = ForkedEpochInfo()
state = newClone(dag.headState)
test "LVH searching":
# Reach Bellatrix, where execution payloads exist
check process_slots(
runtimeConfig, state[],
getStateField(state[], slot) + (3 * SLOTS_PER_EPOCH).uint64,
cache, info, {}).isOk()
var
b1 = addTestBlock(state[], cache, cfg = runtimeConfig).bellatrixData
b1Add = dag.addHeadBlock(verifier, b1, nilBellatrixCallback)
b2 = addTestBlock(state[], cache, cfg = runtimeConfig).bellatrixData
b2Add = dag.addHeadBlock(verifier, b2, nilBellatrixCallback)
b3 = addTestBlock(state[], cache, cfg = runtimeConfig).bellatrixData
b3Add = dag.addHeadBlock(verifier, b3, nilBellatrixCallback)
dag.updateHead(b3Add[], quarantine[])
const fallbackEarliestInvalid =
Eth2Digest.fromHex("0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
check:
# Represents where LVH is two behind the invalid-marked block (because
# first param is parent). It searches using LVH (i.e. execution hash),
# but returns CL block hash, because that's what fork choice and other
# Nimbus components mostly use as a coordinate system. Since b1 is set
# to be valid here by being the LVH, it means that b2 must be invalid.
dag.getEarliestInvalidBlockRoot(
b2Add[].root, b1.message.body.execution_payload.block_hash,
fallbackEarliestInvalid) == b2Add[].root
# This simulates a call based on b3 (the child of b2), where there is no
# gap in detecting the invalid blocks. Because the API, due to test-case
# design, does not assume the block under test is in the DAG, a fallback
# (CL) block root is specified manually; the function has no other access
# to that information, because the newest block in the chain it examines
# is already valid.
dag.getEarliestInvalidBlockRoot(
b2Add[].root, b2.message.body.execution_payload.block_hash,
fallbackEarliestInvalid) == fallbackEarliestInvalid


@@ -15,6 +15,10 @@ import
../beacon_chain/spec/[
beaconstate, helpers, keystore, signatures, state_transition, validator]
from eth/common/eth_types import EMPTY_ROOT_HASH
from eth/common/eth_types_rlp import rlpHash
from eth/eip1559 import EIP1559_INITIAL_BASE_FEE
type
MockPrivKeysT = object
MockPubKeysT = object
@@ -74,6 +78,33 @@ func signBlock(
ValidatorSig()
ForkedSignedBeaconBlock.init(forked, root, signature)
func build_empty_merge_execution_payload(state: bellatrix.BeaconState):
ExecutionPayload =
## Assuming a pre-state of the same slot, build a valid ExecutionPayload
## without any transactions from a non-merged block.
doAssert not is_merge_transition_complete(state)
let
latest = state.latest_execution_payload_header
timestamp = compute_timestamp_at_slot(state, state.slot)
randao_mix = get_randao_mix(state, get_current_epoch(state))
var payload = ExecutionPayload(
parent_hash: latest.block_hash,
state_root: latest.state_root, # no changes to the state
receipts_root: EMPTY_ROOT_HASH,
block_number: latest.block_number + 1,
prev_randao: randao_mix,
gas_limit: 30000000, # retain same limit
gas_used: 0, # empty block, 0 gas
timestamp: timestamp,
base_fee_per_gas: EIP1559_INITIAL_BASE_FEE)
payload.block_hash = rlpHash emptyPayloadToBlockHeader(payload)
payload
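
For reference, the timestamp helper the builder relies on reduces to simple arithmetic, assuming `GENESIS_SLOT = 0` and mainnet's `SECONDS_PER_SLOT = 12`; this is a sketch, not the codebase's implementation:

```nim
const SECONDS_PER_SLOT = 12'u64  # mainnet preset

func computeTimestampAtSlot(genesisTime, slot: uint64): uint64 =
  # slots_since_genesis * SECONDS_PER_SLOT past genesis_time
  genesisTime + slot * SECONDS_PER_SLOT

# Mainnet genesis time, 10 slots in: two minutes after genesis.
doAssert computeTimestampAtSlot(1606824023'u64, 10) == 1606824143'u64
```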
proc addTestBlock*(
state: var ForkedHashedBeaconState,
cache: var StateCache,
@@ -105,6 +136,21 @@ proc addTestBlock*(
else:
ValidatorSig()
let execution_payload =
withState(state):
when stateFork >= BeaconStateFork.Bellatrix:
# Merge shortly after Bellatrix
if forkyState.data.slot >
cfg.BELLATRIX_FORK_EPOCH * SLOTS_PER_EPOCH + 10:
if is_merge_transition_complete(forkyState.data):
build_empty_execution_payload(forkyState.data)
else:
build_empty_merge_execution_payload(forkyState.data)
else:
default(ExecutionPayload)
else:
default(ExecutionPayload)
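
The three-way decision above, restated as a standalone predicate under assumed constants (`SLOTS_PER_EPOCH = 32`, as on mainnet); the names here are illustrative only:

```nim
const SLOTS_PER_EPOCH = 32'u64  # mainnet preset

type PayloadKind = enum pkNone, pkMergeTransition, pkPostMerge

func payloadKind(slot, bellatrixForkEpoch: uint64,
                 mergeComplete: bool): PayloadKind =
  if slot <= bellatrixForkEpoch * SLOTS_PER_EPOCH + 10:
    pkNone             # too soon after the fork: default empty payload
  elif mergeComplete:
    pkPostMerge        # regular empty payload atop an existing one
  else:
    pkMergeTransition  # first payload, built from the pre-merge header

doAssert payloadKind(2 * SLOTS_PER_EPOCH + 5, 2, false) == pkNone
doAssert payloadKind(2 * SLOTS_PER_EPOCH + 11, 2, false) == pkMergeTransition
doAssert payloadKind(2 * SLOTS_PER_EPOCH + 11, 2, true) == pkPostMerge
```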
let
message = makeBeaconBlock(
cfg,
@@ -121,7 +167,7 @@ proc addTestBlock*(
deposits,
BeaconBlockExits(),
sync_aggregate,
default(ExecutionPayload),
execution_payload,
noRollback,
cache,
verificationFlags = {skipBlsValidation})