`...ExecutionPayloadHash` --> `...ExecutionBlockHash` (#5864)
Finish the rename started in #4809 for consistent naming. `ExecutionPayloadHash` suggests a hash computed over the payload rather than over the block; `BlockHash` is also the canonical name in the engine API.
parent 464ff68658
commit e398078abc
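The renamed fields are the values that back the engine API's `engine_forkchoiceUpdated` calls, which identify the head, safe and finalized blocks by execution block hash rather than by any payload-level hash. A minimal sketch of that relationship, using simplified stand-in types and a hypothetical `toForkchoiceState` helper (not the actual nimbus-eth2 API):

type
  Eth2Digest = object
    ## Simplified stand-in for the real 32-byte digest type.
    data: array[32, byte]

  BeaconHead = object
    ## Mirrors the renamed fields from the diff below: both are execution
    ## *block* hashes, matching engine API terminology.
    safeExecutionBlockHash: Eth2Digest
    finalizedExecutionBlockHash: Eth2Digest

  ForkchoiceState = object
    ## engine_forkchoiceUpdated identifies blocks by blockHash, which is why
    ## "BlockHash" becomes the consistent suffix after this rename.
    headBlockHash: Eth2Digest
    safeBlockHash: Eth2Digest
    finalizedBlockHash: Eth2Digest

func toForkchoiceState(
    head: BeaconHead, headExecutionBlockHash: Eth2Digest): ForkchoiceState =
  ## Hypothetical helper, for illustration only: map a BeaconHead plus the
  ## head block's execution block hash onto an engine API fork-choice state.
  ForkchoiceState(
    headBlockHash: headExecutionBlockHash,
    safeBlockHash: head.safeExecutionBlockHash,
    finalizedBlockHash: head.finalizedExecutionBlockHash)

when isMainModule:
  # Zeroed hashes, just to show the call shape.
  discard toForkchoiceState(BeaconHead(), Eth2Digest())

In the actual diff, `BeaconHead` also carries the head `BlockRef`, and the head's execution block hash is loaded from the DAG via `loadExecutionBlockHash`.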
@@ -71,8 +71,8 @@ proc initLightClient*(
     template callForkchoiceUpdated(attributes: untyped) =
       discard await node.elManager.forkchoiceUpdated(
         headBlockHash = blckPayload.block_hash,
-        safeBlockHash = beaconHead.safeExecutionPayloadHash,
-        finalizedBlockHash = beaconHead.finalizedExecutionPayloadHash,
+        safeBlockHash = beaconHead.safeExecutionBlockHash,
+        finalizedBlockHash = beaconHead.finalizedExecutionBlockHash,
         payloadAttributes = none attributes)

     case node.dag.cfg.consensusForkAtEpoch(

@@ -761,18 +761,18 @@ func getAggregatedAttestation*(pool: var AttestationPool,

 type BeaconHead* = object
   blck*: BlockRef
-  safeExecutionPayloadHash*, finalizedExecutionPayloadHash*: Eth2Digest
+  safeExecutionBlockHash*, finalizedExecutionBlockHash*: Eth2Digest

 proc getBeaconHead*(
     pool: AttestationPool, headBlock: BlockRef): BeaconHead =
   let
-    finalizedExecutionPayloadHash =
+    finalizedExecutionBlockHash =
       pool.dag.loadExecutionBlockHash(pool.dag.finalizedHead.blck)

     # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/fork_choice/safe-block.md#get_safe_execution_payload_hash
     safeBlockRoot = pool.forkChoice.get_safe_beacon_block_root()
     safeBlock = pool.dag.getBlockRef(safeBlockRoot)
-    safeExecutionPayloadHash =
+    safeExecutionBlockHash =
       if safeBlock.isErr:
         # Safe block is currently the justified block determined by fork choice.
         # If finality already advanced beyond the current justified checkpoint,
@@ -780,14 +780,14 @@ proc getBeaconHead*(
         # the justified block may end up not having a `BlockRef` anymore.
         # Because we know that a different fork already finalized a later point,
         # let's just report the finalized execution payload hash instead.
-        finalizedExecutionPayloadHash
+        finalizedExecutionBlockHash
       else:
         pool.dag.loadExecutionBlockHash(safeBlock.get)

   BeaconHead(
     blck: headBlock,
-    safeExecutionPayloadHash: safeExecutionPayloadHash,
-    finalizedExecutionPayloadHash: finalizedExecutionPayloadHash)
+    safeExecutionBlockHash: safeExecutionBlockHash,
+    finalizedExecutionBlockHash: finalizedExecutionBlockHash)

 proc selectOptimisticHead*(
     pool: var AttestationPool, wallTime: BeaconTime): Opt[BeaconHead] =

@@ -145,7 +145,7 @@ func shouldSyncOptimistically*(self: ConsensusManager, wallSlot: Slot): bool =
 func optimisticHead*(self: ConsensusManager): BlockId =
   self.optimisticHead.bid

-func optimisticExecutionPayloadHash*(self: ConsensusManager): Eth2Digest =
+func optimisticExecutionBlockHash*(self: ConsensusManager): Eth2Digest =
   self.optimisticHead.execution_block_hash

 func setOptimisticHead*(
@@ -155,18 +155,18 @@ func setOptimisticHead*(

 proc updateExecutionClientHead(self: ref ConsensusManager,
     newHead: BeaconHead): Future[Opt[void]] {.async: (raises: [CancelledError]).} =
-  let headExecutionPayloadHash = self.dag.loadExecutionBlockHash(newHead.blck)
+  let headExecutionBlockHash = self.dag.loadExecutionBlockHash(newHead.blck)

-  if headExecutionPayloadHash.isZero:
+  if headExecutionBlockHash.isZero:
     # Blocks without execution payloads can't be optimistic.
     self.dag.markBlockVerified(newHead.blck)
     return Opt[void].ok()

   template callForkchoiceUpdated(attributes: untyped): auto =
     await self.elManager.forkchoiceUpdated(
-      headBlockHash = headExecutionPayloadHash,
-      safeBlockHash = newHead.safeExecutionPayloadHash,
-      finalizedBlockHash = newHead.finalizedExecutionPayloadHash,
+      headBlockHash = headExecutionBlockHash,
+      safeBlockHash = newHead.safeExecutionBlockHash,
+      finalizedBlockHash = newHead.finalizedExecutionBlockHash,
       payloadAttributes = none attributes)

   # Can't use dag.head here because it hasn't been updated yet
@@ -352,13 +352,13 @@ proc runProposalForkchoiceUpdated*(
   if headBlockHash.isZero:
     return err()

-  let safeBlockHash = beaconHead.safeExecutionPayloadHash
+  let safeBlockHash = beaconHead.safeExecutionBlockHash

   withState(self.dag.headState):
     template callForkchoiceUpdated(fcPayloadAttributes: auto) =
       let (status, _) = await self.elManager.forkchoiceUpdated(
         headBlockHash, safeBlockHash,
-        beaconHead.finalizedExecutionPayloadHash,
+        beaconHead.finalizedExecutionBlockHash,
         payloadAttributes = some fcPayloadAttributes)
       debug "Fork-choice updated for proposal", status

@@ -16,7 +16,7 @@ import
 from std/deques import Deque, addLast, contains, initDeque, items, len, shrink
 from std/sequtils import mapIt
 from ../consensus_object_pools/consensus_manager import
-  ConsensusManager, checkNextProposer, optimisticExecutionPayloadHash,
+  ConsensusManager, checkNextProposer, optimisticExecutionBlockHash,
   runProposalForkchoiceUpdated, shouldSyncOptimistically, updateHead,
   updateHeadWithExecution
 from ../consensus_object_pools/blockchain_dag import
@@ -636,12 +636,12 @@ proc storeBlock(

     template callForkchoiceUpdated(attributes: untyped) =
       if NewPayloadStatus.noResponse != payloadStatus and
-          not self.consensusManager[].optimisticExecutionPayloadHash.isZero:
+          not self.consensusManager[].optimisticExecutionBlockHash.isZero:
         discard await elManager.forkchoiceUpdated(
           headBlockHash =
-            self.consensusManager[].optimisticExecutionPayloadHash,
-          safeBlockHash = newHead.get.safeExecutionPayloadHash,
-          finalizedBlockHash = newHead.get.finalizedExecutionPayloadHash,
+            self.consensusManager[].optimisticExecutionBlockHash,
+          safeBlockHash = newHead.get.safeExecutionBlockHash,
+          finalizedBlockHash = newHead.get.finalizedExecutionBlockHash,
           payloadAttributes = none attributes)

     let consensusFork = self.consensusManager.dag.cfg.consensusForkAtEpoch(
@@ -651,10 +651,10 @@ proc storeBlock(
       callForkchoiceUpdated(consensusFork.PayloadAttributes)
   else:
     let
-      headExecutionPayloadHash =
+      headExecutionBlockHash =
         dag.loadExecutionBlockHash(newHead.get.blck)
       wallSlot = self.getBeaconTime().slotOrZero
-    if headExecutionPayloadHash.isZero or
+    if headExecutionBlockHash.isZero or
         NewPayloadStatus.noResponse == payloadStatus:
       # Blocks without execution payloads can't be optimistic, and don't try
       # to fcU to a block the EL hasn't seen
@@ -666,9 +666,9 @@ proc storeBlock(
     template callExpectValidFCU(payloadAttributeType: untyped): auto =
       await elManager.expectValidForkchoiceUpdated(
         headBlockPayloadAttributesType = payloadAttributeType,
-        headBlockHash = headExecutionPayloadHash,
-        safeBlockHash = newHead.get.safeExecutionPayloadHash,
-        finalizedBlockHash = newHead.get.finalizedExecutionPayloadHash,
+        headBlockHash = headExecutionBlockHash,
+        safeBlockHash = newHead.get.safeExecutionBlockHash,
+        finalizedBlockHash = newHead.get.finalizedExecutionBlockHash,
         receivedBlock = signedBlock)

     template callForkChoiceUpdated: auto =

@@ -389,8 +389,8 @@ proc getExecutionPayload(
         forkyState.data.latest_execution_payload_header.block_hash
       else:
         (static(default(Eth2Digest)))
-    latestSafe = beaconHead.safeExecutionPayloadHash
-    latestFinalized = beaconHead.finalizedExecutionPayloadHash
+    latestSafe = beaconHead.safeExecutionBlockHash
+    latestFinalized = beaconHead.finalizedExecutionBlockHash
     timestamp = withState(proposalState[]):
       compute_timestamp_at_slot(forkyState.data, forkyState.data.slot)
     random = withState(proposalState[]):
@@ -553,7 +553,7 @@ proc getBlindedExecutionPayload[
     EPH: capella.ExecutionPayloadHeader |
          deneb_mev.BlindedExecutionPayloadAndBlobsBundle](
     node: BeaconNode, payloadBuilderClient: RestClientRef, slot: Slot,
-    executionBlockRoot: Eth2Digest, pubkey: ValidatorPubKey):
+    executionBlockHash: Eth2Digest, pubkey: ValidatorPubKey):
     Future[BlindedBlockResult[EPH]] {.async: (raises: [CancelledError, RestError]).} =
   # Not ideal to use `when` where instead of splitting into separate functions,
   # but Nim doesn't overload on generic EPH type parameter.
@@ -561,7 +561,7 @@ proc getBlindedExecutionPayload[
     let
       response = awaitWithTimeout(
         payloadBuilderClient.getHeaderCapella(
-          slot, executionBlockRoot, pubkey),
+          slot, executionBlockHash, pubkey),
         BUILDER_PROPOSAL_DELAY_TOLERANCE):
           return err "Timeout obtaining Capella blinded header from builder"

@@ -577,7 +577,7 @@ proc getBlindedExecutionPayload[
     let
       response = awaitWithTimeout(
         payloadBuilderClient.getHeaderDeneb(
-          slot, executionBlockRoot, pubkey),
+          slot, executionBlockHash, pubkey),
         BUILDER_PROPOSAL_DELAY_TOLERANCE):
           return err "Timeout obtaining Deneb blinded header from builder"

@@ -767,12 +767,12 @@ proc getBlindedBlockParts[
     Future[Result[(EPH, UInt256, ForkedBeaconBlock), string]]
     {.async: (raises: [CancelledError]).} =
   let
-    executionBlockRoot = node.dag.loadExecutionBlockHash(head)
+    executionBlockHash = node.dag.loadExecutionBlockHash(head)
     executionPayloadHeader =
       try:
         awaitWithTimeout(
           getBlindedExecutionPayload[EPH](
-            node, payloadBuilderClient, slot, executionBlockRoot, pubkey),
+            node, payloadBuilderClient, slot, executionBlockHash, pubkey),
           BUILDER_PROPOSAL_DELAY_TOLERANCE):
             BlindedBlockResult[EPH].err("getBlindedExecutionPayload timed out")
       except RestDecodingError as exc:

@@ -39,7 +39,7 @@ type
     opOnBlock
     opOnMergeBlock
     opOnAttesterSlashing
-    opInvalidateRoot
+    opInvalidateHash
     opChecks

   BlobData = object
@@ -61,8 +61,8 @@ type
       powBlock: PowBlock
     of opOnAttesterSlashing:
       attesterSlashing: AttesterSlashing
-    of opInvalidateRoot:
-      invalidatedRoot: Eth2Digest
+    of opInvalidateHash:
+      invalidatedHash: Eth2Digest
       latestValidHash: Eth2Digest
     of opChecks:
       checks: JsonNode
@@ -153,9 +153,9 @@ proc loadOps(path: string, fork: ConsensusFork): seq[Operation] =
         attesterSlashing: attesterSlashing)
     elif step.hasKey"payload_status":
       if step["payload_status"]["status"].getStr() == "INVALID":
-        result.add Operation(kind: opInvalidateRoot,
+        result.add Operation(kind: opInvalidateHash,
           valid: true,
-          invalidatedRoot: Eth2Digest.fromHex(step["block_hash"].getStr()),
+          invalidatedHash: Eth2Digest.fromHex(step["block_hash"].getStr()),
           latestValidHash: Eth2Digest.fromHex(
             step["payload_status"]["latest_valid_hash"].getStr()))
     elif step.hasKey"checks":
@@ -180,7 +180,7 @@ proc stepOnBlock(
     signedBlock: ForkySignedBeaconBlock,
     blobData: Opt[BlobData],
     time: BeaconTime,
-    invalidatedRoots: Table[Eth2Digest, Eth2Digest]):
+    invalidatedHashes: Table[Eth2Digest, Eth2Digest]):
     Result[BlockRef, VerifierError] =
   # 1. Validate blobs
   when typeof(signedBlock).kind >= ConsensusFork.Deneb:
@@ -210,18 +210,18 @@ proc stepOnBlock(
   # would also have `true` validity because it'd not be known they weren't, so
   # adding this mock of the block processor is realistic and sufficient.
   when consensusFork >= ConsensusFork.Bellatrix:
-    let executionPayloadHash =
+    let executionBlockHash =
       signedBlock.message.body.execution_payload.block_hash
-    if executionPayloadHash in invalidatedRoots:
+    if executionBlockHash in invalidatedHashes:
       # Mocks fork choice INVALID list application. These tests sequence this
       # in a way the block processor does not, specifying each payload_status
       # before the block itself, while Nimbus fork choice treats invalidating
       # a non-existent block root as a no-op and does not remember it for the
       # future.
-      let lvh = invalidatedRoots.getOrDefault(
-        executionPayloadHash, static(default(Eth2Digest)))
+      let lvh = invalidatedHashes.getOrDefault(
+        executionBlockHash, static(default(Eth2Digest)))
       fkChoice[].mark_root_invalid(dag.getEarliestInvalidBlockRoot(
-        signedBlock.message.parent_root, lvh, executionPayloadHash))
+        signedBlock.message.parent_root, lvh, executionBlockHash))

       return err VerifierError.Invalid

@@ -304,7 +304,7 @@ proc doRunTest(path: string, fork: ConsensusFork) =

   let steps = loadOps(path, fork)
   var time = stores.fkChoice.checkpoints.time
-  var invalidatedRoots: Table[Eth2Digest, Eth2Digest]
+  var invalidatedHashes: Table[Eth2Digest, Eth2Digest]

   let state = newClone(stores.dag.headState)
   var stateCache = StateCache()
@@ -325,7 +325,7 @@ proc doRunTest(path: string, fork: ConsensusFork) =
       let status = stepOnBlock(
         stores.dag, stores.fkChoice,
         verifier, state[], stateCache,
-        forkyBlck, step.blobData, time, invalidatedRoots)
+        forkyBlck, step.blobData, time, invalidatedHashes)
       doAssert status.isOk == step.valid
     of opOnAttesterSlashing:
       let indices =
@@ -334,8 +334,8 @@ proc doRunTest(path: string, fork: ConsensusFork) =
       for idx in indices.get:
         stores.fkChoice[].process_equivocation(idx)
       doAssert indices.isOk == step.valid
-    of opInvalidateRoot:
-      invalidatedRoots[step.invalidatedRoot] = step.latestValidHash
+    of opInvalidateHash:
+      invalidatedHashes[step.invalidatedHash] = step.latestValidHash
     of opChecks:
       stepChecks(step.checks, stores.dag, stores.fkChoice, time)
     else: