prioritize REST API-provided BLS to execution changes over gossip-received changes (#4580)

tersec 2023-02-03 16:28:28 +01:00 committed by GitHub
parent 1c62a5eb24
commit bca781b1b0
11 changed files with 105 additions and 158 deletions
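
The commit splits the validator change pool's single BLS-to-execution-change queue into an API subpool and a gossip subpool, and drains the API subpool first when packing a block. A minimal sketch of that idea in Nim, using only std/deques; the names and the pick helper are illustrative, not the pool's actual API:

import std/deques

type Change = object
  validatorIndex: uint64

var
  apiQueue = initDeque[Change]()
  gossipQueue = initDeque[Change]()

proc pick(maxLen: int): seq[Change] =
  # Drain the locally submitted (API) queue before the gossip queue,
  # so REST submissions win when block space runs out.
  for q in [addr apiQueue, addr gossipQueue]:
    while q[].len > 0 and result.len < maxLen:
      result.add q[].popFirst()

apiQueue.addLast Change(validatorIndex: 1)
gossipQueue.addLast Change(validatorIndex: 2)
doAssert pick(1)[0].validatorIndex == 1  # the API message takes the slot

The real pool pops from the recent end of each deque (popLast) and re-validates each message against the proposal state, as the diffs below show.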

View File

@@ -34,7 +34,7 @@ type
bid*: BlockId ##\
## Root that can be used to retrieve block data from database
executionBlockRoot*: Option[Eth2Digest]
executionBlockRoot*: Opt[Eth2Digest]
parent*: BlockRef ##\
## Not nil, except for the finalized head
@@ -54,7 +54,7 @@ template slot*(blck: BlockRef): Slot = blck.bid.slot
func init*(
T: type BlockRef, root: Eth2Digest,
executionPayloadRoot: Option[Eth2Digest], slot: Slot): BlockRef =
executionPayloadRoot: Opt[Eth2Digest], slot: Slot): BlockRef =
BlockRef(
bid: BlockId(root: root, slot: slot),
executionBlockRoot: executionPayloadRoot)
@@ -63,7 +63,7 @@ func init*(
T: type BlockRef, root: Eth2Digest,
blck: phase0.SomeBeaconBlock | altair.SomeBeaconBlock |
phase0.TrustedBeaconBlock | altair.TrustedBeaconBlock): BlockRef =
BlockRef.init(root, some ZERO_HASH, blck.slot)
BlockRef.init(root, Opt.some ZERO_HASH, blck.slot)
func init*(
T: type BlockRef, root: Eth2Digest,
@@ -71,7 +71,8 @@ func init*(
capella.SomeBeaconBlock | capella.TrustedBeaconBlock |
eip4844.SomeBeaconBlock | eip4844.TrustedBeaconBlock): BlockRef =
BlockRef.init(
root, some Eth2Digest(blck.body.execution_payload.block_hash), blck.slot)
root, Opt.some Eth2Digest(blck.body.execution_payload.block_hash),
blck.slot)
func parent*(bs: BlockSlot): BlockSlot =
## Return a blockslot representing the previous slot, using the parent block
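
The change above is part of a mechanical migration from std/options' Option[Eth2Digest] to Opt[Eth2Digest]. A short sketch of the pattern, assuming Opt comes from stew/results as elsewhere in this codebase:

import stew/results

var root: Opt[string]      # a default-initialized Opt is none
doAssert root.isNone
root = Opt.some "0xabc"    # replaces plain `some ...`
doAssert root.isSome and root.get == "0xabc"
root = Opt.none string     # none takes the type, as with std/options
doAssert root.isNone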

View File

@@ -968,7 +968,8 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
# hasn't been written yet
for blck in db.getAncestorSummaries(head.root):
# The execution block root gets filled in as needed
let newRef = BlockRef.init(blck.root, none Eth2Digest, blck.summary.slot)
let newRef =
BlockRef.init(blck.root, Opt.none Eth2Digest, blck.summary.slot)
if headRef == nil:
headRef = newRef
@@ -1950,7 +1951,7 @@ proc loadExecutionBlockRoot*(dag: ChainDAGRef, bid: BlockId): Eth2Digest =
proc loadExecutionBlockRoot*(dag: ChainDAGRef, blck: BlockRef): Eth2Digest =
if blck.executionBlockRoot.isNone:
blck.executionBlockRoot = some dag.loadExecutionBlockRoot(blck.bid)
blck.executionBlockRoot = Opt.some dag.loadExecutionBlockRoot(blck.bid)
blck.executionBlockRoot.unsafeGet
proc updateHead*(
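
loadExecutionBlockRoot keeps its lazy-fill behaviour, only with Opt instead of Option: compute the root on first access, then serve the cached value. A simplified sketch (CachedBlock and loadFromDb are hypothetical stand-ins):

import stew/results

type CachedBlock = ref object
  executionBlockRoot: Opt[string]

proc loadFromDb(): string = "0xroot"  # stand-in for the database lookup

proc executionRoot(b: CachedBlock): string =
  if b.executionBlockRoot.isNone:
    b.executionBlockRoot = Opt.some loadFromDb()
  b.executionBlockRoot.unsafeGet

let b = CachedBlock()
doAssert b.executionRoot == "0xroot"  # first access fills the cache
doAssert b.executionBlockRoot.isSome  # later accesses reuse it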

View File

@@ -10,16 +10,12 @@
import
# Standard libraries
std/[deques, sets],
# Status libraries
chronicles,
# Internal
../spec/datatypes/base,
../spec/[helpers, state_transition_block],
"."/[attestation_pool, blockchain_dag]
export base, deques, sets, blockchain_dag
logScope: topics = "exitpool"
export base, deques, blockchain_dag
const
ATTESTER_SLASHINGS_BOUND = MAX_ATTESTER_SLASHINGS * 4
@@ -27,7 +23,7 @@ const
VOLUNTARY_EXITS_BOUND = MAX_VOLUNTARY_EXITS * 4
# For Capella launch; scale back later
BLS_TO_EXECUTION_CHANGES_BOUND = 32768'u64
BLS_TO_EXECUTION_CHANGES_BOUND = 16384'u64
type
OnVoluntaryExitCallback =
@@ -47,23 +43,26 @@ type
voluntary_exits*: Deque[SignedVoluntaryExit] ## \
## Not a function of chain DAG branch; just used as a FIFO queue for blocks
bls_to_execution_changes*: Deque[SignedBLSToExecutionChange] ## \
bls_to_execution_changes_gossip*: Deque[SignedBLSToExecutionChange] ## \
## Not a function of chain DAG branch; just used as a FIFO queue for blocks
prior_seen_attester_slashed_indices*: HashSet[uint64] ## \
bls_to_execution_changes_api*: Deque[SignedBLSToExecutionChange] ## \
## Not a function of chain DAG branch; just used as a FIFO queue for blocks
prior_seen_attester_slashed_indices: HashSet[uint64] ## \
## Records attester-slashed indices seen.
prior_seen_proposer_slashed_indices*: HashSet[uint64] ## \
prior_seen_proposer_slashed_indices: HashSet[uint64] ## \
## Records proposer-slashed indices seen.
prior_seen_voluntary_exit_indices*: HashSet[uint64] ##\
prior_seen_voluntary_exit_indices: HashSet[uint64] ##\
## Records voluntary exit indices seen.
prior_seen_bls_to_execution_change_indices*: HashSet[uint64] ##\
prior_seen_bls_to_execution_change_indices: HashSet[uint64] ##\
## Records BLS to execution change indices seen.
dag*: ChainDAGRef
attestationPool*: ref AttestationPool
attestationPool: ref AttestationPool
onVoluntaryExitReceived*: OnVoluntaryExitCallback
func init*(T: type ValidatorChangePool, dag: ChainDAGRef,
@@ -78,7 +77,12 @@ func init*(T: type ValidatorChangePool, dag: ChainDAGRef,
initDeque[ProposerSlashing](initialSize = PROPOSER_SLASHINGS_BOUND.int),
voluntary_exits:
initDeque[SignedVoluntaryExit](initialSize = VOLUNTARY_EXITS_BOUND.int),
bls_to_execution_changes:
bls_to_execution_changes_gossip:
# TODO scale-back to BLS_TO_EXECUTION_CHANGES_BOUND post-capella, but
# given large bound, allow to grow dynamically rather than statically
# allocate all at once
initDeque[SignedBLSToExecutionChange](initialSize = 1024),
bls_to_execution_changes_api:
# TODO scale-back to BLS_TO_EXECUTION_CHANGES_BOUND post-capella, but
# given large bound, allow to grow dynamically rather than statically
# allocate all at once
@@ -153,28 +157,34 @@ func addMessage*(pool: var ValidatorChangePool, msg: SignedVoluntaryExit) =
pool.voluntary_exits.addValidatorChangeMessage(
pool.prior_seen_voluntary_exit_indices, msg, VOLUNTARY_EXITS_BOUND)
func addMessage*(pool: var ValidatorChangePool, msg: SignedBLSToExecutionChange) =
func addMessage*(
pool: var ValidatorChangePool, msg: SignedBLSToExecutionChange,
localPriorityMessage: bool) =
pool.prior_seen_bls_to_execution_change_indices.incl(
msg.message.validator_index)
pool.bls_to_execution_changes.addValidatorChangeMessage(
pool.prior_seen_bls_to_execution_change_indices, msg,
BLS_TO_EXECUTION_CHANGES_BOUND)
template addMessageAux(subpool) =
addValidatorChangeMessage(
subpool, pool.prior_seen_bls_to_execution_change_indices, msg,
BLS_TO_EXECUTION_CHANGES_BOUND)
if localPriorityMessage:
addMessageAux(pool.bls_to_execution_changes_api)
else:
addMessageAux(pool.bls_to_execution_changes_gossip)
proc validateExitMessage(
proc validateValidatorChangeMessage(
cfg: RuntimeConfig, state: ForkyBeaconState, msg: ProposerSlashing): bool =
check_proposer_slashing(state, msg, {}).isOk
proc validateExitMessage(
proc validateValidatorChangeMessage(
cfg: RuntimeConfig, state: ForkyBeaconState, msg: AttesterSlashing): bool =
check_attester_slashing(state, msg, {}).isOk
proc validateExitMessage(
proc validateValidatorChangeMessage(
cfg: RuntimeConfig, state: ForkyBeaconState, msg: SignedVoluntaryExit):
bool =
check_voluntary_exit(cfg, state, msg, {}).isOk
proc validateExitMessage(
proc validateValidatorChangeMessage(
cfg: RuntimeConfig, state: ForkyBeaconState,
msg: SignedBLSToExecutionChange): bool =
true
# TODO check_voluntary_exit(cfg, state, msg, {}).isOk
check_bls_to_execution_change(cfg, state, msg).isOk
proc getValidatorChangeMessagesForBlock(
subpool: var Deque, cfg: RuntimeConfig, state: ForkyBeaconState,
@@ -200,20 +210,20 @@ proc getValidatorChangeMessagesForBlock(
# removed them, if we have the chance.
while subpool.len > 0 and output.len < output.maxLen:
# Prefer recent messages
let exit_message = subpool.popLast()
let validator_change_message = subpool.popLast()
# Re-check that message is still valid in the state that we're proposing
if not validateExitMessage(cfg, state, exit_message):
if not validateValidatorChangeMessage(cfg, state, validator_change_message):
continue
var skip = false
for slashed_index in getValidatorIndices(exit_message):
for slashed_index in getValidatorIndices(validator_change_message):
if seen.containsOrIncl(slashed_index):
skip = true
break
if skip:
continue
if not output.add exit_message:
if not output.add validator_change_message:
break
subpool.clear()
@@ -232,8 +242,13 @@ proc getBeaconBlockValidatorChanges*(
getValidatorChangeMessagesForBlock(
pool.voluntary_exits, cfg, state, indices, res.voluntary_exits)
when typeof(state).toFork() >= ConsensusFork.Capella:
# Prioritize these
getValidatorChangeMessagesForBlock(
pool.bls_to_execution_changes, cfg, state, indices,
pool.bls_to_execution_changes_api, cfg, state, indices,
res.bls_to_execution_changes)
getValidatorChangeMessagesForBlock(
pool.bls_to_execution_changes_gossip, cfg, state, indices,
res.bls_to_execution_changes)
res
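
Both subpools are fed through the same addValidatorChangeMessage helper, which this diff does not show; the sketch below is an assumption about its shape based on the call sites: a FIFO bounded by eviction of the oldest entry, deduplicated by validator index.

import std/[deques, sets, sequtils]

const bound = 3
var
  subpool = initDeque[uint64]()
  seen = initHashSet[uint64]()

proc addMessage(index: uint64) =
  if seen.containsOrIncl(index):
    return                      # at most one message per validator index
  while subpool.len >= bound:
    discard subpool.popFirst()  # evict the oldest once the bound is hit
  subpool.addLast index

for i in [1'u64, 1, 2, 3, 4]:
  addMessage i
doAssert subpool.toSeq == @[2'u64, 3, 4]  # repeat dropped, oldest evicted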

View File

@@ -413,7 +413,9 @@ proc processBlsToExecutionChange*(
if v.isOk():
trace "BLS to execution change validated"
self.validatorChangePool[].addMessage(blsToExecutionChange)
# Prioritize API-provided messages
self.validatorChangePool[].addMessage(
blsToExecutionChange, src == MsgSource.api)
else:
debug "Dropping BLS to execution change", validationError = v.error
beacon_attester_slashings_dropped.inc(1, [$v.error[0]])
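
The routing flag is just the message's provenance: src == MsgSource.api marks REST submissions as priority. A toy version of the split (MsgSource here is a stand-in enum; the real one lives in the gossip-processing code):

import std/deques

type MsgSource = enum gossip, api

var
  apiQ = initDeque[uint64]()
  gossipQ = initDeque[uint64]()

proc addMessage(index: uint64, localPriorityMessage: bool) =
  if localPriorityMessage: apiQ.addLast index
  else: gossipQ.addLast index

proc processChange(src: MsgSource, index: uint64) =
  addMessage(index, src == MsgSource.api)  # only REST gets priority

processChange(MsgSource.api, 7)
doAssert apiQ.len == 1 and gossipQ.len == 0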

View File

@@ -1120,7 +1120,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet, "/eth/v1/beacon/pool/bls_to_execution_changes") do (
) -> RestApiResponse:
return RestApiResponse.jsonResponse(
toSeq(node.validatorChangePool.bls_to_execution_changes))
toSeq(node.validatorChangePool.bls_to_execution_changes_gossip) &
toSeq(node.validatorChangePool.bls_to_execution_changes_api))
# https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/submitPoolBLSToExecutionChange
# https://github.com/ethereum/beacon-APIs/blob/86850001845df9163da5ae9605dbf15cd318d5d0/apis/beacon/pool/bls_to_execution_changes.yaml
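
With two subpools, the GET endpoint has to expose their union; a sketch of the concatenation as in the handler above (the gossip half first; the order between halves is presentational only):

import std/[deques, sequtils]

var
  gossipQ = initDeque[uint64]()
  apiQ = initDeque[uint64]()
gossipQ.addLast 1
gossipQ.addLast 2
apiQ.addLast 3
doAssert toSeq(gossipQ) & toSeq(apiQ) == @[1'u64, 2, 3]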

View File

@@ -535,7 +535,7 @@ func shortLog*(v: BLSToExecutionChange): auto =
(
validator_index: v.validator_index,
from_bls_pubkey: shortLog(v.from_bls_pubkey),
to_execution_address: v.to_execution_address
to_execution_address: $v.to_execution_address
)
func shortLog*(v: SignedBLSToExecutionChange): auto =

View File

@@ -370,16 +370,27 @@ proc verify_builder_signature*(
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.2/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
func compute_bls_to_execution_change_signing_root(
genesisFork: Fork, genesis_validators_root: Eth2Digest,
epoch: Epoch, msg: BLSToExecutionChange): Eth2Digest =
msg: BLSToExecutionChange): Eth2Digest =
# So the epoch doesn't matter when calling get_domain
doAssert genesisFork.previous_version == genesisFork.current_version
let domain = get_domain(
genesisFork, DOMAIN_BLS_TO_EXECUTION_CHANGE, epoch,
genesisFork, DOMAIN_BLS_TO_EXECUTION_CHANGE, GENESIS_EPOCH,
genesis_validators_root)
compute_signing_root(msg, domain)
proc get_bls_to_execution_change_signature*(
genesisFork: Fork, genesis_validators_root: Eth2Digest,
msg: BLSToExecutionChange, privkey: ValidatorPrivKey):
CookedSig =
let signing_root = compute_bls_to_execution_change_signing_root(
genesisFork, genesis_validators_root, msg)
blsSign(privkey, signing_root.data)
proc verify_bls_to_execution_change_signature*(
genesisFork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,
genesisFork: Fork, genesis_validators_root: Eth2Digest,
msg: SignedBLSToExecutionChange,
pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool =
let signing_root = compute_bls_to_execution_change_signing_root(
genesisFork, genesis_validators_root, epoch, msg.message)
genesisFork, genesis_validators_root, msg.message)
blsVerify(pubkey, signing_root.data, signature)
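
The epoch parameter could be dropped because this domain is always computed against the genesis fork, where previous_version == current_version, so get_domain returns the same domain at every epoch; the new doAssert pins exactly that precondition. A toy model of the fork-version selection (Version and ForkSketch are simplified stand-ins for the spec types):

type
  Version = array[4, byte]
  ForkSketch = object
    previousVersion, currentVersion: Version
    epoch: uint64

func versionAt(f: ForkSketch, epoch: uint64): Version =
  # get_domain picks the fork version by comparing against the fork epoch
  if epoch < f.epoch: f.previousVersion
  else: f.currentVersion

let genesisFork = ForkSketch()  # both versions equal, epoch 0
doAssert versionAt(genesisFork, 0) == versionAt(genesisFork, 1_000_000)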

View File

@@ -407,9 +407,8 @@ proc check_bls_to_execution_change*(
return err("process_bls_to_execution_change: invalid withdrawal credentials")
if not verify_bls_to_execution_change_signature(
cfg.genesisFork, state.genesis_validators_root, state.get_current_epoch,
signed_address_change, address_change.from_bls_pubkey,
signed_address_change.signature):
cfg.genesisFork, state.genesis_validators_root, signed_address_change,
address_change.from_bls_pubkey, signed_address_change.signature):
return err("process_bls_to_execution_change: invalid signature")
ok()

View File

@@ -8,7 +8,7 @@
{.push raises: [].}
import
std/[options, tables, json, streams, sequtils, uri],
std/[tables, json, streams, sequtils, uri],
chronos, chronicles, metrics, eth/async_utils,
json_serialization/std/net,
presto, presto/client,
@@ -21,7 +21,7 @@ import
./slashing_protection
export
streams, options, keystore, phase0, altair, tables, uri, crypto,
streams, keystore, phase0, altair, tables, uri, crypto,
rest_types, eth2_rest_serialization, rest_remote_signer_calls,
slashing_protection

View File

@@ -1,102 +0,0 @@
# beacon_chain
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# https://notes.ethereum.org/@9AeMAlpyQYaAAyuj47BzRw/rkwW3ceVY
# Monitor traffic: socat -v TCP-LISTEN:9550,fork TCP-CONNECT:127.0.0.1:8550
import
std/options,
stew/results,
chronos,
../beacon_chain/eth1/eth1_monitor
from nimcrypto/utils import fromHex
from web3/engine_api_types import PayloadExecutionStatus
from ../beacon_chain/networking/network_metadata import Eth1Network
from ../beacon_chain/spec/datatypes/base import ZERO_HASH
from ../beacon_chain/spec/presets import Eth1Address, defaultRuntimeConfig
{.push raises: [].}
# TODO hm, actually factor this out into a callable function
# and have a version with the result of the JWT secret slurp for testing purposes
proc readJwtSecret(jwtSecretFile: string): Result[seq[byte], cstring] =
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/authentication.md#key-distribution
# If such a parameter is given, but the file cannot be read, or does not
# contain a hex-encoded key of 256 bits, the client should treat this as an
# error: either abort the startup, or show error and continue without
# exposing the authenticated port.
const MIN_SECRET_LEN = 32
try:
let lines = readLines(jwtSecretFile, 1)
if lines.len > 0:
# Secret JWT key is parsed in constant time using nimcrypto:
# https://github.com/cheatfate/nimcrypto/pull/44
let secret = utils.fromHex(lines[0])
if secret.len >= MIN_SECRET_LEN:
ok(secret)
else:
err("JWT secret not at least 256 bits")
else:
err("JWT secret file empty")
except IOError as exc:
err("JWT secret file could not be read from")
const
feeRecipient =
Eth1Address.fromHex("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b")
web3Url = "http://127.0.0.1:8551"
proc run() {.async.} =
let
jwtSecret = some readJwtSecret("jwt.hex").get
eth1Monitor = Eth1Monitor.init(
defaultRuntimeConfig, db = nil, nil, @[web3Url],
none(DepositTreeSnapshot), none(Eth1Network),
false, jwtSecret)
web3Provider = (await Web3DataProvider.new(
default(Eth1Address), web3Url, jwtSecret)).get
const feeRecipient =
Eth1Address.fromHex("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b")
let
existingBlock = await web3Provider.getBlockByNumber(0)
await eth1Monitor.ensureDataProvider()
let
payloadId = await eth1Monitor.forkchoiceUpdated(
existingBlock.hash.asEth2Digest,
existingBlock.hash.asEth2Digest,
existingBlock.timestamp.uint64 + 12,
ZERO_HASH.data, # Random
feeRecipient)
payload = await eth1Monitor.getPayload(
array[8, byte] (payloadId.payloadId.get))
payloadStatus = await eth1Monitor.newPayload(payload)
fcupdatedStatus = await eth1Monitor.forkchoiceUpdated(
payload.blockHash.asEth2Digest,
payload.blockHash.asEth2Digest,
existingBlock.timestamp.uint64 + 24,
ZERO_HASH.data, # Random
feeRecipient)
payload2 = await eth1Monitor.getPayload(
array[8, byte] (fcupdatedStatus.payloadId.get))
payloadStatus2 = await eth1Monitor.newPayload(payload2)
fcupdatedStatus2 = await eth1Monitor.forkchoiceUpdated(
payload2.blockHash.asEth2Digest,
payload2.blockHash.asEth2Digest,
existingBlock.timestamp.uint64 + 36,
ZERO_HASH.data, # Random
feeRecipient)
doAssert payloadStatus.status == PayloadExecutionStatus.valid
doAssert fcupdatedStatus.payloadStatus.status == PayloadExecutionStatus.valid
doAssert payloadStatus2.status == PayloadExecutionStatus.valid
doAssert fcupdatedStatus2.payloadStatus.status == PayloadExecutionStatus.valid
waitFor run()

View File

@@ -49,6 +49,8 @@ func makeSignedVoluntaryExit(
fork, genesis_validators_root, tmp,
MockPrivKeys[validator_index]).toValidatorSig)
from std/sequtils import allIt
suite "Validator change pool testing suite":
setup:
let
@@ -129,7 +131,6 @@ suite "Validator change pool testing suite":
for i in 0'u64 .. MAX_VOLUNTARY_EXITS + 5:
for j in 0'u64 .. i:
# Cannot exit until
let msg = makeSignedVoluntaryExit(
fork, genesis_validators_root, dag.headState.get_current_epoch(), j)
if i == 0:
@@ -159,12 +160,17 @@ suite "Validator change pool testing suite":
for i in 0'u64 .. MAX_BLS_TO_EXECUTION_CHANGES + 5:
for j in 0'u64 .. i:
let msg = SignedBLSToExecutionChange(
message: BLSToExecutionChange(validator_index: j))
var msg = SignedBLSToExecutionChange(
message: BLSToExecutionChange(
validator_index: j,
from_bls_pubkey: MockPubKeys[j]))
msg.signature = toValidatorSig(get_bls_to_execution_change_signature(
dag.cfg.genesisFork(), dag.genesis_validators_root, msg.message,
MockPrivKeys[msg.message.validator_index]))
if i == 0:
check not pool[].isSeen(msg)
pool[].addMessage(msg)
pool[].addMessage(msg, false)
check: pool[].isSeen(msg)
withState(dag.headState):
@@ -184,20 +190,33 @@ suite "Validator change pool testing suite":
let fork = dag.forkAtEpoch(dag.headState.get_current_epoch())
for i in 0'u64 .. MAX_BLS_TO_EXECUTION_CHANGES + 5:
var priorityMessages: seq[SignedBLSToExecutionChange]
for j in 0'u64 .. i:
let msg = SignedBLSToExecutionChange(
message: BLSToExecutionChange(validator_index: j))
var msg = SignedBLSToExecutionChange(
message: BLSToExecutionChange(
validator_index: j,
from_bls_pubkey: MockPubKeys[j]))
msg.signature = toValidatorSig(get_bls_to_execution_change_signature(
dag.cfg.genesisFork(), dag.genesis_validators_root, msg.message,
MockPrivKeys[msg.message.validator_index]))
if i == 0:
check not pool[].isSeen(msg)
pool[].addMessage(msg)
let isPriorityMessage = i mod 2 == 0
pool[].addMessage(msg, localPriorityMessage = isPriorityMessage)
if isPriorityMessage:
priorityMessages.add msg
check: pool[].isSeen(msg)
withState(dag.headState):
let blsToExecutionChanges = pool[].getBeaconBlockValidatorChanges(
cfg, forkyState.data).bls_to_execution_changes
check:
pool[].getBeaconBlockValidatorChanges(
cfg, forkyState.data).bls_to_execution_changes.lenu64 ==
min(i + 1, MAX_BLS_TO_EXECUTION_CHANGES)
blsToExecutionChanges.lenu64 == min(i + 1, MAX_BLS_TO_EXECUTION_CHANGES)
# Ensure priority of API to gossip messages is observed
allIt(priorityMessages, pool[].isSeen(it))
pool[].getBeaconBlockValidatorChanges(
cfg, forkyState.data).bls_to_execution_changes.len == 0
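
The new test block asserts the priority property end to end: when the per-block limit truncates the set, every API-submitted message still lands in the block and only gossip messages are squeezed out. A condensed, self-contained restatement of that property under the two-queue sketch from the top of this commit:

import std/[deques, sequtils]

const maxPerBlock = 2
var
  apiQ = initDeque[uint64]()
  gossipQ = initDeque[uint64]()

apiQ.addLast 10
apiQ.addLast 11
gossipQ.addLast 20  # the only candidate for being dropped

var included: seq[uint64]
for q in [addr apiQ, addr gossipQ]:
  while q[].len > 0 and included.len < maxPerBlock:
    included.add q[].popFirst()

doAssert included.len == maxPerBlock
doAssert included.allIt(it in [10'u64, 11])  # every priority message got in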