# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  stew/results,
  std/sequtils,
  kzg4844/[kzg_ex],
  chronicles,
  metrics,
  ../spec/network,
  ../spec/eip7594_helpers,
  ../consensus_object_pools/spec_cache,
  ../gossip_processing/[
    eth2_processor,
    block_processor],
  ../networking/eth2_network,
  ./activity_metrics,
  ../spec/datatypes/[deneb, eip7594]

from ../spec/state_transition_block import validate_blobs

export eth2_processor, eth2_network

logScope:
  topics = "message_router"

declareCounter beacon_voluntary_exits_sent,
  "Number of beacon voluntary exits sent by this node"

declareCounter beacon_attester_slashings_sent,
  "Number of beacon attester slashings sent by this node"

declareCounter beacon_proposer_slashings_sent,
  "Number of beacon proposer slashings sent by this node"

type
  MessageRouter* = object
    ## The message router is responsible for routing messages produced by
    ## attached validators or received via REST.
    ##
    ## Message routing does 3 things:
    ##
    ## * perform a "quick" sanity check of the message similar to gossip
    ##   processing - regardless of where the message comes from, this check is
    ##   done so as to protect the internal state of the beacon node
    ## * broadcast the message to the network - in general, the aim is to start
    ##   the broadcasting as soon as possible without risking that the node
    ##   gets descored
    ## * update the internal state of the beacon node with the data in the
    ##   message - for example add a block to the dag or an attestation to the
    ##   attestation pool and fork choice - as a consequence, the message will
    ##   also be published to event subscribers
    ##
    ## Because the message router produces messages that will be gossiped, we
    ## run the messages through the same validation as incoming gossip messages.
    ##
    ## In most cases, processing of valid messages is identical to that done
    ## for gossip - blocks in particular however skip the queue.

    processor*: ref Eth2Processor
    network*: Eth2Node

    # TODO this belongs somewhere else, ie sync committee pool
    onSyncCommitteeMessage*: proc(slot: Slot) {.gcsafe, raises: [].}
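
# Typical wiring (sketch): the beacon node owns a single `MessageRouter`
# pointing at its `Eth2Processor` and `Eth2Node`; REST handlers and validator
# duties then call the `route*` procs below, for example
# `discard await router.routeAttestation(attestation)`.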

func isGoodForSending(validationResult: ValidationRes): bool =
  # When routing messages from REST, it's possible that these have already
  # been received via gossip (because they might have been sent to multiple
  # beacon nodes, as is the case with Vouch) - thus, we treat `IGNORE`
  # as success as far as further processing goes. `libp2p` however will not
  # re-broadcast the message as it already exists in its cache.
  validationResult.isOk() or
    validationResult.error[0] == ValidationResult.Ignore

template dag(router: MessageRouter): ChainDAGRef =
  router.processor[].dag
template quarantine(router: MessageRouter): ref Quarantine =
  router.processor[].quarantine
template blockProcessor(router: MessageRouter): ref BlockProcessor =
  router.processor[].blockProcessor
template getCurrentBeaconTime(router: MessageRouter): BeaconTime =
  router.processor[].getCurrentBeaconTime()

type RouteBlockResult = Result[Opt[BlockRef], string]
proc routeSignedBeaconBlock*(
    router: ref MessageRouter, blck: ForkySignedBeaconBlock,
    blobsOpt: Opt[seq[BlobSidecar]]):
    Future[RouteBlockResult] {.async: (raises: [CancelledError]).} =
  ## Validate and broadcast beacon block, then add it to the block database
  ## Returns the new Head when block is added successfully to dag, none when
  ## block passes validation but is not added, and error otherwise
  let wallTime = router[].getCurrentBeaconTime()

  # Start with a quick gossip validation check such that broadcasting the
  # block doesn't get the node into trouble
  block:
    let res = validateBeaconBlock(
      router[].dag, router[].quarantine, blck, wallTime, {})

    if not res.isGoodForSending():
      warn "Block failed validation",
        blockRoot = shortLog(blck.root), blck = shortLog(blck.message),
        signature = shortLog(blck.signature), error = res.error()
      return err($(res.error()[1]))

  when typeof(blck).kind >= ConsensusFork.Deneb:
    if blobsOpt.isSome:
      let blobs = blobsOpt.get()
      let kzgCommits = blck.message.body.blob_kzg_commitments.asSeq
      if blobs.len > 0 or kzgCommits.len > 0:
        let res = validate_blobs(
          kzgCommits,
          blobs.mapIt(KzgBlob(bytes: it.blob)),
          blobs.mapIt(it.kzg_proof))
        if res.isErr():
          warn "Blobs failed validation",
            blockRoot = shortLog(blck.root),
            blobs = shortLog(blobs),
            blck = shortLog(blck.message),
            signature = shortLog(blck.signature),
            msg = res.error()
          return err(res.error())

  let
    sendTime = router[].getCurrentBeaconTime()
    delay = sendTime - blck.message.slot.block_deadline()

  # The block (and blobs, if present) passed basic gossip validation
  # - we can "safely" broadcast it now. In fact, per the spec, we
  # should broadcast it even if it later fails to apply to our
  # state.

  let res = await router[].network.broadcastBeaconBlock(blck)

  if res.isOk():
    beacon_blocks_sent.inc()
    beacon_blocks_sent_delay.observe(delay.toFloatSeconds())

    notice "Block sent",
      blockRoot = shortLog(blck.root), blck = shortLog(blck.message),
      signature = shortLog(blck.signature), delay
  else: # "no broadcast" is not a fatal error
    notice "Block not sent",
      blockRoot = shortLog(blck.root), blck = shortLog(blck.message),
      signature = shortLog(blck.signature), error = res.error()

  # PREVENT PROPOSING BLOB SIDECARS IN PEERDAS DEVNET
  var blobRefs = Opt.none(BlobSidecars)
  # if blobsOpt.isSome():
  #   let blobs = blobsOpt.get()
  #   var workers = newSeq[Future[SendResult]](blobs.len)
  #   for i in 0..<blobs.lenu64:
  #     let subnet_id = compute_subnet_for_blob_sidecar(i)
  #     workers[i] = router[].network.broadcastBlobSidecar(subnet_id, blobs[i])
  #   let allres = await allFinished(workers)
  #   for i in 0..<allres.len:
  #     let res = allres[i]
  #     doAssert res.finished()
  #     if res.failed():
  #       notice "Blob not sent",
  #         blob = shortLog(blobs[i]), error = res.error[]
  #     else:
  #       notice "Blob sent", blob = shortLog(blobs[i])
  #   blobRefs = Opt.some(blobs.mapIt(newClone(it)))

  var dataColumnRefs = Opt.none(DataColumnSidecars)
  when typeof(blck).kind >= ConsensusFork.Deneb:
    # `blobsOpt` may be `none` for blob-less blocks, so avoid an unconditional
    # `get()` and fall back to an empty blob sequence instead
    let blobs =
      if blobsOpt.isSome(): blobsOpt.get()
      else: default(seq[BlobSidecar])
    if blobs.len != 0:
      let dataColumnsOpt =
        newClone get_data_column_sidecars(blck, blobs.mapIt(KzgBlob(bytes: it.blob)))
      if not dataColumnsOpt[].isOk:
        debug "Issue with computing data column from blob bundle"
      else:
        let data_columns = dataColumnsOpt[].get()
        var das_workers = newSeq[Future[SendResult]](len(data_columns))
        for i in 0..<data_columns.lenu64:
          let subnet_id =
            compute_subnet_for_data_column_sidecar(data_columns[i].index)
          das_workers[i] =
            router[].network.broadcastDataColumnSidecar(subnet_id, data_columns[i])
        let allres = await allFinished(das_workers)
        for i in 0..<allres.len:
          let res = allres[i]
          doAssert res.finished()
          if res.failed():
            notice "Data column not sent",
              data_column = shortLog(data_columns[i]), error = res.error[]
          else:
            notice "Data column sent", data_column = shortLog(data_columns[i])

        # Of the columns computed for this block, keep only those that this
        # node custodies, as determined by its node id and the custody subnet
        # count it advertises in its metadata
        let
          custody_subnet_count = router[].network.metadata.custody_subnet_count.uint64
          custody_columns = router[].network.nodeId.get_custody_columns(
            max(SAMPLES_PER_SLOT.uint64, custody_subnet_count))

        debug "Data column sidecars computed",
          dataColumns = data_columns.len, custodyColumns = custody_columns.len

        var final_columns: seq[DataColumnSidecar]
        for dc in data_columns:
          if dc.index in custody_columns:
            final_columns.add(dc)
        dataColumnRefs = Opt.some(final_columns.mapIt(newClone(it)))

  let added = await router[].blockProcessor[].addBlock(
    MsgSource.api, ForkedSignedBeaconBlock.init(blck), blobRefs, dataColumnRefs)

  # The `Opt[BlockRef]` we return tells the caller whether the block was
  # integrated into the chain
  if added.isErr():
    return if added.error() != VerifierError.Duplicate:
      warn "Unable to add routed block to block pool",
        blockRoot = shortLog(blck.root), blck = shortLog(blck.message),
        signature = shortLog(blck.signature), err = added.error()
      ok(Opt.none(BlockRef))
    else:
      # If it's a duplicate, there's an existing BlockRef to return. The block
      # shouldn't be finalized already because that takes a couple of epochs,
      # so only check non-finalized resolved blockrefs.
      let blockRef = router[].dag.getBlockRef(blck.root)
      if blockRef.isErr:
        warn "Unable to add routed duplicate block to block pool",
          blockRoot = shortLog(blck.root), blck = shortLog(blck.message),
          signature = shortLog(blck.signature), err = added.error()
      ok(blockRef)

  let blockRef = router[].dag.getBlockRef(blck.root)
  if blockRef.isErr:
    warn "Block finalised while waiting for block processor",
      blockRoot = shortLog(blck.root), blck = shortLog(blck.message),
      signature = shortLog(blck.signature)
  ok(blockRef)

proc routeAttestation*(
    router: ref MessageRouter,
    attestation: phase0.Attestation | electra.Attestation,
    subnet_id: SubnetId, checkSignature: bool):
    Future[SendResult] {.async: (raises: [CancelledError]).} =
  ## Process and broadcast attestation - processing will register it with
  ## the attestation pool
  block:
    let res = await router[].processor.processAttestation(
      MsgSource.api, attestation, subnet_id, checkSignature)

    if not res.isGoodForSending:
      warn "Attestation failed validation",
        attestation = shortLog(attestation), error = res.error()
      return err(res.error()[1])

  let
    sendTime = router[].processor.getCurrentBeaconTime()
    delay = sendTime - attestation.data.slot.attestation_deadline()
    res = await router[].network.broadcastAttestation(subnet_id, attestation)

  if res.isOk():
    beacon_attestations_sent.inc()
    beacon_attestation_sent_delay.observe(delay.toFloatSeconds())

    info "Attestation sent",
      attestation = shortLog(attestation), delay, subnet_id
  else: # "no broadcast" is not a fatal error
    notice "Attestation not sent",
      attestation = shortLog(attestation), error = res.error()

  return ok()

proc routeAttestation*(
    router: ref MessageRouter,
    attestation: phase0.Attestation | electra.Attestation):
    Future[SendResult] {.async: (raises: [CancelledError]).} =
  # Compute subnet, then route attestation
  let
    target = router[].dag.getBlockRef(attestation.data.target.root).valueOr:
      notice "Attempt to send attestation for unknown target",
        attestation = shortLog(attestation)
      return err(
        "Attempt to send attestation for unknown target")

    shufflingRef = router[].dag.getShufflingRef(
        target, attestation.data.target.epoch, false).valueOr:
      warn "Cannot construct EpochRef for attestation, skipping send - report bug",
        target = shortLog(target),
        attestation = shortLog(attestation)
      return
    committee_index =
      shufflingRef.get_committee_index(attestation.committee_index()).valueOr:
        notice "Invalid committee index in attestation",
          attestation = shortLog(attestation)
        return err("Invalid committee index in attestation")
    subnet_id = compute_subnet_for_attestation(
      get_committee_count_per_slot(shufflingRef), attestation.data.slot,
      committee_index)

  return await router.routeAttestation(
    attestation, subnet_id, checkSignature = true)

proc routeSignedAggregateAndProof*(
    router: ref MessageRouter, proof: phase0.SignedAggregateAndProof,
    checkSignature = true):
    Future[SendResult] {.async: (raises: [CancelledError]).} =
  ## Validate and broadcast aggregate
  block:
    # Because the aggregate was (most likely) produced by this beacon node,
    # we already know all attestations in it - we skip the coverage check so
    # that all processing happens anyway
    let res = await router[].processor.processSignedAggregateAndProof(
      MsgSource.api, proof, checkSignature = checkSignature,
      checkCover = false)
    if not res.isGoodForSending:
      warn "Aggregated attestation failed validation",
        attestation = shortLog(proof.message.aggregate),
        aggregator_index = proof.message.aggregator_index,
        signature = shortLog(proof.signature), error = res.error()
      return err(res.error()[1])

  let
    sendTime = router[].processor.getCurrentBeaconTime()
    delay = sendTime - proof.message.aggregate.data.slot.aggregate_deadline()
    res = await router[].network.broadcastAggregateAndProof(proof)

  if res.isOk():
    beacon_aggregates_sent.inc()

    info "Aggregated attestation sent",
      attestation = shortLog(proof.message.aggregate),
      aggregator_index = proof.message.aggregator_index,
      selection_proof = shortLog(proof.message.selection_proof),
      signature = shortLog(proof.signature), delay
  else: # "no broadcast" is not a fatal error
    notice "Aggregated attestation not sent",
      attestation = shortLog(proof.message.aggregate),
      aggregator_index = proof.message.aggregator_index,
      signature = shortLog(proof.signature), error = res.error()

  return ok()

proc routeSyncCommitteeMessage*(
    router: ref MessageRouter, msg: SyncCommitteeMessage,
    subcommitteeIdx: SyncSubcommitteeIndex,
    checkSignature: bool):
    Future[SendResult] {.async: (raises: [CancelledError]).} =
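  ## Validate and broadcast sync committee message, then notify the
  ## `onSyncCommitteeMessage` callback (if set) of the message slot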
  block:
    let res = await router[].processor.processSyncCommitteeMessage(
      MsgSource.api, msg, subcommitteeIdx, checkSignature)

    if not res.isGoodForSending:
      warn "Sync committee message failed validation",
        message = shortLog(msg), error = res.error()
      return err(res.error()[1])

  let
    sendTime = router[].processor.getCurrentBeaconTime()
    delay = sendTime - msg.slot.sync_committee_message_deadline()

    res = await router[].network.broadcastSyncCommitteeMessage(
      msg, subcommitteeIdx)

  if res.isOk():
    beacon_sync_committee_messages_sent.inc()
    beacon_sync_committee_message_sent_delay.observe(delay.toFloatSeconds())

    info "Sync committee message sent", message = shortLog(msg), delay
  else: # "no broadcast" is not a fatal error
    notice "Sync committee message not sent",
      message = shortLog(msg), error = res.error()

  if router[].onSyncCommitteeMessage != nil:
    router[].onSyncCommitteeMessage(msg.slot)

  return ok()

proc routeSyncCommitteeMessages*(
    router: ref MessageRouter, msgs: seq[SyncCommitteeMessage]):
    Future[seq[SendResult]] {.async: (raises: [CancelledError]).} =
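  ## Validate and broadcast a batch of sync committee messages, routing each
  ## message via the subcommittees its validator belongs to in the current or
  ## next sync committee of the head state; returns one `SendResult` per
  ## input message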
  return withState(router[].dag.headState):
    when consensusFork >= ConsensusFork.Altair:
      var statuses = newSeq[Opt[SendResult]](len(msgs))

      let
        curPeriod = sync_committee_period(forkyState.data.slot)
        nextPeriod = curPeriod + 1

      let (keysCur, keysNxt) =
        block:
          var resCur: Table[uint64, int]
          var resNxt: Table[uint64, int]

          for index, msg in msgs:
            if msg.validator_index < lenu64(forkyState.data.validators):
              let msgPeriod = sync_committee_period(msg.slot + 1)
              if msgPeriod == curPeriod:
                resCur[msg.validator_index] = index
              elif msgPeriod == nextPeriod:
                resNxt[msg.validator_index] = index
              else:
                statuses[index] = Opt.some(
                  SendResult.err("Message's slot out of state's head range"))
            else:
              statuses[index] = Opt.some(
                SendResult.err("Incorrect validator's index"))
          if (len(resCur) == 0) and (len(resNxt) == 0):
            return statuses.mapIt(it.get())
          (resCur, resNxt)

      let (pending, indices) = block:
        var resFutures: seq[Future[SendResult]]
        var resIndices: seq[int]
        template headSyncCommittees(): auto = router[].dag.headSyncCommittees
        for subcommitteeIdx in SyncSubcommitteeIndex:
          for valKey in syncSubcommittee(
              headSyncCommittees.current_sync_committee, subcommitteeIdx):
            let index = keysCur.getOrDefault(uint64(valKey), -1)
            if index >= 0:
              resIndices.add(index)
              resFutures.add(router.routeSyncCommitteeMessage(
                msgs[index], subcommitteeIdx, true))
        for subcommitteeIdx in SyncSubcommitteeIndex:
          for valKey in syncSubcommittee(
              headSyncCommittees.next_sync_committee, subcommitteeIdx):
            let index = keysNxt.getOrDefault(uint64(valKey), -1)
            if index >= 0:
              resIndices.add(index)
              resFutures.add(router.routeSyncCommitteeMessage(
                msgs[index], subcommitteeIdx, true))
        (resFutures, resIndices)

      await allFutures(pending)

      for index, future in pending:
        if future.completed():
          let fres = future.value()
          if fres.isErr():
            statuses[indices[index]] = Opt.some(SendResult.err(fres.error()))
          else:
            statuses[indices[index]] = Opt.some(SendResult.ok())
        elif future.failed() or future.cancelled():
          let exc = future.error()
          debug "Unexpected failure while sending committee message",
            message = msgs[indices[index]], error = $exc.msg
          statuses[indices[index]] = Opt.some(SendResult.err(
            "Unexpected failure while sending committee message"))

      var res: seq[SendResult]
      for item in statuses:
        if item.isSome():
          res.add(item.get())
        else:
          res.add(SendResult.err("Message validator not in sync committee"))
      res
    else:
      var res: seq[SendResult]
      for _ in msgs:
        res.add(SendResult.err("Waiting for altair fork"))
      res

proc routeSignedContributionAndProof*(
    router: ref MessageRouter,
    msg: SignedContributionAndProof,
    checkSignature: bool):
    Future[SendResult] {.async: (raises: [CancelledError]).} =
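  ## Validate and broadcast a sync committee contribution and proof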
  block:
    let res = await router[].processor.processSignedContributionAndProof(
      MsgSource.api, msg)
    if not res.isGoodForSending:
      warn "Contribution failed validation",
        contribution = shortLog(msg.message.contribution),
        aggregator_index = msg.message.aggregator_index,
        selection_proof = shortLog(msg.message.selection_proof),
        signature = shortLog(msg.signature), error = res.error()
      return err(res.error()[1])

  let
    sendTime = router[].processor.getCurrentBeaconTime()
    delay = sendTime - msg.message.contribution.slot.sync_contribution_deadline()

  let res = await router[].network.broadcastSignedContributionAndProof(msg)
  if res.isOk():
    beacon_sync_committee_contributions_sent.inc()
    info "Contribution sent",
      contribution = shortLog(msg.message.contribution),
      aggregator_index = msg.message.aggregator_index,
      selection_proof = shortLog(msg.message.selection_proof),
      signature = shortLog(msg.signature), delay
  else: # "no broadcast" is not a fatal error
    notice "Contribution not sent",
      contribution = shortLog(msg.message.contribution),
      aggregator_index = msg.message.aggregator_index,
      selection_proof = shortLog(msg.message.selection_proof),
      signature = shortLog(msg.signature), error = res.error()

  return ok()

proc routeSignedVoluntaryExit*(
    router: ref MessageRouter, exit: SignedVoluntaryExit):
    Future[SendResult] {.async: (raises: [CancelledError]).} =
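  ## Validate and broadcast a voluntary exit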
  block:
    let res =
      router[].processor[].processSignedVoluntaryExit(MsgSource.api, exit)
    if not res.isGoodForSending:
      warn "Voluntary exit failed validation",
        exit = shortLog(exit), error = res.error()
      return err(res.error()[1])

  let res = await router[].network.broadcastVoluntaryExit(exit)
  if res.isOk():
    beacon_voluntary_exits_sent.inc()
    notice "Voluntary exit sent", exit = shortLog(exit)
  else: # "no broadcast" is not a fatal error
    notice "Voluntary exit not sent", exit = shortLog(exit), error = res.error()

  return ok()

proc routeAttesterSlashing*(
    router: ref MessageRouter, slashing: phase0.AttesterSlashing):
    Future[SendResult] {.async: (raises: [CancelledError]).} =
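  ## Validate and broadcast an attester slashing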
  block:
    let res =
      router[].processor[].processAttesterSlashing(MsgSource.api, slashing)
    if not res.isGoodForSending:
      warn "Attester slashing failed validation",
        slashing = shortLog(slashing), error = res.error()
      return err(res.error()[1])

  let res = await router[].network.broadcastAttesterSlashing(slashing)
  if res.isOk():
    beacon_attester_slashings_sent.inc()
    notice "Attester slashing sent", slashing = shortLog(slashing)
  else: # "no broadcast" is not a fatal error
    notice "Attester slashing not sent",
      slashing = shortLog(slashing), error = res.error()

  return ok()

proc routeProposerSlashing*(
    router: ref MessageRouter, slashing: ProposerSlashing):
    Future[SendResult] {.async: (raises: [CancelledError]).} =
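  ## Validate and broadcast a proposer slashing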
  block:
    let res =
      router[].processor[].processProposerSlashing(MsgSource.api, slashing)
    if not res.isGoodForSending:
      warn "Proposer slashing request failed validation",
        slashing = shortLog(slashing), error = res.error()
      return err(res.error()[1])

  let res = await router[].network.broadcastProposerSlashing(slashing)
  if res.isOk():
    beacon_proposer_slashings_sent.inc()
    notice "Proposer slashing sent", slashing = shortLog(slashing)
  else: # "no broadcast" is not a fatal error
    notice "Proposer slashing not sent",
      slashing = shortLog(slashing), error = res.error()

  return ok()

proc routeBlsToExecutionChange*(
    router: ref MessageRouter,
    bls_to_execution_change: SignedBLSToExecutionChange):
    Future[SendResult] {.async: (raises: [CancelledError]).} =
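  ## Validate and broadcast a BLS to execution change; before the Capella
  ## fork epoch the change is only queued locally and not broadcast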
  block:
    let res = await router.processor.processBlsToExecutionChange(
      MsgSource.api, bls_to_execution_change)
    if not res.isGoodForSending:
      warn "BLS to execution change request failed validation",
        change = shortLog(bls_to_execution_change),
        error = res.error()
      return err(res.error()[1])

  if router[].getCurrentBeaconTime().slotOrZero.epoch <
      router[].processor[].dag.cfg.CAPELLA_FORK_EPOCH:
    # Broadcast hasn't failed, it just hasn't happened; desire seems to be to
    # allow queuing up BLS to execution changes.
    return ok()

  let res = await router[].network.broadcastBlsToExecutionChange(
    bls_to_execution_change)
  if res.isOk():
    notice "BLS to execution change sent",
      bls_to_execution_change = shortLog(bls_to_execution_change)
  else: # "no broadcast" is not a fatal error
    notice "BLS to execution change not sent",
      bls_to_execution_change = shortLog(bls_to_execution_change),
      error = res.error()

  return ok()