fix Electra aggregated attestation gossip verification committee index (#6800)
commit 177897929b
parent c0108c2f2a
@@ -27,7 +27,7 @@ type
     supernode*: bool
     custody_columns*: seq[ColumnIndex]
     onDataColumnSidecarCallback*: OnDataColumnSidecarCallback

   DataColumnFetchRecord* = object
     block_root*: Eth2Digest
     indices*: seq[ColumnIndex]
@@ -48,7 +48,7 @@ func put*(quarantine: var DataColumnQuarantine,
   # insert -> block resolve -> data column insert, which leaves
   # garbage data columns.
   #
   # This also therefore automatically garbage-collects otherwise valid
   # data columns that are correctly signed, point to either correct block
   # root which isn't ever seen, and then for any reason simply never used.
   var oldest_column_key: DataColumnIdentifier
@@ -56,7 +56,7 @@ func put*(quarantine: var DataColumnQuarantine,
         oldest_column_key = k
         break
     quarantine.data_columns.del(oldest_column_key)
   let block_root =
     hash_tree_root(dataColumnSidecar.signed_block_header.message)
   discard quarantine.data_columns.hasKeyOrPut(
     DataColumnIdentifier(block_root: block_root,
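The comments in this hunk explain the eviction policy: once the quarantine is full, an existing entry is dropped before the new column goes in, so columns whose block is never resolved cannot pile up forever. Below is a minimal standalone sketch of that bounded-table pattern; the MaxColumns cap, the string payload, and the plain Table are illustrative stand-ins, not the quarantine's real fields.

```nim
import std/[tables, hashes]

const MaxColumns = 4   # assumed cap for the sketch; the real quarantine limit differs

type
  ColumnKey = tuple[blockRoot: string, index: int]
  Quarantine = object
    columns: Table[ColumnKey, string]

proc put(q: var Quarantine, key: ColumnKey, value: string) =
  ## When the table is full, evict an existing entry before inserting, so
  ## columns whose block never resolves cannot grow the table without bound.
  if q.columns.len >= MaxColumns:
    var victim: ColumnKey
    for k in q.columns.keys:
      victim = k
      break
    q.columns.del(victim)
  discard q.columns.hasKeyOrPut(key, value)

var q = Quarantine(columns: initTable[ColumnKey, string]())
for i in 0 ..< 6:
  q.put((blockRoot: "root" & $i, index: i), "sidecar")
doAssert q.columns.len <= MaxColumns
```

The real put() additionally derives the key from the sidecar itself, hashing the signed block header to get block_root and pairing it with the column index, as the hunk above shows.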
@@ -91,18 +91,17 @@ func peekColumnIndices*(quarantine: DataColumnQuarantine,
   indices

 func gatherDataColumns*(quarantine: DataColumnQuarantine,
                         digest: Eth2Digest):
                         seq[ref DataColumnSidecar] =
-  # Returns the current data columns quried by a
-  # block header
+  # Returns the current data columns queried by a block header
   var columns: seq[ref DataColumnSidecar]
   for i in quarantine.custody_columns:
     let dc_identifier =
       DataColumnIdentifier(
         block_root: digest,
         index: i)
     if quarantine.data_columns.hasKey(dc_identifier):
       let value =
         quarantine.data_columns.getOrDefault(dc_identifier,
           default(ref DataColumnSidecar))
       columns.add(value)
@@ -134,7 +133,7 @@ func hasMissingDataColumns*(quarantine: DataColumnQuarantine,
   # root request columns over RPC.
   var col_counter = 0
   for idx in quarantine.custody_columns:
     let dc_identifier =
       DataColumnIdentifier(
         block_root: blck.root,
         index: idx)
@@ -155,7 +154,7 @@ func hasEnoughDataColumns*(quarantine: DataColumnQuarantine,
   # if it receives atleast 50%+ gossip and RPC

   # Once 50%+ columns are available we can use this function to
   # check it, and thereby check column reconstructability, right from
   # gossip validation, consequently populating the quarantine with
   # rest of the data columns.
   if quarantine.supernode:
@@ -165,7 +164,7 @@ func hasEnoughDataColumns*(quarantine: DataColumnQuarantine,
       return true
   else:
     for i in quarantine.custody_columns:
       let dc_identifier =
         DataColumnIdentifier(
           block_root: blck.root,
           index: i)
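hasEnoughDataColumns encodes the reconstruction rule spelled out in the comments: a supernode can rebuild everything once 50%+ of the columns have arrived over gossip or RPC, while a regular node needs every column in its custody set. A minimal sketch of that decision follows; NUMBER_OF_COLUMNS = 128 and the HashSet of seen indices are assumptions made for the example, not the pool's actual representation.

```nim
import std/[sets, sequtils]

const NUMBER_OF_COLUMNS = 128   # per-block column count assumed for the sketch

proc hasEnoughColumns(seen: HashSet[int], custody: seq[int],
                      supernode: bool): bool =
  ## A supernode can reconstruct the remaining columns once 50%+ of all
  ## columns have arrived (gossip or RPC); a regular node needs every
  ## column in its custody set to be present.
  if supernode:
    seen.len >= NUMBER_OF_COLUMNS div 2
  else:
    custody.allIt(it in seen)

doAssert hasEnoughColumns(toHashSet(toSeq(0 ..< 64)), @[], true)
doAssert not hasEnoughColumns(toHashSet(@[1, 2]), @[1, 2, 3], false)
```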
@@ -187,4 +186,4 @@ func dataColumnFetchRecord*(quarantine: DataColumnQuarantine,
     if not quarantine.data_columns.hasKey(
         dc_id):
       indices.add(idx)
   DataColumnFetchRecord(block_root: blck.root, indices: indices)
@@ -592,7 +592,7 @@ func propagateInvalidity*(
     if parentPhysicalIdx < 0 or parentPhysicalIdx >= self.nodes.len:
       continue

-    # Invalidity transmits to all descendents
+    # Invalidity transmits to all descendants
     if self.nodes.buf[parentPhysicalIdx].invalid:
       self.nodes.buf[nodePhysicalIdx].invalid = true
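The renamed comment states the fork-choice rule: once a block is invalid, every descendant is invalid too. If nodes are stored in insertion order so that parents always precede their children, one forward sweep is enough, which is what the hunk's loop relies on. A self-contained sketch under that assumption (the seq-of-Node layout here is illustrative, not proto-array's actual buffer):

```nim
type
  Node = object
    parent: int      # index of the parent node, -1 for the root
    invalid: bool

proc propagateInvalidity(nodes: var seq[Node]) =
  ## Nodes are appended in insertion order, so a parent always appears
  ## before its children; one forward sweep spreads invalidity downwards.
  for i in 0 ..< nodes.len:
    let p = nodes[i].parent
    if p < 0 or p >= nodes.len:
      continue
    # Invalidity transmits to all descendants
    if nodes[p].invalid:
      nodes[i].invalid = true

var chain = @[
  Node(parent: -1),                 # root
  Node(parent: 0, invalid: true),   # invalid block
  Node(parent: 1),                  # child of the invalid block
  Node(parent: 2)]                  # grandchild
propagateInvalidity(chain)
doAssert chain[2].invalid and chain[3].invalid
```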
@@ -351,7 +351,7 @@ proc getExecutionValidity(
   of PayloadExecutionStatus.invalid,
       PayloadExecutionStatus.invalid_block_hash:
     # Blocks come either from gossip or request manager requests. In the
-    # former case, they've passed libp2p gosisp validation which implies
+    # former case, they've passed libp2p gossip validation which implies
     # correct signature for correct proposer,which makes spam expensive,
     # while for the latter, spam is limited by the request manager.
     info "execution payload invalid from EL client newPayload",
@@ -1113,7 +1113,7 @@ proc validateAttestation*(
   # [REJECT] The committee index is within the expected range -- i.e.
   # data.index < get_committee_count_per_slot(state, data.target.epoch).
   let committee_index = block:
-    let idx = shufflingRef.get_committee_index(attestation.data.index)
+    let idx = shufflingRef.get_committee_index(attestation.committee_index)
     if idx.isErr():
       return pool.checkedReject(
         "Attestation: committee index not within expected range")
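The [REJECT] rule quoted above is a plain range check: the attested committee index must be below get_committee_count_per_slot(state, data.target.epoch). A tiny sketch of that check, with the committee count reduced to a plain parameter for illustration:

```nim
import std/options

proc getCommitteeIndex(index: uint64,
                       committeesPerSlot: uint64): Option[uint64] =
  ## [REJECT] condition: the committee index must satisfy
  ## index < get_committee_count_per_slot(state, data.target.epoch).
  if index < committeesPerSlot:
    some(index)
  else:
    none(uint64)

doAssert getCommitteeIndex(3, 4).isSome
doAssert getCommitteeIndex(4, 4).isNone   # out of range -> reject
```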
@@ -1231,7 +1231,12 @@ proc validateAggregate*(
   # [REJECT] The committee index is within the expected range -- i.e.
   # data.index < get_committee_count_per_slot(state, data.target.epoch).
   let committee_index = block:
-    let idx = shufflingRef.get_committee_index(aggregate.data.index)
+    when signedAggregateAndProof is electra.SignedAggregateAndProof:
+      let idx = get_committee_index_one(aggregate.committee_bits)
+    elif signedAggregateAndProof is phase0.SignedAggregateAndProof:
+      let idx = shufflingRef.get_committee_index(aggregate.data.index)
+    else:
+      static: doAssert false
     if idx.isErr():
       return pool.checkedReject(
         "Attestation: committee index not within expected range")
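This hunk is the fix the commit title refers to: for an Electra SignedAggregateAndProof the committee is no longer carried in aggregate.data.index, so the validator now derives it from committee_bits via get_committee_index_one, while phase0 aggregates keep the old path. A hedged sketch of what such a helper does under EIP-7549, where exactly one committee bit may be set (illustrative only, not Nimbus's implementation):

```nim
import std/options

const MAX_COMMITTEES_PER_SLOT = 64   # spec constant sizing the bitvector

proc getCommitteeIndexOne(committeeBits: array[MAX_COMMITTEES_PER_SLOT, bool]):
    Option[uint64] =
  ## Electra aggregates must name exactly one committee: return the index
  ## of the single set bit, or none if zero or several bits are set.
  var found = none(uint64)
  for i, bit in committeeBits:
    if bit:
      if found.isSome:
        return none(uint64)     # more than one committee flagged -> reject
      found = some(uint64 i)
  found

var bits: array[MAX_COMMITTEES_PER_SLOT, bool]
bits[5] = true
doAssert getCommitteeIndexOne(bits) == some(5'u64)
bits[9] = true
doAssert getCommitteeIndexOne(bits).isNone   # ambiguous -> reject
```

Either zero or multiple set bits leaves the committee ambiguous, so such an aggregate is rejected just like an out-of-range index.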
@@ -960,7 +960,7 @@ proc readResponse(conn: Connection, peer: Peer,
   var results: MsgType
   while true:
     # Because we interleave networking with response processing, it may
-    # happen that reading all chunks takes longer than a strict dealine
+    # happen that reading all chunks takes longer than a strict deadline
     # timeout would allow, so we allow each chunk a new timeout instead.
     # The problem is exacerbated by the large number of round-trips to the
     # poll loop that each future along the way causes.
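The corrected comment describes the timeout strategy: because chunk reads are interleaved with response processing, one strict deadline for the whole response would cut off peers that are still making progress, so each chunk gets a fresh timeout. A rough chronos sketch of that per-chunk pattern, with a fake chunk source and an arbitrary one-second budget standing in for the real protocol values:

```nim
import chronos

proc fakeReadChunk(n: int): Future[seq[byte]] {.async.} =
  ## Stand-in for a network read: three slow chunks, then end-of-stream.
  await sleepAsync(50.milliseconds)
  if n < 3:
    return @[byte(n)]
  return newSeq[byte]()

proc readAllChunks(): Future[int] {.async.} =
  var count = 0
  while true:
    # Give every chunk its own deadline instead of racing the whole
    # response against a single strict timeout.
    let fut = fakeReadChunk(count)
    if not await fut.withTimeout(1.seconds):
      raise newException(CatchableError, "chunk read timed out")
    if fut.read().len == 0:
      break
    inc count
  return count

doAssert waitFor(readAllChunks()) == 3
```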
@@ -15,7 +15,7 @@ See the [execution client comparison](https://ethereum.org/en/developers/docs/no

 ### 1. Install execution client

-Select an execution client and install it, configuring it such that that the authenticated JSON-RPC interface is enabled and a JWT secret file is created.
+Select an execution client and install it, configuring it such that the authenticated JSON-RPC interface is enabled and a JWT secret file is created.

 === "Nimbus"
@@ -258,7 +258,7 @@ func `as`(blk: BlockObject, T: type bellatrix.ExecutionPayloadHeader): T =
     state_root: blk.stateRoot as Eth2Digest,
     receipts_root: blk.receiptsRoot as Eth2Digest,
     logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
-    prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE), # Is BE correct here?
+    prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE), # Is BE correct here?
     block_number: uint64 blk.number,
     gas_limit: uint64 blk.gasLimit,
     gas_used: uint64 blk.gasUsed,
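The only change in these BlockObject conversions is the rename from stint's toByteArrayBE to toBytesBE; both spell the 256-bit difficulty as 32 big-endian bytes for the prev_randao field, which is what the inline "Is BE correct here?" question is about. A small sketch of that conversion, assuming the stint package and an arbitrary value:

```nim
import stint

let difficulty = u256(0xdeadbeef)
# toBytesBE yields array[32, byte] with the most significant byte first,
# so a small value ends up in the trailing bytes.
let be = difficulty.toBytesBE()
doAssert be[0] == 0x00'u8
doAssert be[31] == 0xef'u8
```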
@@ -274,7 +274,7 @@ func `as`(blk: BlockObject, T: type capella.ExecutionPayloadHeader): T =
     state_root: blk.stateRoot as Eth2Digest,
     receipts_root: blk.receiptsRoot as Eth2Digest,
     logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
-    prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE),
+    prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE),
     block_number: uint64 blk.number,
     gas_limit: uint64 blk.gasLimit,
     gas_used: uint64 blk.gasUsed,
@@ -291,7 +291,7 @@ func `as`(blk: BlockObject, T: type deneb.ExecutionPayloadHeader): T =
     state_root: blk.stateRoot as Eth2Digest,
     receipts_root: blk.receiptsRoot as Eth2Digest,
     logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
-    prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE),
+    prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE),
     block_number: uint64 blk.number,
     gas_limit: uint64 blk.gasLimit,
     gas_used: uint64 blk.gasUsed,
@@ -310,7 +310,7 @@ func `as`(blk: BlockObject, T: type electra.ExecutionPayloadHeader): T =
     state_root: blk.stateRoot as Eth2Digest,
     receipts_root: blk.receiptsRoot as Eth2Digest,
     logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
-    prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE),
+    prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE),
     block_number: uint64 blk.number,
     gas_limit: uint64 blk.gasLimit,
     gas_used: uint64 blk.gasUsed,
@@ -329,7 +329,7 @@ func `as`(blk: BlockObject, T: type fulu.ExecutionPayloadHeader): T =
     state_root: blk.stateRoot as Eth2Digest,
     receipts_root: blk.receiptsRoot as Eth2Digest,
     logs_bloom: BloomLogs(data: distinctBase(blk.logsBloom)),
-    prev_randao: Eth2Digest(data: blk.difficulty.toByteArrayBE),
+    prev_randao: Eth2Digest(data: blk.difficulty.toBytesBE),
     block_number: uint64 blk.number,
     gas_limit: uint64 blk.gasLimit,
     gas_used: uint64 blk.gasUsed,
@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

+{.push raises: [].}
+
 import
   std/[strutils, os, options, uri, json, tables],
   results,
@@ -1031,7 +1033,7 @@ proc workerLoop(address: TransportAddress, uri: Uri, worker: int,
           worker = worker
       return
     except CatchableError as exc:
-      warn "Unexpected exception while running test test run", host = hostname,
+      warn "Unexpected exception while running test run", host = hostname,
         error_name = exc.name, error_msg = exc.msg, index = index,
         worker = worker
       return
@@ -1156,6 +1158,9 @@ proc run(conf: RestTesterConf): int =
     waitFor(checkConnection(conf, uri))
   except ConnectionError:
     return 1
+  except CatchableError as exc:
+    fatal "Unexpected test failure", error_name = exc.name, error_msg = exc.msg
+    return 1

   try:
     return waitFor(startTests(conf, uri, jnodes))