support GOSSIP_MAX_SIZE_MERGE blocks; prevent fork choice stutter via aggregate attestations (#3230)
* support GOSSIP_MAX_SIZE_MERGE-sized blocks; prevent fork choice clock stutter via aggregate attestations
* relay max gossip size to libp2p, use tight uncompressed bounds for fixed-size messages
* Update beacon_chain/networking/eth2_network.nim

Co-authored-by: Jacek Sieka <jacek@status.im>
parent 12429a71c5
commit e78d12beb9
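For orientation, here is a self-contained sketch of the compile-time cap that this commit relays to libp2p. The constant values below are assumptions based on the phase0 and merge p2p-interface specs linked in the diff, not values taken from this repository.

# Sketch only, not the nimbus-eth2 code: the gossip cap handed to libp2p is
# the larger of the phase0 and merge limits, computed once at compile time.
const
  GOSSIP_MAX_SIZE = 1_048_576          # assumed: 2^20 bytes (phase0 spec)
  GOSSIP_MAX_SIZE_MERGE = 10_485_760   # assumed: 10 * 2^20 bytes (merge spec)

func maxGossipMaxSize(): auto {.compileTime.} =
  max(GOSSIP_MAX_SIZE, GOSSIP_MAX_SIZE_MERGE)

const libp2pMessageSizeLimit = maxGossipMaxSize()
static: doAssert libp2pMessageSizeLimit == GOSSIP_MAX_SIZE_MERGE
echo libp2pMessageSizeLimit   # 10485760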
@@ -278,7 +278,7 @@ proc attestationValidator*(
     await self.attestationPool.validateAttestation(
       self.batchCrypto, attestation, wallTime, subnet_id, checkSignature)
   return if v.isOk():
-    # Due to async validation the wallSlot here might have changed
+    # Due to async validation the wallTime here might have changed
     wallTime = self.getCurrentBeaconTime()

     let (attester_index, sig) = v.get()
@@ -304,8 +304,8 @@ proc aggregateValidator*(
 proc aggregateValidator*(
     self: ref Eth2Processor, src: MsgSource,
     signedAggregateAndProof: SignedAggregateAndProof): Future[ValidationRes] {.async.} =
-  let wallTime = self.getCurrentBeaconTime()
-  var (afterGenesis, wallSlot) = wallTime.toSlot()
+  var wallTime = self.getCurrentBeaconTime()
+  let (afterGenesis, wallSlot) = wallTime.toSlot()

   logScope:
     aggregate = shortLog(signedAggregateAndProof.message.aggregate)
@@ -328,8 +328,8 @@ proc aggregateValidator*(
       self.batchCrypto, signedAggregateAndProof, wallTime)

   return if v.isOk():
-    # Due to async validation the wallSlot here might have changed
-    wallSlot = self.getCurrentBeaconTime().slotOrZero()
+    # Due to async validation the wallTime here might have changed
+    wallTime = self.getCurrentBeaconTime()

     let (attesting_indices, sig) = v.get()

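The hunks above make the validators re-read the wall clock after the awaited validation instead of reusing the reading captured when the message arrived. A minimal chronos sketch of that pattern (illustrative names, not nimbus-eth2 code):

import chronos

proc validate(): Future[bool] {.async.} =
  # stand-in for the batched signature checks, which can suspend for a while
  await sleepAsync(50.milliseconds)
  return true

proc process() {.async.} =
  let arrivalTime = Moment.now()   # clock reading taken when the message arrived
  if await validate():
    # Re-read the clock: the await may have crossed a slot boundary, and fork
    # choice should be driven by the current time, not the stale reading.
    let acceptanceTime = Moment.now()
    echo "validation took ", acceptanceTime - arrivalTime

waitFor process()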
@@ -1822,6 +1822,32 @@ proc newBeaconSwitch*(config: BeaconNodeConf, seckey: PrivateKey,
     .withTcpTransport({ServerFlags.ReuseAddr})
     .build()

+# https://github.com/ethereum/consensus-specs/blob/v1.1.7/specs/phase0/p2p-interface.md#configuration
+# https://github.com/ethereum/consensus-specs/blob/v1.1.7/specs/merge/p2p-interface.md#configuration
+func maxGossipMaxSize(): auto {.compileTime.} =
+  max(GOSSIP_MAX_SIZE, GOSSIP_MAX_SIZE_MERGE)
+
+# https://github.com/ethereum/consensus-specs/blob/v1.1.7/specs/phase0/p2p-interface.md#configuration
+# https://github.com/ethereum/consensus-specs/blob/v1.1.7/specs/merge/p2p-interface.md#configuration
+template gossipMaxSize(T: untyped): uint32 =
+  const maxSize = static:
+    when isFixedSize(T):
+      fixedPortionSize(T)
+    elif T is merge.SignedBeaconBlock:
+      GOSSIP_MAX_SIZE_MERGE
+    # TODO https://github.com/status-im/nim-ssz-serialization/issues/20 for
+    # Attestation, AttesterSlashing, and SignedAggregateAndProof, which all
+    # have lists bounded at MAX_VALIDATORS_PER_COMMITTEE (2048) items, thus
+    # having max sizes significantly smaller than GOSSIP_MAX_SIZE.
+    elif T is Attestation or T is AttesterSlashing or
+         T is SignedAggregateAndProof or T is phase0.SignedBeaconBlock or
+         T is altair.SignedBeaconBlock:
+      GOSSIP_MAX_SIZE
+    else:
+      static: raiseAssert "unknown type"
+  static: doAssert maxSize <= maxGossipMaxSize()
+  maxSize.uint32
+
 proc createEth2Node*(rng: ref BrHmacDrbgContext,
                      config: BeaconNodeConf,
                      netKeys: NetKeyPair,
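The gossipMaxSize template added above picks a tight decompression bound per message type at compile time. A self-contained sketch of that dispatch with hypothetical stand-in types (the real template relies on isFixedSize/fixedPortionSize from nim-ssz-serialization and the spec constants):

# Hypothetical message types standing in for the SSZ gossip messages.
type
  PingMsg = object      # fixed-size: its exact encoded size is a tight bound
    seqNumber: uint64
  BlockMsg = object     # variable-size: fall back to the spec-level cap
    body: seq[byte]

const GOSSIP_MAX_SIZE = 1_048_576   # assumed 1 MiB cap, as in the phase0 spec

template gossipMaxSize(T: untyped): uint32 =
  const maxSize = static:
    when T is PingMsg:
      sizeof(uint64)                # tight, type-specific bound
    elif T is BlockMsg:
      GOSSIP_MAX_SIZE               # loose, spec-level bound
    else:
      static: raiseAssert "unknown type"
  maxSize.uint32

let pingBound = gossipMaxSize(PingMsg)
let blockBound = gossipMaxSize(BlockMsg)
doAssert pingBound == 8'u32
doAssert blockBound == GOSSIP_MAX_SIZE.uint32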
@@ -1862,7 +1888,9 @@ proc createEth2Node*(rng: ref BrHmacDrbgContext,
         if m.topicIDs.len > 0: m.topicIDs[0] else: ""

       try:
-        let decoded = snappy.decode(m.data, GOSSIP_MAX_SIZE)
+        # This doesn't have to be a tight bound, just enough to avoid denial of
+        # service attacks.
+        let decoded = snappy.decode(m.data, maxGossipMaxSize())
         gossipId(decoded, altairPrefix, topic, true)
       except CatchableError:
         gossipId(m.data, altairPrefix, topic, false)
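The hunk above only needs a loose decompression cap when computing gossip message ids, so it uses the overall maximum rather than a per-type bound. A small sketch of bounded snappy decoding (assuming the nim-snappy decode overload used in the diff, which yields an empty seq when decoding fails or the cap would be exceeded):

import snappy

let payload = newSeq[byte](2048)        # stand-in for an SSZ-encoded message
let compressed = snappy.encode(payload)

# Within the cap: the original bytes come back.
doAssert snappy.decode(compressed, 1_048_576).len == payload.len

# Over the cap: decoding is refused instead of inflating a potential
# decompression bomb, and an empty seq is returned.
doAssert snappy.decode(compressed, 1024).len == 0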
@@ -1918,6 +1946,7 @@ proc createEth2Node*(rng: ref BrHmacDrbgContext,
       sign = false,
       verifySignature = false,
       anonymize = true,
+      maxMessageSize = maxGossipMaxSize(),
       parameters = params)

   switch.mount(pubsub)
@@ -1969,7 +1998,7 @@ proc addValidator*[MsgType](node: Eth2Node,
     inc nbc_gossip_messages_received
     trace "Validating incoming gossip message", len = message.data.len, topic

-    var decompressed = snappy.decode(message.data, GOSSIP_MAX_SIZE)
+    var decompressed = snappy.decode(message.data, gossipMaxSize(MsgType))
     let res = if decompressed.len > 0:
       try:
         let decoded = SSZ.decode(decompressed, MsgType)
@@ -2000,7 +2029,7 @@ proc addAsyncValidator*[MsgType](node: Eth2Node,
     inc nbc_gossip_messages_received
     trace "Validating incoming gossip message", len = message.data.len, topic

-    var decompressed = snappy.decode(message.data, GOSSIP_MAX_SIZE)
+    var decompressed = snappy.decode(message.data, gossipMaxSize(MsgType))
     if decompressed.len > 0:
       try:
         let decoded = SSZ.decode(decompressed, MsgType)
@@ -2045,7 +2074,7 @@ proc broadcast*(node: Eth2Node, topic: string, msg: auto) =

   # This is only for messages we create. A message this large amounts to an
   # internal logic error.
-  doAssert uncompressed.len <= GOSSIP_MAX_SIZE
+  doAssert uncompressed.len <= maxGossipMaxSize()
   inc nbc_gossip_messages_sent

   var futSnappy = node.pubsub.publish(topic, compressed)