Merge branch 'unstable' into dev/pedro/unified_changes

Pedro Miranda committed on 2024-12-19 01:00:52 +00:00 (via GitHub)
commit 7389c2f4d2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 159 additions and 97 deletions


@@ -1,10 +1,10 @@
 # Networking
-This folders hold a collection of modules to:
+These folders hold a collection of modules to:
 - configure the Eth2 P2P network
 - discover, connect, and maintain quality Eth2 peers
-Data received is handed other to the `../gossip_processing` modules for validation.
+Data received is handed over to the `../gossip_processing` modules for validation.
 ## Security concerns


@@ -86,3 +86,10 @@ const
   # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#withdrawal-prefixes
   COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02
+
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/electra/beacon-chain.md#execution-1
+  MAX_BLOBS_PER_BLOCK_ELECTRA* = 9'u64
+
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/electra/p2p-interface.md#configuration
+  MAX_REQUEST_BLOB_SIDECARS_ELECTRA* =
+    MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA
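
Note: with the Deneb preset MAX_REQUEST_BLOCKS_DENEB = 128 (an assumed spec value, not shown in this diff), the derived Electra cap works out to 128 * 9 = 1152 blob sidecars per request. A minimal Nim sanity check under that assumption:

  # Assumed presets: MAX_REQUEST_BLOCKS_DENEB = 128, MAX_BLOBS_PER_BLOCK_ELECTRA = 9
  static: doAssert 128'u64 * 9'u64 == 1152'u64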


@@ -824,6 +824,8 @@ proc readRuntimeConfig*(
   checkCompatibility MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK,
                      "MAX_REQUEST_BLOB_SIDECARS"
   checkCompatibility BLOB_SIDECAR_SUBNET_COUNT
+  checkCompatibility MAX_BLOBS_PER_BLOCK_ELECTRA
+  checkCompatibility MAX_REQUEST_BLOB_SIDECARS_ELECTRA
 
   # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/fork-choice.md#configuration
   # Isn't being used as a preset in the usual way: at any time, there's one correct value
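
For context, readRuntimeConfig uses checkCompatibility to verify that values supplied by a runtime config match the constants the binary was compiled with. A self-contained sketch of a check of that shape, assuming string-valued config entries (the helper name and error handling are illustrative, not Nimbus's actual implementation):

  import std/strutils

  # Illustrative only - not Nimbus's checkCompatibility template.
  proc checkCompatibilitySketch(name, fromConfig: string, compiled: uint64) =
    # A value present in the runtime config must match the constant the
    # binary was built with; otherwise the config is rejected.
    if fromConfig.len > 0 and parseBiggestUInt(fromConfig).uint64 != compiled:
      raise newException(ValueError,
        name & ": runtime config value " & fromConfig &
        " does not match compiled value " & $compiled)

  # e.g. checkCompatibilitySketch("MAX_BLOBS_PER_BLOCK_ELECTRA", "9", 9'u64)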


@@ -107,6 +107,99 @@ proc readChunkPayload*(
 {.pop.} # TODO fix p2p macro for raises
 
+template getBlobSidecarsByRoot(
+    versionNumber: static string, peer: Peer, dag: ChainDAGRef, response: auto,
+    blobIds: BlobIdentifierList) =
+  trace "got v" & versionNumber & " blobs range request",
+    peer, len = blobIds.len
+
+  if blobIds.len == 0:
+    raise newException(InvalidInputsError, "No blobs requested")
+
+  let count = blobIds.len
+
+  var
+    found = 0
+    bytes: seq[byte]
+
+  for i in 0..<count:
+    let blockRef = dag.getBlockRef(blobIds[i].block_root).valueOr:
+      continue
+    let index = blobIds[i].index
+    if dag.db.getBlobSidecarSZ(blockRef.bid.root, index, bytes):
+      let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
+        warn "Cannot read blob size, database corrupt?",
+          bytes = bytes.len(), blck = shortLog(blockRef), blobindex = index
+        continue
+
+      peer.awaitQuota(
+        blobResponseCost, "blob_sidecars_by_root/" & versionNumber)
+      peer.network.awaitQuota(
+        blobResponseCost, "blob_sidecars_by_root/" & versionNumber)
+
+      await response.writeBytesSZ(
+        uncompressedLen, bytes,
+        peer.network.forkDigestAtEpoch(blockRef.slot.epoch).data)
+
+      inc found
+
+  debug "Blob root v" & versionNumber & " request done",
+    peer, roots = blobIds.len, count, found
+
+template getBlobSidecarsByRange(
+    versionNumber: static string, peer: Peer, dag: ChainDAGRef, response: auto,
+    startSlot: Slot, reqCount: uint64, blobsPerBlock: static uint64,
+    maxReqSidecars: static uint64) =
+  trace "got v" & versionNumber & " blobs range request",
+    peer, startSlot, count = reqCount
+
+  if reqCount == 0:
+    raise newException(InvalidInputsError, "Empty range requested")
+
+  let epochBoundary =
+    if dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS >= dag.head.slot.epoch:
+      GENESIS_EPOCH
+    else:
+      dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+
+  if startSlot.epoch < epochBoundary:
+    raise newException(ResourceUnavailableError, BlobsOutOfRange)
+
+  var blockIds: array[int(maxReqSidecars), BlockId]
+  let
+    count = int min(reqCount, blockIds.lenu64)
+    endIndex = count - 1
+    startIndex =
+      dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))
+
+  var
+    found = 0
+    bytes: seq[byte]
+
+  for i in startIndex..endIndex:
+    for j in 0..<blobsPerBlock:
+      if dag.db.getBlobSidecarSZ(blockIds[i].root, BlobIndex(j), bytes):
+        if not dag.head.executionValid:
+          continue
+        let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
+          warn "Cannot read blobs sidecar size, database corrupt?",
+            bytes = bytes.len(), blck = shortLog(blockIds[i])
+          continue
+        # TODO extract from libp2pProtocol
+        peer.awaitQuota(
+          blobResponseCost, "blobs_sidecars_by_range/" & versionNumber)
+        peer.network.awaitQuota(
+          blobResponseCost, "blobs_sidecars_by_range/" & versionNumber)
+
+        await response.writeBytesSZ(
+          uncompressedLen, bytes,
+          peer.network.forkDigestAtEpoch(blockIds[i].slot.epoch).data)
+
+        inc found
+      else:
+        break
+
+  debug "BlobSidecar v" & versionNumber & " range request done",
+    peer, startSlot, count = reqCount, found
+
 p2pProtocol BeaconSync(version = 1,
                        networkState = BeaconSyncNetworkState):
   proc beaconBlocksByRange_v2(
@@ -259,38 +352,7 @@ p2pProtocol BeaconSync(version = 1,
     # client call that returns `seq[ref BlobSidecar]` will
     # will be generated by the libp2p macro - we guarantee that seq items
     # are `not-nil` in the implementation
-    trace "got blobs range request", peer, len = blobIds.len
-
-    if blobIds.len == 0:
-      raise newException(InvalidInputsError, "No blobs requested")
-
-    let
-      dag = peer.networkState.dag
-      count = blobIds.len
-
-    var
-      found = 0
-      bytes: seq[byte]
-
-    for i in 0..<count:
-      let blockRef = dag.getBlockRef(blobIds[i].block_root).valueOr:
-        continue
-      let index = blobIds[i].index
-      if dag.db.getBlobSidecarSZ(blockRef.bid.root, index, bytes):
-        let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
-          warn "Cannot read blob size, database corrupt?",
-            bytes = bytes.len(), blck = shortLog(blockRef), blobindex = index
-          continue
-
-        peer.awaitQuota(blobResponseCost, "blob_sidecars_by_root/1")
-        peer.network.awaitQuota(blobResponseCost, "blob_sidecars_by_root/1")
-
-        await response.writeBytesSZ(
-          uncompressedLen, bytes,
-          peer.network.forkDigestAtEpoch(blockRef.slot.epoch).data)
-
-        inc found
-
-    debug "Blob root request done",
-      peer, roots = blobIds.len, count, found
+    getBlobSidecarsByRoot("1", peer, peer.networkState.dag, response, blobIds)
 
   # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blobsidecarsbyrange-v1
   proc blobSidecarsByRange(
@@ -308,61 +370,52 @@ p2pProtocol BeaconSync(version = 1,
     # client call that returns `seq[ref BlobSidecar]` will
     # will be generated by the libp2p macro - we guarantee that seq items
     # are `not-nil` in the implementation
-    trace "got blobs range request", peer, startSlot, count = reqCount
-
-    if reqCount == 0:
-      raise newException(InvalidInputsError, "Empty range requested")
-
-    let
-      dag = peer.networkState.dag
-      epochBoundary =
-        if dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS >= dag.head.slot.epoch:
-          GENESIS_EPOCH
-        else:
-          dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
-
-    if startSlot.epoch < epochBoundary:
-      raise newException(ResourceUnavailableError, BlobsOutOfRange)
-
-    var blockIds: array[int(MAX_REQUEST_BLOB_SIDECARS), BlockId]
-    let
-      count = int min(reqCount, blockIds.lenu64)
-      endIndex = count - 1
-      startIndex =
-        dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))
-
-    var
-      found = 0
-      bytes: seq[byte]
-
-    for i in startIndex..endIndex:
-      for j in 0..<MAX_BLOBS_PER_BLOCK:
-        if dag.db.getBlobSidecarSZ(blockIds[i].root, BlobIndex(j), bytes):
-          # In general, there is not much intermediate time between post-merge
-          # blocks all being optimistic and none of them being optimistic. The
-          # EL catches up, tells the CL the head is verified, and that's it.
-          if blockIds[i].slot.epoch >= dag.cfg.BELLATRIX_FORK_EPOCH and
-              not dag.head.executionValid:
-            continue
-          let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
-            warn "Cannot read blobs sidecar size, database corrupt?",
-              bytes = bytes.len(), blck = shortLog(blockIds[i])
-            continue
-          # TODO extract from libp2pProtocol
-          peer.awaitQuota(blobResponseCost, "blobs_sidecars_by_range/1")
-          peer.network.awaitQuota(blobResponseCost, "blobs_sidecars_by_range/1")
-          await response.writeBytesSZ(
-            uncompressedLen, bytes,
-            peer.network.forkDigestAtEpoch(blockIds[i].slot.epoch).data)
-          inc found
-        else:
-          break
-
-    debug "BlobSidecar range request done",
-      peer, startSlot, count = reqCount, found
+    getBlobSidecarsByRange(
+      "1", peer, peer.networkState.dag, response, startSlot, reqCount,
+      MAX_BLOBS_PER_BLOCK, MAX_REQUEST_BLOB_SIDECARS)
+
+  # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blobsidecarsbyroot-v1
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/electra/p2p-interface.md#blobsidecarsbyroot-v2
+  proc blobSidecarsByRoot_v2(
+      peer: Peer,
+      blobIds: BlobIdentifierList,
+      response: MultipleChunksResponse[
+        ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS_ELECTRA)])
+      {.async, libp2pProtocol("blob_sidecars_by_root", 2).} =
+    # TODO Semantically, this request should return a non-ref, but doing so
+    # runs into extreme inefficiency due to the compiler introducing
+    # hidden copies - in future nim versions with move support, this should
+    # be revisited
+    # TODO This code is more complicated than it needs to be, since the type
+    # of the multiple chunks response is not actually used in this server
+    # implementation (it's used to derive the signature of the client
+    # function, not in the code below!)
+    # TODO although you can't tell from this function definition, a magic
+    # client call that returns `seq[ref BlobSidecar]` will
+    # will be generated by the libp2p macro - we guarantee that seq items
+    # are `not-nil` in the implementation
+    getBlobSidecarsByRoot("2", peer, peer.networkState.dag, response, blobIds)
+
+  # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blobsidecarsbyrange-v1
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/electra/p2p-interface.md#blobsidecarsbyrange-v2
+  proc blobSidecarsByRange_v2(
+      peer: Peer,
+      startSlot: Slot,
+      reqCount: uint64,
+      response: MultipleChunksResponse[
+        ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS_ELECTRA)])
+      {.async, libp2pProtocol("blob_sidecars_by_range", 2).} =
+    # TODO This code is more complicated than it needs to be, since the type
+    # of the multiple chunks response is not actually used in this server
+    # implementation (it's used to derive the signature of the client
+    # function, not in the code below!)
+    # TODO although you can't tell from this function definition, a magic
+    # client call that returns `seq[ref BlobSidecar]` will
+    # will be generated by the libp2p macro - we guarantee that seq items
+    # are `not-nil` in the implementation
+    getBlobSidecarsByRange(
+      "2", peer, peer.networkState.dag, response, startSlot, reqCount,
+      MAX_BLOBS_PER_BLOCK_ELECTRA, MAX_REQUEST_BLOB_SIDECARS_ELECTRA)
 
   # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/_features/eip7594/p2p-interface.md#datacolumnsidecarsbyroot-v1
  proc dataColumnSidecarsByRoot(
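
With the shared templates in place, the v1 (Deneb) and v2 (Electra) handlers differ only in the version tag, the per-block blob limit and the request cap they pass in. A self-contained sketch of that parameterization pattern, with toy names and assumed Deneb numbers (6 blobs per block, 768 sidecars per request); only the Electra values (9, 1152) follow from this diff:

  template serveBlobsByRange(versionNumber: static string,
                             blobsPerBlock, maxReqSidecars: static uint64,
                             reqCount: uint64) =
    # One shared body; only the version tag and the limits differ per fork.
    let capped = min(reqCount * blobsPerBlock, maxReqSidecars)
    echo "blob_sidecars_by_range/" & versionNumber &
      ": serving at most " & $capped & " sidecars"

  serveBlobsByRange("1", 6'u64, 768'u64, 32'u64)   # Deneb-style instantiation
  serveBlobsByRange("2", 9'u64, 1152'u64, 32'u64)  # Electra-style instantiation
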
@@ -486,7 +539,7 @@ p2pProtocol BeaconSync(version = 1,
     debug "Data column range request done",
       peer, startSlot, count = reqCount, columns = reqColumns, found
 
-proc init*(T: type BeaconSync.NetworkState, dag: ChainDAGRef): T =
+func init*(T: type BeaconSync.NetworkState, dag: ChainDAGRef): T =
   T(
     dag: dag,
   )
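
The only change to init above is proc becoming func; in Nim, func is shorthand for a proc annotated with {.noSideEffect.}, so this simply asserts that constructing the network state has no side effects. A generic illustration of the equivalence (unrelated to this repository's types):

  func double(x: int): int = 2 * x                        # same guarantee as below
  proc doublePragma(x: int): int {.noSideEffect.} = 2 * x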


@@ -1,4 +1,4 @@
-You probably don't want to recreate and push these base images to Docker Hub,
+You probably don't want to re-create and push these base images to Docker Hub,
 because when older images expire and get deleted, it will no longer be possible
 to reproduce old releases.