introduced custody groups, and renamed csc to cgc (#6789)

* introduced custody groups, and renamed csc to cgc
* accommodate tests
* revert to naming columns
* applied review changes
* updated all test files
* addressed review 2
* merged in typo fixes by airdrop farmers/other spam PRs
* handle lint CI
* shift to iterators, avoid redundant copying

This commit is contained in:
parent 50ab4cf392
commit c0108c2f2a
@@ -196,15 +196,15 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 2/2 Fail: 0/2 Skip: 0/2
## EF - EIP7594 - Networking [Preset: mainnet]
```diff
-+ Networking - Get Custody Columns - mainnet/fulu/networking/get_custody_columns/pyspec_test OK
-+ Networking - Get Custody Columns - mainnet/fulu/networking/get_custody_columns/pyspec_test OK
-+ Networking - Get Custody Columns - mainnet/fulu/networking/get_custody_columns/pyspec_test OK
-+ Networking - Get Custody Columns - mainnet/fulu/networking/get_custody_columns/pyspec_test OK
-+ Networking - Get Custody Columns - mainnet/fulu/networking/get_custody_columns/pyspec_test OK
-+ Networking - Get Custody Columns - mainnet/fulu/networking/get_custody_columns/pyspec_test OK
-+ Networking - Get Custody Columns - mainnet/fulu/networking/get_custody_columns/pyspec_test OK
-+ Networking - Get Custody Columns - mainnet/fulu/networking/get_custody_columns/pyspec_test OK
-+ Networking - Get Custody Columns - mainnet/fulu/networking/get_custody_columns/pyspec_test OK
++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_columns/pyspec_tests OK
++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_columns/pyspec_tests OK
++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_columns/pyspec_tests OK
++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_columns/pyspec_tests OK
++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_columns/pyspec_tests OK
++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_columns/pyspec_tests OK
++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_columns/pyspec_tests OK
++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_columns/pyspec_tests OK
++ Networking - Get Custody Groups - mainnet/fulu/networking/get_custody_columns/pyspec_tests OK
```
OK: 9/9 Fail: 0/9 Skip: 0/9
## EF - KZG
@@ -2407,7 +2407,7 @@ It also brings further performance optimizations.
* A new `slashingdb` sub-command with `import` and `export` options. This allows for
  safely migrating to Nimbus from another client (as per the [EIP-3076](https://eips.ethereum.org/EIPS/eip-3076)
  slashing protection interchange format).
-  Please see the the newly prepared [migration guides](https://nimbus.guide/migration.html) for the details.
+  Please see the newly prepared [migration guides](https://nimbus.guide/migration.html) for the details.

* A new `ncli_db validatorPerf` command. This can be used to perform a textual
  report for the attestation performance of a particular validator
@@ -94,7 +94,7 @@ proc processSignedBeaconBlock*(
  # Block validation is delegated to the sync committee and is done with delay.
  # If we forward invalid spam blocks, we may be disconnected + IP banned,
  # so we avoid accepting any blocks. Since we don't meaningfully contribute
- # to the blocks gossip, we may also accummulate negative peer score over time.
+ # to the blocks gossip, we may also accumulate negative peer score over time.
  # However, we are actively contributing to other topics, so some of the
  # negative peer score may be offset through those different topics.
  # The practical impact depends on the actually deployed scoring heuristics.
@@ -127,7 +127,7 @@ proc queryRandom*(
    forkId: ENRForkID,
    wantedAttnets: AttnetBits,
    wantedSyncnets: SyncnetBits,
-   wantedCscnets: CscBits,
+   wantedCgcnets: CgcBits,
    minScore: int): Future[seq[Node]] {.async: (raises: [CancelledError]).} =
  ## Perform a discovery query for a random target
  ## (forkId) and matching at least one of the attestation subnets.
@@ -152,17 +152,17 @@ proc queryRandom*(
    if not forkId.isCompatibleForkId(peerForkId):
      continue

-   let cscCountBytes = n.record.get(enrCustodySubnetCountField, seq[byte])
-   if cscCountBytes.isOk():
-     let cscCountNode =
+   let cgcCountBytes = n.record.get(enrCustodySubnetCountField, seq[byte])
+   if cgcCountBytes.isOk():
+     let cgcCountNode =
        try:
-         SSZ.decode(cscCountBytes.get(), uint8)
+         SSZ.decode(cgcCountBytes.get(), uint8)
        except SerializationError as e:
-         debug "Could not decode the csc ENR field of peer",
+         debug "Could not decode the cgc ENR field of peer",
            peer = n.record.toURI(), exception = e.name, msg = e.msg
          continue

-     if wantedCscnets.countOnes().uint8 == cscCountNode:
+     if wantedCgcnets.countOnes().uint8 == cgcCountNode:
        score += 1

    let attnetsBytes = n.record.get(enrAttestationSubnetsField, seq[byte])
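A note on the field format: SSZ encodes a `uint8` as its single raw byte, so the `cgc` ENR entry decoded above is a one-byte payload. A minimal standalone round trip (hypothetical `encodeCgc`/`decodeCgc` helpers, not the codebase's API):

```nim
# Sketch only: SSZ serialization of a uint8 is the raw byte itself,
# which is all the `cgc` ENR field carries. Helper names are hypothetical.
proc encodeCgc(count: uint8): seq[byte] =
  @[count]  # byte is an alias for uint8 in Nim

proc decodeCgc(payload: openArray[byte]): uint8 =
  doAssert payload.len == 1, "cgc payload must be exactly one byte"
  payload[0]

when isMainModule:
  doAssert decodeCgc(encodeCgc(128'u8)) == 128'u8
```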
@@ -1506,7 +1506,7 @@ proc trimConnections(node: Eth2Node, count: int) =
  if toKick <= 0: return

proc getLowSubnets(node: Eth2Node, epoch: Epoch):
-   (AttnetBits, SyncnetBits, CscBits) =
+   (AttnetBits, SyncnetBits, CgcBits) =
  # Returns the subnets required to have a healthy mesh
  # The subnets are computed, to, in order:
  # - Have 0 subnet with < `dLow` peers from topic subscription
@@ -1575,7 +1575,7 @@ proc getLowSubnets(node: Eth2Node, epoch: Epoch):
    if epoch >= node.cfg.FULU_FORK_EPOCH:
      findLowSubnets(getDataColumnSidecarTopic, uint64, (DATA_COLUMN_SIDECAR_SUBNET_COUNT).int)
    else:
-     default(CscBits)
+     default(CgcBits)
  )

proc runDiscoveryLoop(node: Eth2Node) {.async: (raises: [CancelledError]).} =
@@ -1584,20 +1584,20 @@ proc runDiscoveryLoop(node: Eth2Node) {.async: (raises: [CancelledError]).} =
  while true:
    let
      currentEpoch = node.getBeaconTime().slotOrZero.epoch
-     (wantedAttnets, wantedSyncnets, wantedCscnets) = node.getLowSubnets(currentEpoch)
+     (wantedAttnets, wantedSyncnets, wantedCgcnets) = node.getLowSubnets(currentEpoch)
      wantedAttnetsCount = wantedAttnets.countOnes()
      wantedSyncnetsCount = wantedSyncnets.countOnes()
-     wantedCscnetsCount = wantedCscnets.countOnes()
+     wantedCgcnetsCount = wantedCgcnets.countOnes()
      outgoingPeers = node.peerPool.lenCurrent({PeerType.Outgoing})
      targetOutgoingPeers = max(node.wantedPeers div 10, 3)

    if wantedAttnetsCount > 0 or wantedSyncnetsCount > 0 or
-       wantedCscnetsCount > 0 or outgoingPeers < targetOutgoingPeers:
+       wantedCgcnetsCount > 0 or outgoingPeers < targetOutgoingPeers:

      let
        minScore =
          if wantedAttnetsCount > 0 or wantedSyncnetsCount > 0 or
-             wantedCscnetsCount > 0:
+             wantedCgcnetsCount > 0:
            1
          else:
            0
@@ -1605,7 +1605,7 @@ proc runDiscoveryLoop(node: Eth2Node) {.async: (raises: [CancelledError]).} =
        node.discoveryForkId,
        wantedAttnets,
        wantedSyncnets,
-       wantedCscnets,
+       wantedCgcnets,
        minScore)

      let newPeers = block:
@@ -2435,18 +2435,18 @@ func announcedENR*(node: Eth2Node): enr.Record =
  doAssert node.discovery != nil, "The Eth2Node must be initialized"
  node.discovery.localNode.record

-proc lookupCscFromPeer*(peer: Peer): uint64 =
+proc lookupCgcFromPeer*(peer: Peer): uint64 =
  # Fetches the custody column count from a remote peer.
- # If the peer advertises their custody column count via the `csc` ENR field,
+ # If the peer advertises their custody column count via the `cgc` ENR field,
  # that value is returned. Otherwise, the default value `CUSTODY_REQUIREMENT`
  # is assumed.

  let metadata = peer.metadata
  if metadata.isOk:
-   return metadata.get.custody_subnet_count
+   return metadata.get.custody_group_count

  # Try getting the custody count from ENR if metadata fetch fails.
- debug "Could not get csc from metadata, trying from ENR",
+ debug "Could not get cgc from metadata, trying from ENR",
    peer_id = peer.peerId
  let enrOpt = peer.enr
  if not enrOpt.isNone:
@@ -2454,8 +2454,8 @@ proc lookupCscFromPeer*(peer: Peer): uint64 =
    let enrFieldOpt = enr.get(enrCustodySubnetCountField, seq[byte])
    if enrFieldOpt.isOk:
      try:
-       let csc = SSZ.decode(enrFieldOpt.get, uint8)
-       return csc.uint64
+       let cgc = SSZ.decode(enrFieldOpt.get, uint8)
+       return cgc.uint64
      except SszError, SerializationError:
        discard # Ignore decoding errors and fallback to default
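The lookup above is a three-step fallback: live metadata first, then the `cgc` ENR field, then the `CUSTODY_REQUIREMENT` default. A compact restatement of just that chain (inputs modeled as hypothetical `Option`s, not the actual peer API):

```nim
import std/options

const CUSTODY_REQUIREMENT = 4'u64

# Sketch of the fallback order in lookupCgcFromPeer: prefer metadata,
# fall back to the ENR field, otherwise assume the default.
proc custodyGroupCount(metadataCgc, enrCgc: Option[uint64]): uint64 =
  if metadataCgc.isSome:
    metadataCgc.get
  elif enrCgc.isSome:
    enrCgc.get
  else:
    CUSTODY_REQUIREMENT

when isMainModule:
  doAssert custodyGroupCount(none(uint64), some(128'u64)) == 128
  doAssert custodyGroupCount(none(uint64), none(uint64)) == CUSTODY_REQUIREMENT
```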
@@ -2623,19 +2623,19 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
  else:
    debug "Stability subnets changed; updated ENR attnets", attnets

-proc loadCscnetMetadataAndEnr*(node: Eth2Node, cscnets: CscCount) =
- node.metadata.custody_subnet_count = cscnets.uint64
+proc loadCgcnetMetadataAndEnr*(node: Eth2Node, cgcnets: CgcCount) =
+ node.metadata.custody_group_count = cgcnets.uint64
  let res =
    node.discovery.updateRecord({
-     enrCustodySubnetCountField: SSZ.encode(cscnets)
+     enrCustodySubnetCountField: SSZ.encode(cgcnets)
    })

  if res.isErr:
    # This should not occur in this scenario as the private key would always
    # be the correct one and the ENR will not increase in size
-   warn "Failed to update the ENR csc field", error = res.error
+   warn "Failed to update the ENR cgc field", error = res.error
  else:
-   debug "Updated ENR csc", cscnets
+   debug "Updated ENR cgc", cgcnets

proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#sync-committee-subnet-stability
@@ -5,6 +5,8 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/uri,
  stew/io2, chronos, chronos/apps/http/httpclient, snappy,
@@ -41,7 +43,7 @@ proc fetchGenesisBytes*(
  result = await downloadFile(genesisStateUrlOverride.get(parseUri metadata.genesis.url))
  # Under the built-in default URL, we serve a snappy-encoded BeaconState in order
  # to reduce the size of the downloaded file with roughly 50% (this precise ratio
- # depends on the number of validator recors). The user is still free to provide
+ # depends on the number of validator records). The user is still free to provide
  # any URL which may serve an uncompressed state (e.g. a Beacon API endpoint)
  #
  # Since a SSZ-encoded BeaconState will start with a LittleEndian genesis time
@@ -417,14 +417,15 @@ proc initFullNode(
    blobQuarantine = newClone(BlobQuarantine.init(onBlobSidecarAdded))
    dataColumnQuarantine = newClone(DataColumnQuarantine.init())
    supernode = node.config.peerdasSupernode
-   localCustodySubnets =
+   localCustodyGroups =
      if supernode:
-       DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64
+       NUMBER_OF_CUSTODY_GROUPS.uint64
      else:
        CUSTODY_REQUIREMENT.uint64
    custody_columns_set =
-     node.network.nodeId.get_custody_columns_set(max(SAMPLES_PER_SLOT.uint64,
-       localCustodySubnets))
+     node.network.nodeId.resolve_column_sets_from_custody_groups(
+       max(SAMPLES_PER_SLOT.uint64,
+         localCustodyGroups))
    consensusManager = ConsensusManager.new(
      dag, attestationPool, quarantine, node.elManager,
      ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets),
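Worth noting in the `max(SAMPLES_PER_SLOT.uint64, localCustodyGroups)` expression above: a full node's advertised custody (`CUSTODY_REQUIREMENT` = 4) is raised to the sampling floor of 8 groups, while a supernode's 128 passes through unchanged. A quick check with the constants from this diff:

```nim
# Sampling floor used in initFullNode; constants taken from this diff.
const
  SAMPLES_PER_SLOT = 8'u64
  CUSTODY_REQUIREMENT = 4'u64
  NUMBER_OF_CUSTODY_GROUPS = 128'u64

doAssert max(SAMPLES_PER_SLOT, CUSTODY_REQUIREMENT) == 8'u64        # full node
doAssert max(SAMPLES_PER_SLOT, NUMBER_OF_CUSTODY_GROUPS) == 128'u64 # supernode
```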
@@ -548,26 +549,27 @@ proc initFullNode(
  # really variable. Whenever the BN is a supernode, column quarantine
  # essentially means all the NUMBER_OF_COLUMNS, as per mentioned in the
  # spec. However, in terms of fullnode, quarantine is really dependent
- # on the randomly assigned columns, by `get_custody_columns`.
+ # on the randomly assigned columns, by `resolve_columns_from_custody_groups`.

  # Hence, in order to keep column quarantine accurate and error proof
  # the custody columns are computed once as the BN boots. Then the values
  # are used globally around the codebase.

- # `get_custody_columns` is not a very expensive function, but there
- # are multiple instances of computing custody columns, especially
+ # `resolve_columns_from_custody_groups` is not a very expensive function,
+ # but there are multiple instances of computing custody columns, especially
  # during peer selection, sync with columns, and so on. That is why,
  # the rationale of populating it at boot and using it gloabally.

  dataColumnQuarantine[].supernode = supernode
  dataColumnQuarantine[].custody_columns =
-   node.network.nodeId.get_custody_columns(max(SAMPLES_PER_SLOT.uint64,
-     localCustodySubnets))
+   node.network.nodeId.resolve_columns_from_custody_groups(
+     max(SAMPLES_PER_SLOT.uint64,
+       localCustodyGroups))

  if node.config.peerdasSupernode:
-   node.network.loadCscnetMetadataAndEnr(DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint8)
+   node.network.loadCgcnetMetadataAndEnr(NUMBER_OF_CUSTODY_GROUPS.uint8)
  else:
-   node.network.loadCscnetMetadataAndEnr(CUSTODY_REQUIREMENT.uint8)
+   node.network.loadCgcnetMetadataAndEnr(CUSTODY_REQUIREMENT.uint8)

  if node.config.lightClientDataServe:
    proc scheduleSendingLightClientUpdates(slot: Slot) =
@@ -177,7 +177,7 @@ proc installEventApiHandlers*(router: var RestRouter, node: BeaconNode) =
      discard await race(handlers)
    except ValueError:
      raiseAssert "There should be more than one event handler at this point!"
-   # One of the handlers finished, it means that connection has been droped, so
+   # One of the handlers finished, it means that connection has been dropped, so
    # we cancelling all other handlers.
    let pending =
      handlers.filterIt(not(it.finished())).mapIt(it.cancelAndWait())
@@ -59,9 +59,13 @@ const
  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/das-core.md#networking
  DATA_COLUMN_SIDECAR_SUBNET_COUNT* = 128

- # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/das-core.md#custody-setting
+ # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-setting
  SAMPLES_PER_SLOT* = 8
  CUSTODY_REQUIREMENT* = 4
+ NUMBER_OF_CUSTODY_GROUPS* = 128
+
+ # Number of columns in the network per custody group
+ COLUMNS_PER_GROUP* = NUMBER_OF_COLUMNS div NUMBER_OF_CUSTODY_GROUPS

type
  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/_features/eip7594/polynomial-commitments-sampling.md#custom-types
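With the fulu preset value NUMBER_OF_COLUMNS = 128 (defined outside this hunk; restated here as an assumption), the new constant works out to exactly one column per custody group, so group indices and column indices coincide under this preset. A quick check of the arithmetic:

```nim
# Worked check of the custody-setting arithmetic above; NUMBER_OF_COLUMNS
# is assumed to be 128 (fulu preset), as its definition is outside this diff.
const
  NUMBER_OF_COLUMNS = 128
  NUMBER_OF_CUSTODY_GROUPS = 128
  COLUMNS_PER_GROUP = NUMBER_OF_COLUMNS div NUMBER_OF_CUSTODY_GROUPS

doAssert COLUMNS_PER_GROUP == 1  # each custody group maps to exactly one column
```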
@@ -74,10 +78,11 @@ type
  Cells* = KzgCells
  CellsAndProofs* = KzgCellsAndKzgProofs

- # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/das-core.md#custom-types
+ # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custom-types
  RowIndex* = uint64
  ColumnIndex* = uint64
  CellIndex* = uint64
+ CustodyIndex* = uint64


type
@@ -108,16 +113,16 @@ type
    row_index*: RowIndex

  # Not in spec, defined in order to compute custody subnets
- CscBits* = BitArray[DATA_COLUMN_SIDECAR_SUBNET_COUNT]
+ CgcBits* = BitArray[DATA_COLUMN_SIDECAR_SUBNET_COUNT]

- CscCount* = uint8
+ CgcCount* = uint8

  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/p2p-interface.md#metadata
  MetaData* = object
    seq_number*: uint64
    attnets*: AttnetBits
    syncnets*: SyncnetBits
-   custody_subnet_count*: uint64
+   custody_group_count*: uint64

  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#executionpayload
  ExecutionPayload* = object
@@ -47,7 +47,7 @@ const

  enrAttestationSubnetsField* = "attnets"
  enrSyncSubnetsField* = "syncnets"
- enrCustodySubnetCountField* = "csc"
+ enrCustodySubnetCountField* = "cgc"
  enrForkIdField* = "eth2"

template eth2Prefix(forkDigest: ForkDigest): string =
@@ -9,7 +9,7 @@

# Uncategorized helper functions from the spec
import
- std/algorithm,
+ std/[algorithm, sequtils],
  results,
  eth/p2p/discoveryv5/[node],
  kzg4844/[kzg],
@@ -24,6 +24,7 @@ type
  CellBytes = array[fulu.CELLS_PER_EXT_BLOB, Cell]
  ProofBytes = array[fulu.CELLS_PER_EXT_BLOB, KzgProof]

+# Shall be deprecated once alpha 11 tests are released
func sortedColumnIndices(columnsPerSubnet: ColumnIndex,
                         subnetIds: HashSet[uint64]):
                         seq[ColumnIndex] =
@@ -35,18 +36,7 @@ func sortedColumnIndices(columnsPerSubnet: ColumnIndex,
  res.sort
  res

-func sortedColumnIndexList(columnsPerSubnet: ColumnIndex,
-                           subnetIds: HashSet[uint64]):
-                           List[ColumnIndex, NUMBER_OF_COLUMNS] =
- var
-   res: seq[ColumnIndex]
- for i in 0'u64 ..< columnsPerSubnet:
-   for subnetId in subnetIds:
-     let index = DATA_COLUMN_SIDECAR_SUBNET_COUNT * i + subnetId
-     res.add(ColumnIndex(index))
- res.sort()
- List[ColumnIndex, NUMBER_OF_COLUMNS].init(res)
-
+# Shall be deprecated once alpha 11 tests are released
func get_custody_column_subnets*(node_id: NodeId,
                                 custody_subnet_count: uint64):
                                 HashSet[uint64] =
@@ -81,6 +71,7 @@ func get_custody_column_subnets*(node_id: NodeId,
  subnet_ids

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/das-core.md#get_custody_columns
+# Shall be deprecated once alpha 11 tests are released
func get_custody_columns*(node_id: NodeId,
                          custody_subnet_count: uint64):
                          seq[ColumnIndex] =
@@ -93,34 +84,70 @@ func get_custody_columns*(node_id: NodeId,

  sortedColumnIndices(ColumnIndex(columns_per_subnet), subnet_ids)

-func get_custody_columns_set*(node_id: NodeId,
-                              custody_subnet_count: uint64):
-                              HashSet[ColumnIndex] =
- # This method returns a HashSet of column indices,
- # the method is specifically relevant while peer filtering
+iterator compute_columns_for_custody_group(custody_group: CustodyIndex):
+    ColumnIndex =
+  for i in 0'u64 ..< COLUMNS_PER_GROUP:
+    yield ColumnIndex(NUMBER_OF_CUSTODY_GROUPS * i + custody_group)
+
+func handle_custody_groups(node_id: NodeId,
+                           custody_group_count: CustodyIndex):
+                           HashSet[CustodyIndex] =
+
+  # Decouples the custody group computation from
+  # `get_custody_groups`, in order to later use this custody
+  # group list across various types of output types
+
+  var
+    custody_groups: HashSet[CustodyIndex]
+    current_id = node_id
+
+  while custody_groups.lenu64 < custody_group_count:
+    var hashed_bytes: array[8, byte]
+
+    let
+      current_id_bytes = current_id.toBytesLE()
+      hashed_current_id = eth2digest(current_id_bytes)
+
+    hashed_bytes[0..7] = hashed_current_id.data.toOpenArray(0,7)
+    let custody_group = bytes_to_uint64(hashed_bytes) mod
+      NUMBER_OF_CUSTODY_GROUPS
+
+    custody_groups.incl custody_group
+
+    inc current_id
+
+  custody_groups
+
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#get_custody_groups
+func get_custody_groups*(node_id: NodeId,
+                         custody_group_count: CustodyIndex):
+                         seq[CustodyIndex] =
+  let custody_groups =
+    node_id.handle_custody_groups(custody_group_count)
+
+  var groups = custody_groups.toSeq()
+  groups.sort()
+  groups
+
+func resolve_columns_from_custody_groups*(node_id: NodeId,
+                                          custody_group_count: CustodyIndex):
+                                          seq[ColumnIndex] =

  let
-   subnet_ids =
-     get_custody_column_subnets(node_id, custody_subnet_count)
- const
-   columns_per_subnet =
-     NUMBER_OF_COLUMNS div DATA_COLUMN_SIDECAR_SUBNET_COUNT
+   custody_groups = node_id.get_custody_groups(custody_group_count)

- sortedColumnIndices(ColumnIndex(columns_per_subnet), subnet_ids).toHashSet()
+ var flattened =
+   newSeqOfCap[ColumnIndex](COLUMNS_PER_GROUP * custody_groups.len)
+ for group in custody_groups:
+   for index in compute_columns_for_custody_group(group):
+     flattened.add index
+ flattened

-func get_custody_column_list*(node_id: NodeId,
-                              custody_subnet_count: uint64):
-                              List[ColumnIndex, NUMBER_OF_COLUMNS] =
+func resolve_column_sets_from_custody_groups*(node_id: NodeId,
+                                              custody_group_count: CustodyIndex):
+                                              HashSet[ColumnIndex] =

  # Not in spec in the exact format, but it is useful in sorting custody columns
  # before sending, data_column_sidecars_by_range requests
- let
-   subnet_ids =
-     get_custody_column_subnets(node_id, custody_subnet_count)
- const
-   columns_per_subnet =
-     NUMBER_OF_COLUMNS div DATA_COLUMN_SIDECAR_SUBNET_COUNT
-
- sortedColumnIndexList(ColumnIndex(columns_per_subnet), subnet_ids)
+ node_id.resolve_columns_from_custody_groups(custody_group_count).toHashSet()

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/_features/eip7594/das-core.md#compute_matrix
proc compute_matrix*(blobs: seq[KzgBlob]): Result[seq[MatrixEntry], cstring] =
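The hashing loop in `handle_custody_groups` is easier to see in isolation: digest the current id, take the first 8 bytes as a little-endian integer, reduce it mod NUMBER_OF_CUSTODY_GROUPS, and increment the id until enough distinct groups accumulate. A toy, runnable rendition of that control flow (std/hashes stands in for eth2digest and bytes_to_uint64, so the indices it yields are not the spec's; only the structure matches):

```nim
import std/[hashes, sets]

const NUMBER_OF_CUSTODY_GROUPS = 128'u64

proc toyCustodyGroups(nodeId: uint64, groupCount: uint64): HashSet[uint64] =
  # Same shape as handle_custody_groups: derive a group index from a
  # digest of the current id, then keep incrementing the id until
  # enough distinct groups have been collected.
  var currentId = nodeId
  while result.len.uint64 < groupCount:
    # std/hashes is a stand-in for eth2digest/bytes_to_uint64 here.
    let group = cast[uint64](hash(currentId)) mod NUMBER_OF_CUSTODY_GROUPS
    result.incl group
    inc currentId

when isMainModule:
  doAssert toyCustodyGroups(42'u64, 4).len == 4
```

The loop terminates because each iteration either adds a new group or advances the id, and at most NUMBER_OF_CUSTODY_GROUPS distinct values exist; iterating over the resulting set with `compute_columns_for_custody_group` then yields the custody columns.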
@@ -235,7 +262,7 @@ proc get_data_column_sidecars*(signed_beacon_block: electra.TrustedSignedBeaconB
# blobs from blob bundles
proc get_data_column_sidecars*(signed_beacon_block: electra.SignedBeaconBlock,
                               blobs: seq[KzgBlob]):
-                              Result[seq[DataColumnSidecar], string] =
+                              Result[seq[DataColumnSidecar], cstring] =
  ## Given a signed beacon block and the blobs corresponding to the block,
  ## this function assembles the sidecars which can be distributed to
  ## the peers post data column reconstruction at every slot start.
@@ -306,29 +306,30 @@ proc checkPeerCustody*(rman: RequestManager,
      # to filter other supernodes, rather than filter
      # too many full nodes that have a subset of the custody
      # columns
-     if peer.lookupCscFromPeer() ==
-         DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64:
+     if peer.lookupCgcFromPeer() ==
+         NUMBER_OF_CUSTODY_GROUPS.uint64:
        return true

    else:
-     if peer.lookupCscFromPeer() ==
-         DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64:
+     if peer.lookupCgcFromPeer() ==
+         NUMBER_OF_CUSTODY_GROUPS.uint64:
        return true

-     elif peer.lookupCscFromPeer() ==
+     elif peer.lookupCgcFromPeer() ==
          CUSTODY_REQUIREMENT.uint64:

        # Fetch the remote custody count
-       let remoteCustodySubnetCount =
-         peer.lookupCscFromPeer()
+       let remoteCustodyGroupCount =
+         peer.lookupCgcFromPeer()

        # Extract remote peer's nodeID from peerID
        # Fetch custody columns from remote peer
        let
          remoteNodeId = fetchNodeIdFromPeerId(peer)
          remoteCustodyColumns =
-           remoteNodeId.get_custody_columns_set(max(SAMPLES_PER_SLOT.uint64,
-             remoteCustodySubnetCount))
+           remoteNodeId.resolve_column_sets_from_custody_groups(
+             max(SAMPLES_PER_SLOT.uint64,
+               remoteCustodyGroupCount))

        for local_column in rman.custody_columns_set:
          if local_column notin remoteCustodyColumns:
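Net effect of the branches above: a remote peer advertising cgc == NUMBER_OF_CUSTODY_GROUPS (a supernode) qualifies outright, while a default-custody peer qualifies only if the column set resolved from its node id covers every locally custodied column. A compact restatement of that acceptance predicate (hypothetical helper, assuming both column sets were already resolved):

```nim
import std/sets

const NUMBER_OF_CUSTODY_GROUPS = 128'u64

# Sketch of checkPeerCustody's acceptance rule: supernodes always
# qualify; otherwise every locally custodied column must be covered
# by the columns the remote peer custodies.
proc acceptsPeer(remoteCgc: uint64,
                 localColumns, remoteColumns: HashSet[uint64]): bool =
  if remoteCgc == NUMBER_OF_CUSTODY_GROUPS:
    return true
  for col in localColumns:
    if col notin remoteColumns:
      return false
  true
```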
@@ -53,4 +53,4 @@ The string of letters -- what we call the `sync worker map` (in the above case r
```

!!! tip
-    You can also use you calls outlined in the [REST API page](./rest-api.md) to retrieve similar information.
+    You can also use the calls outlined in the [REST API page](./rest-api.md) to retrieve similar information.
@@ -22,7 +22,7 @@ from std/sequtils import mapIt

proc runGetCustodyColumns(suiteName, path: string) =
  let relativePathComponent = path.relativeTestPathComponent()
- test "Networking - Get Custody Columns - " & relativePathComponent:
+ test "Networking - Get Custody Groups - " & relativePathComponent:
    type TestMetaYaml = object
      node_id: string
      custody_group_count: uint64
@@ -38,13 +38,14 @@ proc runGetCustodyColumns(suiteName, path: string) =
      custody_group_count = meta.custody_group_count
      reslt = (meta.result).mapIt(it)

-   let columns = get_custody_columns(node_id, custody_group_count)
+   let columns = get_custody_groups(node_id, custody_group_count)

    for i in 0..<columns.lenu64:
      check columns[i] == reslt[i]

suite "EF - EIP7594 - Networking" & preset():
  const presetPath = SszTestsDir/const_preset
+ # foldering to be resolved in alpha 11 release of consensus spec tests
  let basePath =
    presetPath/"fulu"/"networking"/"get_custody_columns"/"pyspec_tests"
  for kind, path in walkDir(basePath, relative = true, checkDir = true):
|
@ -36,7 +36,7 @@ proc generateNode(rng: ref HmacDrbgContext, port: Port,
|
|||
|
||||
# TODO: Add tests with a syncnets preference
|
||||
const noSyncnetsPreference = SyncnetBits()
|
||||
const noCscnetsPreference = CscBits()
|
||||
const noCgcnetsPreference = CgcBits()
|
||||
|
||||
procSuite "Eth2 specific discovery tests":
|
||||
let
|
||||
|
@@ -69,7 +69,7 @@ procSuite "Eth2 specific discovery tests":

    let discovered = await node1.queryRandom(
      enrForkId, attnetsSelected, noSyncnetsPreference,
-     noCscnetsPreference, 1)
+     noCgcnetsPreference, 1)
    check discovered.len == 1

    await node1.closeWait()
@@ -108,7 +108,7 @@ procSuite "Eth2 specific discovery tests":

    let discovered = await node1.queryRandom(
      enrForkId, attnetsSelected, noSyncnetsPreference,
-     noCscnetsPreference, 1)
+     noCgcnetsPreference, 1)
    check discovered.len == 1

    await node1.closeWait()
@@ -137,7 +137,7 @@ procSuite "Eth2 specific discovery tests":
    block:
      let discovered = await node1.queryRandom(
        enrForkId, attnetsSelected, noSyncnetsPreference,
-       noCscnetsPreference, 1)
+       noCgcnetsPreference, 1)
      check discovered.len == 0

    block:
@@ -153,7 +153,7 @@ procSuite "Eth2 specific discovery tests":

    let discovered = await node1.queryRandom(
      enrForkId, attnetsSelected, noSyncnetsPreference,
-     noCscnetsPreference, 1)
+     noCgcnetsPreference, 1)
    check discovered.len == 1

    await node1.closeWait()