rework on subnet calculation

Agnish Ghosh 2024-09-03 01:03:15 +05:30
parent 8c7d18ec51
commit cf0cf815b6
5 changed files with 46 additions and 39 deletions
beacon_chain
tests/consensus_spec
vendor


@@ -539,25 +539,25 @@ proc storeBlock(
       parent_root = signedBlock.message.parent_root
       parentBlck = dag.getForkedBlock(parent_root)
     if parentBlck.isSome():
-      var blobsOk = true
-      let blobs =
-        withBlck(parentBlck.get()):
-          when consensusFork >= ConsensusFork.Deneb:
-            var blob_sidecars: BlobSidecars
-            for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len:
-              let blob = BlobSidecar.new()
-              if not dag.db.getBlobSidecar(parent_root, i.BlobIndex, blob[]):
-                blobsOk = false # Pruned, or inconsistent DB
-                break
-              blob_sidecars.add blob
-            Opt.some blob_sidecars
-          else:
-            Opt.none BlobSidecars
+      # var blobsOk = true
+      # let blobs =
+      #   withBlck(parentBlck.get()):
+      #     when consensusFork >= ConsensusFork.Deneb:
+      #       var blob_sidecars: BlobSidecars
+      #       for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len:
+      #         let blob = BlobSidecar.new()
+      #         if not dag.db.getBlobSidecar(parent_root, i.BlobIndex, blob[]):
+      #           blobsOk = false # Pruned, or inconsistent DB
+      #           break
+      #         blob_sidecars.add blob
+      #       Opt.some blob_sidecars
+      #     else:
+      #       Opt.none BlobSidecars

-      if blobsOk:
-        debug "Loaded parent block from storage", parent_root
-        self[].enqueueBlock(
-          MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs, Opt.none(DataColumnSidecars))
+      # if blobsOk:
+      #   debug "Loaded parent block from storage", parent_root
+      #   self[].enqueueBlock(
+      #     MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs, Opt.none(DataColumnSidecars))
       var columnsOk = true
       let data_columns =
@@ -856,13 +856,13 @@ proc storeBlock(
                 error = res.error()
               continue
-            if self.blobQuarantine[].hasBlobs(forkyBlck):
-              let blobs = self.blobQuarantine[].popBlobs(
-                forkyBlck.root, forkyBlck)
-              self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs), Opt.none(DataColumnSidecars))
-            else:
-              discard self.consensusManager.quarantine[].addBlobless(
-                dag.finalizedHead.slot, forkyBlck)
+            # if self.blobQuarantine[].hasBlobs(forkyBlck):
+            #   let blobs = self.blobQuarantine[].popBlobs(
+            #     forkyBlck.root, forkyBlck)
+            #   self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs), Opt.none(DataColumnSidecars))
+            # else:
+            #   discard self.consensusManager.quarantine[].addBlobless(
+            #     dag.finalizedHead.slot, forkyBlck)
             if self.dataColumnQuarantine[].hasDataColumns(forkyBlck):
               let data_columns = self.dataColumnQuarantine[].popDataColumns(


@@ -10,8 +10,7 @@
 import
   std/[sequtils],
   "."/[altair, base, deneb],
-  kzg4844,
-  stew/[byteutils]
+  kzg4844

 from std/sequtils import mapIt
 from std/strutils import join
@@ -27,7 +26,7 @@ const
   CELLS_PER_EXT_BLOB* = FIELD_ELEMENTS_PER_EXT_BLOB div FIELD_ELEMENTS_PER_CELL
     # The number of cells in an extended blob |
   # RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN = 'RCKZGCBATCH__V1_'
-  KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH* = 4 # TODO dedupe vs network
+  KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH* = 4

 type
   BLSFieldElement* = KzgBytes32
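
The `CELLS_PER_EXT_BLOB` definition kept above is fully determined by the preset field-element counts. A quick worked evaluation in Nim, assuming the mainnet EIP-7594 preset values (4096-field-element blobs extended 2x, 64 field elements per cell) — these values are not part of this diff:

# Worked evaluation (not part of this commit), assuming the mainnet
# EIP-7594 preset: a blob holds 4096 field elements and is extended 2x,
# and each cell holds 64 field elements.
const
  FIELD_ELEMENTS_PER_EXT_BLOB = 2 * 4096
  FIELD_ELEMENTS_PER_CELL = 64
  CELLS_PER_EXT_BLOB = FIELD_ELEMENTS_PER_EXT_BLOB div FIELD_ELEMENTS_PER_CELL

doAssert CELLS_PER_EXT_BLOB == 128   # 8192 div 64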


@@ -53,8 +53,11 @@ proc sortedColumnIndexList*(columnsPerSubnet: ColumnIndex,
 proc get_custody_column_subnet*(node_id: NodeId,
                                 custody_subnet_count: uint64):
                                 Result[HashSet[uint64], cstring] =
-  # fetches the subnets for custody column for the current node
-  # assert custody_subnet_count <= DATA_COLUMN_SIDECAR_SUBNET_COUNT
+  # Decouples the custody subnet computation part from
+  # `get_custody_columns`, in order to later use this subnet list
+  # to maintain subscription to specific column subnets.
   if not (custody_subnet_count <= DATA_COLUMN_SIDECAR_SUBNET_COUNT):
     return err("Eip7594: Custody subnet count exceeds the DATA_COLUMN_SIDECAR_SUBNET_COUNT")
@@ -64,11 +67,19 @@ proc get_custody_column_subnet*(node_id: NodeId,
   while subnet_ids.len < int(custody_subnet_count):
-    var subnet_id_bytes: array[8, byte]
-    subnet_id_bytes[0..7] = current_id.toBytesLE().toOpenArray(0,7)
+    var
+      current_id_bytes: array[32, byte]
+      hashed_bytes: array[8, byte]
+    current_id_bytes = current_id.toBytesBE()
+    current_id_bytes.reverse()

-    var subnet_id = bytes_to_uint64(subnet_id_bytes) mod
-      DATA_COLUMN_SIDECAR_SUBNET_COUNT
+    let
+      hashed_current_id = eth2digest(current_id_bytes)
+    hashed_bytes[0..7] = hashed_current_id.data.toOpenArray(0,7)
+    var subnet_id = bytes_to_uint64(hashed_bytes) mod
+      DATA_COLUMN_SIDECAR_SUBNET_COUNT
     if subnet_id notin subnet_ids:
       subnet_ids.incl(subnet_id)
@@ -78,12 +89,9 @@ proc get_custody_column_subnet*(node_id: NodeId,
       current_id = NodeId(StUint[256].zero)
     current_id += NodeId(StUint[256].one)
-  # assert len(subnet_ids) == len(set(subnet_ids))
-  if not (subnet_ids.len == subnet_ids.len):
-    return err("Eip7594: Subnet ids are not unique")
   ok(subnet_ids)

 # https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/specs/_features/eip7594/das-core.md#get_custody_columns
 proc get_custody_columns*(node_id: NodeId,
                           custody_subnet_count: uint64):
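
The hunk above is the substance of this commit: instead of taking the first eight little-endian bytes of `current_id` directly, each candidate id is now serialized to little-endian bytes (`toBytesBE` followed by `reverse`), hashed with `eth2digest` (SHA-256), and the first eight digest bytes are reduced modulo `DATA_COLUMN_SIDECAR_SUBNET_COUNT`, in line with the consensus-spec `get_custody_columns` linked in the diff. The sketch below is a minimal, self-contained illustration of that loop only; it is not the nimbus-eth2 code. `custodySubnets` and the `SubnetCount` value are placeholders, nimcrypto's `sha256` stands in for `eth2digest`, and the overflow handling is simplified.

import std/sets
import nimcrypto   # assumption: SHA-256 via nimcrypto stands in for eth2digest

const SubnetCount = 32'u64   # placeholder for DATA_COLUMN_SIDECAR_SUBNET_COUNT

proc custodySubnets(nodeId: array[32, byte], custodyCount: uint64): HashSet[uint64] =
  ## Illustrative sketch: hash successive candidate ids (little-endian bytes)
  ## and map the first 8 digest bytes onto a subnet id until `custodyCount`
  ## distinct subnets have been collected.
  doAssert custodyCount <= SubnetCount   # mirrors the spec's assert
  result = initHashSet[uint64]()
  var currentId = nodeId                 # 256-bit counter, little-endian bytes
  while result.len < int(custodyCount):
    let digest = sha256.digest(currentId)
    var word = 0'u64
    for i in 0 ..< 8:                    # bytes_to_uint64 is little-endian
      word = word or (uint64(digest.data[i]) shl (8 * i))
    result.incl(word mod SubnetCount)
    for i in 0 ..< 32:                   # simplified "current_id += 1" with wrap-around
      if currentId[i] == 255'u8:
        currentId[i] = 0
      else:
        inc currentId[i]
        break

when isMainModule:
  echo custodySubnets(default(array[32, byte]), 4'u64)   # four distinct subnet ids

Because the derivation depends only on the node id, any peer can recompute another node's custody subnets (and from them its custody columns) locally, which is the property the spec's `get_custody_columns` relies on.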

Binary file not shown.

@@ -1 +1 @@
-Subproject commit fc7a45a731736248b96ad5827a8356c0e14d3b8c
+Subproject commit 4748d838797fd42bcb57c38f682adcb4522a152a