diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim
index bf992b165..f9ff552bf 100644
--- a/beacon_chain/gossip_processing/block_processor.nim
+++ b/beacon_chain/gossip_processing/block_processor.nim
@@ -539,25 +539,25 @@ proc storeBlock(
         parent_root = signedBlock.message.parent_root
         parentBlck = dag.getForkedBlock(parent_root)
       if parentBlck.isSome():
-        var blobsOk = true
-        let blobs =
-          withBlck(parentBlck.get()):
-            when consensusFork >= ConsensusFork.Deneb:
-              var blob_sidecars: BlobSidecars
-              for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len:
-                let blob = BlobSidecar.new()
-                if not dag.db.getBlobSidecar(parent_root, i.BlobIndex, blob[]):
-                  blobsOk = false # Pruned, or inconsistent DB
-                  break
-                blob_sidecars.add blob
-              Opt.some blob_sidecars
-            else:
-              Opt.none BlobSidecars
+        # var blobsOk = true
+        # let blobs =
+        #   withBlck(parentBlck.get()):
+        #     when consensusFork >= ConsensusFork.Deneb:
+        #       var blob_sidecars: BlobSidecars
+        #       for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len:
+        #         let blob = BlobSidecar.new()
+        #         if not dag.db.getBlobSidecar(parent_root, i.BlobIndex, blob[]):
+        #           blobsOk = false # Pruned, or inconsistent DB
+        #           break
+        #         blob_sidecars.add blob
+        #       Opt.some blob_sidecars
+        #     else:
+        #       Opt.none BlobSidecars
 
-        if blobsOk:
-          debug "Loaded parent block from storage", parent_root
-          self[].enqueueBlock(
-            MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs, Opt.none(DataColumnSidecars))
+        # if blobsOk:
+        #   debug "Loaded parent block from storage", parent_root
+        #   self[].enqueueBlock(
+        #     MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs, Opt.none(DataColumnSidecars))
 
         var columnsOk = true
         let data_columns =
@@ -856,13 +856,13 @@ proc storeBlock(
               error = res.error()
             continue
 
-          if self.blobQuarantine[].hasBlobs(forkyBlck):
-            let blobs = self.blobQuarantine[].popBlobs(
-              forkyBlck.root, forkyBlck)
-            self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs), Opt.none(DataColumnSidecars))
-          else:
-            discard self.consensusManager.quarantine[].addBlobless(
-              dag.finalizedHead.slot, forkyBlck)
+          # if self.blobQuarantine[].hasBlobs(forkyBlck):
+          #   let blobs = self.blobQuarantine[].popBlobs(
+          #     forkyBlck.root, forkyBlck)
+          #   self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs), Opt.none(DataColumnSidecars))
+          # else:
+          #   discard self.consensusManager.quarantine[].addBlobless(
+          #     dag.finalizedHead.slot, forkyBlck)
 
           if self.dataColumnQuarantine[].hasDataColumns(forkyBlck):
             let data_columns = self.dataColumnQuarantine[].popDataColumns(
diff --git a/beacon_chain/spec/datatypes/eip7594.nim b/beacon_chain/spec/datatypes/eip7594.nim
index fff0db6c4..9e7ec21fa 100644
--- a/beacon_chain/spec/datatypes/eip7594.nim
+++ b/beacon_chain/spec/datatypes/eip7594.nim
@@ -10,8 +10,7 @@
 import
   std/[sequtils],
   "."/[altair, base, deneb],
-  kzg4844,
-  stew/[byteutils]
+  kzg4844
 
 from std/sequtils import mapIt
 from std/strutils import join
@@ -27,7 +26,7 @@ const
   CELLS_PER_EXT_BLOB* = FIELD_ELEMENTS_PER_EXT_BLOB div FIELD_ELEMENTS_PER_CELL
   # The number of cells in an extended blob |
   # RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN = 'RCKZGCBATCH__V1_'
-  KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH* = 4 # TODO dedupe vs network
+  KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH* = 4
 
 type
   BLSFieldElement* = KzgBytes32
diff --git a/beacon_chain/spec/eip7594_helpers.nim b/beacon_chain/spec/eip7594_helpers.nim
index d6a44b6ca..f28aa90c2 100644
--- a/beacon_chain/spec/eip7594_helpers.nim
+++ b/beacon_chain/spec/eip7594_helpers.nim
@@ -53,8 +53,11 @@ proc sortedColumnIndexList*(columnsPerSubnet: ColumnIndex,
 proc get_custody_column_subnet*(node_id: NodeId,
                                 custody_subnet_count: uint64):
                                 Result[HashSet[uint64], cstring] =
-  # fetches the subnets for custody column for the current node
-  # assert custody_subnet_count <= DATA_COLUMN_SIDECAR_SUBNET_COUNT
+
+  # Decouples the custody subnet computation part from
+  # `get_custody_columns`, in order to later use this subnet list
+  # in order to maintain subscription to specific column subnets.
+
   if not (custody_subnet_count <= DATA_COLUMN_SIDECAR_SUBNET_COUNT):
     return err("Eip7594: Custody subnet count exceeds the DATA_COLUMN_SIDECAR_SUBNET_COUNT")
 
@@ -64,11 +67,19 @@ proc get_custody_column_subnet*(node_id: NodeId,
 
 
   while subnet_ids.len < int(custody_subnet_count):
-    var subnet_id_bytes: array[8, byte]
-    subnet_id_bytes[0..7] = current_id.toBytesLE().toOpenArray(0,7)
+    var
+      current_id_bytes: array[32, byte]
+      hashed_bytes: array[8, byte]
+
+    current_id_bytes = current_id.toBytesBE()
+    current_id_bytes.reverse()
 
-    var subnet_id = bytes_to_uint64(subnet_id_bytes) mod
-      DATA_COLUMN_SIDECAR_SUBNET_COUNT
+    let
+      hashed_current_id = eth2digest(current_id_bytes)
+
+    hashed_bytes[0..7] = hashed_current_id.data.toOpenArray(0,7)
+    var subnet_id = bytes_to_uint64(hashed_bytes) mod
+      DATA_COLUMN_SIDECAR_SUBNET_COUNT
 
     if subnet_id notin subnet_ids:
       subnet_ids.incl(subnet_id)
@@ -78,12 +89,9 @@ proc get_custody_column_subnet*(node_id: NodeId,
       current_id = NodeId(StUint[256].zero)
     current_id += NodeId(StUint[256].one)
 
-  # assert len(subnet_ids) == len(set(subnet_ids))
-  if not (subnet_ids.len == subnet_ids.len):
-    return err("Eip7594: Subnet ids are not unique")
-
   ok(subnet_ids)
 
+
 # https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/specs/_features/eip7594/das-core.md#get_custody_columns
 proc get_custody_columns*(node_id: NodeId,
                           custody_subnet_count: uint64):
diff --git a/tests/consensus_spec/test_fixture_networking b/tests/consensus_spec/test_fixture_networking
new file mode 100755
index 000000000..99b1fc0fc
Binary files /dev/null and b/tests/consensus_spec/test_fixture_networking differ
diff --git a/vendor/nim-eth2-scenarios b/vendor/nim-eth2-scenarios
index fc7a45a73..4748d8387 160000
--- a/vendor/nim-eth2-scenarios
+++ b/vendor/nim-eth2-scenarios
@@ -1 +1 @@
-Subproject commit fc7a45a731736248b96ad5827a8356c0e14d3b8c
+Subproject commit 4748d838797fd42bcb57c38f682adcb4522a152a
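
Note on the eip7594_helpers.nim change: the reworked `get_custody_column_subnet` now follows the linked consensus-specs `get_custody_columns` derivation. Each candidate subnet comes from SHA-256-hashing the little-endian 32-byte serialization of a wrapping 256-bit counter seeded with the node ID (the `toBytesBE` plus `reverse` pair produces the little-endian form), taking the first 8 digest bytes as a little-endian uint64, and reducing modulo DATA_COLUMN_SIDECAR_SUBNET_COUNT until enough distinct subnets are collected. The sketch below is a self-contained Python illustration of that derivation, not code from this diff; the preset constants and the column-expansion helper are assumptions based on the referenced spec draft and may differ from the pinned version.

    import hashlib

    # Assumed preset values; check the pinned consensus-specs version.
    DATA_COLUMN_SIDECAR_SUBNET_COUNT = 32
    NUMBER_OF_COLUMNS = 128
    UINT256_MAX = 2**256 - 1

    def get_custody_column_subnets(node_id: int, custody_subnet_count: int) -> set[int]:
        assert custody_subnet_count <= DATA_COLUMN_SIDECAR_SUBNET_COUNT
        subnet_ids: set[int] = set()
        current_id = node_id
        while len(subnet_ids) < custody_subnet_count:
            # sha256(uint_to_bytes(current_id))[0:8], little-endian on both sides
            digest = hashlib.sha256(current_id.to_bytes(32, "little")).digest()
            subnet_ids.add(
                int.from_bytes(digest[:8], "little") % DATA_COLUMN_SIDECAR_SUBNET_COUNT)
            if current_id == UINT256_MAX:
                current_id = 0  # overflow protection, as in the spec
            current_id += 1
        return subnet_ids

    def get_custody_columns(node_id: int, custody_subnet_count: int) -> list[int]:
        # Expand each custody subnet into the column indices it carries.
        subnet_ids = get_custody_column_subnets(node_id, custody_subnet_count)
        columns_per_subnet = NUMBER_OF_COLUMNS // DATA_COLUMN_SIDECAR_SUBNET_COUNT
        return sorted(
            DATA_COLUMN_SIDECAR_SUBNET_COUNT * i + subnet_id
            for i in range(columns_per_subnet)
            for subnet_id in subnet_ids)

    # Example: with the assumed presets, 4 custody subnets map to 4 * (128 // 32) = 16 columns.
    print(len(get_custody_columns(node_id=123456789, custody_subnet_count=4)))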