upstream peerdas alpha3-related spec changes + fix upstream-related issues (#6468)

* reworked some of the das core specs, PR'd to check whether the conflicting type issue is specific to my machine or not

* bumped nim-blscurve to 9c6e80c6109133c0af3025654f5a8820282cff05, same as unstable

* bumped nim-eth2-scenarios and nim-nat-traversal to be on par with unstable, added more patches, made the peerdas devnet branch backward compatible; peerdas now passes the new ssz tests as per alpha3; disabled electra fixture tests, as the branch hasn't been rebased for a while

* refactor test fixture files

* rm: serializeDataColumn

* refactor: moved the data columns extracted from blobs during block proposal to the heap

* disable blob broadcast in peerdas devnet

* fix addBlock in message router

* fix: data column iterator

* added debug checkpoints to check CI

* refactor if/else conditions

* add: updated das core specs to alpha 3; unit tests pass
Agnish Ghosh 2024-08-05 19:27:39 +05:30 committed by GitHub
parent 20e6b189e8
commit b32205de7c
50 changed files with 620 additions and 3876 deletions

.gitmodules

@@ -212,9 +212,9 @@
branch = main
[submodule "vendor/nim-kzg4844"]
path = vendor/nim-kzg4844
url = https://github.com/status-im/nim-kzg4844.git
url = https://github.com/agnxsh/nim-kzg4844.git
ignore = untracked
branch = peerdas
branch = ckzg-7594/upstream
[submodule "vendor/nim-results"]
path = vendor/nim-results
url = https://github.com/arnetheduck/nim-results.git


@@ -445,17 +445,6 @@ OK: 9/9 Fail: 0/9 Skip: 0/9
OK: 253/253 Fail: 0/253 Skip: 0/253
## EF - KZG - EIP7594
```diff
+ KZG - Compute Cells - compute_cells_case_invalid_blob_26555bdcbf18a267 OK
+ KZG - Compute Cells - compute_cells_case_invalid_blob_79fb3cb1ef585a86 OK
+ KZG - Compute Cells - compute_cells_case_invalid_blob_7e99dea8893c104a OK
+ KZG - Compute Cells - compute_cells_case_invalid_blob_9d88c33852eb782d OK
+ KZG - Compute Cells - compute_cells_case_valid_419245fbfe69f145 OK
+ KZG - Compute Cells - compute_cells_case_valid_4aedd1a2a3933c3e OK
+ KZG - Compute Cells - compute_cells_case_valid_6e773f256383918c OK
+ KZG - Compute Cells - compute_cells_case_valid_b0731ef77b166ca8 OK
+ KZG - Compute Cells - compute_cells_case_valid_b81d309b22788820 OK
+ KZG - Compute Cells - compute_cells_case_valid_ed8b5001151417d5 OK
+ KZG - Compute Cells - compute_cells_case_valid_edeb8500a6507818 OK
+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_26555bdcbf OK
+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_79fb3cb1ef OK
+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_invalid_blob_7e99dea889 OK
@@ -467,93 +456,8 @@ OK: 253/253 Fail: 0/253 Skip: 0/253
+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_b81d309b22788820 OK
+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_ed8b5001151417d5 OK
+ KZG - Compute Cells And Proofs - compute_cells_and_kzg_proofs_case_valid_edeb8500a6507818 OK
+ KZG - Recover All Cells - recover_all_cells_case_invalid_all_cells_are_missing_f46bf2cbb03 OK
+ KZG - Recover All Cells - recover_all_cells_case_invalid_cell_0f26a378535d3131 OK
+ KZG - Recover All Cells - recover_all_cells_case_invalid_cell_7a3f7f2910fe230a OK
+ KZG - Recover All Cells - recover_all_cells_case_invalid_cell_8be2d351449aa7b6 OK
+ KZG - Recover All Cells - recover_all_cells_case_invalid_cell_e1ac5e027103239d OK
+ KZG - Recover All Cells - recover_all_cells_case_invalid_cell_id_be00192b1a139275 OK
+ KZG - Recover All Cells - recover_all_cells_case_invalid_duplicate_cell_id_988d8aa16e4ef84 OK
+ KZG - Recover All Cells - recover_all_cells_case_invalid_more_cell_ids_than_cells_8eaea8a3 OK
+ KZG - Recover All Cells - recover_all_cells_case_invalid_more_cells_than_cell_ids_a2b10ac8 OK
+ KZG - Recover All Cells - recover_all_cells_case_invalid_more_than_half_missing_474f5c5c2a OK
+ KZG - Recover All Cells - recover_all_cells_case_valid_half_missing_every_other_cell_ae1b7 OK
+ KZG - Recover All Cells - recover_all_cells_case_valid_half_missing_first_half_bbb851083a6 OK
+ KZG - Recover All Cells - recover_all_cells_case_valid_half_missing_second_half_696b33f5da OK
+ KZG - Recover All Cells - recover_all_cells_case_valid_no_missing_9546b3ad9977aa40 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_cell_30dd1bdc76ff70fb OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_cell_5138cdd3534e8705 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_cell_76140fc51e7da7a5 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_commitment_307f4ebc067c OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_commitment_351fd262b984 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_commitment_71fec3ac464b OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_commitment_736703b3e23d OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_commitment_7c1a1ac24c1f OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_commitment_9624a42384c3 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_commitment_aef3e72488c4 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_proof_0223e6a42aeb7c72 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_proof_0ed7c15183b218d9 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_proof_29635b8440e1e10f OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_proof_504a37d7088fa4e7 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_proof_65e1ad97362a27d8 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_proof_ab041dcc87d0a4fc OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_incorrect_proof_dcf5a8bd294aaa6f OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_cell_1535daa3d170da94 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_cell_1962af1b36fc07b2 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_cell_b9598308bd764e64 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_cell_e29abaaa0519a74f OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_cell_id_683cc4551f0ad97e OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_cell_id_f134fd5b36145b80 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_commitment_ac0c6311a92593 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_commitment_afe4829eb27b14 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_commitment_ebd7c7f8c02f05 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_commitment_fd08e705ede464 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_proof_0c35bb98c57669db OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_proof_25efe063234b38bb OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_proof_50589f444e37d476 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_invalid_proof_f900beacae9218db OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_valid_0c0acf27962a7e82 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_valid_402b30d8dc9e972d OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_valid_7db1d069d57ec097 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_valid_b4c7e9397878471c OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_valid_cc46f83ded6d6191 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_valid_dcf3e16a678fadc5 OK
+ KZG - Verify Cell Kzg Proof - verify_cell_kzg_proof_case_valid_f6d5ccfa04edf349 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_incorrect_cell_9ff2df OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_incorrect_proof_59c63 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_incorrect_row_commitm OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_2e1699f9 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_5f0a7e48 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_745046c5 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_cell_83f39012 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_column_index_ OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_missing_cell_ OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_missing_colum OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_missing_proof OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_missing_row_c OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_missing_row_i OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_135836e OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_d592b72 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_e65b54c OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_proof_eded2aa OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_row_commitmen OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_row_commitmen OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_row_commitmen OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_row_commitmen OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_invalid_row_index_55c OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_21b209cb4f64d0e OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_49f1f992af68d85 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_7dc4b00d04efff0 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_abe54dfc8ce6f34 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_ae0a9c4f3313b3d OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_aedf5f25f4e3eea OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_fad5448f3ceb097 OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_multiple_blobs_ OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_same_cell_multi OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_unused_row_comm OK
+ KZG - Verify Cell Kzg Proof Batch - verify_cell_kzg_proof_batch_case_valid_zero_cells_92ee OK
```
OK: 107/107 Fail: 0/107 Skip: 0/107
OK: 11/11 Fail: 0/11 Skip: 0/11
## EF - SSZ generic types
```diff
Testing basic_vector inputs - invalid Skip
@@ -957,9 +861,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
## Spec helpers
```diff
+ build_proof - BeaconState OK
+ hypergeom_cdf OK
+ integer_squareroot OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 3/3 Fail: 0/3 Skip: 0/3
## Specific field types
```diff
+ root update OK
@@ -987,34 +892,6 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
+ isSeen OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
## SyncManager test suite
```diff
+ Process all unviable blocks OK
+ [SyncManager] groupBlobs() test OK
+ [SyncQueue#Backward] Async unordered push test OK
+ [SyncQueue#Backward] Async unordered push with rewind test OK
+ [SyncQueue#Backward] Good response with missing values towards end OK
+ [SyncQueue#Backward] Handle out-of-band sync progress advancement OK
+ [SyncQueue#Backward] Pass through established limits test OK
+ [SyncQueue#Backward] Smoke test OK
+ [SyncQueue#Backward] Start and finish slots equal OK
+ [SyncQueue#Backward] Two full requests success/fail OK
+ [SyncQueue#Backward] getRewindPoint() test OK
+ [SyncQueue#Forward] Async unordered push test OK
+ [SyncQueue#Forward] Async unordered push with rewind test OK
+ [SyncQueue#Forward] Good response with missing values towards end OK
+ [SyncQueue#Forward] Handle out-of-band sync progress advancement OK
+ [SyncQueue#Forward] Pass through established limits test OK
+ [SyncQueue#Forward] Smoke test OK
+ [SyncQueue#Forward] Start and finish slots equal OK
+ [SyncQueue#Forward] Two full requests success/fail OK
+ [SyncQueue#Forward] getRewindPoint() test OK
+ [SyncQueue] checkResponse() test OK
+ [SyncQueue] contains() test OK
+ [SyncQueue] getLastNonEmptySlot() test OK
+ [SyncQueue] hasEndGap() test OK
```
OK: 24/24 Fail: 0/24 Skip: 0/24
## Type helpers
```diff
+ BeaconBlock OK
@@ -1155,4 +1032,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9
---TOTAL---
OK: 804/809 Fail: 0/809 Skip: 5/809
OK: 685/690 Fail: 0/690 Skip: 5/690

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1495,11 +1495,11 @@ proc loadKzgTrustedSetup*(): Result[void, string] =
vendorDir & "/nim-kzg4844/kzg4844/csources/src/trusted_setup.txt")
static: doAssert const_preset in ["mainnet", "gnosis", "minimal"]
Kzg.loadTrustedSetupFromString(trustedSetup)
Kzg.loadTrustedSetupFromString(trustedSetup, 0)
proc loadKzgTrustedSetup*(trustedSetupPath: string): Result[void, string] =
try:
Kzg.loadTrustedSetupFromString(readFile(trustedSetupPath))
Kzg.loadTrustedSetupFromString(readFile(trustedSetupPath), 0)
except IOError as err:
err(err.msg)
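
For context on this signature change: the updated nim-kzg4844 bindings take an extra trailing argument when loading the trusted setup, and the diff passes 0 throughout — presumably the precompute level, with 0 keeping the library default. A minimal sketch of the updated call shape; `loadSetupFromFile` is an illustrative wrapper, not code from this commit:

```nim
import results                  # Result / err / ok
# `Kzg` and `loadTrustedSetupFromString` come from the vendored
# nim-kzg4844 bindings shown in the diff above.

proc loadSetupFromFile(path: string): Result[void, string] =
  ## Sketch: load a trusted setup, passing the new trailing
  ## `precompute` argument (0 = library default).
  try:
    Kzg.loadTrustedSetupFromString(readFile(path), 0)
  except IOError as err:
    err(err.msg)
```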


@@ -491,9 +491,11 @@ func asConsensusType*(payload: engine_api.GetPayloadV3Response):
# Both are defined as `array[N, byte]` under the hood.
blobsBundle: deneb.BlobsBundle(
commitments: KzgCommitments.init(
payload.blobsBundle.commitments.mapIt(it.bytes)),
payload.blobsBundle.commitments.mapIt(
kzg_abi.KzgCommitment(bytes: it.bytes))),
proofs: KzgProofs.init(
payload.blobsBundle.proofs.mapIt(it.bytes)),
payload.blobsBundle.proofs.mapIt(
kzg_abi.KzgProof(bytes: it.bytes))),
blobs: Blobs.init(
payload.blobsBundle.blobs.mapIt(it.bytes))))
@@ -560,9 +562,11 @@ func asConsensusType*(payload: engine_api.GetPayloadV4Response):
# Both are defined as `array[N, byte]` under the hood.
blobsBundle: deneb.BlobsBundle(
commitments: KzgCommitments.init(
payload.blobsBundle.commitments.mapIt(it.bytes)),
payload.blobsBundle.commitments.mapIt(
kzg_abi.KzgCommitment(bytes: it.bytes))),
proofs: KzgProofs.init(
payload.blobsBundle.proofs.mapIt(it.bytes)),
payload.blobsBundle.proofs.mapIt(
kzg_abi.KzgProof(bytes: it.bytes))),
blobs: Blobs.init(
payload.blobsBundle.blobs.mapIt(it.bytes))))
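
The repetition in these hunks reflects a single type change in the updated bindings: commitments and proofs are no longer bare `array[48, byte]` values but objects wrapping a `bytes` field, so every call site now constructs the wrapper explicitly. A self-contained sketch of that shape (the real definitions live in `kzg_abi`; sizes per EIP-4844):

```nim
type
  KzgCommitment = object        # illustrative stand-in for kzg_abi's type
    bytes: array[48, byte]

# Before this commit, call sites passed raw byte arrays directly;
# now each raw array is wrapped back into the ABI object.
func toCommitment(raw: array[48, byte]): KzgCommitment =
  KzgCommitment(bytes: raw)
```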


@@ -144,7 +144,7 @@ proc new*(T: type BlockProcessor,
consensusManager: consensusManager,
validatorMonitor: validatorMonitor,
blobQuarantine: blobQuarantine,
dataColumnQuarantine: dataCOlumnQuarantine,
dataColumnQuarantine: dataColumnQuarantine,
getBeaconTime: getBeaconTime,
verifier: BatchVerifier.init(rng, taskpool)
)
@@ -192,25 +192,26 @@ proc storeBackfillBlock(
# writing the block in case of blob error.
# var blobsOk = true
var columnsOk = true
# when typeof(signedBlock).kind >= ConsensusFork.Deneb:
# if blobsOpt.isSome:
# let blobs = blobsOpt.get()
# let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq
# if blobs.len > 0 or kzgCommits.len > 0:
# let r = validate_blobs(kzgCommits, blobs.mapIt(it.blob),
# blobs.mapIt(it.kzg_proof))
# if r.isErr():
# debug "backfill blob validation failed",
# blockRoot = shortLog(signedBlock.root),
# blobs = shortLog(blobs),
# blck = shortLog(signedBlock.message),
# kzgCommits = mapIt(kzgCommits, shortLog(it)),
# signature = shortLog(signedBlock.signature),
# msg = r.error()
# blobsOk = r.isOk()
var blobsOk = true
when typeof(signedBlock).kind >= ConsensusFork.Deneb:
if blobsOpt.isSome:
let blobs = blobsOpt.get()
let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq
if blobs.len > 0 or kzgCommits.len > 0:
let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)),
blobs.mapIt(it.kzg_proof))
if r.isErr():
debug "backfill blob validation failed",
blockRoot = shortLog(signedBlock.root),
blobs = shortLog(blobs),
blck = shortLog(signedBlock.message),
kzgCommits = mapIt(kzgCommits, shortLog(it)),
signature = shortLog(signedBlock.signature),
msg = r.error()
blobsOk = r.isOk()
# if not blobsOk:
# return err(VerifierError.Invalid)
if not blobsOk:
return err(VerifierError.Invalid)
when typeof(signedBlock).kind >= ConsensusFork.Deneb:
if dataColumnsOpt.isSome:
@@ -221,7 +222,6 @@ proc storeBackfillBlock(
if r.isErr():
debug "backfill datacolumn validation failed",
blockRoot = shortLog(signedBlock.root),
data_column = shortLog(data_columns[i][]),
blck = shortLog(signedBlock.message),
signature = shortLog(signedBlock.signature),
msg = r.error()
@@ -632,7 +632,7 @@ proc storeBlock(
let blobs = blobsOpt.get()
let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq
if blobs.len > 0 or kzgCommits.len > 0:
let r = validate_blobs(kzgCommits, blobs.mapIt(it.blob),
let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)),
blobs.mapIt(it.kzg_proof))
if r.isErr():
debug "blob validation failed",
@@ -653,7 +653,6 @@ proc storeBlock(
if r.isErr():
debug "data column sidecar verification failed",
blockroot = shortLog(signedBlock.root),
column = shortLog(data_column_sidecars[i][].column),
blck = shortLog(signedBlock.message),
kzgCommits =
mapIt(data_column_sidecars[i][].kzg_commitments,
@@ -854,20 +853,21 @@ proc storeBlock(
else:
if len(forkyBlck.message.body.blob_kzg_commitments) == 0:
self[].enqueueBlock(
MsgSource.gossip, quarantined, Opt.none(BlobSidecars), Opt.some(DataColumnSidecars @[]))
MsgSource.gossip, quarantined, Opt.some(BlobSidecars @[]), Opt.some(DataColumnSidecars @[]))
else:
if (let res = checkBloblessSignature(self[], forkyBlck); res.isErr):
warn "Failed to verify signature of unorphaned blobless/columnless block",
blck = shortLog(forkyBlck),
error = res.error()
continue
# if self.blobQuarantine[].hasBlobs(forkyBlck):
# let blobs = self.blobQuarantine[].popBlobs(
# forkyBlck.root, forkyBlck)
# self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs), Opt.none(DataColumnSidecars))
# else:
# discard self.consensusManager.quarantine[].addBlobless(
# dag.finalizedHead.slot, forkyBlck)
if self.blobQuarantine[].hasBlobs(forkyBlck):
let blobs = self.blobQuarantine[].popBlobs(
forkyBlck.root, forkyBlck)
self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs), Opt.none(DataColumnSidecars))
else:
discard self.consensusManager.quarantine[].addBlobless(
dag.finalizedHead.slot, forkyBlck)
if self.dataColumnQuarantine[].hasDataColumns(forkyBlck):
let data_columns = self.dataColumnQuarantine[].popDataColumns(
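
One behavioural detail in this hunk: a block without blob commitments is now enqueued with a present-but-empty `BlobSidecars` list instead of `Opt.none`, which lets downstream code distinguish "known to carry no blobs" from "blobs not fetched yet". A sketch of the distinction, reusing names from the diff (surrounding module context assumed):

```nim
if forkyBlck.message.body.blob_kzg_commitments.len == 0:
  # No blobs can exist for this block: signal that explicitly.
  self[].enqueueBlock(
    MsgSource.gossip, quarantined,
    Opt.some(BlobSidecars @[]),          # empty, not absent
    Opt.some(DataColumnSidecars @[]))
else:
  # Blobs exist but may still be in quarantine; the block waits as
  # "blobless" until they arrive (see the restored code above).
  discard
```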


@@ -445,60 +445,61 @@ proc checkForPotentialDoppelganger(
attestation = shortLog(attestation)
quitDoppelganger()
proc processDataColumnReconstruction*(
self: ref Eth2Processor,
node: Eth2Node,
signed_block: deneb.SignedBeaconBlock |
electra.SignedBeaconBlock):
Future[ValidationRes] {.async: (raises: [CancelledError]).} =
#TODO: need to revamp `recover_blobs` and rewrite this
# proc processDataColumnReconstruction*(
# self: ref Eth2Processor,
# node: Eth2Node,
# signed_block: deneb.SignedBeaconBlock |
# electra.SignedBeaconBlock):
# Future[ValidationRes] {.async: (raises: [CancelledError]).} =
let
dag = self.dag
root = signed_block.root
custodiedColumnIndices = get_custody_columns(
node.nodeId,
CUSTODY_REQUIREMENT)
# let
# dag = self.dag
# root = signed_block.root
# custodiedColumnIndices = get_custody_columns(
# node.nodeId,
# CUSTODY_REQUIREMENT)
var
data_column_sidecars: seq[DataColumnSidecar]
columnsOk = true
storedColumns: seq[ColumnIndex]
# var
# data_column_sidecars: seq[DataColumnSidecar]
# columnsOk = true
# storedColumns: seq[ColumnIndex]
# Loading the data columns from the database
for custody_column in custodiedColumnIndices.get:
let data_column = DataColumnSidecar.new()
if not dag.db.getDataColumnSidecar(root, custody_column, data_column[]):
columnsOk = false
break
data_column_sidecars.add data_column[]
storedColumns.add data_column.index
# # Loading the data columns from the database
# for custody_column in custodiedColumnIndices.get:
# let data_column = DataColumnSidecar.new()
# if not dag.db.getDataColumnSidecar(root, custody_column, data_column[]):
# columnsOk = false
# break
# data_column_sidecars.add data_column[]
# storedColumns.add data_column.index
if columnsOk:
debug "Loaded data column for reconstruction"
# if columnsOk:
# debug "Loaded data column for reconstruction"
# storedColumn number is less than the NUMBER_OF_COLUMNS
# then reconstruction is not possible, and if all the data columns
# are already stored then we do not need to reconstruct at all
if storedColumns.len < NUMBER_OF_COLUMNS or storedColumns.len == NUMBER_OF_COLUMNS:
return ok()
else:
return errIgnore ("DataColumnSidecar: Reconstruction error!")
# # storedColumn number is less than the NUMBER_OF_COLUMNS
# # then reconstruction is not possible, and if all the data columns
# # are already stored then we do not need to reconstruct at all
# if storedColumns.len < NUMBER_OF_COLUMNS or storedColumns.len == NUMBER_OF_COLUMNS:
# return ok()
# else:
# return errIgnore ("DataColumnSidecar: Reconstruction error!")
# Recover blobs from saved data column sidecars
let recovered_blobs = recover_blobs(data_column_sidecars, storedColumns.len, signed_block)
if not recovered_blobs.isOk:
return errIgnore ("Error recovering blobs from data columns")
# # Recover blobs from saved data column sidecars
# let recovered_blobs = recover_blobs(data_column_sidecars, storedColumns.len, signed_block)
# if not recovered_blobs.isOk:
# return errIgnore ("Error recovering blobs from data columns")
# Reconstruct data column sidecars from recovered blobs
let reconstructedDataColumns = get_data_column_sidecars(signed_block, recovered_blobs.get)
# # Reconstruct data column sidecars from recovered blobs
# let reconstructedDataColumns = get_data_column_sidecars(signed_block, recovered_blobs.get)
for data_column in data_column_sidecars:
if data_column.index notin custodiedColumnIndices.get:
continue
# for data_column in data_column_sidecars:
# if data_column.index notin custodiedColumnIndices.get:
# continue
dag.db.putDataColumnSidecar(data_column)
# dag.db.putDataColumnSidecar(data_column)
ok()
# ok()
proc processAttestation*(
self: ref Eth2Processor, src: MsgSource,


@@ -474,7 +474,7 @@ proc validateBlobSidecar*(
# blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof)`.
block:
let ok = verifyProof(
blob_sidecar.blob,
KzgBlob(bytes: blob_sidecar.blob),
blob_sidecar.kzg_commitment,
blob_sidecar.kzg_proof).valueOr:
return dag.checkedReject("BlobSidecar: blob verify failed")


@@ -408,35 +408,41 @@ proc initFullNode(
Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} =
withBlck(signedBlock):
when consensusFork >= ConsensusFork.Deneb:
# if not blobQuarantine[].hasBlobs(forkyBlck):
# # We don't have all the blobs for this block, so we have
# # to put it in blobless quarantine.
# if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck):
# err(VerifierError.UnviableFork)
# else:
# err(VerifierError.MissingParent)
# elif blobQuarantine[].hasBlobs(forkyBlck):
# let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck)
# await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
# Opt.some(blobs), Opt.none(DataColumnSidecars),
# maybeFinalized = maybeFinalized)
if not dataColumnQuarantine[].hasDataColumns(forkyBlck):
# We don't have all the data columns for this block, so we have
# to put it in columnless quarantine.
if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck):
if not blobQuarantine[].hasBlobs(forkyBlck):
# We don't have all the blobs for this block, so we have
# to put it in blobless quarantine.
if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck):
err(VerifierError.UnviableFork)
else:
err(VerifierError.MissingParent)
else:
let data_columns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, forkyBlck)
let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck)
await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
Opt.none(BlobSidecars), Opt.some(data_columns),
Opt.some(blobs), Opt.none(DataColumnSidecars),
maybeFinalized = maybeFinalized)
else:
await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
Opt.none(BlobSidecars), Opt.none(DataColumnSidecars),
maybeFinalized = maybeFinalized)
# when consensusFork >= ConsensusFork.Deneb:
# if not dataColumnQuarantine[].hasDataColumns(forkyBlck):
# # We don't have all the data columns for this block, so we have
# # to put it in columnless quarantine.
# if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck):
# err(VerifierError.UnviableFork)
# else:
# err(VerifierError.MissingParent)
# else:
# let data_columns = dataColumnQuarantine[].popDataColumns(forkyBlck.root, forkyBlck)
# await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
# Opt.none(BlobSidecars), Opt.some(data_columns),
# maybeFinalized = maybeFinalized)
# else:
# await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
# Opt.none(BlobSidecars), Opt.none(DataColumnSidecars),
# maybeFinalized = maybeFinalized)
rmanBlockLoader = proc(
blockRoot: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] =
dag.getForkedBlock(blockRoot)


@@ -74,7 +74,7 @@ export
tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto,
digest, presets
const SPEC_VERSION* = "1.5.0-alpha.2"
const SPEC_VERSION* = "1.5.0-alpha.3"
## Spec version we're aiming to be compatible with, right now
const


@@ -528,7 +528,7 @@ func initHashedBeaconState*(s: BeaconState): HashedBeaconState =
HashedBeaconState(data: s)
func shortLog*(v: KzgCommitment | KzgProof): auto =
to0xHex(v)
to0xHex(v.bytes)
func shortLog*(v: Blob): auto =
to0xHex(v.toOpenArray(0, 31))


@@ -41,6 +41,7 @@ type
CellID* = uint64
RowIndex* = uint64
ColumnIndex* = uint64
CellIndex* = uint64
const
NUMBER_OF_COLUMNS* = 128
@@ -78,17 +79,19 @@ type
CscBits* = BitArray[DATA_COLUMN_SIDECAR_SUBNET_COUNT]
func serializeDataColumn(data_column: DataColumn): auto =
var counter = 0
var serd : array[MAX_BLOB_COMMITMENTS_PER_BLOCK * KzgCellSize, byte]
for i in 0..<MAX_BLOB_COMMITMENTS_PER_BLOCK:
for j in 0..<KzgCellSize:
serd[counter] = data_column[i][j]
inc(counter)
serd
# func serializeDataColumn(data_column: DataColumn): auto =
# var counter = 0
# var serd : array[MAX_BLOB_COMMITMENTS_PER_BLOCK * BYTES_PER_CELL, byte]
# for i in 0..<MAX_BLOB_COMMITMENTS_PER_BLOCK:
# var inter: array[BYTES_PER_CELL, byte]
# inter = data_column[i].bytes
# for j in 0..<BYTES_PER_CELL:
# serd[counter] = inter[j].byte
# inc(counter)
# serd
func shortLog*(v: DataColumn): auto =
to0xHex(v.serializeDataColumn())
# func shortLog*(v: DataColumn): auto =
# to0xHex(v.serializeDataColumn())
func shortLog*(v: DataColumnSidecar): auto =
(


@@ -81,125 +81,114 @@ proc get_custody_columns*(node_id: NodeId,
ok(sortedColumnIndices(ColumnIndex(columns_per_subnet), subnet_ids))
# https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/specs/_features/eip7594/das-core.md#compute_extended_matrix
proc compute_extended_matrix* (blobs: seq[KzgBlob]): Result[ExtendedMatrix, cstring] =
# This helper demonstrates the relationship between blobs and `ExtendedMatrix`
var extended_matrix: ExtendedMatrix
for i in 0..<blobs.len:
let res = computeCells(blobs[i])
if res.isErr:
return err("Error computing kzg cells and kzg proofs")
discard extended_matrix.add(res.get())
ok(extended_matrix)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/_features/eip7594/das-core.md#compute_extended_matrix
proc compute_extended_matrix* (blobs: seq[KzgBlob]): Result[seq[MatrixEntry], cstring] =
# This helper demonstrates the relationship between blobs and the `MatrixEntries`
var extended_matrix: seq[MatrixEntry]
# https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/specs/_features/eip7594/das-core.md#recover_matrix
proc recover_matrix*(cells_dict: Table[(BlobIndex, CellID), Cell],
blobCount: uint64):
Result[ExtendedMatrix, cstring] =
# This helper demonstrates how to apply recover_all_cells
# The data structure for storing cells is implementation-dependent
for blbIdx, blob in blobs.pairs:
let cellsAndProofs = computeCellsAndKzgProofs(blob)
if not cellsAndProofs.isOk:
return err("Computing Extended Matrix: Issue computing cells and proofs")
var extended_matrix: ExtendedMatrix
for blobIndex in 0'u64..<blobCount:
var
cellIds: seq[CellID] = @[]
blIdx: BlobIndex
cellId: CellID
let key = (blIdx, cellId)
for key, cell in pairs(cells_dict):
if blIdx == blobIndex:
cellIds.add(cellId)
var cells: seq[Cell]
for cellId in cellIds:
var interim_key = (BlobIndex(blobIndex), cellId)
if cells_dict.hasKey(interim_key):
try:
let cell = cells_dict[interim_key]
cells.add(cell)
except:
debug "DataColumn: Key not found in Cell Dictionary", interim_key
let allCellsForRow = recoverAllCells(cellIds, cells)
let check = extended_matrix.add(allCellsForRow.get())
doAssert check == true, "DataColumn: Could not add cells to the extended matrix"
for i in 0..<eip7594.CELLS_PER_EXT_BLOB:
extended_matrix.add(MatrixEntry(
cell: cellsAndProofs.get.cells[i],
kzg_proof: cellsAndProofs.get.proofs[i],
row_index: blbIdx.uint64,
column_index: i.uint64
))
ok(extended_matrix)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/_features/eip7594/das-core.md#recover_matrix
proc recover_matrix*(partial_matrix: seq[MatrixEntry],
blobCount: int):
Result[seq[MatrixEntry], cstring] =
# This helper demonstrates how to apply recover_cells_and_kzg_proofs
# The data structure for storing cells is implementation-dependent
var extended_matrix: seq[MatrixEntry]
for blob_index in 0..<blobCount:
var
cell_indices: seq[CellID]
cells: seq[Cell]
proofs: seq[KzgProof]
for e in partial_matrix:
if e.row_index == uint64(blob_index):
cell_indices.add(e.column_index)
cells.add(e.cell)
proofs.add(e.kzg_proof)
proc recover_blobs*(
data_columns: seq[DataColumnSidecar],
columnCount: int,
blck: deneb.SignedBeaconBlock |
electra.SignedBeaconBlock |
ForkySignedBeaconBlock):
Result[seq[KzgBlob], cstring] =
# This helper recovers blobs from the data column sidecars
if not (data_columns.len != 0):
return err("DataColumnSidecar: Length should not be 0")
var blobCount = data_columns[0].column.len
for data_column in data_columns:
if not (blobCount == data_column.column.len):
return err ("DataColumns do not have the same length")
var recovered_blobs = newSeqOfCap[KzgBlob](blobCount)
for blobIdx in 0 ..< blobCount:
var
cell_ids = newSeqOfCap[CellID](columnCount)
ckzgCells = newSeqOfCap[KzgCell](columnCount)
for data_column in data_columns:
cell_ids.add(data_column.index)
let recoveredCellsAndKzgProofs =
recoverCellsAndKzgProofs(cell_indices, cells)
if not recoveredCellsAndKzgProofs.isOk:
return err("Issue in recovering cells and proofs")
for i in 0..<recoveredCellsAndKzgProofs.get.cells.len:
let
column = data_column.column
cell = column[blobIdx]
cell = recoveredCellsAndKzgProofs.get.cells[i]
proof = recoveredCellsAndKzgProofs.get.proofs[i]
extended_matrix.add(MatrixEntry(
cell: cell,
kzg_proof: proof,
row_index: blob_index.uint64,
column_index: i.uint64
))
# Transform the cell as a ckzg cell
var ckzgCell: Cell
for i in 0 ..< int(FIELD_ELEMENTS_PER_CELL):
var start = 32 * i
for j in 0 ..< 32:
ckzgCell[start + j] = cell[start+j]
ok(extended_matrix)
ckzgCells.add(ckzgCell)
## THIS METHOD IS DEPRECATED, WILL BE REMOVED ONCE ALPHA 4 IS RELEASED
# proc recover_blobs*(
# data_columns: seq[DataColumnSidecar],
# columnCount: int,
# blck: deneb.SignedBeaconBlock |
# electra.SignedBeaconBlock |
# ForkySignedBeaconBlock):
# Result[seq[KzgBlob], cstring] =
# Recovering the blob
let recovered_cells = recoverAllCells(cell_ids, ckzgCells)
if not recovered_cells.isOk:
return err ("Recovering all cells for blob failed")
# # This helper recovers blobs from the data column sidecars
# if not (data_columns.len != 0):
# return err("DataColumnSidecar: Length should not be 0")
let recovered_blob_res = cellsToBlob(recovered_cells.get)
if not recovered_blob_res.isOk:
return err ("Cells to blob for blob failed")
# var blobCount = data_columns[0].column.len
# for data_column in data_columns:
# if not (blobCount == data_column.column.len):
# return err ("DataColumns do not have the same length")
recovered_blobs.add(recovered_blob_res.get)
# var recovered_blobs = newSeqOfCap[KzgBlob](blobCount)
ok(recovered_blobs)
# for blobIdx in 0 ..< blobCount:
# var
# cell_ids = newSeqOfCap[CellID](columnCount)
# ckzgCells = newSeqOfCap[KzgCell](columnCount)
# for data_column in data_columns:
# cell_ids.add(data_column.index)
# let
# column = data_column.column
# cell = column[blobIdx]
# # Transform the cell as a ckzg cell
# var ckzgCell: Cell
# for i in 0 ..< int(FIELD_ELEMENTS_PER_CELL):
# var start = 32 * i
# for j in 0 ..< 32:
# ckzgCell[start + j] = cell[start+j]
# ckzgCells.add(ckzgCell)
# # Recovering the blob
# let recovered_cells = recoverAllCells(cell_ids, ckzgCells)
# if not recovered_cells.isOk:
# return err ("Recovering all cells for blob failed")
# let recovered_blob_res = cellsToBlob(recovered_cells.get)
# if not recovered_blob_res.isOk:
# return err ("Cells to blob for blob failed")
# recovered_blobs.add(recovered_blob_res.get)
# ok(recovered_blobs)
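
To summarise the alpha3 data-structure change in this file: the 2-D `ExtendedMatrix` of bare cells becomes a flat `seq[MatrixEntry]`, where each entry carries its cell, its proof, and explicit `row_index` (blob) and `column_index` (cell) coordinates. Since `compute_extended_matrix` appends entries blob-by-blob in cell order, an entry can be located with one index computation; a hedged sketch assuming that construction order (`entryFor` is illustrative, types from the surrounding module):

```nim
func entryFor(matrix: seq[MatrixEntry],
              blobIndex, cellIndex: uint64): MatrixEntry =
  ## Locate the entry for (blob, cell), assuming the append order used
  ## by compute_extended_matrix above.
  matrix[int(blobIndex * uint64(CELLS_PER_EXT_BLOB) + cellIndex)]
```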
proc compute_signed_block_header(signed_block: deneb.SignedBeaconBlock |
electra.SignedBeaconBlock):
@@ -217,6 +206,48 @@ proc compute_signed_block_header(signed_block: deneb.SignedBeaconBlock |
signature: signed_block.signature
)
# https://github.com/ethereum/consensus-specs/blob/bb8f3caafc92590cdcf2d14974adb602db9b5ca3/specs/_features/eip7594/das-core.md#get_data_column_sidecars
proc get_data_column_sidecars*(signed_block: deneb.SignedBeaconBlock |
electra.SignedBeaconBlock,
cellsAndProofs: CellsAndProofs):
Result[seq[DataColumnSidecar], string] =
# Given a signed block and the cells/proofs associated with each blob
# in the block, assemble the sidecars which can be distributed to peers.
var
blck = signed_block.message
signed_beacon_block_header =
compute_signed_block_header(signed_block)
kzg_incl_proof: array[4, Eth2Digest]
var sidecars = newSeq[DataColumnSidecar](CELLS_PER_EXT_BLOB)
if cellsAndProofs.cells.len == 0 or
cellsAndProofs.proof.len == 0:
return ok(sidecars)
for column_index in 0..<NUMBER_OF_COLUMNS:
var
column_cells: DataColumn
column_proofs: KzgProofs
for i in 0..<cellsAndProofs.cells.len:
let check1 = column_cells.add(cellsAndProofs.cells[column_index])
doAssert check1 == true, "Issue fetching cell from CellsAndProofs"
let check2 = column_proofs.add(cellsAndProofs.proofs[column_index])
doAssert check2 == true, "Issue fetching proof from CellsAndProofs"
var sidecar = DataColumnSidecar(
index: ColumnIndex(column_index),
column: DataColumn(column_cells),
kzgCommitments: blck.body.blob_kzg_commitments,
kzgProofs: KzgProofs(column_proofs),
signed_block_header: signed_beacon_block_header)
blck.body.build_proof(
27.GeneralizedIndex,
sidecar.kzg_commitments_inclusion_proof).expect("Valid gindex")
sidecars.add(sidecar)
ok(sidecars)
# https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/specs/_features/eip7594/das-core.md#get_data_column_sidecars
proc get_data_column_sidecars*(signed_block: deneb.SignedBeaconBlock |
electra.SignedBeaconBlock,
@@ -248,8 +279,9 @@ proc get_data_column_sidecars*(signed_block: deneb.SignedBeaconBlock |
let blobCount = blobs.len
for columnIndex in 0..<CELLS_PER_EXT_BLOB:
var column: DataColumn
var kzgProofOfColumn: KzgProofs
var
column: DataColumn
kzgProofOfColumn: KzgProofs
for rowIndex in 0..<blobCount:
discard column.add(cells[rowIndex][columnIndex])
@@ -271,11 +303,10 @@ proc get_data_column_sidecars*(signed_block: deneb.SignedBeaconBlock |
# Helper function to `verifyCellKzgProofBatch` at https://github.com/ethereum/c-kzg-4844/blob/das/bindings/nim/kzg_ex.nim#L170
proc validate_data_column_sidecar*(
expected_commitments: seq[KzgCommitment],
rowIndex: seq[RowIndex],
columnIndex: seq[ColumnIndex],
cellIndex: seq[CellIndex],
column: seq[Cell],
proofs: seq[KzgProof]): Result[void, string] =
let res = verifyCellKzgProofBatch(expected_commitments, rowIndex, columnIndex, column, proofs).valueOr:
let res = verifyCellKzgProofBatch(expected_commitments, cellIndex, column, proofs).valueOr:
return err("DataColumnSidecar: Proof verification error: " & error())
if not res:
@@ -298,22 +329,17 @@ proc verify_data_column_sidecar_kzg_proofs*(sidecar: DataColumnSidecar): Result[
if not (sidecar.kzg_commitments.len == sidecar.kzg_proofs.len):
return err("EIP7594: Data column sidecar kzg_commitments length is not equal to the kzg_proofs length")
# Iterate through the row indices
var rowIndices: seq[RowIndex]
for i in 0..<sidecar.column.len:
rowIndices.add(RowIndex(i))
# Iterate through the column indices
var colIndices: seq[ColumnIndex]
# Iterate through the cell indices
var cellIndices: seq[CellIndex]
for _ in 0..<sidecar.column.len:
colIndices.add(sidecar.index * sidecar.column.lenu64)
cellIndices.add(sidecar.index * sidecar.column.lenu64)
let
kzgCommits = sidecar.kzg_commitments.asSeq
sidecarCol = sidecar.column.asSeq
kzgProofs = sidecar.kzg_proofs.asSeq
let res = validate_data_column_sidecar(kzgCommits, rowIndices, colIndices, sidecarCol, kzgProofs)
let res = validate_data_column_sidecar(kzgCommits, cellIndices, sidecarCol, kzgProofs)
if res.isErr():
return err("DataColumnSidecar: validation failed")
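
The net effect of these two hunks: alpha3 folds the old row/column coordinate pair into a single per-cell index, so batch verification now takes four parallel sequences. A hedged sketch of the resulting call shape, mirroring the helper above (the sequences are assumed to be prepared by the caller):

```nim
# All four seqs are parallel: cellIndices[i] locates cells[i] within
# its extended blob, and commitments[i]/proofs[i] belong to it.
let batchOk = verifyCellKzgProofBatch(
  commitments,    # seq[KzgCommitment]
  cellIndices,    # seq[CellIndex], replacing rowIndex + columnIndex
  cells,          # seq[Cell]
  proofs)         # seq[KzgProof]
```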
@@ -345,6 +371,8 @@ proc selfReconstructDataColumns*(numCol: uint64):
if numCol >= columnsNeeded:
return true
false
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/_features/eip7594/das-core.md#compute_extended_matrix
proc get_extended_sample_count*(samples_per_slot: int,
allowed_failures: int):
int =


@@ -1359,7 +1359,7 @@ proc readValue*(reader: var JsonReader[RestJson],
value: var (KzgCommitment|KzgProof)) {.
raises: [IOError, SerializationError].} =
try:
hexToByteArray(reader.readValue(string), distinctBase(value))
hexToByteArray(reader.readValue(string), distinctBase(value.bytes))
except ValueError:
raiseUnexpectedValue(reader,
"KzgCommitment value should be a valid hex string")
@@ -1367,7 +1367,7 @@ proc readValue*(reader: var JsonReader[RestJson],
proc writeValue*(
writer: var JsonWriter[RestJson], value: KzgCommitment | KzgProof
) {.raises: [IOError].} =
writeValue(writer, hexOriginal(distinctBase(value)))
writeValue(writer, hexOriginal(distinctBase(value.bytes)))
## GraffitiBytes
proc writeValue*(


@@ -531,6 +531,7 @@ proc compute_execution_block_hash*(blck: ForkyBeaconBlock): Eth2Digest =
from std/math import exp, ln
from std/sequtils import foldl
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/_features/eip7594/das-core.md#compute_extended_matrix
func ln_binomial(n, k: int): float64 =
if k > n:
low(float64)
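
The diff cuts off here, but the `k > n` guard plus the `exp`/`ln`/`foldl` imports suggest this helper computes ln C(n, k) for the hypergeometric CDF exercised by the new `hypergeom_cdf` unit test. One standard way to finish it, shown as a sketch only since the committed body is not visible (summing logs to avoid overflow):

```nim
import std/[math, sequtils]

func lnBinomialSketch(n, k: int): float64 =
  ## ln C(n, k); low(float64) stands in for ln(0), as in the guard above.
  if k > n or k < 0:
    low(float64)
  else:
    let kk = min(k, n - k)              # C(n, k) == C(n, n-k)
    toSeq(1 .. kk).foldl(
      a + ln(float64(n - kk + b)) - ln(float64(b)), 0.0)
```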


@@ -1086,7 +1086,7 @@ func kzg_commitment_to_versioned_hash*(
var res: VersionedHash
res[0] = VERSIONED_HASH_VERSION_KZG
res[1 .. 31] = eth2digest(kzg_commitment).data.toOpenArray(1, 31)
res[1 .. 31] = eth2digest(kzg_commitment.bytes).data.toOpenArray(1, 31)
res
proc validate_blobs*(
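
For reference, the layout this function implements (per EIP-4844): a versioned hash is one version byte (0x01) followed by the last 31 bytes of the SHA-256 of the commitment, and after this change the digest is taken over `kzg_commitment.bytes` rather than the wrapper object. A condensed sketch with the names from the diff (`toVersionedHashSketch` is illustrative):

```nim
func toVersionedHashSketch(commitment: KzgCommitment): VersionedHash =
  let digest = eth2digest(commitment.bytes).data   # SHA-256 of 48 bytes
  result[0] = VERSIONED_HASH_VERSION_KZG           # 0x01
  result[1 .. 31] = digest.toOpenArray(1, 31)
```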


@@ -636,14 +636,14 @@ proc start*(rman: var RequestManager) =
## Start Request Manager's loops.
rman.blockLoopFuture = rman.requestManagerBlockLoop()
rman.dataColumnLoopFuture = rman.requestManagerDataColumnLoop()
# rman.blobLoopFuture = rman.requestManagerBlobLoop()
rman.blobLoopFuture = rman.requestManagerBlobLoop()
proc stop*(rman: RequestManager) =
## Stop Request Manager's loop.
if not(isNil(rman.blockLoopFuture)):
rman.blockLoopFuture.cancelSoon()
# if not(isNil(rman.blobLoopFuture)):
# rman.blobLoopFuture.cancelSoon()
if not(isNil(rman.blobLoopFuture)):
rman.blobLoopFuture.cancelSoon()
if not(isNil(rman.dataColumnLoopFuture)):
rman.dataColumnLoopFuture.cancelSoon()


@@ -554,18 +554,18 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A)
request = req
return
# let shouldGetBlobs =
# if not man.shouldGetBlobs(req.slot.epoch):
# false
# else:
# var hasBlobs = false
# for blck in blockData:
# withBlck(blck[]):
# when consensusFork >= ConsensusFork.Deneb:
# if forkyBlck.message.body.blob_kzg_commitments.len > 0:
# hasBlobs = true
# break
# hasBlobs
let shouldGetBlobs =
if not man.shouldGetBlobs(req.slot.epoch):
false
else:
var hasBlobs = false
for blck in blockData:
withBlck(blck[]):
when consensusFork >= ConsensusFork.Deneb:
if forkyBlck.message.body.blob_kzg_commitments.len > 0:
hasBlobs = true
break
hasBlobs
func combine(acc: seq[Slot], cur: Slot): seq[Slot] =
var copy = acc
@@ -573,49 +573,49 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A)
copy.add(cur)
copy
# let blobData =
# if shouldGetBlobs:
# let blobs = await man.getBlobSidecars(peer, req)
# if blobs.isErr():
# peer.updateScore(PeerScoreNoValues)
# man.queue.push(req)
# debug "Failed to receive blobs on request",
# request = req, err = blobs.error
# return
# let blobData = blobs.get().asSeq()
# let blobSmap = getShortMap(req, blobData)
# debug "Received blobs on request", blobs_count = len(blobData),
# blobs_map = blobSmap, request = req
let blobData =
if shouldGetBlobs:
let blobs = await man.getBlobSidecars(peer, req)
if blobs.isErr():
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
debug "Failed to receive blobs on request",
request = req, err = blobs.error
return
let blobData = blobs.get().asSeq()
let blobSmap = getShortMap(req, blobData)
debug "Received blobs on request", blobs_count = len(blobData),
blobs_map = blobSmap, request = req
# if len(blobData) > 0:
# let slots = mapIt(blobData, it[].signed_block_header.message.slot)
# let uniqueSlots = foldl(slots, combine(a, b), @[slots[0]])
# if not(checkResponse(req, uniqueSlots)):
# peer.updateScore(PeerScoreBadResponse)
# man.queue.push(req)
# warn "Received blobs sequence is not in requested range",
# blobs_count = len(blobData), blobs_map = getShortMap(req, blobData),
# request = req
# return
# let groupedBlobs = groupBlobs(req, blockData, blobData)
# if groupedBlobs.isErr():
# peer.updateScore(PeerScoreNoValues)
# man.queue.push(req)
# info "Received blobs sequence is inconsistent",
# blobs_map = getShortMap(req, blobData), request = req, msg=groupedBlobs.error()
# return
# if (let checkRes = groupedBlobs.get.checkBlobs(); checkRes.isErr):
# peer.updateScore(PeerScoreBadResponse)
# man.queue.push(req)
# warn "Received blobs sequence is invalid",
# blobs_count = len(blobData),
# blobs_map = getShortMap(req, blobData),
# request = req,
# msg = checkRes.error
# return
# Opt.some(groupedBlobs.get())
# else:
# Opt.none(seq[BlobSidecars])
if len(blobData) > 0:
let slots = mapIt(blobData, it[].signed_block_header.message.slot)
let uniqueSlots = foldl(slots, combine(a, b), @[slots[0]])
if not(checkResponse(req, uniqueSlots)):
peer.updateScore(PeerScoreBadResponse)
man.queue.push(req)
warn "Received blobs sequence is not in requested range",
blobs_count = len(blobData), blobs_map = getShortMap(req, blobData),
request = req
return
let groupedBlobs = groupBlobs(req, blockData, blobData)
if groupedBlobs.isErr():
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
info "Received blobs sequence is inconsistent",
blobs_map = getShortMap(req, blobData), request = req, msg=groupedBlobs.error()
return
if (let checkRes = groupedBlobs.get.checkBlobs(); checkRes.isErr):
peer.updateScore(PeerScoreBadResponse)
man.queue.push(req)
warn "Received blobs sequence is invalid",
blobs_count = len(blobData),
blobs_map = getShortMap(req, blobData),
request = req,
msg = checkRes.error
return
Opt.some(groupedBlobs.get())
else:
Opt.none(seq[BlobSidecars])
let shouldGetDataColumns =
if not man.shouldGetDataColumns(req.slot.epoch):


@@ -727,37 +727,37 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
# Nim versions, remove workaround and move `res` into for loop
res: Result[void, VerifierError]
# var i=0
# for blk, blb in sq.blocks(item):
# res = await sq.blockVerifier(blk[], blb, Opt.none(DataColumnSidecars), maybeFinalized)
# inc(i)
var i=0
for blk, blb in sq.blocks(item):
res = await sq.blockVerifier(blk[], blb, Opt.none(DataColumnSidecars), maybeFinalized)
inc(i)
# if res.isOk():
# goodBlock = some(blk[].slot)
# else:
# case res.error()
# of VerifierError.MissingParent:
# missingParentSlot = some(blk[].slot)
# break
# of VerifierError.Duplicate:
# # Keep going, happens naturally
# discard
# of VerifierError.UnviableFork:
# # Keep going so as to register other unviable blocks with the
# # quarantine
# if unviableBlock.isNone:
# # Remember the first unviable block, so we can log it
# unviableBlock = some((blk[].root, blk[].slot))
if res.isOk():
goodBlock = some(blk[].slot)
else:
case res.error()
of VerifierError.MissingParent:
missingParentSlot = some(blk[].slot)
break
of VerifierError.Duplicate:
# Keep going, happens naturally
discard
of VerifierError.UnviableFork:
# Keep going so as to register other unviable blocks with the
# quarantine
if unviableBlock.isNone:
# Remember the first unviable block, so we can log it
unviableBlock = some((blk[].root, blk[].slot))
# of VerifierError.Invalid:
# hasInvalidBlock = true
of VerifierError.Invalid:
hasInvalidBlock = true
# let req = item.request
# notice "Received invalid sequence of blocks", request = req,
# blocks_count = len(item.data),
# blocks_map = getShortMap(req, item.data)
# req.item.updateScore(PeerScoreBadValues)
# break
let req = item.request
notice "Received invalid sequence of blocks", request = req,
blocks_count = len(item.data),
blocks_map = getShortMap(req, item.data)
req.item.updateScore(PeerScoreBadValues)
break
var counter = 0
for blk, col in sq.das_blocks(item):


@@ -112,8 +112,10 @@ proc routeSignedBeaconBlock*(
let blobs = blobsOpt.get()
let kzgCommits = blck.message.body.blob_kzg_commitments.asSeq
if blobs.len > 0 or kzgCommits.len > 0:
let res = validate_blobs(kzgCommits, blobs.mapIt(it.blob),
blobs.mapIt(it.kzg_proof))
let res = validate_blobs(
kzgCommits,
blobs.mapIt(KzgBlob(bytes: it.blob)),
blobs.mapIt(it.kzg_proof))
if res.isErr():
warn "blobs failed validation",
blockRoot = shortLog(blck.root),
@@ -122,7 +124,6 @@ proc routeSignedBeaconBlock*(
signature = shortLog(blck.signature),
msg = res.error()
return err(res.error())
let
sendTime = router[].getCurrentBeaconTime()
delay = sendTime - blck.message.slot.block_deadline()
@@ -145,6 +146,7 @@ proc routeSignedBeaconBlock*(
blockRoot = shortLog(blck.root), blck = shortLog(blck.message),
signature = shortLog(blck.signature), error = res.error()
# PREVENT PROPOSING BLOB SIDECARS IN PEERDAS DEVNET
var blobRefs = Opt.none(BlobSidecars)
if blobsOpt.isSome():
let blobs = blobsOpt.get()
@@ -168,25 +170,25 @@ proc routeSignedBeaconBlock*(
if blobsOpt.isSome():
let blobs = blobsOpt.get()
if blobs.len != 0:
let dataColumnsOpt = get_data_column_sidecars(blck, blobs.mapIt(it.blob))
if not dataColumnsOpt.isOk:
let dataColumnsOpt = newClone get_data_column_sidecars(blck, blobs.mapIt(KzgBlob(bytes: it.blob)))
if not dataColumnsOpt[].isOk:
debug "Issue with computing data column from blob bundle"
let data_columns = dataColumnsOpt.get()
var das_workers = newSeq[Future[SendResult]](len(data_columns))
let data_columns = dataColumnsOpt[].get()
var das_workers = newSeq[Future[SendResult]](len(dataColumnsOpt[].get()))
for i in 0..<data_columns.len:
let subnet_id = compute_subnet_for_data_column_sidecar(uint64(i))
das_workers[i] =
router[].network.broadcastDataColumnSidecar(subnet_id, data_columns[int(i)])
router[].network.broadcastDataColumnSidecar(subnet_id, dataColumnsOpt[].get()[int(i)])
let allres = await allFinished(das_workers)
for i in 0..<allres.len:
let res = allres[i]
doAssert res.finished()
if res.failed():
notice "Data Columns not sent",
data_column = shortLog(data_columns[i]), error = res.error[]
data_column = shortLog(dataColumnsOpt[].get()[i]), error = res.error[]
else:
notice "Data columns sent", data_column = shortLog(data_columns[i])
dataColumnRefs = Opt.some(data_columns.mapIt(newClone(it)))
notice "Data columns sent", data_column = shortLog(dataColumnsOpt[].get()[i])
dataColumnRefs = Opt.some(dataColumnsOpt[].get().mapIt(newClone(it)))
let added = await router[].blockProcessor[].addBlock(
MsgSource.api, ForkedSignedBeaconBlock.init(blck), blobRefs, dataColumnRefs)
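
This hunk is where the commit-message bullet about moving data columns "to the heap" lands: `get_data_column_sidecars` returns a large value, and `newClone` boxes that result behind a `ref`, so later code reads it through `dataColumnsOpt[]` instead of copying the sidecar sequence by value. A condensed sketch of the pattern, with names from the diff:

```nim
# Box the large Result on the heap, then dereference to use it.
let dataColumnsRes = newClone get_data_column_sidecars(
  blck, blobs.mapIt(KzgBlob(bytes: it.blob)))
if dataColumnsRes[].isOk:
  for sidecar in dataColumnsRes[].get():
    discard shortLog(sidecar)   # access via the ref, no upfront copy
```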


@@ -128,7 +128,7 @@ proc unblindAndRouteBlockMEV*(
bundle.data.blobs_bundle.commitments:
return err("unblinded blobs bundle has unexpected commitments")
let ok = verifyProofs(
asSeq blobs_bundle.blobs,
blobs_bundle.blobs.mapIt(KzgBlob(bytes: it)),
asSeq blobs_bundle.commitments,
asSeq blobs_bundle.proofs).valueOr:
return err("unblinded blobs bundle fails verification")


@@ -49,7 +49,6 @@ import # Unit test
./test_spec,
./test_statediff,
./test_sync_committee_pool,
./test_sync_manager,
./test_toblindedblock,
./test_validator_change_pool,
./test_validator_pool,


@@ -12,7 +12,6 @@ import
# Utilities
chronicles,
unittest2,
stew/results,
# Beacon chain internals
../../../beacon_chain/spec/[beaconstate, state_transition_block],
../../../beacon_chain/spec/datatypes/altair,


@@ -108,10 +108,10 @@ suite "EF - Altair - SSZ consensus objects " & preset():
let hash = loadExpectedHashTreeRoot(path)
case sszType:
of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
of "Attestation": checkSSZ(Attestation, path, hash)
of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
of "Attestation": checkSSZ(phase0.Attestation, path, hash)
of "AttestationData": checkSSZ(AttestationData, path, hash)
of "AttesterSlashing": checkSSZ(AttesterSlashing, path, hash)
of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
of "BeaconBlock": checkSSZ(altair.BeaconBlock, path, hash)
of "BeaconBlockBody": checkSSZ(altair.BeaconBlockBody, path, hash)
of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash)
@@ -126,7 +126,8 @@ suite "EF - Altair - SSZ consensus objects " & preset():
of "Fork": checkSSZ(Fork, path, hash)
of "ForkData": checkSSZ(ForkData, path, hash)
of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
of "IndexedAttestation": checkSSZ(IndexedAttestation, path, hash)
of "IndexedAttestation":
checkSSZ(phase0.IndexedAttestation, path, hash)
of "LightClientBootstrap":
checkSSZ(altair.LightClientBootstrap, path, hash)
of "LightClientHeader":
@@ -140,7 +141,7 @@ suite "EF - Altair - SSZ consensus objects " & preset():
of "PendingAttestation": checkSSZ(PendingAttestation, path, hash)
of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
of "SignedAggregateAndProof":
checkSSZ(SignedAggregateAndProof, path, hash)
checkSSZ(phase0.SignedAggregateAndProof, path, hash)
of "SignedBeaconBlock": checkSSZ(altair.SignedBeaconBlock, path, hash)
of "SignedBeaconBlockHeader":
checkSSZ(SignedBeaconBlockHeader, path, hash)
@@ -158,4 +159,4 @@ suite "EF - Altair - SSZ consensus objects " & preset():
of "Validator": checkSSZ(Validator, path, hash)
of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
else:
raise newException(ValueError, "Unsupported test: " & sszType)
raise newException(ValueError, "Unsupported test: " & sszType)


@@ -12,7 +12,6 @@ import
# Utilities
chronicles,
unittest2,
stew/results,
# Beacon chain internals
../../../beacon_chain/spec/state_transition_block,
../../../beacon_chain/spec/datatypes/bellatrix,
@@ -76,7 +75,7 @@ proc runTest[T, U](
suite baseDescription & "Attestation " & preset():
proc applyAttestation(
preState: var bellatrix.BeaconState, attestation: Attestation):
preState: var bellatrix.BeaconState, attestation: phase0.Attestation):
Result[void, cstring] =
var cache: StateCache
let
@@ -91,7 +90,7 @@ suite baseDescription & "Attestation " & preset():
ok()
for path in walkTests(OpAttestationsDir):
runTest[Attestation, typeof applyAttestation](
runTest[phase0.Attestation, typeof applyAttestation](
OpAttestationsDir, suiteName, "Attestation", "attestation",
applyAttestation, path)


@@ -9,26 +9,26 @@
{.used.}
import
# Standard library
std/[
strutils, streams, strformat,
macros],
# Third-party
yaml,
# Beacon chain internals
../../../beacon_chain/spec/datatypes/[altair, bellatrix],
../../../beacon_chain/spec/datatypes/altair,
# Status libraries
snappy,
# Test utilities
../../testutil, ../fixtures_utils, ../os_ops
from std/streams import close, openFileStream
from std/strformat import `&`
from std/strutils import toLowerAscii
# SSZ tests of consensus objects (minimal/mainnet preset specific)
# Parsing definitions
# ----------------------------------------------------------------
const
SSZDir = SszTestsDir/const_preset/"bellatrix"/"ssz_static"
SSZDir = SszTestsDir/const_preset/"altair"/"ssz_static"
type
SSZHashTreeRoot = object
@@ -42,26 +42,26 @@ type
# Checking the values against the yaml file is TODO (require more flexible Yaml parser)
proc checkSSZ(
T: type bellatrix.SignedBeaconBlock,
T: type altair.SignedBeaconBlock,
dir: string,
expectedHash: SSZHashTreeRoot
) {.raises: [IOError, SerializationError, UnconsumedInput].} =
# Deserialize into a ref object to not fill Nim stack
let encoded = snappy.decode(
readFileBytes(dir/"serialized.ssz_snappy"), MaxObjectSize)
let deserialized = newClone(sszDecodeEntireInput(encoded, T))
# Deserialize into a ref object to not fill Nim stack
let encoded = snappy.decode(
readFileBytes(dir/"serialized.ssz_snappy"), MaxObjectSize)
let deserialized = newClone(sszDecodeEntireInput(encoded, T))
# SignedBeaconBlocks usually not hashed because they're identified by
# htr(BeaconBlock), so do it manually
check: expectedHash.root == "0x" & toLowerAscii($hash_tree_root(
[hash_tree_root(deserialized.message),
hash_tree_root(deserialized.signature)]))
# SignedBeaconBlocks usually not hashed because they're identified by
# htr(BeaconBlock), so do it manually
check: expectedHash.root == "0x" & toLowerAscii($hash_tree_root(
[hash_tree_root(deserialized.message),
hash_tree_root(deserialized.signature)]))
check deserialized.root == hash_tree_root(deserialized.message)
check SSZ.encode(deserialized[]) == encoded
check sszSize(deserialized[]) == encoded.len
check deserialized.root == hash_tree_root(deserialized.message)
check SSZ.encode(deserialized[]) == encoded
check sszSize(deserialized[]) == encoded.len
# TODO check the value (requires YAML loader)
# TODO check the value (requires YAML loader)
proc checkSSZ(
T: type,
@@ -91,7 +91,7 @@ proc loadExpectedHashTreeRoot(
# Test runner
# ----------------------------------------------------------------
suite "EF - Bellatrix - SSZ consensus objects " & preset():
suite "EF - Altair - SSZ consensus objects " & preset():
doAssert dirExists(SSZDir), "You need to run the \"download_test_vectors.sh\" script to retrieve the consensus spec test vectors."
for pathKind, sszType in walkDir(SSZDir, relative = true, checkDir = true):
doAssert pathKind == pcDir
@@ -108,14 +108,14 @@ suite "EF - Bellatrix - SSZ consensus objects " & preset():
let hash = loadExpectedHashTreeRoot(path)
case sszType:
of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
of "Attestation": checkSSZ(Attestation, path, hash)
of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
of "Attestation": checkSSZ(phase0.Attestation, path, hash)
of "AttestationData": checkSSZ(AttestationData, path, hash)
of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
of "BeaconBlock": checkSSZ(bellatrix.BeaconBlock, path, hash)
of "BeaconBlockBody": checkSSZ(bellatrix.BeaconBlockBody, path, hash)
of "BeaconBlock": checkSSZ(altair.BeaconBlock, path, hash)
of "BeaconBlockBody": checkSSZ(altair.BeaconBlockBody, path, hash)
of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash)
of "BeaconState": checkSSZ(bellatrix.BeaconState, path, hash)
of "BeaconState": checkSSZ(altair.BeaconState, path, hash)
of "Checkpoint": checkSSZ(Checkpoint, path, hash)
of "ContributionAndProof": checkSSZ(ContributionAndProof, path, hash)
of "Deposit": checkSSZ(Deposit, path, hash)
@@ -123,9 +123,6 @@ suite "EF - Bellatrix - SSZ consensus objects " & preset():
of "DepositMessage": checkSSZ(DepositMessage, path, hash)
of "Eth1Block": checkSSZ(Eth1Block, path, hash)
of "Eth1Data": checkSSZ(Eth1Data, path, hash)
of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash)
of "ExecutionPayloadHeader":
checkSSZ(ExecutionPayloadHeader, path, hash)
of "Fork": checkSSZ(Fork, path, hash)
of "ForkData": checkSSZ(ForkData, path, hash)
of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
@@ -142,12 +139,10 @@ suite "EF - Bellatrix - SSZ consensus objects " & preset():
of "LightClientOptimisticUpdate":
checkSSZ(altair.LightClientOptimisticUpdate, path, hash)
of "PendingAttestation": checkSSZ(PendingAttestation, path, hash)
of "PowBlock": checkSSZ(PowBlock, path, hash)
of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
of "SignedAggregateAndProof":
checkSSZ(SignedAggregateAndProof, path, hash)
of "SignedBeaconBlock":
checkSSZ(bellatrix.SignedBeaconBlock, path, hash)
checkSSZ(phase0.SignedAggregateAndProof, path, hash)
of "SignedBeaconBlock": checkSSZ(altair.SignedBeaconBlock, path, hash)
of "SignedBeaconBlockHeader":
checkSSZ(SignedBeaconBlockHeader, path, hash)
of "SignedContributionAndProof":


@ -12,7 +12,6 @@ import
# Utilities
chronicles,
unittest2,
stew/results,
# Beacon chain internals
../../../beacon_chain/spec/state_transition_block,
../../../beacon_chain/spec/datatypes/capella,
@ -80,7 +79,7 @@ proc runTest[T, U](
suite baseDescription & "Attestation " & preset():
proc applyAttestation(
preState: var capella.BeaconState, attestation: Attestation):
preState: var capella.BeaconState, attestation: phase0.Attestation):
Result[void, cstring] =
var cache: StateCache
let
@ -95,14 +94,14 @@ suite baseDescription & "Attestation " & preset():
ok()
for path in walkTests(OpAttestationsDir):
runTest[Attestation, typeof applyAttestation](
runTest[phase0.Attestation, typeof applyAttestation](
OpAttestationsDir, suiteName, "Attestation", "attestation",
applyAttestation, path)
suite baseDescription & "Attester Slashing " & preset():
proc applyAttesterSlashing(
preState: var capella.BeaconState, attesterSlashing: AttesterSlashing):
Result[void, cstring] =
preState: var capella.BeaconState,
attesterSlashing: phase0.AttesterSlashing): Result[void, cstring] =
var cache: StateCache
doAssert (? process_attester_slashing(
defaultRuntimeConfig, preState, attesterSlashing, {strictVerification},
@ -110,7 +109,7 @@ suite baseDescription & "Attester Slashing " & preset():
ok()
for path in walkTests(OpAttSlashingDir):
runTest[AttesterSlashing, typeof applyAttesterSlashing](
runTest[phase0.AttesterSlashing, typeof applyAttesterSlashing](
OpAttSlashingDir, suiteName, "Attester Slashing", "attester_slashing",
applyAttesterSlashing, path)


@ -110,10 +110,10 @@ suite "EF - Capella - SSZ consensus objects " & preset():
let hash = loadExpectedHashTreeRoot(path)
case sszType:
of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
of "Attestation": checkSSZ(Attestation, path, hash)
of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
of "Attestation": checkSSZ(phase0.Attestation, path, hash)
of "AttestationData": checkSSZ(AttestationData, path, hash)
of "AttesterSlashing": checkSSZ(AttesterSlashing, path, hash)
of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
of "BeaconBlock": checkSSZ(capella.BeaconBlock, path, hash)
of "BeaconBlockBody": checkSSZ(capella.BeaconBlockBody, path, hash)
of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash)
@ -126,14 +126,16 @@ suite "EF - Capella - SSZ consensus objects " & preset():
of "DepositMessage": checkSSZ(DepositMessage, path, hash)
of "Eth1Block": checkSSZ(Eth1Block, path, hash)
of "Eth1Data": checkSSZ(Eth1Data, path, hash)
of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash)
of "ExecutionPayload":
checkSSZ(capella.ExecutionPayload, path, hash)
of "ExecutionPayloadHeader":
checkSSZ(ExecutionPayloadHeader, path, hash)
checkSSZ(capella.ExecutionPayloadHeader, path, hash)
of "Fork": checkSSZ(Fork, path, hash)
of "ForkData": checkSSZ(ForkData, path, hash)
of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
of "HistoricalSummary": checkSSZ(HistoricalSummary, path, hash)
of "IndexedAttestation": checkSSZ(IndexedAttestation, path, hash)
of "IndexedAttestation":
checkSSZ(phase0.IndexedAttestation, path, hash)
of "LightClientBootstrap":
checkSSZ(capella.LightClientBootstrap, path, hash)
of "LightClientHeader":
@ -148,7 +150,7 @@ suite "EF - Capella - SSZ consensus objects " & preset():
of "PowBlock": checkSSZ(PowBlock, path, hash)
of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
of "SignedAggregateAndProof":
checkSSZ(SignedAggregateAndProof, path, hash)
checkSSZ(phase0.SignedAggregateAndProof, path, hash)
of "SignedBeaconBlock":
checkSSZ(capella.SignedBeaconBlock, path, hash)
of "SignedBeaconBlockHeader":
@ -170,4 +172,4 @@ suite "EF - Capella - SSZ consensus objects " & preset():
of "Validator": checkSSZ(Validator, path, hash)
of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
else:
raise newException(ValueError, "Unsupported test: " & sszType)


@ -18,15 +18,11 @@ import
./capella/all_capella_fixtures,
./deneb/all_deneb_fixtures,
./eip7594/all_eip7594_fixtures,
./electra/all_electra_fixtures,
./test_fixture_fork,
./test_fixture_fork_choice,
./test_fixture_light_client_single_merkle_proof,
./test_fixture_light_client_sync,
./test_fixture_light_client_update_ranking,
./test_fixture_merkle_proof,
./test_fixture_sanity_blocks,
./test_fixture_sanity_slots,
./test_fixture_transition
./test_fixture_merkle_proof
summarizeLongTests("ConsensusSpecPreset")


@ -12,7 +12,6 @@ import
# Utilities
chronicles,
unittest2,
stew/results,
# Beacon chain internals
../../../beacon_chain/spec/state_transition_block,
../../../beacon_chain/spec/datatypes/deneb,
@ -80,7 +79,7 @@ proc runTest[T, U](
suite baseDescription & "Attestation " & preset():
proc applyAttestation(
preState: var deneb.BeaconState, attestation: Attestation):
preState: var deneb.BeaconState, attestation: phase0.Attestation):
Result[void, cstring] =
var cache: StateCache
let
@ -95,14 +94,14 @@ suite baseDescription & "Attestation " & preset():
ok()
for path in walkTests(OpAttestationsDir):
runTest[Attestation, typeof applyAttestation](
runTest[phase0.Attestation, typeof applyAttestation](
OpAttestationsDir, suiteName, "Attestation", "attestation",
applyAttestation, path)
suite baseDescription & "Attester Slashing " & preset():
proc applyAttesterSlashing(
preState: var deneb.BeaconState, attesterSlashing: AttesterSlashing):
Result[void, cstring] =
preState: var deneb.BeaconState,
attesterSlashing: phase0.AttesterSlashing): Result[void, cstring] =
var cache: StateCache
doAssert (? process_attester_slashing(
defaultRuntimeConfig, preState, attesterSlashing, {strictVerification},
@ -110,7 +109,7 @@ suite baseDescription & "Attester Slashing " & preset():
ok()
for path in walkTests(OpAttSlashingDir):
runTest[AttesterSlashing, typeof applyAttesterSlashing](
runTest[phase0.AttesterSlashing, typeof applyAttesterSlashing](
OpAttSlashingDir, suiteName, "Attester Slashing", "attester_slashing",
applyAttesterSlashing, path)


@ -113,10 +113,10 @@ suite "EF - Deneb - SSZ consensus objects " & preset():
let hash = loadExpectedHashTreeRoot(path)
case sszType:
of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
of "Attestation": checkSSZ(Attestation, path, hash)
of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
of "Attestation": checkSSZ(phase0.Attestation, path, hash)
of "AttestationData": checkSSZ(AttestationData, path, hash)
of "AttesterSlashing": checkSSZ(AttesterSlashing, path, hash)
of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
of "BeaconBlock": checkSSZ(deneb.BeaconBlock, path, hash)
of "BeaconBlockBody": checkSSZ(deneb.BeaconBlockBody, path, hash)
of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash)
@ -131,18 +131,22 @@ suite "EF - Deneb - SSZ consensus objects " & preset():
of "DepositMessage": checkSSZ(DepositMessage, path, hash)
of "Eth1Block": checkSSZ(Eth1Block, path, hash)
of "Eth1Data": checkSSZ(Eth1Data, path, hash)
of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash)
of "ExecutionPayload":
checkSSZ(deneb.ExecutionPayload, path, hash)
of "ExecutionPayloadHeader":
checkSSZ(ExecutionPayloadHeader, path, hash)
checkSSZ(deneb.ExecutionPayloadHeader, path, hash)
of "Fork": checkSSZ(Fork, path, hash)
of "ForkData": checkSSZ(ForkData, path, hash)
of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
of "HistoricalSummary": checkSSZ(HistoricalSummary, path, hash)
of "IndexedAttestation": checkSSZ(IndexedAttestation, path, hash)
of "IndexedAttestation":
checkSSZ(phase0.IndexedAttestation, path, hash)
of "LightClientBootstrap":
checkSSZ(deneb.LightClientBootstrap, path, hash)
of "LightClientHeader": checkSSZ(deneb.LightClientHeader, path, hash)
of "LightClientUpdate": checkSSZ(deneb.LightClientUpdate, path, hash)
of "LightClientHeader":
checkSSZ(deneb.LightClientHeader, path, hash)
of "LightClientUpdate":
checkSSZ(deneb.LightClientUpdate, path, hash)
of "LightClientFinalityUpdate":
checkSSZ(deneb.LightClientFinalityUpdate, path, hash)
of "LightClientOptimisticUpdate":
@ -151,7 +155,7 @@ suite "EF - Deneb - SSZ consensus objects " & preset():
of "PowBlock": checkSSZ(PowBlock, path, hash)
of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
of "SignedAggregateAndProof":
checkSSZ(SignedAggregateAndProof, path, hash)
checkSSZ(phase0.SignedAggregateAndProof, path, hash)
of "SignedBeaconBlock":
checkSSZ(deneb.SignedBeaconBlock, path, hash)
of "SignedBeaconBlockHeader":
@ -173,4 +177,4 @@ suite "EF - Deneb - SSZ consensus objects " & preset():
of "Validator": checkSSZ(Validator, path, hash)
of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
else:
raise newException(ValueError, "Unsupported test: " & sszType)


@ -117,10 +117,10 @@ suite "EF - EIP7594 - SSZ consensus objects " & preset():
let hash = loadExpectedHashTreeRoot(path)
case sszType:
of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
of "Attestation": checkSSZ(Attestation, path, hash)
of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
of "Attestation": checkSSZ(phase0.Attestation, path, hash)
of "AttestationData": checkSSZ(AttestationData, path, hash)
of "AttesterSlashing": checkSSZ(AttesterSlashing, path, hash)
of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
of "BeaconBlock": checkSSZ(deneb.BeaconBlock, path, hash)
of "BeaconBlockBody": checkSSZ(deneb.BeaconBlockBody, path, hash)
of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash)
@ -130,34 +130,40 @@ suite "EF - EIP7594 - SSZ consensus objects " & preset():
of "BLSToExecutionChange": checkSSZ(BLSToExecutionChange, path, hash)
of "Checkpoint": checkSSZ(Checkpoint, path, hash)
of "ContributionAndProof": checkSSZ(ContributionAndProof, path, hash)
of "DataColumnIdentifier": checkSSZ(DataColumnIdentifier, path, hash)
of "DataColumnSidecar": checkSSZ(DataColumnSidecar, path, hash)
of "DataColumnIdentifier": checkSSZ(DataColumnIdentifier, path, hash)
of "Deposit": checkSSZ(Deposit, path, hash)
of "DepositData": checkSSZ(DepositData, path, hash)
of "DepositMessage": checkSSZ(DepositMessage, path, hash)
of "Eth1Block": checkSSZ(Eth1Block, path, hash)
of "Eth1Data": checkSSZ(Eth1Data, path, hash)
of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash)
of "ExecutionPayload":
checkSSZ(deneb.ExecutionPayload, path, hash)
of "ExecutionPayloadHeader":
checkSSZ(ExecutionPayloadHeader, path, hash)
checkSSZ(deneb.ExecutionPayloadHeader, path, hash)
of "Fork": checkSSZ(Fork, path, hash)
of "ForkData": checkSSZ(ForkData, path, hash)
of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
of "HistoricalSummary": checkSSZ(HistoricalSummary, path, hash)
of "IndexedAttestation": checkSSZ(IndexedAttestation, path, hash)
of "IndexedAttestation":
checkSSZ(phase0.IndexedAttestation, path, hash)
of "LightClientBootstrap":
checkSSZ(deneb.LightClientBootstrap, path, hash)
of "LightClientHeader": checkSSZ(deneb.LightClientHeader, path, hash)
of "LightClientUpdate": checkSSZ(deneb.LightClientUpdate, path, hash)
of "LightClientHeader":
checkSSZ(deneb.LightClientHeader, path, hash)
of "LightClientUpdate":
checkSSZ(deneb.LightClientUpdate, path, hash)
of "LightClientFinalityUpdate":
checkSSZ(deneb.LightClientFinalityUpdate, path, hash)
of "LightClientOptimisticUpdate":
checkSSZ(deneb.LightClientOptimisticUpdate, path, hash)
of "MatrixEntry":
checkSSZ(MatrixEntry, path, hash)
of "PendingAttestation": checkSSZ(PendingAttestation, path, hash)
of "PowBlock": checkSSZ(PowBlock, path, hash)
of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
of "SignedAggregateAndProof":
checkSSZ(SignedAggregateAndProof, path, hash)
checkSSZ(phase0.SignedAggregateAndProof, path, hash)
of "SignedBeaconBlock":
checkSSZ(deneb.SignedBeaconBlock, path, hash)
of "SignedBeaconBlockHeader":
@ -179,4 +185,4 @@ suite "EF - EIP7594 - SSZ consensus objects " & preset():
of "Validator": checkSSZ(Validator, path, hash)
of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
else:
raise newException(ValueError, "Unsupported test: " & sszType)


@ -138,9 +138,9 @@ suite "EF - Electra - SSZ consensus objects " & preset():
of "Eth1Data": checkSSZ(Eth1Data, path, hash)
of "ExecutionLayerWithdrawalRequest":
checkSSZ(ExecutionLayerWithdrawalRequest, path, hash)
of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash)
of "ExecutionPayload": checkSSZ(electra.ExecutionPayload, path, hash)
of "ExecutionPayloadHeader":
checkSSZ(ExecutionPayloadHeader, path, hash)
checkSSZ(electra.ExecutionPayloadHeader, path, hash)
of "Fork": checkSSZ(Fork, path, hash)
of "ForkData": checkSSZ(ForkData, path, hash)
of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)


@ -12,10 +12,9 @@ import
# Utilities
chronicles,
unittest2,
stew/results,
# Beacon chain internals
../../../beacon_chain/spec/[beaconstate, state_transition_block],
../../../beacon_chain/spec/datatypes/phase0,
../../../beacon_chain/spec/datatypes/altair,
# Test utilities
../../testutil,
../fixtures_utils, ../os_ops,
@ -24,19 +23,21 @@ import
from std/sequtils import mapIt, toSeq
const
OpDir = SszTestsDir/const_preset/"phase0"/"operations"
OpDir = SszTestsDir/const_preset/"altair"/"operations"
OpAttestationsDir = OpDir/"attestation"
OpAttSlashingDir = OpDir/"attester_slashing"
OpBlockHeaderDir = OpDir/"block_header"
OpDepositsDir = OpDir/"deposit"
OpProposerSlashingDir = OpDir/"proposer_slashing"
OpSyncAggregateDir = OpDir/"sync_aggregate"
OpVoluntaryExitDir = OpDir/"voluntary_exit"
baseDescription = "EF - Phase 0 - Operations - "
baseDescription = "EF - Altair - Operations - "
doAssert toHashSet(mapIt(toSeq(walkDir(OpDir, relative = false)), it.path)) ==
toHashSet([OpAttestationsDir, OpAttSlashingDir, OpBlockHeaderDir,
OpDepositsDir, OpProposerSlashingDir, OpVoluntaryExitDir])
OpDepositsDir, OpProposerSlashingDir, OpSyncAggregateDir,
OpVoluntaryExitDir])
proc runTest[T, U](
testSuiteDir, suiteName, opName, applyFile: string,
@ -49,15 +50,15 @@ proc runTest[T, U](
else:
"[Invalid] "
test prefix & baseDescription & suiteName & " - " & identifier:
test prefix & baseDescription & opName & " - " & identifier:
let preState = newClone(
parseTest(testDir/"pre.ssz_snappy", SSZ, phase0.BeaconState))
parseTest(testDir/"pre.ssz_snappy", SSZ, altair.BeaconState))
let done = applyProc(
preState[], parseTest(testDir/(applyFile & ".ssz_snappy"), SSZ, T))
if fileExists(testDir/"post.ssz_snappy"):
let postState =
newClone(parseTest(testDir/"post.ssz_snappy", SSZ, phase0.BeaconState))
newClone(parseTest(testDir/"post.ssz_snappy", SSZ, altair.BeaconState))
check:
done.isOk()
@ -68,11 +69,18 @@ proc runTest[T, U](
suite baseDescription & "Attestation " & preset():
proc applyAttestation(
preState: var phase0.BeaconState, attestation: phase0.Attestation):
preState: var altair.BeaconState, attestation: phase0.Attestation):
Result[void, cstring] =
var cache: StateCache
doAssert (? process_attestation(
preState, attestation, {}, 0.Gwei, cache)) == 0.Gwei
let
total_active_balance = get_total_active_balance(preState, cache)
base_reward_per_increment =
get_base_reward_per_increment(total_active_balance)
# This returns the proposer reward for including the attestation, which
# isn't tested here.
discard ? process_attestation(
preState, attestation, {}, base_reward_per_increment, cache)
ok()
for path in walkTests(OpAttestationsDir):
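The `base_reward_per_increment` computed above follows the Altair spec formula. A minimal hedged sketch, assuming the spec constants (names illustrative, not this repo's helpers):
# integer_squareroot per the consensus spec (Newton iteration).
func integerSquareroot(n: uint64): uint64 =
  var x = n
  var y = (x + 1) div 2
  while y < x:
    x = y
    y = (x + n div x) div 2
  x

func baseRewardPerIncrementSketch(totalActiveBalance: uint64): uint64 =
  const
    EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000'u64  # 1 ETH in Gwei
    BASE_REWARD_FACTOR = 64'u64
  EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div
    integerSquareroot(totalActiveBalance)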
@ -82,7 +90,7 @@ suite baseDescription & "Attestation " & preset():
suite baseDescription & "Attester Slashing " & preset():
proc applyAttesterSlashing(
preState: var phase0.BeaconState,
preState: var altair.BeaconState,
attesterSlashing: phase0.AttesterSlashing): Result[void, cstring] =
var cache: StateCache
doAssert (? process_attester_slashing(
@ -97,21 +105,20 @@ suite baseDescription & "Attester Slashing " & preset():
suite baseDescription & "Block Header " & preset():
func applyBlockHeader(
preState: var phase0.BeaconState, blck: phase0.BeaconBlock):
preState: var altair.BeaconState, blck: altair.BeaconBlock):
Result[void, cstring] =
var cache: StateCache
process_block_header(preState, blck, {}, cache)
for path in walkTests(OpBlockHeaderDir):
runTest[phase0.BeaconBlock, typeof applyBlockHeader](
OpBlockHeaderDir, suiteName, "Block Header", "block",
applyBlockHeader, path)
runTest[altair.BeaconBlock, typeof applyBlockHeader](
OpBlockHeaderDir, suiteName, "Block Header", "block", applyBlockHeader, path)
from ".."/".."/".."/beacon_chain/bloomfilter import constructBloomFilter
suite baseDescription & "Deposit " & preset():
proc applyDeposit(
preState: var phase0.BeaconState, deposit: Deposit):
preState: var altair.BeaconState, deposit: Deposit):
Result[void, cstring] =
process_deposit(
defaultRuntimeConfig, preState,
@ -123,7 +130,7 @@ suite baseDescription & "Deposit " & preset():
suite baseDescription & "Proposer Slashing " & preset():
proc applyProposerSlashing(
preState: var phase0.BeaconState, proposerSlashing: ProposerSlashing):
preState: var altair.BeaconState, proposerSlashing: ProposerSlashing):
Result[void, cstring] =
var cache: StateCache
doAssert (? process_proposer_slashing(
@ -136,9 +143,24 @@ suite baseDescription & "Proposer Slashing " & preset():
OpProposerSlashingDir, suiteName, "Proposer Slashing", "proposer_slashing",
applyProposerSlashing, path)
suite baseDescription & "Sync Aggregate " & preset():
proc applySyncAggregate(
preState: var altair.BeaconState, syncAggregate: SyncAggregate):
Result[void, cstring] =
var cache: StateCache
doAssert (? process_sync_aggregate(
preState, syncAggregate, get_total_active_balance(preState, cache),
{}, cache)) > 0.Gwei
ok()
for path in walkTests(OpSyncAggregateDir):
runTest[SyncAggregate, typeof applySyncAggregate](
OpSyncAggregateDir, suiteName, "Sync Aggregate", "sync_aggregate",
applySyncAggregate, path)
suite baseDescription & "Voluntary Exit " & preset():
proc applyVoluntaryExit(
preState: var phase0.BeaconState, voluntaryExit: SignedVoluntaryExit):
preState: var altair.BeaconState, voluntaryExit: SignedVoluntaryExit):
Result[void, cstring] =
var cache: StateCache
if process_voluntary_exit(


@ -108,10 +108,10 @@ suite "EF - Phase 0 - SSZ consensus objects " & preset():
let hash = loadExpectedHashTreeRoot(path)
case sszType:
of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
of "Attestation": checkSSZ(Attestation, path, hash)
of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
of "Attestation": checkSSZ(phase0.Attestation, path, hash)
of "AttestationData": checkSSZ(AttestationData, path, hash)
of "AttesterSlashing": checkSSZ(AttesterSlashing, path, hash)
of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
of "BeaconBlock": checkSSZ(phase0.BeaconBlock, path, hash)
of "BeaconBlockBody": checkSSZ(phase0.BeaconBlockBody, path, hash)
of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash)
@ -125,11 +125,12 @@ suite "EF - Phase 0 - SSZ consensus objects " & preset():
of "Fork": checkSSZ(Fork, path, hash)
of "ForkData": checkSSZ(ForkData, path, hash)
of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
of "IndexedAttestation": checkSSZ(IndexedAttestation, path, hash)
of "IndexedAttestation":
checkSSZ(phase0.IndexedAttestation, path, hash)
of "PendingAttestation": checkSSZ(PendingAttestation, path, hash)
of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
of "SignedAggregateAndProof":
checkSSZ(SignedAggregateAndProof, path, hash)
checkSSZ(phase0.SignedAggregateAndProof, path, hash)
of "SignedBeaconBlock": checkSSZ(phase0.SignedBeaconBlock, path, hash)
of "SignedBeaconBlockHeader":
checkSSZ(SignedBeaconBlockHeader, path, hash)
@ -138,4 +139,4 @@ suite "EF - Phase 0 - SSZ consensus objects " & preset():
of "Validator": checkSSZ(Validator, path, hash)
of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
else:
raise newException(ValueError, "Unsupported test: " & sszType)


@ -78,11 +78,11 @@ suite "EF - Deneb - Fork " & preset():
runTest(capella.BeaconState, deneb.BeaconState, "Deneb", OpForkDir,
upgrade_to_deneb, suiteName, path)
from ../../beacon_chain/spec/datatypes/electra import BeaconState
# from ../../beacon_chain/spec/datatypes/electra import BeaconState
suite "EF - Electra - Fork " & preset():
const OpForkDir =
SszTestsDir/const_preset/"electra"/"fork"/"fork"/"pyspec_tests"
for kind, path in walkDir(OpForkDir, relative = true, checkDir = true):
runTest(deneb.BeaconState, electra.BeaconState, "Electra", OpForkDir,
upgrade_to_electra, suiteName, path)
# suite "EF - Electra - Fork " & preset():
# const OpForkDir =
# SszTestsDir/const_preset/"electra"/"fork"/"fork"/"pyspec_tests"
# for kind, path in walkDir(OpForkDir, relative = true, checkDir = true):
# runTest(deneb.BeaconState, electra.BeaconState, "Electra", OpForkDir,
# upgrade_to_electra, suiteName, path)


@ -136,7 +136,8 @@ proc loadOps(
blobs: distinctBase(parseTest(
path/(step["blobs"].getStr()) & ".ssz_snappy",
SSZ, List[KzgBlob, Limit MAX_BLOBS_PER_BLOCK])),
proofs: step["proofs"].mapIt(KzgProof.fromHex(it.getStr())))
proofs: step["proofs"].mapIt(
KzgProof(bytes: fromHex(array[48, byte], it.getStr()))))
else:
Opt.none(BlobData)
else:


@ -10,11 +10,9 @@
import
std/json,
yaml,
yaml/tojson,
kzg4844/kzg_ex,
stint,
chronicles,
stew/[byteutils, results],
stew/byteutils,
../testutil,
./fixtures_utils, ./os_ops
@ -43,13 +41,13 @@ block:
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
doAssert Kzg.loadTrustedSetup(
sourceDir &
"/../../vendor/nim-kzg4844/kzg4844/csources/src/trusted_setup.txt").isOk
"/../../vendor/nim-kzg4844/kzg4844/csources/src/trusted_setup.txt", 0).isOk
proc runBlobToKzgCommitmentTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Blob to KZG commitment - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
blob = fromHex[131072](data["input"]["blob"].getStr)
@ -60,18 +58,18 @@ proc runBlobToKzgCommitmentTest(suiteName, suitePath, path: string) =
if blob.isNone:
check output.kind == JNull
else:
let commitment = blobToKzgCommitment(blob.get)
let commitment = blobToKzgCommitment(KzgBlob(bytes: blob.get))
check:
if commitment.isErr:
output.kind == JNull
else:
commitment.get == fromHex[48](output.getStr).get
commitment.get().bytes == fromHex[48](output.getStr).get
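The `.bytes` accesses above reflect the updated bindings' switch from bare byte arrays to wrapper objects. A hedged sketch of the shape assumed at these call sites (names illustrative, not the vendored definitions):
type
  KzgBlobSketch = object
    bytes: array[131072, byte]  # BYTES_PER_BLOB on mainnet
  KzgCommitmentSketch = object
    bytes: array[48, byte]      # compressed G1 point
Call sites wrap raw bytes on the way in, e.g. KzgBlob(bytes: raw), and unwrap results via `.bytes` when comparing against the test vectors.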
proc runVerifyKzgProofTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Verify KZG proof - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
commitment = fromHex[48](data["input"]["commitment"].getStr)
z = fromHex[32](data["input"]["z"].getStr)
@ -85,7 +83,10 @@ proc runVerifyKzgProofTest(suiteName, suitePath, path: string) =
if commitment.isNone or z.isNone or y.isNone or proof.isNone:
check output.kind == JNull
else:
let v = verifyProof(commitment.get, z.get, y.get, proof.get)
let v = verifyProof(
KzgCommitment(bytes: commitment.get),
KzgBytes32(bytes: z.get), KzgBytes32(bytes: y.get),
KzgBytes48(bytes: proof.get))
check:
if v.isErr:
output.kind == JNull
@ -96,7 +97,7 @@ proc runVerifyBlobKzgProofTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Verify blob KZG proof - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
blob = fromHex[131072](data["input"]["blob"].getStr)
commitment = fromHex[48](data["input"]["commitment"].getStr)
@ -110,7 +111,10 @@ proc runVerifyBlobKzgProofTest(suiteName, suitePath, path: string) =
if blob.isNone or commitment.isNone or proof.isNone:
check output.kind == JNull
else:
let v = verifyBlobKzgProof(blob.get, commitment.get, proof.get)
let v = verifyBlobKzgProof(
KzgBlob(bytes: blob.get),
KzgBytes48(bytes: commitment.get),
KzgBytes48(bytes: proof.get))
check:
if v.isErr:
output.kind == JNull
@ -121,7 +125,7 @@ proc runVerifyBlobKzgProofBatchTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Verify blob KZG proof batch - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
blobs = data["input"]["blobs"].mapIt(fromHex[131072](it.getStr))
commitments = data["input"]["commitments"].mapIt(fromHex[48](it.getStr))
@ -137,7 +141,9 @@ proc runVerifyBlobKzgProofBatchTest(suiteName, suitePath, path: string) =
check output.kind == JNull
else:
let v = verifyBlobKzgProofBatch(
blobs.mapIt(it.get), commitments.mapIt(it.get), proofs.mapIt(it.get))
blobs.mapIt(KzgBlob(bytes: it.get)),
commitments.mapIt(KzgCommitment(bytes: it.get)),
proofs.mapIt(KzgProof(bytes: it.get)))
check:
if v.isErr:
output.kind == JNull
@ -148,7 +154,7 @@ proc runComputeKzgProofTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Compute KZG proof - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
blob = fromHex[131072](data["input"]["blob"].getStr)
z = fromHex[32](data["input"]["z"].getStr)
@ -160,7 +166,8 @@ proc runComputeKzgProofTest(suiteName, suitePath, path: string) =
if blob.isNone or z.isNone:
check output.kind == JNull
else:
let p = computeKzgProof(blob.get, z.get)
let p = computeKzgProof(
KzgBlob(bytes: blob.get), KzgBytes32(bytes: z.get))
if p.isErr:
check output.kind == JNull
else:
@ -168,14 +175,14 @@ proc runComputeKzgProofTest(suiteName, suitePath, path: string) =
proof = fromHex[48](output[0].getStr)
y = fromHex[32](output[1].getStr)
check:
p.get.proof == proof.get
p.get.y == y.get
p.get.proof.bytes == proof.get
p.get.y.bytes == y.get
proc runComputeBlobKzgProofTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Compute blob KZG proof - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
blob = fromHex[131072](data["input"]["blob"].getStr)
commitment = fromHex[48](data["input"]["commitment"].getStr)
@ -187,39 +194,18 @@ proc runComputeBlobKzgProofTest(suiteName, suitePath, path: string) =
if blob.isNone or commitment.isNone:
check output.kind == JNull
else:
let p = computeBlobKzgProof(blob.get, commitment.get)
let p = computeBlobKzgProof(
KzgBlob(bytes: blob.get), KzgBytes48(bytes: commitment.get))
if p.isErr:
check output.kind == JNull
else:
check p.get == fromHex[48](output.getStr).get
check p.get.bytes == fromHex[48](output.getStr).get
proc runComputeCellsTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Compute Cells - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
blob = fromHex[131072](data["input"]["blob"].getStr)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/tests/formats/kzg_7594/verify_cell_kzg_proof.md#condition
# If the blob is invalid (e.g. incorrect length or one of the 32-byte
# blocks does not represent a BLS field element) it should error, i.e.
# the output should be `null`.
if blob.isNone:
check output.kind == JNull
else:
let p = computeCells(blob.get)
if p.isErr:
check output.kind == JNull
else:
for i in 0..<len(p.get):
check p.get[i] == fromHex[2048](output[i].getStr).get
proc runComputeCellsAndProofsTest(suiteName, suitePath, path: string) =
proc runComputeCellsAndKzgProofsTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Compute Cells And Proofs - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
blob = fromHex[131072](data["input"]["blob"].getStr)
@ -230,47 +216,23 @@ proc runComputeCellsAndProofsTest(suiteName, suitePath, path: string) =
if blob.isNone:
check output.kind == JNull
else:
let p = computeCellsAndProofs(blob.get)
if p.isErr:
let p = newClone computeCellsAndKzgProofs(KzgBlob(bytes: blob.get))
if p[].isErr:
check output.kind == JNull
else:
let p_val = p[].get
for i in 0..<CELLS_PER_EXT_BLOB:
check p.get.cells[i] == fromHex[2048](output[0][i].getStr).get
check p.get.proofs[i] == fromHex[48](output[1][i].getStr).get
proc runVerifyCellKzgProofsTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Verify Cell Kzg Proof - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
commitment = fromHex[48](data["input"]["commitment"].getStr)
proof = fromHex[48](data["input"]["proof"].getStr)
cell = fromHex[2048](data["input"]["cell"].getStr)
cell_id = toUInt64(data["input"]["cell_id"].getInt)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/tests/formats/kzg_7594/verify_cell_kzg_proof.md#condition
# If the blob is invalid (e.g. incorrect length or one of the 32-byte
# blocks does not represent a BLS field element) it should error, i.e.
# the output should be `null`.
if commitment.isNone or proof.isNone or cell.isNone or cell_id.isNone:
check output.kind == JNull
else:
let p = verifyCellKzgProof(commitment.get, cell_id.get, cell.get, proof.get)
if p.isErr:
check output.kind == JNull
else:
check p.get == output.getBool
check p_val.cells[i].bytes == fromHex[2048](output[0][i].getStr).get
check p_val.proofs[i].bytes == fromHex[48](output[1][i].getStr).get
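The `newClone` wrapping above is deliberate: the cells for one extended blob alone come to CELLS_PER_EXT_BLOB * BYTES_PER_CELL = 128 * 2048 bytes = 256 KiB, which is better kept off the Nim stack. A hedged sketch of the pattern (`someBlob` hypothetical):
let p = newClone computeCellsAndKzgProofs(someBlob)  # result lands on the heap
if p[].isOk:
  discard p[].get.cells  # access the large payload through the ref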
proc runVerifyCellKzgProofBatchTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Verify Cell Kzg Proof Batch - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
row_commitments = data["input"]["row_commitments"].mapIt(fromHex[48](it.getStr))
row_indices = data["input"]["row_indices"].mapIt(toUInt64(it.getInt))
column_indices = data["input"]["column_indices"].mapIt(toUInt64(it.getInt))
commitments = data["input"]["row_commitments"].mapIt(fromHex[48](it.getStr))
cell_indices = data["input"]["cell_indices"].mapIt(toUInt64(it.getInt))
cells = data["input"]["cells"].mapIt(fromHex[2048](it.getStr))
proofs = data["input"]["proofs"].mapIt(fromHex[48](it.getStr))
@ -278,46 +240,50 @@ proc runVerifyCellKzgProofBatchTest(suiteName, suitePath, path: string) =
# If the blob is invalid (e.g. incorrect length or one of the 32-byte
# blocks does not represent a BLS field element) it should error, i.e.
# the output should be `null`.
if row_commitments.anyIt(it.isNone) or row_indices.anyIt(it.isNone) or
column_indices.anyIt(it.isNone) or proofs.anyIt(it.isNone) or
if commitments.anyIt(it.isNone) or
cell_indices.anyIt(it.isNone) or
proofs.anyIt(it.isNone) or
cells.anyIt(it.isNone):
check output.kind == JNull
else:
let v = verifyCellKzgProofBatch(
row_commitments.mapIt(it.get),
row_indices.mapIt(it.get),
column_indices.mapIt(it.get),
cells.mapIt(it.get),
proofs.mapIt(it.get)
let v = newClone verifyCellKzgProofBatch(
commitments.mapIt(KzgCommitment(bytes: it.get)),
cell_indices.mapIt(it.get),
cells.mapIt(KzgCell(bytes: it.get)),
proofs.mapIt(KzgBytes48(bytes: it.get))
)
check:
if v.isErr:
if v[].isErr:
output.kind == JNull
else:
v.get == output.getBool
v[].get == output.getBool
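Alpha3 flattens the batch-verification interface: rather than row commitments addressed by separate (row, column) index lists, each cell now carries its own commitment and a single flat cell index. A hedged call-shape sketch (all values hypothetical):
let ok = verifyCellKzgProofBatch(
  @[commitmentA, commitmentA],  # one commitment per cell; repeats allowed
  @[0'u64, 1'u64],              # flat cell indices within the extended blob
  @[cellA0, cellA1],            # the cells under verification
  @[proofA0, proofA1])          # one KZG proof per cell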
proc runRecoverAllCellsTest(suiteName, suitePath, path: string) =
proc runRecoverCellsAndKzgProofsTest(suiteName, suitePath, path: string) =
let relativePathComponent = path.relativeTestPathComponent(suitePath)
test "KZG - Recover All Cells - " & relativePathComponent:
test "KZG - Recover Cells And Kzg Proofs - " & relativePathComponent:
let
data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
output = data["output"]
cell_ids = data["input"]["cell_ids"].mapIt(toUInt64(it.getInt))
cell_ids = data["input"]["cell_indices"].mapIt(toUInt64(it.getInt))
cells = data["input"]["cells"].mapIt(fromHex[2048](it.getStr))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/tests/formats/kzg_7594/recover_all_cells.md#condition
# If the blob is invalid (e.g. incorrect length or one of the 32-byte
# blocks does not represent a BLS field element) it should error, i.e.
# the output should be `null`.
if cell_ids.anyIt(it.isNone) or cells.anyIt(it.isNone):
if cell_ids.anyIt(it.isNone) or
cells.anyIt(it.isNone):
check output.kind == JNull
else:
let v = recoverAllCells(cell_ids.mapIt(it.get), cells.mapIt(it.get))
if v.isErr:
let v = newClone recoverCellsAndKzgProofs(
cell_ids.mapIt(it.get),
cells.mapIt(KzgCell(bytes: it.get)))
if v[].isErr:
check output.kind == JNull
else:
let val = v[].get
for i in 0..<CELLS_PER_EXT_BLOB:
check v.get[i] == fromHex[2048](output[i].getStr).get
check val.cells[i].bytes == fromHex[2048](output[0][i].getStr).get
check val.proofs[i].bytes == fromHex[48](output[1][i].getStr).get
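recoverCellsAndKzgProofs supersedes recoverAllCells: given at least half of a blob's 128 extended cells, it returns the complete cell set together with recomputed proofs. A hedged sketch (inputs hypothetical):
# Any 64 or more of the 128 cells suffice for recovery.
let recovered = recoverCellsAndKzgProofs(availableIndices, availableCells)
if recovered.isOk:
  doAssert recovered.get.cells.len == 128   # CELLS_PER_EXT_BLOB
  doAssert recovered.get.proofs.len == 128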
from std/algorithm import sorted
@ -371,32 +337,24 @@ suite suiteName:
# TODO also check that the only direct subdirectory of each is kzg-mainnet
doAssert sorted(mapIt(
toSeq(walkDir(suitePath, relative = true, checkDir = true)), it.path)) ==
["compute_cells", "compute_cells_and_kzg_proofs", "recover_all_cells",
["compute_cells_and_kzg_proofs", "recover_cells_and_kzg_proofs",
"verify_cell_kzg_proof", "verify_cell_kzg_proof_batch"]
block:
let testsDir = suitePath/"compute_cells"/"kzg-mainnet"
for kind, path in walkDir(testsDir, relative = true, checkDir = true):
runComputeCellsTest(suiteName, testsDir, testsDir/path)
block:
let testsDir = suitePath/"compute_cells_and_kzg_proofs"/"kzg-mainnet"
for kind, path in walkDir(testsDir, relative = true, checkDir = true):
runComputeCellsAndProofsTest(suiteName, testsDir, testsDir/path)
runComputeCellsAndKzgProofsTest(suiteName, testsDir, testsDir/path)
block:
let testsDir = suitePath/"recover_all_cells"/"kzg-mainnet"
for kind, path in walkDir(testsDir, relative = true, checkDir = true):
runRecoverAllCellsTest(suiteName, testsDir, testsDir/path)
block:
let testsDir = suitePath/"verify_cell_kzg_proof"/"kzg-mainnet"
for kind, path in walkDir(testsDir, relative = true, checkDir = true):
runVerifyCellKzgProofsTest(suiteName, testsDir, testsDir/path)
# TODO: disabled until EF releases new test vectors
# block:
# let testsDir = suitePath/"recover_cells_and_kzg_proofs"/"kzg-mainnet"
# for kind, path in walkDir(testsDir, relative = true, checkDir = true):
# runRecoverCellsAndKzgProofsTest(suiteName, testsDir, testsDir/path)
block:
let testsDir = suitePath/"verify_cell_kzg_proof_batch"/"kzg-mainnet"
for kind, path in walkDir(testsDir, relative = true, checkDir = true):
runVerifyCellKzgProofBatchTest(suiteName, testsDir, testsDir/path)
# block:
# let testsDir = suitePath/"verify_cell_kzg_proof_batch"/"kzg-mainnet"
# for kind, path in walkDir(testsDir, relative = true, checkDir = true):
# runVerifyCellKzgProofBatchTest(suiteName, testsDir, testsDir/path)
doAssert Kzg.freeTrustedSetup().isOk


@ -117,4 +117,4 @@ suite "Block quarantine":
quarantine.addOrphan(Slot 0, b2).isOk
b0.root in quarantine.missing
b1.root notin quarantine.missing
b2.root notin quarantine.missing


@ -36,6 +36,7 @@ proc generateNode(rng: ref HmacDrbgContext, port: Port,
# TODO: Add tests with a syncnets preference
const noSyncnetsPreference = SyncnetBits()
const noCscnetsPreference = CscBits()
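# Hedged note: PeerDAS adds a custody subnet count (csc) bitfield to node
# discovery; an empty CscBits() mirrors noSyncnetsPreference above and
# expresses no preference when querying for peers.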
procSuite "Eth2 specific discovery tests":
let
@ -67,7 +68,7 @@ procSuite "Eth2 specific discovery tests":
attnetsSelected.setBit(34)
let discovered = await node1.queryRandom(
enrForkId, attnetsSelected, noSyncnetsPreference, 1)
enrForkId, attnetsSelected, noSyncnetsPreference, noCscnetsPreference, 1)
check discovered.len == 1
await node1.closeWait()
@ -105,7 +106,7 @@ procSuite "Eth2 specific discovery tests":
attnetsSelected.setBit(42)
let discovered = await node1.queryRandom(
enrForkId, attnetsSelected, noSyncnetsPreference, 1)
enrForkId, attnetsSelected, noSyncnetsPreference, noCscnetsPreference, 1)
check discovered.len == 1
await node1.closeWait()
@ -133,7 +134,7 @@ procSuite "Eth2 specific discovery tests":
block:
let discovered = await node1.queryRandom(
enrForkId, attnetsSelected, noSyncnetsPreference, 1)
enrForkId, attnetsSelected, noSyncnetsPreference, noCscnetsPreference, 1)
check discovered.len == 0
block:
@ -148,7 +149,7 @@ procSuite "Eth2 specific discovery tests":
discard node1.addNode(nodes[][0])
let discovered = await node1.queryRandom(
enrForkId, attnetsSelected, noSyncnetsPreference, 1)
enrForkId, attnetsSelected, noSyncnetsPreference, noCscnetsPreference, 1)
check discovered.len == 1
await node1.closeWait()


@ -31,19 +31,19 @@ block:
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
doAssert Kzg.loadTrustedSetup(
sourceDir &
"/../vendor/nim-kzg4844/kzg4844/csources/src/trusted_setup.txt").isOk
"/../vendor/nim-kzg4844/kzg4844/csources/src/trusted_setup.txt", 0).isOk
const MAX_TOP_BYTE = 114
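# Hedged note: the BLS12-381 scalar modulus has top byte 0x73 (115), so
# capping the leading byte of each big-endian 32-byte chunk at 114 keeps
# every sampled field element canonical (below the modulus).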
proc createSampleKzgBlobs(n: int): Result[seq[KzgBlob], cstring] =
var blob: KzgBlob
var blob: array[BYTES_PER_BLOB, byte]
var blobs: seq[KzgBlob]
for i in 0..<n:
discard urandom(blob)
for i in 0..<blob.len:
for i in 0..<BYTES_PER_BLOB.int:
if blob[i] > MAX_TOP_BYTE and i %% kzg_abi.BYTES_PER_FIELD_ELEMENT == 0:
blob[i] = MAX_TOP_BYTE
blobs.add(blob)
blobs.add(KzgBlob(bytes: blob))
ok(blobs)
@ -63,7 +63,7 @@ suite "EIP-7594 Unit Tests":
doAssert extended_matrix.get.len == kzg_abi.CELLS_PER_EXT_BLOB * blob_count
let
chunkSize = kzg_abi.CELLS_PER_EXT_BLOB
rows = chunks(extended_matrix.get.asSeq, kzg_abi.CELLS_PER_EXT_BLOB)
rows = chunks(extended_matrix.get, kzg_abi.CELLS_PER_EXT_BLOB)
for row in rows:
doAssert len(row) == kzg_abi.CELLS_PER_EXT_BLOB
testComputeExtendedMatrix()
@ -82,13 +82,18 @@ suite "EIP-7594 Unit Tests":
extended_matrix = compute_extended_matrix(blobs.get)
# Construct a matrix with some entries missing
var partial_matrix: ExtendedMatrix
for blob_entries in chunks(extended_matrix.get.asSeq, kzg_abi.CELLS_PER_EXT_BLOB):
var partial_matrix: seq[MatrixEntry]
for blob_entries in chunks(extended_matrix.get, kzg_abi.CELLS_PER_EXT_BLOB):
var blb_entry = blob_entries
rng.shuffle(blb_entry)
discard partial_matrix.add(blob_entries[0..N_SAMPLES-1])
partial_matrix.add(blob_entries[0..N_SAMPLES-1])
# TODO: refactor on spec change
# Given the partial matrix, recover the missing entries
let recovered_matrix = recover_matrix(partial_matrix, blob_count)
# Ensure that the recovered matrix matches the original matrix
doAssert recovered_matrix == extended_matrix, "Both matrices don't match!"
testRecoverMatrix()
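The seq[MatrixEntry] representation used above matches the alpha3 das-core container, which tags every cell with its proof and coordinates. A hedged sketch of that shape (field names per the spec, Nim casing assumed):
type MatrixEntrySketch = object
  cell: KzgCell         # 2048 bytes of extended-blob data
  kzgProof: KzgProof    # proof for this one cell
  columnIndex: uint64   # column within the extended matrix
  rowIndex: uint64      # blob (row) the cell belongs to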
suite "EIP-7594 Sampling Tests":
test "EIP7594: Extended Sample Count":


@ -213,7 +213,7 @@ from stew/byteutils import hexToByteArray
func fromHex(T: typedesc[KzgCommitment], s: string): T {.
raises: [ValueError].} =
var res: T
hexToByteArray(s, res)
hexToByteArray(s, res.bytes)
res
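A hedged usage sketch of the helper above (hex value illustrative; `repeat` is from std/strutils):
let c = KzgCommitment.fromHex("c0" & repeat("00", 47))
# 48 bytes; a 0xc0 prefix followed by zeroes encodes the compressed
# point at infinity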
suite "REST JSON encoding and decoding":

vendor/nim-blscurve vendored

@ -1 +1 @@
Subproject commit d091a579a2e7c4668140e675a6fb2c78b8c6dc57
Subproject commit 9c6e80c6109133c0af3025654f5a8820282cff05

@ -1 +1 @@
Subproject commit 54675b5f1fd8156a508e75991693df57a281642e
Subproject commit fc7a45a731736248b96ad5827a8356c0e14d3b8c

@ -1 +1 @@
Subproject commit 11b9d952a80ec87e2443405a6a5382f9daac51f8
Subproject commit dbc4a95df60238157dcf286f6125188cb72f37c1

@ -1 +1 @@
Subproject commit be57dbc902d36f37540897e98c69aa80f868cb45
Subproject commit 98496aa24d9364d1652e531f5f346de9b7cb3e15

vendor/nim-kzg4844 vendored

@ -1 +1 @@
Subproject commit d915948dd58c2ad23b551cd408066046cf5e46db
Subproject commit e576c9bab93c425c6c8c0f3603d98a149599d942

@ -1 +1 @@
Subproject commit 54bf8aa2913e8550463779beeae4b469941e039c
Subproject commit 9c7dc8c58ff9c3dfb11c2d333171b47659ed824c

@ -1 +1 @@
Subproject commit 3db16f8ece5d0eba310c8f1ed812c6ff5a21a34a
Subproject commit 8c8959d84c12ecda6ea14c67bd68675b1936f8cf